xref: /linux/fs/bcachefs/recovery.c (revision afcefc58fdfd687e3a9a9bef0be5846b96f710b7)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "alloc_background.h"
5 #include "bkey_buf.h"
6 #include "btree_journal_iter.h"
7 #include "btree_node_scan.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_io.h"
11 #include "buckets.h"
12 #include "dirent.h"
13 #include "disk_accounting.h"
14 #include "errcode.h"
15 #include "error.h"
16 #include "journal_io.h"
17 #include "journal_reclaim.h"
18 #include "journal_seq_blacklist.h"
19 #include "logged_ops.h"
20 #include "move.h"
21 #include "movinggc.h"
22 #include "namei.h"
23 #include "quota.h"
24 #include "rebalance.h"
25 #include "recovery.h"
26 #include "recovery_passes.h"
27 #include "replicas.h"
28 #include "sb-clean.h"
29 #include "sb-downgrade.h"
30 #include "snapshot.h"
31 #include "super-io.h"
32 
33 #include <linux/sort.h>
34 #include <linux/stat.h>
35 
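/*
 * Note in the superblock that @btree has lost data, silence the fsck errors
 * that will result, and schedule the recovery passes needed to repair it:
 */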
36 int bch2_btree_lost_data(struct bch_fs *c,
37 			 struct printbuf *msg,
38 			 enum btree_id btree)
39 {
40 	u64 b = BIT_ULL(btree);
41 	int ret = 0;
42 
43 	mutex_lock(&c->sb_lock);
44 	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
45 
46 	if (!(c->sb.btrees_lost_data & b)) {
47 		prt_printf(msg, "flagging btree ");
48 		bch2_btree_id_to_text(msg, btree);
49 		prt_printf(msg, " lost data\n");
50 
51 		ext->btrees_lost_data |= cpu_to_le64(b);
52 	}
53 
54 	/* Once we have runtime self-healing for topology errors we won't need this: */
55 	ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret;
56 
57 	/* Btree node accounting will be off: */
58 	__set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent);
59 	ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_allocations, 0) ?: ret;
60 
61 #ifdef CONFIG_BCACHEFS_DEBUG
62 	/*
63 	 * These are much more minor, and don't need to be corrected right away,
64 	 * but in debug mode we want the next fsck run to be clean:
65 	 */
66 	ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_lrus, 0) ?: ret;
67 	ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_backpointers_to_extents, 0) ?: ret;
68 #endif
69 
70 	switch (btree) {
71 	case BTREE_ID_alloc:
72 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret;
73 
74 		__set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
75 		__set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
76 		__set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
77 		__set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
78 		__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
79 		__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
80 		goto out;
81 	case BTREE_ID_backpointers:
82 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_btree_backpointers, 0) ?: ret;
83 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_extents_to_backpointers, 0) ?: ret;
84 		goto out;
85 	case BTREE_ID_need_discard:
86 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret;
87 		goto out;
88 	case BTREE_ID_freespace:
89 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret;
90 		goto out;
91 	case BTREE_ID_bucket_gens:
92 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret;
93 		goto out;
94 	case BTREE_ID_lru:
95 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret;
96 		goto out;
97 	case BTREE_ID_accounting:
98 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_allocations, 0) ?: ret;
99 		goto out;
100 	case BTREE_ID_snapshots:
101 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_reconstruct_snapshots, 0) ?: ret;
102 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret;
103 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0) ?: ret;
104 		goto out;
105 	default:
106 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret;
107 		ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0) ?: ret;
108 		goto out;
109 	}
110 out:
111 	bch2_write_super(c);
112 	mutex_unlock(&c->sb_lock);
113 
114 	return ret;
115 }
116 
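/*
 * Mark a btree root as dead and drop any journal keys belonging to that
 * btree:
 */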
117 static void kill_btree(struct bch_fs *c, enum btree_id btree)
118 {
119 	bch2_btree_id_root(c, btree)->alive = false;
120 	bch2_shoot_down_journal_keys(c, btree, 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
121 }
122 
123 /* for -o reconstruct_alloc: */
124 void bch2_reconstruct_alloc(struct bch_fs *c)
125 {
126 	mutex_lock(&c->sb_lock);
127 	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
128 
129 	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required);
130 	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_info, ext->recovery_passes_required);
131 	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_lrus, ext->recovery_passes_required);
132 	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_extents_to_backpointers, ext->recovery_passes_required);
133 	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_to_lru_refs, ext->recovery_passes_required);
134 
135 	__set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_alloc_key, ext->errors_silent);
136 	__set_bit_le64(BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, ext->errors_silent);
137 	__set_bit_le64(BCH_FSCK_ERR_stale_dirty_ptr, ext->errors_silent);
138 
139 	__set_bit_le64(BCH_FSCK_ERR_dev_usage_buckets_wrong, ext->errors_silent);
140 	__set_bit_le64(BCH_FSCK_ERR_dev_usage_sectors_wrong, ext->errors_silent);
141 	__set_bit_le64(BCH_FSCK_ERR_dev_usage_fragmented_wrong, ext->errors_silent);
142 
143 	__set_bit_le64(BCH_FSCK_ERR_fs_usage_btree_wrong, ext->errors_silent);
144 	__set_bit_le64(BCH_FSCK_ERR_fs_usage_cached_wrong, ext->errors_silent);
145 	__set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent);
146 	__set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent);
147 
148 	__set_bit_le64(BCH_FSCK_ERR_alloc_key_to_missing_lru_entry, ext->errors_silent);
149 
150 	__set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
151 	__set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
152 	__set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
153 	__set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
154 	__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
155 	__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
156 	__set_bit_le64(BCH_FSCK_ERR_need_discard_key_wrong, ext->errors_silent);
157 	__set_bit_le64(BCH_FSCK_ERR_freespace_key_wrong, ext->errors_silent);
158 	__set_bit_le64(BCH_FSCK_ERR_bucket_gens_key_wrong, ext->errors_silent);
159 	__set_bit_le64(BCH_FSCK_ERR_freespace_hole_missing, ext->errors_silent);
160 	__set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_backpointer, ext->errors_silent);
161 	__set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent);
162 	__set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent);
163 	c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
164 
165 	c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
166 
167 	c->disk_sb.sb->features[0] &= ~cpu_to_le64(BIT_ULL(BCH_FEATURE_no_alloc_info));
168 
169 	bch2_write_super(c);
170 	mutex_unlock(&c->sb_lock);
171 
172 	for (unsigned i = 0; i < btree_id_nr_alive(c); i++)
173 		if (btree_id_is_alloc(i))
174 			kill_btree(c, i);
175 }
176 
177 /*
178  * Btree node pointers have a field to stash a pointer to the in memory btree
179  * node; we need to zero out this field when reading in btree nodes, or when
180  * reading in keys from the journal:
181  */
182 static void zero_out_btree_mem_ptr(struct journal_keys *keys)
183 {
184 	darray_for_each(*keys, i)
185 		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
186 			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
187 }
188 
189 /* journal replay: */
190 
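/*
 * Advance the journal replay position to @seq, releasing journal pins on the
 * entries we've passed:
 */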
191 static void replay_now_at(struct journal *j, u64 seq)
192 {
193 	BUG_ON(seq < j->replay_journal_seq);
194 
195 	seq = min(seq, j->replay_journal_seq_end);
196 
197 	while (j->replay_journal_seq < seq)
198 		bch2_journal_pin_put(j, j->replay_journal_seq++);
199 }
200 
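/*
 * Accounting keys in the journal are deltas: apply one to the existing key in
 * the btree, unless that key's version shows the delta was already applied:
 */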
201 static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
202 					      struct journal_key *k)
203 {
204 	struct btree_iter iter;
205 	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
206 				  BTREE_MAX_DEPTH, k->level,
207 				  BTREE_ITER_intent);
208 	int ret = bch2_btree_iter_traverse(trans, &iter);
209 	if (ret)
210 		goto out;
211 
212 	struct bkey u;
213 	struct bkey_s_c old = bch2_btree_path_peek_slot(btree_iter_path(trans, &iter), &u);
214 
215 	/* Has this delta already been applied to the btree? */
216 	if (bversion_cmp(old.k->bversion, k->k->k.bversion) >= 0) {
217 		ret = 0;
218 		goto out;
219 	}
220 
221 	struct bkey_i *new = k->k;
222 	if (old.k->type == KEY_TYPE_accounting) {
223 		new = bch2_bkey_make_mut_noupdate(trans, bkey_i_to_s_c(k->k));
224 		ret = PTR_ERR_OR_ZERO(new);
225 		if (ret)
226 			goto out;
227 
228 		bch2_accounting_accumulate(bkey_i_to_accounting(new),
229 					   bkey_s_c_to_accounting(old));
230 	}
231 
232 	trans->journal_res.seq = k->journal_seq;
233 
234 	ret = bch2_trans_update(trans, &iter, new, BTREE_TRIGGER_norun);
235 out:
236 	bch2_trans_iter_exit(trans, &iter);
237 	return ret;
238 }
239 
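/*
 * Replay a single journal key at the btree position and level it was
 * originally written at, unless it has since been overwritten:
 */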
240 static int bch2_journal_replay_key(struct btree_trans *trans,
241 				   struct journal_key *k)
242 {
243 	struct btree_iter iter;
244 	unsigned iter_flags =
245 		BTREE_ITER_intent|
246 		BTREE_ITER_not_extents;
247 	unsigned update_flags = BTREE_TRIGGER_norun;
248 	int ret;
249 
250 	if (k->overwritten)
251 		return 0;
252 
253 	trans->journal_res.seq = k->journal_seq;
254 
255 	/*
256 	 * BTREE_UPDATE_key_cache_reclaim disables key cache lookup/update to
257 	 * keep the key cache coherent with the underlying btree. Nothing
258 	 * besides the allocator is doing updates yet so we don't need key cache
259 	 * coherency for non-alloc btrees, and key cache fills for snapshots
260 	 * btrees use BTREE_ITER_filter_snapshots, which isn't available until
261 	 * the snapshots recovery pass runs.
262 	 */
263 	if (!k->level && k->btree_id == BTREE_ID_alloc)
264 		iter_flags |= BTREE_ITER_cached;
265 	else
266 		update_flags |= BTREE_UPDATE_key_cache_reclaim;
267 
268 	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
269 				  BTREE_MAX_DEPTH, k->level,
270 				  iter_flags);
271 	ret = bch2_btree_iter_traverse(trans, &iter);
272 	if (ret)
273 		goto out;
274 
275 	struct btree_path *path = btree_iter_path(trans, &iter);
276 	if (unlikely(!btree_path_node(path, k->level) &&
277 		     !k->allocated)) {
278 		struct bch_fs *c = trans->c;
279 
280 		if (!(c->recovery.passes_complete & (BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes)|
281 						     BIT_ULL(BCH_RECOVERY_PASS_check_topology)))) {
282 			bch_err(c, "have key in journal replay for btree depth that does not exist, confused");
283 			ret = -EINVAL;
284 		}
285 #if 0
286 		bch2_trans_iter_exit(trans, &iter);
287 		bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
288 					  BTREE_MAX_DEPTH, 0, iter_flags);
289 		ret =   bch2_btree_iter_traverse(trans, &iter) ?:
290 			bch2_btree_increase_depth(trans, iter.path, 0) ?:
291 			-BCH_ERR_transaction_restart_nested;
292 #endif
293 		k->overwritten = true;
294 		goto out;
295 	}
296 
297 	/* Must be checked with btree locked: */
298 	if (k->overwritten)
299 		goto out;
300 
301 	if (k->k->k.type == KEY_TYPE_accounting) {
302 		struct bkey_i *n = bch2_trans_subbuf_alloc(trans, &trans->accounting, k->k->k.u64s);
303 		ret = PTR_ERR_OR_ZERO(n);
304 		if (ret)
305 			goto out;
306 
307 		bkey_copy(n, k->k);
308 		goto out;
309 	}
310 
311 	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
312 out:
313 	bch2_trans_iter_exit(trans, &iter);
314 	return ret;
315 }
316 
317 static int journal_sort_seq_cmp(const void *_l, const void *_r)
318 {
319 	const struct journal_key *l = *((const struct journal_key **)_l);
320 	const struct journal_key *r = *((const struct journal_key **)_r);
321 
322 	/*
323 	 * Map 0 to U64_MAX, so that keys with journal_seq == 0 come last
324 	 *
325 	 * journal_seq == 0 means that the key comes from early repair, and
326 	 * should be inserted last so as to avoid overflowing the journal
327 	 */
328 	return cmp_int(l->journal_seq - 1, r->journal_seq - 1);
329 }
330 
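/*
 * Replay keys from the journal into the btrees: accounting deltas first, then
 * everything else in btree order for locality, falling back to journal
 * sequence order (releasing journal pins as we go) for any keys that couldn't
 * be inserted without risking a journal deadlock:
 */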
331 int bch2_journal_replay(struct bch_fs *c)
332 {
333 	struct journal_keys *keys = &c->journal_keys;
334 	DARRAY(struct journal_key *) keys_sorted = { 0 };
335 	struct journal *j = &c->journal;
336 	u64 start_seq	= c->journal_replay_seq_start;
337 	u64 end_seq	= c->journal_replay_seq_end;
338 	struct btree_trans *trans = NULL;
339 	bool immediate_flush = false;
340 	int ret = 0;
341 
342 	if (keys->nr) {
343 		ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
344 					   keys->nr, start_seq, end_seq);
345 		if (ret)
346 			goto err;
347 	}
348 
349 	BUG_ON(!atomic_read(&keys->ref));
350 
351 	move_gap(keys, keys->nr);
352 	trans = bch2_trans_get(c);
353 
354 	/*
355 	 * Replay accounting keys first: we can't allow the write buffer to
356 	 * flush accounting keys until we're done
357 	 */
358 	darray_for_each(*keys, k) {
359 		if (!(k->k->k.type == KEY_TYPE_accounting && !k->allocated))
360 			continue;
361 
362 		cond_resched();
363 
364 		ret = commit_do(trans, NULL, NULL,
365 				BCH_TRANS_COMMIT_no_enospc|
366 				BCH_TRANS_COMMIT_journal_reclaim|
367 				BCH_TRANS_COMMIT_skip_accounting_apply|
368 				BCH_TRANS_COMMIT_no_journal_res|
369 				BCH_WATERMARK_reclaim,
370 			     bch2_journal_replay_accounting_key(trans, k));
371 		if (bch2_fs_fatal_err_on(ret, c, "error replaying accounting; %s", bch2_err_str(ret)))
372 			goto err;
373 
374 		k->overwritten = true;
375 	}
376 
377 	set_bit(BCH_FS_accounting_replay_done, &c->flags);
378 
379 	/*
380 	 * First, attempt to replay keys in sorted order. This is more
381 	 * efficient - better locality of btree access - but some might fail if
382 	 * that would cause a journal deadlock.
383 	 */
384 	darray_for_each(*keys, k) {
385 		cond_resched();
386 
387 		/*
388 		 * k->allocated means the key wasn't read in from the journal,
389 		 * rather it was from early repair code
390 		 */
391 		if (k->allocated)
392 			immediate_flush = true;
393 
394 		/* Skip fastpath if we're low on space in the journal */
395 		ret = c->journal.watermark ? -1 :
396 			commit_do(trans, NULL, NULL,
397 				  BCH_TRANS_COMMIT_no_enospc|
398 				  BCH_TRANS_COMMIT_journal_reclaim|
399 				  BCH_TRANS_COMMIT_skip_accounting_apply|
400 				  (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0),
401 			     bch2_journal_replay_key(trans, k));
402 		BUG_ON(!ret && !k->overwritten && k->k->k.type != KEY_TYPE_accounting);
403 		if (ret) {
404 			ret = darray_push(&keys_sorted, k);
405 			if (ret)
406 				goto err;
407 		}
408 	}
409 
410 	bch2_trans_unlock_long(trans);
411 	/*
412 	 * Now, replay any remaining keys in the order in which they appear in
413 	 * the journal, unpinning those journal entries as we go:
414 	 */
415 	sort_nonatomic(keys_sorted.data, keys_sorted.nr,
416 		       sizeof(keys_sorted.data[0]),
417 		       journal_sort_seq_cmp, NULL);
418 
419 	darray_for_each(keys_sorted, kp) {
420 		cond_resched();
421 
422 		struct journal_key *k = *kp;
423 
424 		if (k->journal_seq)
425 			replay_now_at(j, k->journal_seq);
426 		else
427 			replay_now_at(j, j->replay_journal_seq_end);
428 
429 		ret = commit_do(trans, NULL, NULL,
430 				BCH_TRANS_COMMIT_no_enospc|
431 				BCH_TRANS_COMMIT_skip_accounting_apply|
432 				(!k->allocated
433 				 ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim
434 				 : 0),
435 			     bch2_journal_replay_key(trans, k));
436 		if (ret) {
437 			struct printbuf buf = PRINTBUF;
438 			bch2_btree_id_level_to_text(&buf, k->btree_id, k->level);
439 			bch_err_msg(c, ret, "while replaying key at %s:", buf.buf);
440 			printbuf_exit(&buf);
441 			goto err;
442 		}
443 
444 		BUG_ON(k->btree_id != BTREE_ID_accounting && !k->overwritten);
445 	}
446 
447 	/*
448 	 * We need to put our btree_trans before calling flush_all_pins(), since
449 	 * that will use a btree_trans internally
450 	 */
451 	bch2_trans_put(trans);
452 	trans = NULL;
453 
454 	if (!c->opts.retain_recovery_info &&
455 	    c->recovery.pass_done >= BCH_RECOVERY_PASS_journal_replay)
456 		bch2_journal_keys_put_initial(c);
457 
458 	replay_now_at(j, j->replay_journal_seq_end);
459 	j->replay_journal_seq = 0;
460 
461 	bch2_journal_set_replay_done(j);
462 
463 	/* if we did any repair, flush it immediately */
464 	if (immediate_flush) {
465 		bch2_journal_flush_all_pins(&c->journal);
466 		ret = bch2_journal_meta(&c->journal);
467 	}
468 
469 	if (keys->nr)
470 		bch2_journal_log_msg(c, "journal replay finished");
471 err:
472 	if (trans)
473 		bch2_trans_put(trans);
474 	darray_exit(&keys_sorted);
475 	bch_err_fn(c, ret);
476 	return ret;
477 }
478 
479 /* journal replay early: */
480 
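/*
 * Apply journal entries that aren't btree updates - btree roots, usage and
 * clock info, blacklist entries - before journal replay proper:
 */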
481 static int journal_replay_entry_early(struct bch_fs *c,
482 				      struct jset_entry *entry)
483 {
484 	int ret = 0;
485 
486 	switch (entry->type) {
487 	case BCH_JSET_ENTRY_btree_root: {
488 
489 		if (unlikely(!entry->u64s))
490 			return 0;
491 
492 		if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX,
493 				c, invalid_btree_id,
494 				"invalid btree id %u (max %u)",
495 				entry->btree_id, BTREE_ID_NR_MAX))
496 			return 0;
497 
498 		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
499 			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
500 			if (ret)
501 				return ret;
502 		}
503 
504 		struct btree_root *r = bch2_btree_id_root(c, entry->btree_id);
505 
506 		r->level = entry->level;
507 		bkey_copy(&r->key, (struct bkey_i *) entry->start);
508 		r->error = 0;
509 		r->alive = true;
510 		break;
511 	}
512 	case BCH_JSET_ENTRY_usage: {
513 		struct jset_entry_usage *u =
514 			container_of(entry, struct jset_entry_usage, entry);
515 
516 		switch (entry->btree_id) {
517 		case BCH_FS_USAGE_key_version:
518 			atomic64_set(&c->key_version, le64_to_cpu(u->v));
519 			break;
520 		}
521 		break;
522 	}
523 	case BCH_JSET_ENTRY_blacklist: {
524 		struct jset_entry_blacklist *bl_entry =
525 			container_of(entry, struct jset_entry_blacklist, entry);
526 
527 		ret = bch2_journal_seq_blacklist_add(c,
528 				le64_to_cpu(bl_entry->seq),
529 				le64_to_cpu(bl_entry->seq) + 1);
530 		break;
531 	}
532 	case BCH_JSET_ENTRY_blacklist_v2: {
533 		struct jset_entry_blacklist_v2 *bl_entry =
534 			container_of(entry, struct jset_entry_blacklist_v2, entry);
535 
536 		ret = bch2_journal_seq_blacklist_add(c,
537 				le64_to_cpu(bl_entry->start),
538 				le64_to_cpu(bl_entry->end) + 1);
539 		break;
540 	}
541 	case BCH_JSET_ENTRY_clock: {
542 		struct jset_entry_clock *clock =
543 			container_of(entry, struct jset_entry_clock, entry);
544 
545 		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
546 	}
547 	}
548 fsck_err:
549 	return ret;
550 }
551 
552 static int journal_replay_early(struct bch_fs *c,
553 				struct bch_sb_field_clean *clean)
554 {
555 	if (clean) {
556 		for (struct jset_entry *entry = clean->start;
557 		     entry != vstruct_end(&clean->field);
558 		     entry = vstruct_next(entry)) {
559 			int ret = journal_replay_entry_early(c, entry);
560 			if (ret)
561 				return ret;
562 		}
563 	} else {
564 		struct genradix_iter iter;
565 		struct journal_replay *i, **_i;
566 
567 		genradix_for_each(&c->journal_entries, iter, _i) {
568 			i = *_i;
569 
570 			if (journal_replay_ignore(i))
571 				continue;
572 
573 			vstruct_for_each(&i->j, entry) {
574 				int ret = journal_replay_entry_early(c, entry);
575 				if (ret)
576 					return ret;
577 			}
578 		}
579 	}
580 
581 	return 0;
582 }
583 
584 /* sb clean section: */
585 
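/*
 * Read in the btree roots collected during early journal replay; errors on
 * alloc btrees are cleared (those btrees can be reconstructed), and btrees
 * with no root at all get an empty fake root:
 */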
586 static int read_btree_roots(struct bch_fs *c)
587 {
588 	struct printbuf buf = PRINTBUF;
589 	int ret = 0;
590 
591 	for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
592 		struct btree_root *r = bch2_btree_id_root(c, i);
593 
594 		if (!r->alive)
595 			continue;
596 
597 		printbuf_reset(&buf);
598 		bch2_btree_id_level_to_text(&buf, i, r->level);
599 
600 		if (mustfix_fsck_err_on((ret = r->error),
601 					c, btree_root_bkey_invalid,
602 					"invalid btree root %s",
603 					buf.buf) ||
604 		    mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)),
605 					c, btree_root_read_error,
606 					"error reading btree root %s: %s",
607 					buf.buf, bch2_err_str(ret))) {
608 			if (btree_id_is_alloc(i))
609 				r->error = 0;
610 		}
611 	}
612 
613 	for (unsigned i = 0; i < BTREE_ID_NR; i++) {
614 		struct btree_root *r = bch2_btree_id_root(c, i);
615 
616 		if (!r->b && !r->error) {
617 			r->alive = false;
618 			r->level = 0;
619 			bch2_btree_root_alloc_fake(c, i, 0);
620 		}
621 	}
622 fsck_err:
623 	printbuf_exit(&buf);
624 	return ret;
625 }
626 
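/*
 * Decide whether (and how far) to upgrade the on-disk metadata version,
 * honouring the version_upgrade option; returns true if the superblock was
 * modified and needs to be written:
 */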
627 static bool check_version_upgrade(struct bch_fs *c)
628 {
629 	unsigned latest_version	= bcachefs_metadata_version_current;
630 	unsigned latest_compatible = min(latest_version,
631 					 bch2_latest_compatible_version(c->sb.version));
632 	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
633 	unsigned new_version = 0;
634 	bool ret = false;
635 
636 	if (old_version < bcachefs_metadata_required_upgrade_below) {
637 		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
638 		    latest_compatible < bcachefs_metadata_required_upgrade_below)
639 			new_version = latest_version;
640 		else
641 			new_version = latest_compatible;
642 	} else {
643 		switch (c->opts.version_upgrade) {
644 		case BCH_VERSION_UPGRADE_compatible:
645 			new_version = latest_compatible;
646 			break;
647 		case BCH_VERSION_UPGRADE_incompatible:
648 			new_version = latest_version;
649 			break;
650 		case BCH_VERSION_UPGRADE_none:
651 			new_version = min(old_version, latest_version);
652 			break;
653 		}
654 	}
655 
656 	if (new_version > old_version) {
657 		struct printbuf buf = PRINTBUF;
658 
659 		if (old_version < bcachefs_metadata_required_upgrade_below)
660 			prt_str(&buf, "Version upgrade required:\n");
661 
662 		if (old_version != c->sb.version) {
663 			prt_str(&buf, "Version upgrade from ");
664 			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
665 			prt_str(&buf, " to ");
666 			bch2_version_to_text(&buf, c->sb.version);
667 			prt_str(&buf, " incomplete\n");
668 		}
669 
670 		prt_printf(&buf, "Doing %s version upgrade from ",
671 			   BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
672 			   ? "incompatible" : "compatible");
673 		bch2_version_to_text(&buf, old_version);
674 		prt_str(&buf, " to ");
675 		bch2_version_to_text(&buf, new_version);
676 		prt_newline(&buf);
677 
678 		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
679 		__le64 passes = ext->recovery_passes_required[0];
680 		bch2_sb_set_upgrade(c, old_version, new_version);
681 		passes = ext->recovery_passes_required[0] & ~passes;
682 
683 		if (passes) {
684 			prt_str(&buf, "  running recovery passes: ");
685 			prt_bitflags(&buf, bch2_recovery_passes,
686 				     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
687 		}
688 
689 		bch_notice(c, "%s", buf.buf);
690 		printbuf_exit(&buf);
691 
692 		ret = true;
693 	}
694 
695 	if (new_version > c->sb.version_incompat &&
696 	    c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible) {
697 		struct printbuf buf = PRINTBUF;
698 
699 		prt_str(&buf, "Now allowing incompatible features up to ");
700 		bch2_version_to_text(&buf, new_version);
701 		prt_str(&buf, ", previously allowed up to ");
702 		bch2_version_to_text(&buf, c->sb.version_incompat_allowed);
703 		prt_newline(&buf);
704 
705 		bch_notice(c, "%s", buf.buf);
706 		printbuf_exit(&buf);
707 
708 		ret = true;
709 	}
710 
711 	if (ret)
712 		bch2_sb_upgrade(c, new_version,
713 				c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible);
714 
715 	return ret;
716 }
717 
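/*
 * Main recovery path: read the superblock clean section and/or the journal,
 * decide which recovery passes are needed, start the journal, and run the
 * recovery passes (journal replay included) until the filesystem is ready to
 * go read-write:
 */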
718 int bch2_fs_recovery(struct bch_fs *c)
719 {
720 	struct bch_sb_field_clean *clean = NULL;
721 	struct jset *last_journal_entry = NULL;
722 	u64 last_seq = 0, blacklist_seq, journal_seq;
723 	int ret = 0;
724 
725 	if (c->sb.clean) {
726 		clean = bch2_read_superblock_clean(c);
727 		ret = PTR_ERR_OR_ZERO(clean);
728 		if (ret)
729 			goto err;
730 
731 		bch_info(c, "recovering from clean shutdown, journal seq %llu",
732 			 le64_to_cpu(clean->journal_seq));
733 	} else {
734 		bch_info(c, "recovering from unclean shutdown");
735 	}
736 
737 	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
738 		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
739 		ret = -EINVAL;
740 		goto err;
741 	}
742 
743 	if (!c->sb.clean &&
744 	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
745 		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
746 		ret = -EINVAL;
747 		goto err;
748 	}
749 
750 	if (c->opts.norecovery) {
751 		c->opts.recovery_pass_last = c->opts.recovery_pass_last
752 			? min(c->opts.recovery_pass_last, BCH_RECOVERY_PASS_snapshots_read)
753 			: BCH_RECOVERY_PASS_snapshots_read;
754 		c->opts.nochanges = true;
755 	}
756 
757 	if (c->opts.nochanges)
758 		c->opts.read_only = true;
759 
760 	mutex_lock(&c->sb_lock);
761 	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
762 	bool write_sb = false;
763 
764 	if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
765 		ext->recovery_passes_required[0] |=
766 			cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
767 		write_sb = true;
768 	}
769 
770 	u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
771 	if (sb_passes) {
772 		struct printbuf buf = PRINTBUF;
773 		prt_str(&buf, "superblock requires the following recovery passes to be run:\n  ");
774 		prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
775 		bch_info(c, "%s", buf.buf);
776 		printbuf_exit(&buf);
777 	}
778 
779 	if (bch2_check_version_downgrade(c)) {
780 		struct printbuf buf = PRINTBUF;
781 
782 		prt_str(&buf, "Version downgrade required:");
783 
784 		__le64 passes = ext->recovery_passes_required[0];
785 		bch2_sb_set_downgrade(c,
786 				      BCH_VERSION_MINOR(bcachefs_metadata_version_current),
787 				      BCH_VERSION_MINOR(c->sb.version));
788 		passes = ext->recovery_passes_required[0] & ~passes;
789 		if (passes) {
790 			prt_str(&buf, "\n  running recovery passes: ");
791 			prt_bitflags(&buf, bch2_recovery_passes,
792 				     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
793 		}
794 
795 		bch_info(c, "%s", buf.buf);
796 		printbuf_exit(&buf);
797 		write_sb = true;
798 	}
799 
800 	if (check_version_upgrade(c))
801 		write_sb = true;
802 
803 	c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
804 
805 	if (c->sb.version_upgrade_complete < bcachefs_metadata_version_autofix_errors) {
806 		SET_BCH_SB_ERROR_ACTION(c->disk_sb.sb, BCH_ON_ERROR_fix_safe);
807 		write_sb = true;
808 	}
809 
810 	if (write_sb)
811 		bch2_write_super(c);
812 	mutex_unlock(&c->sb_lock);
813 
814 	if (c->sb.clean)
815 		set_bit(BCH_FS_clean_recovery, &c->flags);
816 	if (c->opts.fsck)
817 		set_bit(BCH_FS_in_fsck, &c->flags);
818 	set_bit(BCH_FS_in_recovery, &c->flags);
819 
820 	ret = bch2_blacklist_table_initialize(c);
821 	if (ret) {
822 		bch_err(c, "error initializing blacklist table");
823 		goto err;
824 	}
825 
826 	bch2_journal_pos_from_member_info_resume(c);
827 
828 	if (!c->sb.clean || c->opts.retain_recovery_info) {
829 		struct genradix_iter iter;
830 		struct journal_replay **i;
831 
832 		bch_verbose(c, "starting journal read");
833 		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
834 		if (ret)
835 			goto err;
836 
837 		/*
838 		 * note: cmd_list_journal needs the blacklist table fully up to date so
839 		 * it can asterisk ignored journal entries:
840 		 */
841 		if (c->opts.read_journal_only)
842 			goto out;
843 
844 		genradix_for_each_reverse(&c->journal_entries, iter, i)
845 			if (!journal_replay_ignore(*i)) {
846 				last_journal_entry = &(*i)->j;
847 				break;
848 			}
849 
850 		if (mustfix_fsck_err_on(c->sb.clean &&
851 					last_journal_entry &&
852 					!journal_entry_empty(last_journal_entry), c,
853 				clean_but_journal_not_empty,
854 				"filesystem marked clean but journal not empty")) {
855 			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
856 			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
857 			c->sb.clean = false;
858 		}
859 
860 		if (!last_journal_entry) {
861 			fsck_err_on(!c->sb.clean, c,
862 				    dirty_but_no_journal_entries,
863 				    "no journal entries found");
864 			if (clean)
865 				goto use_clean;
866 
867 			genradix_for_each_reverse(&c->journal_entries, iter, i)
868 				if (*i) {
869 					last_journal_entry = &(*i)->j;
870 					(*i)->ignore_blacklisted = false;
871 					(*i)->ignore_not_dirty = false;
872 					/*
873 					 * This was probably a NO_FLUSH entry,
874 					 * so last_seq was garbage - but we know
875 					 * we're only using a single journal
876 					 * entry, set it here:
877 					 */
878 					(*i)->j.last_seq = (*i)->j.seq;
879 					break;
880 				}
881 		}
882 
883 		ret = bch2_journal_keys_sort(c);
884 		if (ret)
885 			goto err;
886 
887 		if (c->sb.clean && last_journal_entry) {
888 			ret = bch2_verify_superblock_clean(c, &clean,
889 						      last_journal_entry);
890 			if (ret)
891 				goto err;
892 		}
893 	} else {
894 use_clean:
895 		if (!clean) {
896 			bch_err(c, "no superblock clean section found");
897 			ret = bch_err_throw(c, fsck_repair_impossible);
898 			goto err;
899 
900 		}
901 		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
902 	}
903 
904 	c->journal_replay_seq_start	= last_seq;
905 	c->journal_replay_seq_end	= blacklist_seq - 1;
906 
907 	zero_out_btree_mem_ptr(&c->journal_keys);
908 
909 	ret = journal_replay_early(c, clean);
910 	if (ret)
911 		goto err;
912 
913 	ret = bch2_fs_resize_on_mount(c);
914 	if (ret) {
915 		up_write(&c->state_lock);
916 		goto err;
917 	}
918 
919 	if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) {
920 		bch_info(c, "filesystem is an unresized image file, mounting ro");
921 		c->opts.read_only = true;
922 	}
923 
924 	if (!c->opts.read_only &&
925 	    (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))) {
926 		bch_info(c, "mounting a filesystem with no alloc info read-write; will recreate");
927 
928 		bch2_reconstruct_alloc(c);
929 	} else if (c->opts.reconstruct_alloc) {
930 		bch2_journal_log_msg(c, "dropping alloc info");
931 		bch_info(c, "dropping and reconstructing all alloc info");
932 
933 		bch2_reconstruct_alloc(c);
934 	}
935 
936 	if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)) {
937 		/* We can't go RW to fix errors without alloc info */
938 		if (c->opts.fix_errors == FSCK_FIX_yes ||
939 		    c->opts.fix_errors == FSCK_FIX_ask)
940 			c->opts.fix_errors = FSCK_FIX_no;
941 		if (c->opts.errors == BCH_ON_ERROR_fix_safe)
942 			c->opts.errors = BCH_ON_ERROR_continue;
943 	}
944 
945 	/*
946 	 * After an unclean shutdown, skip the next few journal sequence
947 	 * numbers as they may have been referenced by btree writes that
948 	 * happened before their corresponding journal writes - those btree
949 	 * writes need to be ignored, by skipping and blacklisting the next few
950 	 * journal sequence numbers:
951 	 */
952 	if (!c->sb.clean)
953 		journal_seq += JOURNAL_BUF_NR * 4;
954 
955 	if (blacklist_seq != journal_seq) {
956 		ret =   bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
957 					     blacklist_seq, journal_seq) ?:
958 			bch2_journal_seq_blacklist_add(c,
959 					blacklist_seq, journal_seq);
960 		if (ret) {
961 			bch_err_msg(c, ret, "error creating new journal seq blacklist entry");
962 			goto err;
963 		}
964 	}
965 
966 	ret =   bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
967 				     journal_seq, last_seq, blacklist_seq - 1) ?:
968 		bch2_fs_journal_start(&c->journal, journal_seq);
969 	if (ret)
970 		goto err;
971 
972 	/*
973 	 * Skip past versions that might have possibly been used (as nonces),
974 	 * but hadn't had their pointers written:
975 	 */
976 	if (c->sb.encryption_type && !c->sb.clean)
977 		atomic64_add(1 << 16, &c->key_version);
978 
979 	ret = read_btree_roots(c);
980 	if (ret)
981 		goto err;
982 
983 	set_bit(BCH_FS_btree_running, &c->flags);
984 
985 	ret = bch2_sb_set_upgrade_extra(c);
986 	if (ret)
987 		goto err;
988 
989 	ret = bch2_run_recovery_passes(c, 0);
990 	if (ret)
991 		goto err;
992 
993 	/*
994 	 * Normally set by the appropriate recovery pass: when cleared, this
995 	 * indicates we're in early recovery and btree updates should be done by
996 	 * being applied to the journal replay keys. _Must_ be set before
997 	 * multithreaded use:
998 	 */
999 	set_bit(BCH_FS_may_go_rw, &c->flags);
1000 	clear_bit(BCH_FS_in_fsck, &c->flags);
1001 
1002 	/* in case we don't run journal replay, i.e. norecovery mode */
1003 	set_bit(BCH_FS_accounting_replay_done, &c->flags);
1004 
1005 	bch2_async_btree_node_rewrites_flush(c);
1006 
1007 	/* fsync if we fixed errors */
1008 	if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
1009 		bch2_journal_flush_all_pins(&c->journal);
1010 		bch2_journal_meta(&c->journal);
1011 	}
1012 
1013 	/* If we fixed errors, verify that fs is actually clean now: */
1014 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
1015 	    test_bit(BCH_FS_errors_fixed, &c->flags) &&
1016 	    !test_bit(BCH_FS_errors_not_fixed, &c->flags) &&
1017 	    !test_bit(BCH_FS_error, &c->flags)) {
1018 		bch2_flush_fsck_errs(c);
1019 
1020 		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
1021 		clear_bit(BCH_FS_errors_fixed, &c->flags);
1022 
1023 		ret = bch2_run_recovery_passes(c,
1024 			BCH_RECOVERY_PASS_check_alloc_info);
1025 		if (ret)
1026 			goto err;
1027 
1028 		if (test_bit(BCH_FS_errors_fixed, &c->flags) ||
1029 		    test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
1030 			bch_err(c, "Second fsck run was not clean");
1031 			set_bit(BCH_FS_errors_not_fixed, &c->flags);
1032 		}
1033 
1034 		set_bit(BCH_FS_errors_fixed, &c->flags);
1035 	}
1036 
1037 	if (enabled_qtypes(c)) {
1038 		bch_verbose(c, "reading quotas");
1039 		ret = bch2_fs_quota_read(c);
1040 		if (ret)
1041 			goto err;
1042 		bch_verbose(c, "quotas done");
1043 	}
1044 
1045 	mutex_lock(&c->sb_lock);
1046 	ext = bch2_sb_field_get(c->disk_sb.sb, ext);
1047 	write_sb = false;
1048 
1049 	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
1050 		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
1051 		write_sb = true;
1052 	}
1053 
1054 	if (!test_bit(BCH_FS_error, &c->flags) &&
1055 	    !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
1056 		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
1057 		write_sb = true;
1058 	}
1059 
1060 	if (!test_bit(BCH_FS_error, &c->flags) &&
1061 	    !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent))) {
1062 		memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
1063 		write_sb = true;
1064 	}
1065 
1066 	if (c->opts.fsck &&
1067 	    !test_bit(BCH_FS_error, &c->flags) &&
1068 	    c->recovery.pass_done == BCH_RECOVERY_PASS_NR - 1 &&
1069 	    ext->btrees_lost_data) {
1070 		ext->btrees_lost_data = 0;
1071 		write_sb = true;
1072 	}
1073 
1074 	if (c->opts.fsck &&
1075 	    !test_bit(BCH_FS_error, &c->flags) &&
1076 	    !test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
1077 		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
1078 		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
1079 		write_sb = true;
1080 	}
1081 
1082 	if (bch2_blacklist_entries_gc(c))
1083 		write_sb = true;
1084 
1085 	if (write_sb)
1086 		bch2_write_super(c);
1087 	mutex_unlock(&c->sb_lock);
1088 
1089 	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
1090 	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
1091 		struct bch_move_stats stats;
1092 
1093 		bch2_move_stats_init(&stats, "recovery");
1094 
1095 		struct printbuf buf = PRINTBUF;
1096 		bch2_version_to_text(&buf, c->sb.version_min);
1097 		bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf);
1098 		printbuf_exit(&buf);
1099 
1100 		ret =   bch2_fs_read_write_early(c) ?:
1101 			bch2_scan_old_btree_nodes(c, &stats);
1102 		if (ret)
1103 			goto err;
1104 		bch_info(c, "scanning for old btree nodes done");
1105 	}
1106 
1107 	ret = 0;
1108 out:
1109 	bch2_flush_fsck_errs(c);
1110 
1111 	if (!ret &&
1112 	    test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
1113 	    !c->opts.nochanges) {
1114 		bch2_fs_read_write_early(c);
1115 		bch2_delete_dead_snapshots_async(c);
1116 	}
1117 
1118 	bch_err_fn(c, ret);
1119 final_out:
1120 	if (!IS_ERR(clean))
1121 		kfree(clean);
1122 	return ret;
1123 err:
1124 fsck_err:
1125 	{
1126 		struct printbuf buf = PRINTBUF;
1127 		bch2_log_msg_start(c, &buf);
1128 
1129 		prt_printf(&buf, "error in recovery: %s", bch2_err_str(ret));
1130 		bch2_fs_emergency_read_only2(c, &buf);
1131 
1132 		bch2_print_str(c, KERN_ERR, buf.buf);
1133 		printbuf_exit(&buf);
1134 	}
1135 	goto final_out;
1136 }
1137 
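/*
 * Initialize a brand new filesystem: set up empty btree roots, allocate and
 * start the journal, and create the initial subvolume, root directory and
 * lost+found:
 */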
1138 int bch2_fs_initialize(struct bch_fs *c)
1139 {
1140 	struct bch_inode_unpacked root_inode, lostfound_inode;
1141 	struct bkey_inode_buf packed_inode;
1142 	struct qstr lostfound = QSTR("lost+found");
1143 	struct bch_member *m;
1144 	int ret;
1145 
1146 	bch_notice(c, "initializing new filesystem");
1147 	set_bit(BCH_FS_new_fs, &c->flags);
1148 
1149 	mutex_lock(&c->sb_lock);
1150 	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
1151 	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
1152 
1153 	bch2_check_version_downgrade(c);
1154 
1155 	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
1156 		bch2_sb_upgrade(c, bcachefs_metadata_version_current, false);
1157 		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
1158 		bch2_write_super(c);
1159 	}
1160 
1161 	for_each_member_device(c, ca) {
1162 		m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
1163 		SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, false);
1164 		ca->mi = bch2_mi_to_cpu(m);
1165 	}
1166 
1167 	bch2_write_super(c);
1168 	mutex_unlock(&c->sb_lock);
1169 
1170 	set_bit(BCH_FS_btree_running, &c->flags);
1171 	set_bit(BCH_FS_may_go_rw, &c->flags);
1172 
1173 	for (unsigned i = 0; i < BTREE_ID_NR; i++)
1174 		bch2_btree_root_alloc_fake(c, i, 0);
1175 
1176 	ret = bch2_fs_journal_alloc(c);
1177 	if (ret)
1178 		goto err;
1179 
1180 	/*
1181 	 * journal_res_get() will crash if called before this has
1182 	 * set up the journal.pin FIFO and journal.cur pointer:
1183 	 */
1184 	ret = bch2_fs_journal_start(&c->journal, 1);
1185 	if (ret)
1186 		goto err;
1187 
1188 	ret = bch2_fs_read_write_early(c);
1189 	if (ret)
1190 		goto err;
1191 
1192 	set_bit(BCH_FS_accounting_replay_done, &c->flags);
1193 	bch2_journal_set_replay_done(&c->journal);
1194 
1195 	for_each_member_device(c, ca) {
1196 		ret = bch2_dev_usage_init(ca, false);
1197 		if (ret) {
1198 			bch2_dev_put(ca);
1199 			goto err;
1200 		}
1201 	}
1202 
1203 	/*
1204 	 * Write out the superblock and journal buckets, now that we can do
1205 	 * btree updates
1206 	 */
1207 	bch_verbose(c, "marking superblocks");
1208 	ret = bch2_trans_mark_dev_sbs(c);
1209 	bch_err_msg(c, ret, "marking superblocks");
1210 	if (ret)
1211 		goto err;
1212 
1213 	ret = bch2_fs_freespace_init(c);
1214 	if (ret)
1215 		goto err;
1216 
1217 	ret = bch2_initialize_subvolumes(c);
1218 	if (ret)
1219 		goto err;
1220 
1221 	bch_verbose(c, "reading snapshots table");
1222 	ret = bch2_snapshots_read(c);
1223 	if (ret)
1224 		goto err;
1225 	bch_verbose(c, "reading snapshots done");
1226 
1227 	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
1228 	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
1229 	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
1230 	bch2_inode_pack(&packed_inode, &root_inode);
1231 	packed_inode.inode.k.p.snapshot = U32_MAX;
1232 
1233 	ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0, 0);
1234 	bch_err_msg(c, ret, "creating root directory");
1235 	if (ret)
1236 		goto err;
1237 
1238 	bch2_inode_init_early(c, &lostfound_inode);
1239 
1240 	ret = bch2_trans_commit_do(c, NULL, NULL, 0,
1241 		bch2_create_trans(trans,
1242 				  BCACHEFS_ROOT_SUBVOL_INUM,
1243 				  &root_inode, &lostfound_inode,
1244 				  &lostfound,
1245 				  0, 0, S_IFDIR|0700, 0,
1246 				  NULL, NULL, (subvol_inum) { 0 }, 0));
1247 	bch_err_msg(c, ret, "creating lost+found");
1248 	if (ret)
1249 		goto err;
1250 
1251 	c->recovery.pass_done = BCH_RECOVERY_PASS_NR - 1;
1252 
1253 	bch2_copygc_wakeup(c);
1254 	bch2_rebalance_wakeup(c);
1255 
1256 	if (enabled_qtypes(c)) {
1257 		ret = bch2_fs_quota_read(c);
1258 		if (ret)
1259 			goto err;
1260 	}
1261 
1262 	ret = bch2_journal_flush(&c->journal);
1263 	bch_err_msg(c, ret, "writing first journal entry");
1264 	if (ret)
1265 		goto err;
1266 
1267 	mutex_lock(&c->sb_lock);
1268 	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
1269 	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
1270 
1271 	bch2_write_super(c);
1272 	mutex_unlock(&c->sb_lock);
1273 
1274 	c->recovery.curr_pass = BCH_RECOVERY_PASS_NR;
1275 	return 0;
1276 err:
1277 	bch_err_fn(c, ret);
1278 	return ret;
1279 }
1280