xref: /linux/fs/bcachefs/fsck.c (revision 1a562c0d44974d3cf89c6cc5c34c708c08af420e)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_buf.h"
5 #include "btree_cache.h"
6 #include "btree_update.h"
7 #include "buckets.h"
8 #include "darray.h"
9 #include "dirent.h"
10 #include "error.h"
11 #include "fs-common.h"
12 #include "fsck.h"
13 #include "inode.h"
14 #include "keylist.h"
15 #include "recovery.h"
16 #include "snapshot.h"
17 #include "super.h"
18 #include "xattr.h"
19 
20 #include <linux/bsearch.h>
21 #include <linux/dcache.h> /* struct qstr */
22 
23 /*
24  * XXX: this is handling transaction restarts without returning
25  * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
26  */
27 static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
28 				    u32 snapshot)
29 {
30 	u64 sectors = 0;
31 
32 	int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
33 				SPOS(inum, 0, snapshot),
34 				POS(inum, U64_MAX),
35 				0, k, ({
36 		if (bkey_extent_is_allocation(k.k))
37 			sectors += k.k->size;
38 		0;
39 	}));
40 
41 	return ret ?: sectors;
42 }
43 
44 static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
45 				    u32 snapshot)
46 {
47 	u64 subdirs = 0;
48 
49 	int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
50 				    SPOS(inum, 0, snapshot),
51 				    POS(inum, U64_MAX),
52 				    0, k, ({
53 		if (k.k->type == KEY_TYPE_dirent &&
54 		    bkey_s_c_to_dirent(k).v->d_type == DT_DIR)
55 			subdirs++;
56 		0;
57 	}));
58 
59 	return ret ?: subdirs;
60 }
61 
62 static int subvol_lookup(struct btree_trans *trans, u32 subvol,
63 			 u32 *snapshot, u64 *inum)
64 {
65 	struct bch_subvolume s;
66 	int ret;
67 
68 	ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
69 
70 	*snapshot = le32_to_cpu(s.snapshot);
71 	*inum = le64_to_cpu(s.inode);
72 	return ret;
73 }
74 
75 static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
76 			      struct bch_inode_unpacked *inode)
77 {
78 	struct btree_iter iter;
79 	struct bkey_s_c k;
80 	int ret;
81 
82 	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
83 			     POS(0, inode_nr),
84 			     BTREE_ITER_ALL_SNAPSHOTS);
85 	k = bch2_btree_iter_peek(&iter);
86 	ret = bkey_err(k);
87 	if (ret)
88 		goto err;
89 
90 	if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
91 		ret = -BCH_ERR_ENOENT_inode;
92 		goto err;
93 	}
94 
95 	ret = bch2_inode_unpack(k, inode);
96 err:
97 	bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
98 	bch2_trans_iter_exit(trans, &iter);
99 	return ret;
100 }
101 
102 static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
103 			  struct bch_inode_unpacked *inode,
104 			  u32 *snapshot)
105 {
106 	struct btree_iter iter;
107 	struct bkey_s_c k;
108 	int ret;
109 
110 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
111 			       SPOS(0, inode_nr, *snapshot), 0);
112 	ret = bkey_err(k);
113 	if (ret)
114 		goto err;
115 
116 	ret = bkey_is_inode(k.k)
117 		? bch2_inode_unpack(k, inode)
118 		: -BCH_ERR_ENOENT_inode;
119 	if (!ret)
120 		*snapshot = iter.pos.snapshot;
121 err:
122 	bch_err_msg(trans->c, ret, "fetching inode %llu:%u", inode_nr, *snapshot);
123 	bch2_trans_iter_exit(trans, &iter);
124 	return ret;
125 }
126 
127 static int __lookup_dirent(struct btree_trans *trans,
128 			   struct bch_hash_info hash_info,
129 			   subvol_inum dir, struct qstr *name,
130 			   u64 *target, unsigned *type)
131 {
132 	struct btree_iter iter;
133 	struct bkey_s_c_dirent d;
134 	int ret;
135 
136 	ret = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
137 			       &hash_info, dir, name, 0);
138 	if (ret)
139 		return ret;
140 
141 	d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
142 	*target = le64_to_cpu(d.v->d_inum);
143 	*type = d.v->d_type;
144 	bch2_trans_iter_exit(trans, &iter);
145 	return 0;
146 }
147 
148 static int __write_inode(struct btree_trans *trans,
149 			 struct bch_inode_unpacked *inode,
150 			 u32 snapshot)
151 {
152 	struct bkey_inode_buf *inode_p =
153 		bch2_trans_kmalloc(trans, sizeof(*inode_p));
154 
155 	if (IS_ERR(inode_p))
156 		return PTR_ERR(inode_p);
157 
158 	bch2_inode_pack(inode_p, inode);
159 	inode_p->inode.k.p.snapshot = snapshot;
160 
161 	return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
162 				&inode_p->inode.k_i,
163 				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
164 }
165 
166 static int fsck_write_inode(struct btree_trans *trans,
167 			    struct bch_inode_unpacked *inode,
168 			    u32 snapshot)
169 {
170 	int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
171 			    __write_inode(trans, inode, snapshot));
172 	bch_err_fn(trans->c, ret);
173 	return ret;
174 }
175 
176 static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
177 {
178 	struct bch_fs *c = trans->c;
179 	struct btree_iter iter;
180 	struct bch_inode_unpacked dir_inode;
181 	struct bch_hash_info dir_hash_info;
182 	int ret;
183 
184 	ret = lookup_first_inode(trans, pos.inode, &dir_inode);
185 	if (ret)
186 		goto err;
187 
188 	dir_hash_info = bch2_hash_info_init(c, &dir_inode);
189 
190 	bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
191 
192 	ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
193 				  &dir_hash_info, &iter,
194 				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
195 	bch2_trans_iter_exit(trans, &iter);
196 err:
197 	bch_err_fn(c, ret);
198 	return ret;
199 }
200 
201 /* Get lost+found, create if it doesn't exist: */
202 static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
203 			    struct bch_inode_unpacked *lostfound)
204 {
205 	struct bch_fs *c = trans->c;
206 	struct qstr lostfound_str = QSTR("lost+found");
207 	u64 inum = 0;
208 	unsigned d_type = 0;
209 	int ret;
210 
211 	struct bch_snapshot_tree st;
212 	ret = bch2_snapshot_tree_lookup(trans,
213 			bch2_snapshot_tree(c, snapshot), &st);
214 	if (ret)
215 		return ret;
216 
217 	subvol_inum root_inum = { .subvol = le32_to_cpu(st.master_subvol) };
218 	u32 subvol_snapshot;
219 
220 	ret = subvol_lookup(trans, le32_to_cpu(st.master_subvol),
221 			    &subvol_snapshot, &root_inum.inum);
222 	bch_err_msg(c, ret, "looking up root subvol");
223 	if (ret)
224 		return ret;
225 
226 	struct bch_inode_unpacked root_inode;
227 	struct bch_hash_info root_hash_info;
228 	ret = lookup_inode(trans, root_inum.inum, &root_inode, &snapshot);
229 	bch_err_msg(c, ret, "looking up root inode");
230 	if (ret)
231 		return ret;
232 
233 	root_hash_info = bch2_hash_info_init(c, &root_inode);
234 
235 	ret = __lookup_dirent(trans, root_hash_info, root_inum,
236 			      &lostfound_str, &inum, &d_type);
237 	if (bch2_err_matches(ret, ENOENT))
238 		goto create_lostfound;
239 
240 	bch_err_fn(c, ret);
241 	if (ret)
242 		return ret;
243 
244 	if (d_type != DT_DIR) {
245 		bch_err(c, "error looking up lost+found: not a directory");
246 		return -BCH_ERR_ENOENT_not_directory;
247 	}
248 
249 	/*
250 	 * The bch2_check_dirents pass has already run, dangling dirents
251 	 * shouldn't exist here:
252 	 */
253 	return lookup_inode(trans, inum, lostfound, &snapshot);
254 
255 create_lostfound:
256 	/*
257 	 * XXX: we could have a nicer log message here  if we had a nice way to
258 	 * walk backpointers to print a path
259 	 */
260 	bch_notice(c, "creating lost+found in snapshot %u", le32_to_cpu(st.root_snapshot));
261 
262 	u64 now = bch2_current_time(c);
263 	struct btree_iter lostfound_iter = { NULL };
264 	u64 cpu = raw_smp_processor_id();
265 
266 	bch2_inode_init_early(c, lostfound);
267 	bch2_inode_init_late(lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode);
268 	lostfound->bi_dir = root_inode.bi_inum;
269 
270 	root_inode.bi_nlink++;
271 
272 	ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu);
273 	if (ret)
274 		goto err;
275 
276 	bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
277 	ret = bch2_btree_iter_traverse(&lostfound_iter);
278 	if (ret)
279 		goto err;
280 
281 	ret =   bch2_dirent_create_snapshot(trans,
282 				root_inode.bi_inum, snapshot, &root_hash_info,
283 				mode_to_type(lostfound->bi_mode),
284 				&lostfound_str,
285 				lostfound->bi_inum,
286 				&lostfound->bi_dir_offset,
287 				BCH_HASH_SET_MUST_CREATE) ?:
288 		bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
289 				       BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
290 err:
291 	bch_err_msg(c, ret, "creating lost+found");
292 	bch2_trans_iter_exit(trans, &lostfound_iter);
293 	return ret;
294 }
295 
296 static int reattach_inode(struct btree_trans *trans,
297 			  struct bch_inode_unpacked *inode,
298 			  u32 inode_snapshot)
299 {
300 	struct bch_hash_info dir_hash;
301 	struct bch_inode_unpacked lostfound;
302 	char name_buf[20];
303 	struct qstr name;
304 	u64 dir_offset = 0;
305 	int ret;
306 
307 	ret = lookup_lostfound(trans, inode_snapshot, &lostfound);
308 	if (ret)
309 		return ret;
310 
311 	if (S_ISDIR(inode->bi_mode)) {
312 		lostfound.bi_nlink++;
313 
314 		ret = __write_inode(trans, &lostfound, U32_MAX);
315 		if (ret)
316 			return ret;
317 	}
318 
319 	dir_hash = bch2_hash_info_init(trans->c, &lostfound);
320 
321 	snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
322 	name = (struct qstr) QSTR(name_buf);
323 
324 	ret = bch2_dirent_create_snapshot(trans,
325 				lostfound.bi_inum, inode_snapshot,
326 				&dir_hash,
327 				inode_d_type(inode),
328 				&name, inode->bi_inum, &dir_offset,
329 				BCH_HASH_SET_MUST_CREATE);
330 	if (ret)
331 		return ret;
332 
333 	inode->bi_dir		= lostfound.bi_inum;
334 	inode->bi_dir_offset	= dir_offset;
335 
336 	return __write_inode(trans, inode, inode_snapshot);
337 }
338 
339 static int remove_backpointer(struct btree_trans *trans,
340 			      struct bch_inode_unpacked *inode)
341 {
342 	struct btree_iter iter;
343 	struct bkey_s_c_dirent d;
344 	int ret;
345 
346 	d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
347 				     POS(inode->bi_dir, inode->bi_dir_offset), 0,
348 				     dirent);
349 	ret =   bkey_err(d) ?:
350 		__remove_dirent(trans, d.k->p);
351 	bch2_trans_iter_exit(trans, &iter);
352 	return ret;
353 }
354 
355 struct snapshots_seen_entry {
356 	u32				id;
357 	u32				equiv;
358 };
359 
360 struct snapshots_seen {
361 	struct bpos			pos;
362 	DARRAY(struct snapshots_seen_entry) ids;
363 };
364 
365 static inline void snapshots_seen_exit(struct snapshots_seen *s)
366 {
367 	darray_exit(&s->ids);
368 }
369 
370 static inline void snapshots_seen_init(struct snapshots_seen *s)
371 {
372 	memset(s, 0, sizeof(*s));
373 }
374 
375 static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
376 {
377 	struct snapshots_seen_entry *i, n = {
378 		.id	= id,
379 		.equiv	= bch2_snapshot_equiv(c, id),
380 	};
381 	int ret = 0;
382 
383 	__darray_for_each(s->ids, i) {
384 		if (i->id == id)
385 			return 0;
386 		if (i->id > id)
387 			break;
388 	}
389 
390 	ret = darray_insert_item(&s->ids, i - s->ids.data, n);
391 	if (ret)
392 		bch_err(c, "error reallocating snapshots_seen table (size %zu)",
393 			s->ids.size);
394 	return ret;
395 }
396 
397 static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
398 				 enum btree_id btree_id, struct bpos pos)
399 {
400 	struct snapshots_seen_entry n = {
401 		.id	= pos.snapshot,
402 		.equiv	= bch2_snapshot_equiv(c, pos.snapshot),
403 	};
404 	int ret = 0;
405 
406 	if (!bkey_eq(s->pos, pos))
407 		s->ids.nr = 0;
408 
409 	s->pos = pos;
410 	s->pos.snapshot = n.equiv;
411 
412 	darray_for_each(s->ids, i) {
413 		if (i->id == n.id)
414 			return 0;
415 
416 		/*
417 		 * We currently don't rigorously track for snapshot cleanup
418 		 * needing to be run, so it shouldn't be a fsck error yet:
419 		 */
420 		if (i->equiv == n.equiv) {
421 			bch_err(c, "snapshot deletion did not finish:\n"
422 				"  duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
423 				bch2_btree_id_str(btree_id),
424 				pos.inode, pos.offset,
425 				i->id, n.id, n.equiv);
426 			set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
427 			return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
428 		}
429 	}
430 
431 	ret = darray_push(&s->ids, n);
432 	if (ret)
433 		bch_err(c, "error reallocating snapshots_seen table (size %zu)",
434 			s->ids.size);
435 	return ret;
436 }
437 
438 /**
439  * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor,
440  * and @ancestor hasn't been overwritten in @seen
441  *
442  * @c:		filesystem handle
443  * @seen:	list of snapshot ids already seen at current position
444  * @id:		descendent snapshot id
445  * @ancestor:	ancestor snapshot id
446  *
447  * Returns:	whether key in @ancestor snapshot is visible in @id snapshot
448  */
449 static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
450 				    u32 id, u32 ancestor)
451 {
452 	ssize_t i;
453 
454 	EBUG_ON(id > ancestor);
455 	EBUG_ON(!bch2_snapshot_is_equiv(c, id));
456 	EBUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
457 
458 	/* @ancestor should be the snapshot most recently added to @seen */
459 	EBUG_ON(ancestor != seen->pos.snapshot);
460 	EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv);
461 
462 	if (id == ancestor)
463 		return true;
464 
465 	if (!bch2_snapshot_is_ancestor(c, id, ancestor))
466 		return false;
467 
468 	/*
469 	 * We know that @id is a descendant of @ancestor, we're checking if
470 	 * we've seen a key that overwrote @ancestor - i.e. also a descendent of
471 	 * @ascestor and with @id as a descendent.
472 	 *
473 	 * But we already know that we're scanning IDs between @id and @ancestor
474 	 * numerically, since snapshot ID lists are kept sorted, so if we find
475 	 * an id that's an ancestor of @id we're done:
476 	 */
477 
478 	for (i = seen->ids.nr - 2;
479 	     i >= 0 && seen->ids.data[i].equiv >= id;
480 	     --i)
481 		if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv))
482 			return false;
483 
484 	return true;
485 }
486 
487 /**
488  * ref_visible - given a key with snapshot id @src that points to a key with
489  * snapshot id @dst, test whether there is some snapshot in which @dst is
490  * visible.
491  *
492  * @c:		filesystem handle
493  * @s:		list of snapshot IDs already seen at @src
494  * @src:	snapshot ID of src key
495  * @dst:	snapshot ID of dst key
496  * Returns:	true if there is some snapshot in which @dst is visible
497  *
498  * Assumes we're visiting @src keys in natural key order
499  */
500 static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
501 			u32 src, u32 dst)
502 {
503 	return dst <= src
504 		? key_visible_in_snapshot(c, s, dst, src)
505 		: bch2_snapshot_is_ancestor(c, src, dst);
506 }
507 
508 static int ref_visible2(struct bch_fs *c,
509 			u32 src, struct snapshots_seen *src_seen,
510 			u32 dst, struct snapshots_seen *dst_seen)
511 {
512 	src = bch2_snapshot_equiv(c, src);
513 	dst = bch2_snapshot_equiv(c, dst);
514 
515 	if (dst > src) {
516 		swap(dst, src);
517 		swap(dst_seen, src_seen);
518 	}
519 	return key_visible_in_snapshot(c, src_seen, dst, src);
520 }
521 
522 #define for_each_visible_inode(_c, _s, _w, _snapshot, _i)				\
523 	for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr &&	\
524 	     (_i)->snapshot <= (_snapshot); _i++)					\
525 		if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
526 
527 struct inode_walker_entry {
528 	struct bch_inode_unpacked inode;
529 	u32			snapshot;
530 	bool			seen_this_pos;
531 	u64			count;
532 };
533 
534 struct inode_walker {
535 	bool				first_this_inode;
536 	bool				recalculate_sums;
537 	struct bpos			last_pos;
538 
539 	DARRAY(struct inode_walker_entry) inodes;
540 };
541 
542 static void inode_walker_exit(struct inode_walker *w)
543 {
544 	darray_exit(&w->inodes);
545 }
546 
547 static struct inode_walker inode_walker_init(void)
548 {
549 	return (struct inode_walker) { 0, };
550 }
551 
552 static int add_inode(struct bch_fs *c, struct inode_walker *w,
553 		     struct bkey_s_c inode)
554 {
555 	struct bch_inode_unpacked u;
556 
557 	BUG_ON(bch2_inode_unpack(inode, &u));
558 
559 	return darray_push(&w->inodes, ((struct inode_walker_entry) {
560 		.inode		= u,
561 		.snapshot	= bch2_snapshot_equiv(c, inode.k->p.snapshot),
562 	}));
563 }
564 
565 static int get_inodes_all_snapshots(struct btree_trans *trans,
566 				    struct inode_walker *w, u64 inum)
567 {
568 	struct bch_fs *c = trans->c;
569 	struct btree_iter iter;
570 	struct bkey_s_c k;
571 	int ret;
572 
573 	w->recalculate_sums = false;
574 	w->inodes.nr = 0;
575 
576 	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
577 				     BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
578 		if (k.k->p.offset != inum)
579 			break;
580 
581 		if (bkey_is_inode(k.k))
582 			add_inode(c, w, k);
583 	}
584 	bch2_trans_iter_exit(trans, &iter);
585 
586 	if (ret)
587 		return ret;
588 
589 	w->first_this_inode = true;
590 	return 0;
591 }
592 
593 static struct inode_walker_entry *
594 lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w,
595 			  u32 snapshot, bool is_whiteout)
596 {
597 	struct inode_walker_entry *i;
598 
599 	snapshot = bch2_snapshot_equiv(c, snapshot);
600 
601 	__darray_for_each(w->inodes, i)
602 		if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
603 			goto found;
604 
605 	return NULL;
606 found:
607 	BUG_ON(snapshot > i->snapshot);
608 
609 	if (snapshot != i->snapshot && !is_whiteout) {
610 		struct inode_walker_entry new = *i;
611 		size_t pos;
612 		int ret;
613 
614 		new.snapshot = snapshot;
615 		new.count = 0;
616 
617 		bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u",
618 			 w->last_pos.inode, snapshot, i->snapshot);
619 
620 		while (i > w->inodes.data && i[-1].snapshot > snapshot)
621 			--i;
622 
623 		pos = i - w->inodes.data;
624 		ret = darray_insert_item(&w->inodes, pos, new);
625 		if (ret)
626 			return ERR_PTR(ret);
627 
628 		i = w->inodes.data + pos;
629 	}
630 
631 	return i;
632 }
633 
634 static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
635 					     struct inode_walker *w, struct bpos pos,
636 					     bool is_whiteout)
637 {
638 	if (w->last_pos.inode != pos.inode) {
639 		int ret = get_inodes_all_snapshots(trans, w, pos.inode);
640 		if (ret)
641 			return ERR_PTR(ret);
642 	} else if (bkey_cmp(w->last_pos, pos)) {
643 		darray_for_each(w->inodes, i)
644 			i->seen_this_pos = false;
645 	}
646 
647 	w->last_pos = pos;
648 
649 	return lookup_inode_for_snapshot(trans->c, w, pos.snapshot, is_whiteout);
650 }
651 
652 static int __get_visible_inodes(struct btree_trans *trans,
653 				struct inode_walker *w,
654 				struct snapshots_seen *s,
655 				u64 inum)
656 {
657 	struct bch_fs *c = trans->c;
658 	struct btree_iter iter;
659 	struct bkey_s_c k;
660 	int ret;
661 
662 	w->inodes.nr = 0;
663 
664 	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
665 			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
666 		u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
667 
668 		if (k.k->p.offset != inum)
669 			break;
670 
671 		if (!ref_visible(c, s, s->pos.snapshot, equiv))
672 			continue;
673 
674 		if (bkey_is_inode(k.k))
675 			add_inode(c, w, k);
676 
677 		if (equiv >= s->pos.snapshot)
678 			break;
679 	}
680 	bch2_trans_iter_exit(trans, &iter);
681 
682 	return ret;
683 }
684 
685 static int check_key_has_snapshot(struct btree_trans *trans,
686 				  struct btree_iter *iter,
687 				  struct bkey_s_c k)
688 {
689 	struct bch_fs *c = trans->c;
690 	struct printbuf buf = PRINTBUF;
691 	int ret = 0;
692 
693 	if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
694 				bkey_in_missing_snapshot,
695 				"key in missing snapshot: %s",
696 				(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
697 		ret = bch2_btree_delete_at(trans, iter,
698 					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
699 fsck_err:
700 	printbuf_exit(&buf);
701 	return ret;
702 }
703 
704 static int hash_redo_key(struct btree_trans *trans,
705 			 const struct bch_hash_desc desc,
706 			 struct bch_hash_info *hash_info,
707 			 struct btree_iter *k_iter, struct bkey_s_c k)
708 {
709 	struct bkey_i *delete;
710 	struct bkey_i *tmp;
711 
712 	delete = bch2_trans_kmalloc(trans, sizeof(*delete));
713 	if (IS_ERR(delete))
714 		return PTR_ERR(delete);
715 
716 	tmp = bch2_bkey_make_mut_noupdate(trans, k);
717 	if (IS_ERR(tmp))
718 		return PTR_ERR(tmp);
719 
720 	bkey_init(&delete->k);
721 	delete->k.p = k_iter->pos;
722 	return  bch2_btree_iter_traverse(k_iter) ?:
723 		bch2_trans_update(trans, k_iter, delete, 0) ?:
724 		bch2_hash_set_snapshot(trans, desc, hash_info,
725 				       (subvol_inum) { 0, k.k->p.inode },
726 				       k.k->p.snapshot, tmp,
727 				       BCH_HASH_SET_MUST_CREATE,
728 				       BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
729 		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
730 }
731 
732 static int hash_check_key(struct btree_trans *trans,
733 			  const struct bch_hash_desc desc,
734 			  struct bch_hash_info *hash_info,
735 			  struct btree_iter *k_iter, struct bkey_s_c hash_k)
736 {
737 	struct bch_fs *c = trans->c;
738 	struct btree_iter iter = { NULL };
739 	struct printbuf buf = PRINTBUF;
740 	struct bkey_s_c k;
741 	u64 hash;
742 	int ret = 0;
743 
744 	if (hash_k.k->type != desc.key_type)
745 		return 0;
746 
747 	hash = desc.hash_bkey(hash_info, hash_k);
748 
749 	if (likely(hash == hash_k.k->p.offset))
750 		return 0;
751 
752 	if (hash_k.k->p.offset < hash)
753 		goto bad_hash;
754 
755 	for_each_btree_key_norestart(trans, iter, desc.btree_id,
756 				     SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
757 				     BTREE_ITER_SLOTS, k, ret) {
758 		if (bkey_eq(k.k->p, hash_k.k->p))
759 			break;
760 
761 		if (fsck_err_on(k.k->type == desc.key_type &&
762 				!desc.cmp_bkey(k, hash_k), c,
763 				hash_table_key_duplicate,
764 				"duplicate hash table keys:\n%s",
765 				(printbuf_reset(&buf),
766 				 bch2_bkey_val_to_text(&buf, c, hash_k),
767 				 buf.buf))) {
768 			ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0) ?: 1;
769 			break;
770 		}
771 
772 		if (bkey_deleted(k.k)) {
773 			bch2_trans_iter_exit(trans, &iter);
774 			goto bad_hash;
775 		}
776 	}
777 out:
778 	bch2_trans_iter_exit(trans, &iter);
779 	printbuf_exit(&buf);
780 	return ret;
781 bad_hash:
782 	if (fsck_err(c, hash_table_key_wrong_offset,
783 		     "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
784 		     bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
785 		     (printbuf_reset(&buf),
786 		      bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
787 		ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
788 		bch_err_fn(c, ret);
789 		if (ret)
790 			return ret;
791 		ret = -BCH_ERR_transaction_restart_nested;
792 	}
793 fsck_err:
794 	goto out;
795 }
796 
797 static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
798 {
799 	struct btree_iter iter;
800 	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0);
801 	int ret = bkey_err(k);
802 	if (ret)
803 		return ret;
804 
805 	bch2_trans_iter_exit(trans, &iter);
806 	return k.k->type == KEY_TYPE_set;
807 }
808 
809 static int check_inode(struct btree_trans *trans,
810 		       struct btree_iter *iter,
811 		       struct bkey_s_c k,
812 		       struct bch_inode_unpacked *prev,
813 		       struct snapshots_seen *s,
814 		       bool full)
815 {
816 	struct bch_fs *c = trans->c;
817 	struct bch_inode_unpacked u;
818 	bool do_update = false;
819 	int ret;
820 
821 	ret = check_key_has_snapshot(trans, iter, k);
822 	if (ret < 0)
823 		goto err;
824 	if (ret)
825 		return 0;
826 
827 	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
828 	if (ret)
829 		goto err;
830 
831 	if (!bkey_is_inode(k.k))
832 		return 0;
833 
834 	BUG_ON(bch2_inode_unpack(k, &u));
835 
836 	if (!full &&
837 	    !(u.bi_flags & (BCH_INODE_i_size_dirty|
838 			    BCH_INODE_i_sectors_dirty|
839 			    BCH_INODE_unlinked)))
840 		return 0;
841 
842 	if (prev->bi_inum != u.bi_inum)
843 		*prev = u;
844 
845 	if (fsck_err_on(prev->bi_hash_seed	!= u.bi_hash_seed ||
846 			inode_d_type(prev)	!= inode_d_type(&u),
847 			c, inode_snapshot_mismatch,
848 			"inodes in different snapshots don't match")) {
849 		bch_err(c, "repair not implemented yet");
850 		return -BCH_ERR_fsck_repair_unimplemented;
851 	}
852 
853 	if ((u.bi_flags & (BCH_INODE_i_size_dirty|BCH_INODE_unlinked)) &&
854 	    bch2_key_has_snapshot_overwrites(trans, BTREE_ID_inodes, k.k->p)) {
855 		struct bpos new_min_pos;
856 
857 		ret = bch2_propagate_key_to_snapshot_leaves(trans, iter->btree_id, k, &new_min_pos);
858 		if (ret)
859 			goto err;
860 
861 		u.bi_flags &= ~BCH_INODE_i_size_dirty|BCH_INODE_unlinked;
862 
863 		ret = __write_inode(trans, &u, iter->pos.snapshot);
864 		bch_err_msg(c, ret, "in fsck updating inode");
865 		if (ret)
866 			return ret;
867 
868 		if (!bpos_eq(new_min_pos, POS_MIN))
869 			bch2_btree_iter_set_pos(iter, bpos_predecessor(new_min_pos));
870 		return 0;
871 	}
872 
873 	if (u.bi_flags & BCH_INODE_unlinked) {
874 		ret = check_inode_deleted_list(trans, k.k->p);
875 		if (ret < 0)
876 			return ret;
877 
878 		fsck_err_on(ret, c, unlinked_inode_not_on_deleted_list,
879 			    "inode %llu:%u unlinked, but not on deleted list",
880 			    u.bi_inum, k.k->p.snapshot);
881 		ret = 0;
882 	}
883 
884 	if (u.bi_flags & BCH_INODE_unlinked &&
885 	    (!c->sb.clean ||
886 	     fsck_err(c, inode_unlinked_but_clean,
887 		      "filesystem marked clean, but inode %llu unlinked",
888 		      u.bi_inum))) {
889 		ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
890 		bch_err_msg(c, ret, "in fsck deleting inode");
891 		return ret;
892 	}
893 
894 	if (u.bi_flags & BCH_INODE_i_size_dirty &&
895 	    (!c->sb.clean ||
896 	     fsck_err(c, inode_i_size_dirty_but_clean,
897 		      "filesystem marked clean, but inode %llu has i_size dirty",
898 		      u.bi_inum))) {
899 		bch_verbose(c, "truncating inode %llu", u.bi_inum);
900 
901 		/*
902 		 * XXX: need to truncate partial blocks too here - or ideally
903 		 * just switch units to bytes and that issue goes away
904 		 */
905 		ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
906 				SPOS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9,
907 				     iter->pos.snapshot),
908 				POS(u.bi_inum, U64_MAX),
909 				0, NULL);
910 		bch_err_msg(c, ret, "in fsck truncating inode");
911 		if (ret)
912 			return ret;
913 
914 		/*
915 		 * We truncated without our normal sector accounting hook, just
916 		 * make sure we recalculate it:
917 		 */
918 		u.bi_flags |= BCH_INODE_i_sectors_dirty;
919 
920 		u.bi_flags &= ~BCH_INODE_i_size_dirty;
921 		do_update = true;
922 	}
923 
924 	if (u.bi_flags & BCH_INODE_i_sectors_dirty &&
925 	    (!c->sb.clean ||
926 	     fsck_err(c, inode_i_sectors_dirty_but_clean,
927 		      "filesystem marked clean, but inode %llu has i_sectors dirty",
928 		      u.bi_inum))) {
929 		s64 sectors;
930 
931 		bch_verbose(c, "recounting sectors for inode %llu",
932 			    u.bi_inum);
933 
934 		sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
935 		if (sectors < 0) {
936 			bch_err_msg(c, sectors, "in fsck recounting inode sectors");
937 			return sectors;
938 		}
939 
940 		u.bi_sectors = sectors;
941 		u.bi_flags &= ~BCH_INODE_i_sectors_dirty;
942 		do_update = true;
943 	}
944 
945 	if (u.bi_flags & BCH_INODE_backptr_untrusted) {
946 		u.bi_dir = 0;
947 		u.bi_dir_offset = 0;
948 		u.bi_flags &= ~BCH_INODE_backptr_untrusted;
949 		do_update = true;
950 	}
951 
952 	if (do_update) {
953 		ret = __write_inode(trans, &u, iter->pos.snapshot);
954 		bch_err_msg(c, ret, "in fsck updating inode");
955 		if (ret)
956 			return ret;
957 	}
958 err:
959 fsck_err:
960 	bch_err_fn(c, ret);
961 	return ret;
962 }
963 
964 int bch2_check_inodes(struct bch_fs *c)
965 {
966 	bool full = c->opts.fsck;
967 	struct bch_inode_unpacked prev = { 0 };
968 	struct snapshots_seen s;
969 
970 	snapshots_seen_init(&s);
971 
972 	int ret = bch2_trans_run(c,
973 		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
974 				POS_MIN,
975 				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
976 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
977 			check_inode(trans, &iter, k, &prev, &s, full)));
978 
979 	snapshots_seen_exit(&s);
980 	bch_err_fn(c, ret);
981 	return ret;
982 }
983 
984 static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
985 						struct btree_iter *iter,
986 						struct bpos pos)
987 {
988 	return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
989 }
990 
991 static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
992 				   struct bkey_s_c_dirent d)
993 {
994 	return  inode->bi_dir		== d.k->p.inode &&
995 		inode->bi_dir_offset	== d.k->p.offset;
996 }
997 
998 static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
999 				   struct bch_inode_unpacked *inode)
1000 {
1001 	return d.v->d_type == DT_SUBVOL
1002 		? le32_to_cpu(d.v->d_child_subvol)	== inode->bi_subvol
1003 		: le64_to_cpu(d.v->d_inum)		== inode->bi_inum;
1004 }
1005 
1006 static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
1007 {
1008 	struct bch_fs *c = trans->c;
1009 	u32 restart_count = trans->restart_count;
1010 	int ret = 0;
1011 	s64 count2;
1012 
1013 	darray_for_each(w->inodes, i) {
1014 		if (i->inode.bi_sectors == i->count)
1015 			continue;
1016 
1017 		count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);
1018 
1019 		if (w->recalculate_sums)
1020 			i->count = count2;
1021 
1022 		if (i->count != count2) {
1023 			bch_err(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
1024 				w->last_pos.inode, i->snapshot, i->count, count2);
1025 			return -BCH_ERR_internal_fsck_err;
1026 		}
1027 
1028 		if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty),
1029 				c, inode_i_sectors_wrong,
1030 				"inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
1031 				w->last_pos.inode, i->snapshot,
1032 				i->inode.bi_sectors, i->count)) {
1033 			i->inode.bi_sectors = i->count;
1034 			ret = fsck_write_inode(trans, &i->inode, i->snapshot);
1035 			if (ret)
1036 				break;
1037 		}
1038 	}
1039 fsck_err:
1040 	bch_err_fn(c, ret);
1041 	return ret ?: trans_was_restarted(trans, restart_count);
1042 }
1043 
1044 struct extent_end {
1045 	u32			snapshot;
1046 	u64			offset;
1047 	struct snapshots_seen	seen;
1048 };
1049 
1050 struct extent_ends {
1051 	struct bpos			last_pos;
1052 	DARRAY(struct extent_end)	e;
1053 };
1054 
1055 static void extent_ends_reset(struct extent_ends *extent_ends)
1056 {
1057 	darray_for_each(extent_ends->e, i)
1058 		snapshots_seen_exit(&i->seen);
1059 	extent_ends->e.nr = 0;
1060 }
1061 
1062 static void extent_ends_exit(struct extent_ends *extent_ends)
1063 {
1064 	extent_ends_reset(extent_ends);
1065 	darray_exit(&extent_ends->e);
1066 }
1067 
1068 static void extent_ends_init(struct extent_ends *extent_ends)
1069 {
1070 	memset(extent_ends, 0, sizeof(*extent_ends));
1071 }
1072 
1073 static int extent_ends_at(struct bch_fs *c,
1074 			  struct extent_ends *extent_ends,
1075 			  struct snapshots_seen *seen,
1076 			  struct bkey_s_c k)
1077 {
1078 	struct extent_end *i, n = (struct extent_end) {
1079 		.offset		= k.k->p.offset,
1080 		.snapshot	= k.k->p.snapshot,
1081 		.seen		= *seen,
1082 	};
1083 
1084 	n.seen.ids.data = kmemdup(seen->ids.data,
1085 			      sizeof(seen->ids.data[0]) * seen->ids.size,
1086 			      GFP_KERNEL);
1087 	if (!n.seen.ids.data)
1088 		return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
1089 
1090 	__darray_for_each(extent_ends->e, i) {
1091 		if (i->snapshot == k.k->p.snapshot) {
1092 			snapshots_seen_exit(&i->seen);
1093 			*i = n;
1094 			return 0;
1095 		}
1096 
1097 		if (i->snapshot >= k.k->p.snapshot)
1098 			break;
1099 	}
1100 
1101 	return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
1102 }
1103 
1104 static int overlapping_extents_found(struct btree_trans *trans,
1105 				     enum btree_id btree,
1106 				     struct bpos pos1, struct snapshots_seen *pos1_seen,
1107 				     struct bkey pos2,
1108 				     bool *fixed,
1109 				     struct extent_end *extent_end)
1110 {
1111 	struct bch_fs *c = trans->c;
1112 	struct printbuf buf = PRINTBUF;
1113 	struct btree_iter iter1, iter2 = { NULL };
1114 	struct bkey_s_c k1, k2;
1115 	int ret;
1116 
1117 	BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
1118 
1119 	bch2_trans_iter_init(trans, &iter1, btree, pos1,
1120 			     BTREE_ITER_ALL_SNAPSHOTS|
1121 			     BTREE_ITER_NOT_EXTENTS);
1122 	k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
1123 	ret = bkey_err(k1);
1124 	if (ret)
1125 		goto err;
1126 
1127 	prt_str(&buf, "\n  ");
1128 	bch2_bkey_val_to_text(&buf, c, k1);
1129 
1130 	if (!bpos_eq(pos1, k1.k->p)) {
1131 		prt_str(&buf, "\n  wanted\n  ");
1132 		bch2_bpos_to_text(&buf, pos1);
1133 		prt_str(&buf, "\n  ");
1134 		bch2_bkey_to_text(&buf, &pos2);
1135 
1136 		bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
1137 			__func__, buf.buf);
1138 		ret = -BCH_ERR_internal_fsck_err;
1139 		goto err;
1140 	}
1141 
1142 	bch2_trans_copy_iter(&iter2, &iter1);
1143 
1144 	while (1) {
1145 		bch2_btree_iter_advance(&iter2);
1146 
1147 		k2 = bch2_btree_iter_peek_upto(&iter2, POS(pos1.inode, U64_MAX));
1148 		ret = bkey_err(k2);
1149 		if (ret)
1150 			goto err;
1151 
1152 		if (bpos_ge(k2.k->p, pos2.p))
1153 			break;
1154 	}
1155 
1156 	prt_str(&buf, "\n  ");
1157 	bch2_bkey_val_to_text(&buf, c, k2);
1158 
1159 	if (bpos_gt(k2.k->p, pos2.p) ||
1160 	    pos2.size != k2.k->size) {
1161 		bch_err(c, "%s: error finding seconding overlapping extent when repairing%s",
1162 			__func__, buf.buf);
1163 		ret = -BCH_ERR_internal_fsck_err;
1164 		goto err;
1165 	}
1166 
1167 	prt_printf(&buf, "\n  overwriting %s extent",
1168 		   pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
1169 
1170 	if (fsck_err(c, extent_overlapping,
1171 		     "overlapping extents%s", buf.buf)) {
1172 		struct btree_iter *old_iter = &iter1;
1173 		struct disk_reservation res = { 0 };
1174 
1175 		if (pos1.snapshot < pos2.p.snapshot) {
1176 			old_iter = &iter2;
1177 			swap(k1, k2);
1178 		}
1179 
1180 		trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);
1181 
1182 		ret =   bch2_trans_update_extent_overwrite(trans, old_iter,
1183 				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
1184 				k1, k2) ?:
1185 			bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
1186 		bch2_disk_reservation_put(c, &res);
1187 
1188 		if (ret)
1189 			goto err;
1190 
1191 		*fixed = true;
1192 
1193 		if (pos1.snapshot == pos2.p.snapshot) {
1194 			/*
1195 			 * We overwrote the first extent, and did the overwrite
1196 			 * in the same snapshot:
1197 			 */
1198 			extent_end->offset = bkey_start_offset(&pos2);
1199 		} else if (pos1.snapshot > pos2.p.snapshot) {
1200 			/*
1201 			 * We overwrote the first extent in pos2's snapshot:
1202 			 */
1203 			ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot);
1204 		} else {
1205 			/*
1206 			 * We overwrote the second extent - restart
1207 			 * check_extent() from the top:
1208 			 */
1209 			ret = -BCH_ERR_transaction_restart_nested;
1210 		}
1211 	}
1212 fsck_err:
1213 err:
1214 	bch2_trans_iter_exit(trans, &iter2);
1215 	bch2_trans_iter_exit(trans, &iter1);
1216 	printbuf_exit(&buf);
1217 	return ret;
1218 }
1219 
1220 static int check_overlapping_extents(struct btree_trans *trans,
1221 			      struct snapshots_seen *seen,
1222 			      struct extent_ends *extent_ends,
1223 			      struct bkey_s_c k,
1224 			      u32 equiv,
1225 			      struct btree_iter *iter,
1226 			      bool *fixed)
1227 {
1228 	struct bch_fs *c = trans->c;
1229 	int ret = 0;
1230 
1231 	/* transaction restart, running again */
1232 	if (bpos_eq(extent_ends->last_pos, k.k->p))
1233 		return 0;
1234 
1235 	if (extent_ends->last_pos.inode != k.k->p.inode)
1236 		extent_ends_reset(extent_ends);
1237 
1238 	darray_for_each(extent_ends->e, i) {
1239 		if (i->offset <= bkey_start_offset(k.k))
1240 			continue;
1241 
1242 		if (!ref_visible2(c,
1243 				  k.k->p.snapshot, seen,
1244 				  i->snapshot, &i->seen))
1245 			continue;
1246 
1247 		ret = overlapping_extents_found(trans, iter->btree_id,
1248 						SPOS(iter->pos.inode,
1249 						     i->offset,
1250 						     i->snapshot),
1251 						&i->seen,
1252 						*k.k, fixed, i);
1253 		if (ret)
1254 			goto err;
1255 	}
1256 
1257 	ret = extent_ends_at(c, extent_ends, seen, k);
1258 	if (ret)
1259 		goto err;
1260 
1261 	extent_ends->last_pos = k.k->p;
1262 err:
1263 	return ret;
1264 }
1265 
1266 static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter,
1267 				struct bkey_s_c k)
1268 {
1269 	struct bch_fs *c = trans->c;
1270 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1271 	struct bch_extent_crc_unpacked crc;
1272 	const union bch_extent_entry *i;
1273 	unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9;
1274 
1275 	bkey_for_each_crc(k.k, ptrs, crc, i)
1276 		if (crc_is_encoded(crc) &&
1277 		    crc.uncompressed_size > encoded_extent_max_sectors) {
1278 			struct printbuf buf = PRINTBUF;
1279 
1280 			bch2_bkey_val_to_text(&buf, c, k);
1281 			bch_err(c, "overbig encoded extent, please report this:\n  %s", buf.buf);
1282 			printbuf_exit(&buf);
1283 		}
1284 
1285 	return 0;
1286 }
1287 
1288 static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
1289 			struct bkey_s_c k,
1290 			struct inode_walker *inode,
1291 			struct snapshots_seen *s,
1292 			struct extent_ends *extent_ends)
1293 {
1294 	struct bch_fs *c = trans->c;
1295 	struct inode_walker_entry *i;
1296 	struct printbuf buf = PRINTBUF;
1297 	struct bpos equiv = k.k->p;
1298 	int ret = 0;
1299 
1300 	equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1301 
1302 	ret = check_key_has_snapshot(trans, iter, k);
1303 	if (ret) {
1304 		ret = ret < 0 ? ret : 0;
1305 		goto out;
1306 	}
1307 
1308 	if (inode->last_pos.inode != k.k->p.inode) {
1309 		ret = check_i_sectors(trans, inode);
1310 		if (ret)
1311 			goto err;
1312 	}
1313 
1314 	i = walk_inode(trans, inode, equiv, k.k->type == KEY_TYPE_whiteout);
1315 	ret = PTR_ERR_OR_ZERO(i);
1316 	if (ret)
1317 		goto err;
1318 
1319 	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1320 	if (ret)
1321 		goto err;
1322 
1323 	if (k.k->type != KEY_TYPE_whiteout) {
1324 		if (fsck_err_on(!i, c, extent_in_missing_inode,
1325 				"extent in missing inode:\n  %s",
1326 				(printbuf_reset(&buf),
1327 				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1328 			goto delete;
1329 
1330 		if (fsck_err_on(i &&
1331 				!S_ISREG(i->inode.bi_mode) &&
1332 				!S_ISLNK(i->inode.bi_mode),
1333 				c, extent_in_non_reg_inode,
1334 				"extent in non regular inode mode %o:\n  %s",
1335 				i->inode.bi_mode,
1336 				(printbuf_reset(&buf),
1337 				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1338 			goto delete;
1339 
1340 		ret = check_overlapping_extents(trans, s, extent_ends, k,
1341 						equiv.snapshot, iter,
1342 						&inode->recalculate_sums);
1343 		if (ret)
1344 			goto err;
1345 	}
1346 
1347 	/*
1348 	 * Check inodes in reverse order, from oldest snapshots to newest,
1349 	 * starting from the inode that matches this extent's snapshot. If we
1350 	 * didn't have one, iterate over all inodes:
1351 	 */
1352 	if (!i)
1353 		i = inode->inodes.data + inode->inodes.nr - 1;
1354 
1355 	for (;
1356 	     inode->inodes.data && i >= inode->inodes.data;
1357 	     --i) {
1358 		if (i->snapshot > equiv.snapshot ||
1359 		    !key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
1360 			continue;
1361 
1362 		if (k.k->type != KEY_TYPE_whiteout) {
1363 			if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_size_dirty) &&
1364 					k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
1365 					!bkey_extent_is_reservation(k),
1366 					c, extent_past_end_of_inode,
1367 					"extent type past end of inode %llu:%u, i_size %llu\n  %s",
1368 					i->inode.bi_inum, i->snapshot, i->inode.bi_size,
1369 					(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1370 				struct btree_iter iter2;
1371 
1372 				bch2_trans_copy_iter(&iter2, iter);
1373 				bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
1374 				ret =   bch2_btree_iter_traverse(&iter2) ?:
1375 					bch2_btree_delete_at(trans, &iter2,
1376 						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1377 				bch2_trans_iter_exit(trans, &iter2);
1378 				if (ret)
1379 					goto err;
1380 
1381 				iter->k.type = KEY_TYPE_whiteout;
1382 			}
1383 
1384 			if (bkey_extent_is_allocation(k.k))
1385 				i->count += k.k->size;
1386 		}
1387 
1388 		i->seen_this_pos = true;
1389 	}
1390 out:
1391 err:
1392 fsck_err:
1393 	printbuf_exit(&buf);
1394 	bch_err_fn(c, ret);
1395 	return ret;
1396 delete:
1397 	ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1398 	goto out;
1399 }
1400 
1401 /*
1402  * Walk extents: verify that extents have a corresponding S_ISREG inode, and
1403  * that i_size an i_sectors are consistent
1404  */
1405 int bch2_check_extents(struct bch_fs *c)
1406 {
1407 	struct inode_walker w = inode_walker_init();
1408 	struct snapshots_seen s;
1409 	struct extent_ends extent_ends;
1410 	struct disk_reservation res = { 0 };
1411 
1412 	snapshots_seen_init(&s);
1413 	extent_ends_init(&extent_ends);
1414 
1415 	int ret = bch2_trans_run(c,
1416 		for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
1417 				POS(BCACHEFS_ROOT_INO, 0),
1418 				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1419 				&res, NULL,
1420 				BCH_TRANS_COMMIT_no_enospc, ({
1421 			bch2_disk_reservation_put(c, &res);
1422 			check_extent(trans, &iter, k, &w, &s, &extent_ends) ?:
1423 			check_extent_overbig(trans, &iter, k);
1424 		})) ?:
1425 		check_i_sectors(trans, &w));
1426 
1427 	bch2_disk_reservation_put(c, &res);
1428 	extent_ends_exit(&extent_ends);
1429 	inode_walker_exit(&w);
1430 	snapshots_seen_exit(&s);
1431 
1432 	bch_err_fn(c, ret);
1433 	return ret;
1434 }
1435 
1436 int bch2_check_indirect_extents(struct bch_fs *c)
1437 {
1438 	struct disk_reservation res = { 0 };
1439 
1440 	int ret = bch2_trans_run(c,
1441 		for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
1442 				POS_MIN,
1443 				BTREE_ITER_PREFETCH, k,
1444 				&res, NULL,
1445 				BCH_TRANS_COMMIT_no_enospc, ({
1446 			bch2_disk_reservation_put(c, &res);
1447 			check_extent_overbig(trans, &iter, k);
1448 		})));
1449 
1450 	bch2_disk_reservation_put(c, &res);
1451 	bch_err_fn(c, ret);
1452 	return ret;
1453 }
1454 
1455 static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
1456 {
1457 	struct bch_fs *c = trans->c;
1458 	u32 restart_count = trans->restart_count;
1459 	int ret = 0;
1460 	s64 count2;
1461 
1462 	darray_for_each(w->inodes, i) {
1463 		if (i->inode.bi_nlink == i->count)
1464 			continue;
1465 
1466 		count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
1467 		if (count2 < 0)
1468 			return count2;
1469 
1470 		if (i->count != count2) {
1471 			bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
1472 				i->count, count2);
1473 			i->count = count2;
1474 			if (i->inode.bi_nlink == i->count)
1475 				continue;
1476 		}
1477 
1478 		if (fsck_err_on(i->inode.bi_nlink != i->count,
1479 				c, inode_dir_wrong_nlink,
1480 				"directory %llu:%u with wrong i_nlink: got %u, should be %llu",
1481 				w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
1482 			i->inode.bi_nlink = i->count;
1483 			ret = fsck_write_inode(trans, &i->inode, i->snapshot);
1484 			if (ret)
1485 				break;
1486 		}
1487 	}
1488 fsck_err:
1489 	bch_err_fn(c, ret);
1490 	return ret ?: trans_was_restarted(trans, restart_count);
1491 }
1492 
1493 static int check_dirent_target(struct btree_trans *trans,
1494 			       struct btree_iter *iter,
1495 			       struct bkey_s_c_dirent d,
1496 			       struct bch_inode_unpacked *target,
1497 			       u32 target_snapshot)
1498 {
1499 	struct bch_fs *c = trans->c;
1500 	struct bkey_i_dirent *n;
1501 	struct printbuf buf = PRINTBUF;
1502 	struct btree_iter bp_iter = { NULL };
1503 	int ret = 0;
1504 
1505 	if (!target->bi_dir &&
1506 	    !target->bi_dir_offset) {
1507 		target->bi_dir		= d.k->p.inode;
1508 		target->bi_dir_offset	= d.k->p.offset;
1509 
1510 		ret = __write_inode(trans, target, target_snapshot);
1511 		if (ret)
1512 			goto err;
1513 	}
1514 
1515 	if (!inode_points_to_dirent(target, d)) {
1516 		struct bkey_s_c_dirent bp_dirent = dirent_get_by_pos(trans, &bp_iter,
1517 				      SPOS(target->bi_dir, target->bi_dir_offset, target_snapshot));
1518 		ret = bkey_err(bp_dirent);
1519 		if (ret && !bch2_err_matches(ret, ENOENT))
1520 			goto err;
1521 
1522 		bool backpointer_exists = !ret;
1523 		ret = 0;
1524 
1525 		bch2_bkey_val_to_text(&buf, c, d.s_c);
1526 		prt_newline(&buf);
1527 		if (backpointer_exists)
1528 			bch2_bkey_val_to_text(&buf, c, bp_dirent.s_c);
1529 
1530 		if (fsck_err_on(S_ISDIR(target->bi_mode) && backpointer_exists,
1531 				c, inode_dir_multiple_links,
1532 				"directory %llu:%u with multiple links\n%s",
1533 				target->bi_inum, target_snapshot, buf.buf)) {
1534 			ret = __remove_dirent(trans, d.k->p);
1535 			goto out;
1536 		}
1537 
1538 		/*
1539 		 * hardlinked file with nlink 0:
1540 		 * We're just adjusting nlink here so check_nlinks() will pick
1541 		 * it up, it ignores inodes with nlink 0
1542 		 */
1543 		if (fsck_err_on(backpointer_exists && !target->bi_nlink,
1544 				c, inode_multiple_links_but_nlink_0,
1545 				"inode %llu:%u type %s has multiple links but i_nlink 0\n%s",
1546 				target->bi_inum, target_snapshot, bch2_d_types[d.v->d_type], buf.buf)) {
1547 			target->bi_nlink++;
1548 			target->bi_flags &= ~BCH_INODE_unlinked;
1549 
1550 			ret = __write_inode(trans, target, target_snapshot);
1551 			if (ret)
1552 				goto err;
1553 		}
1554 
1555 		if (fsck_err_on(!backpointer_exists,
1556 				c, inode_wrong_backpointer,
1557 				"inode %llu:%u has wrong backpointer:\n"
1558 				"got       %llu:%llu\n"
1559 				"should be %llu:%llu",
1560 				target->bi_inum, target_snapshot,
1561 				target->bi_dir,
1562 				target->bi_dir_offset,
1563 				d.k->p.inode,
1564 				d.k->p.offset)) {
1565 			target->bi_dir		= d.k->p.inode;
1566 			target->bi_dir_offset	= d.k->p.offset;
1567 
1568 			ret = __write_inode(trans, target, target_snapshot);
1569 			if (ret)
1570 				goto err;
1571 		}
1572 	}
1573 
1574 	if (fsck_err_on(d.v->d_type != inode_d_type(target),
1575 			c, dirent_d_type_wrong,
1576 			"incorrect d_type: got %s, should be %s:\n%s",
1577 			bch2_d_type_str(d.v->d_type),
1578 			bch2_d_type_str(inode_d_type(target)),
1579 			(printbuf_reset(&buf),
1580 			 bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
1581 		n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
1582 		ret = PTR_ERR_OR_ZERO(n);
1583 		if (ret)
1584 			goto err;
1585 
1586 		bkey_reassemble(&n->k_i, d.s_c);
1587 		n->v.d_type = inode_d_type(target);
1588 
1589 		ret = bch2_trans_update(trans, iter, &n->k_i, 0);
1590 		if (ret)
1591 			goto err;
1592 
1593 		d = dirent_i_to_s_c(n);
1594 	}
1595 
1596 	if (fsck_err_on(d.v->d_type == DT_SUBVOL &&
1597 			target->bi_parent_subvol != le32_to_cpu(d.v->d_parent_subvol),
1598 			c, dirent_d_parent_subvol_wrong,
1599 			"dirent has wrong d_parent_subvol field: got %u, should be %u",
1600 			le32_to_cpu(d.v->d_parent_subvol),
1601 			target->bi_parent_subvol)) {
1602 		n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
1603 		ret = PTR_ERR_OR_ZERO(n);
1604 		if (ret)
1605 			goto err;
1606 
1607 		bkey_reassemble(&n->k_i, d.s_c);
1608 		n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
1609 
1610 		ret = bch2_trans_update(trans, iter, &n->k_i, 0);
1611 		if (ret)
1612 			goto err;
1613 
1614 		d = dirent_i_to_s_c(n);
1615 	}
1616 out:
1617 err:
1618 fsck_err:
1619 	bch2_trans_iter_exit(trans, &bp_iter);
1620 	printbuf_exit(&buf);
1621 	bch_err_fn(c, ret);
1622 	return ret;
1623 }
1624 
1625 static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
1626 			struct bkey_s_c k,
1627 			struct bch_hash_info *hash_info,
1628 			struct inode_walker *dir,
1629 			struct inode_walker *target,
1630 			struct snapshots_seen *s)
1631 {
1632 	struct bch_fs *c = trans->c;
1633 	struct bkey_s_c_dirent d;
1634 	struct inode_walker_entry *i;
1635 	struct printbuf buf = PRINTBUF;
1636 	struct bpos equiv;
1637 	int ret = 0;
1638 
1639 	ret = check_key_has_snapshot(trans, iter, k);
1640 	if (ret) {
1641 		ret = ret < 0 ? ret : 0;
1642 		goto out;
1643 	}
1644 
1645 	equiv = k.k->p;
1646 	equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1647 
1648 	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1649 	if (ret)
1650 		goto err;
1651 
1652 	if (k.k->type == KEY_TYPE_whiteout)
1653 		goto out;
1654 
1655 	if (dir->last_pos.inode != k.k->p.inode) {
1656 		ret = check_subdir_count(trans, dir);
1657 		if (ret)
1658 			goto err;
1659 	}
1660 
1661 	BUG_ON(!btree_iter_path(trans, iter)->should_be_locked);
1662 
1663 	i = walk_inode(trans, dir, equiv, k.k->type == KEY_TYPE_whiteout);
1664 	ret = PTR_ERR_OR_ZERO(i);
1665 	if (ret < 0)
1666 		goto err;
1667 
1668 	if (dir->first_this_inode && dir->inodes.nr)
1669 		*hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
1670 	dir->first_this_inode = false;
1671 
1672 	if (fsck_err_on(!i, c, dirent_in_missing_dir_inode,
1673 			"dirent in nonexisting directory:\n%s",
1674 			(printbuf_reset(&buf),
1675 			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1676 		ret = bch2_btree_delete_at(trans, iter,
1677 				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1678 		goto out;
1679 	}
1680 
1681 	if (!i)
1682 		goto out;
1683 
1684 	if (fsck_err_on(!S_ISDIR(i->inode.bi_mode),
1685 			c, dirent_in_non_dir_inode,
1686 			"dirent in non directory inode type %s:\n%s",
1687 			bch2_d_type_str(inode_d_type(&i->inode)),
1688 			(printbuf_reset(&buf),
1689 			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1690 		ret = bch2_btree_delete_at(trans, iter, 0);
1691 		goto out;
1692 	}
1693 
1694 	ret = hash_check_key(trans, bch2_dirent_hash_desc, hash_info, iter, k);
1695 	if (ret < 0)
1696 		goto err;
1697 	if (ret) {
1698 		/* dirent has been deleted */
1699 		ret = 0;
1700 		goto out;
1701 	}
1702 
1703 	if (k.k->type != KEY_TYPE_dirent)
1704 		goto out;
1705 
1706 	d = bkey_s_c_to_dirent(k);
1707 
1708 	if (d.v->d_type == DT_SUBVOL) {
1709 		struct bch_inode_unpacked subvol_root;
1710 		u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
1711 		u32 target_snapshot;
1712 		u64 target_inum;
1713 
1714 		ret = subvol_lookup(trans, target_subvol,
1715 				      &target_snapshot, &target_inum);
1716 		if (ret && !bch2_err_matches(ret, ENOENT))
1717 			goto err;
1718 
1719 		if (fsck_err_on(ret, c, dirent_to_missing_subvol,
1720 				"dirent points to missing subvolume %u",
1721 				le32_to_cpu(d.v->d_child_subvol))) {
1722 			ret = __remove_dirent(trans, d.k->p);
1723 			goto err;
1724 		}
1725 
1726 		ret = lookup_inode(trans, target_inum,
1727 				   &subvol_root, &target_snapshot);
1728 		if (ret && !bch2_err_matches(ret, ENOENT))
1729 			goto err;
1730 
1731 		if (fsck_err_on(ret, c, subvol_to_missing_root,
1732 				"subvolume %u points to missing subvolume root %llu",
1733 				target_subvol,
1734 				target_inum)) {
1735 			bch_err(c, "repair not implemented yet");
1736 			ret = -EINVAL;
1737 			goto err;
1738 		}
1739 
1740 		if (fsck_err_on(subvol_root.bi_subvol != target_subvol,
1741 				c, subvol_root_wrong_bi_subvol,
1742 				"subvol root %llu has wrong bi_subvol field: got %u, should be %u",
1743 				target_inum,
1744 				subvol_root.bi_subvol, target_subvol)) {
1745 			subvol_root.bi_subvol = target_subvol;
1746 			ret = __write_inode(trans, &subvol_root, target_snapshot);
1747 			if (ret)
1748 				goto err;
1749 		}
1750 
1751 		ret = check_dirent_target(trans, iter, d, &subvol_root,
1752 					  target_snapshot);
1753 		if (ret)
1754 			goto err;
1755 	} else {
1756 		ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
1757 		if (ret)
1758 			goto err;
1759 
1760 		if (fsck_err_on(!target->inodes.nr,
1761 				c, dirent_to_missing_inode,
1762 				"dirent points to missing inode: (equiv %u)\n%s",
1763 				equiv.snapshot,
1764 				(printbuf_reset(&buf),
1765 				 bch2_bkey_val_to_text(&buf, c, k),
1766 				 buf.buf))) {
1767 			ret = __remove_dirent(trans, d.k->p);
1768 			if (ret)
1769 				goto err;
1770 		}
1771 
1772 		darray_for_each(target->inodes, i) {
1773 			ret = check_dirent_target(trans, iter, d,
1774 						  &i->inode, i->snapshot);
1775 			if (ret)
1776 				goto err;
1777 		}
1778 	}
1779 
1780 	if (d.v->d_type == DT_DIR)
1781 		for_each_visible_inode(c, s, dir, equiv.snapshot, i)
1782 			i->count++;
1783 
1784 out:
1785 err:
1786 fsck_err:
1787 	printbuf_exit(&buf);
1788 	bch_err_fn(c, ret);
1789 	return ret;
1790 }
1791 
1792 /*
1793  * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
1794  * validate d_type
1795  */
1796 int bch2_check_dirents(struct bch_fs *c)
1797 {
1798 	struct inode_walker dir = inode_walker_init();
1799 	struct inode_walker target = inode_walker_init();
1800 	struct snapshots_seen s;
1801 	struct bch_hash_info hash_info;
1802 
1803 	snapshots_seen_init(&s);
1804 
1805 	int ret = bch2_trans_run(c,
1806 		for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
1807 				POS(BCACHEFS_ROOT_INO, 0),
1808 				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
1809 				k,
1810 				NULL, NULL,
1811 				BCH_TRANS_COMMIT_no_enospc,
1812 			check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)));
1813 
1814 	snapshots_seen_exit(&s);
1815 	inode_walker_exit(&dir);
1816 	inode_walker_exit(&target);
1817 	bch_err_fn(c, ret);
1818 	return ret;
1819 }
1820 
1821 static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
1822 		       struct bkey_s_c k,
1823 		       struct bch_hash_info *hash_info,
1824 		       struct inode_walker *inode)
1825 {
1826 	struct bch_fs *c = trans->c;
1827 	struct inode_walker_entry *i;
1828 	int ret;
1829 
1830 	ret = check_key_has_snapshot(trans, iter, k);
1831 	if (ret)
1832 		return ret;
1833 
1834 	i = walk_inode(trans, inode, k.k->p, k.k->type == KEY_TYPE_whiteout);
1835 	ret = PTR_ERR_OR_ZERO(i);
1836 	if (ret)
1837 		return ret;
1838 
1839 	if (inode->first_this_inode && inode->inodes.nr)
1840 		*hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
1841 	inode->first_this_inode = false;
1842 
1843 	if (fsck_err_on(!i, c, xattr_in_missing_inode,
1844 			"xattr for missing inode %llu",
1845 			k.k->p.inode))
1846 		return bch2_btree_delete_at(trans, iter, 0);
1847 
1848 	if (!i)
1849 		return 0;
1850 
1851 	ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
1852 fsck_err:
1853 	bch_err_fn(c, ret);
1854 	return ret;
1855 }
1856 
1857 /*
1858  * Walk xattrs: verify that they all have a corresponding inode
1859  */
1860 int bch2_check_xattrs(struct bch_fs *c)
1861 {
1862 	struct inode_walker inode = inode_walker_init();
1863 	struct bch_hash_info hash_info;
1864 	int ret = 0;
1865 
1866 	ret = bch2_trans_run(c,
1867 		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
1868 			POS(BCACHEFS_ROOT_INO, 0),
1869 			BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
1870 			k,
1871 			NULL, NULL,
1872 			BCH_TRANS_COMMIT_no_enospc,
1873 		check_xattr(trans, &iter, k, &hash_info, &inode)));
1874 	bch_err_fn(c, ret);
1875 	return ret;
1876 }
1877 
1878 static int check_root_trans(struct btree_trans *trans)
1879 {
1880 	struct bch_fs *c = trans->c;
1881 	struct bch_inode_unpacked root_inode;
1882 	u32 snapshot;
1883 	u64 inum;
1884 	int ret;
1885 
1886 	ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
1887 	if (ret && !bch2_err_matches(ret, ENOENT))
1888 		return ret;
1889 
1890 	if (mustfix_fsck_err_on(ret, c, root_subvol_missing,
1891 				"root subvol missing")) {
1892 		struct bkey_i_subvolume root_subvol;
1893 
1894 		snapshot	= U32_MAX;
1895 		inum		= BCACHEFS_ROOT_INO;
1896 
1897 		bkey_subvolume_init(&root_subvol.k_i);
1898 		root_subvol.k.p.offset = BCACHEFS_ROOT_SUBVOL;
1899 		root_subvol.v.flags	= 0;
1900 		root_subvol.v.snapshot	= cpu_to_le32(snapshot);
1901 		root_subvol.v.inode	= cpu_to_le64(inum);
1902 		ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol.k_i, 0);
1903 		bch_err_msg(c, ret, "writing root subvol");
1904 		if (ret)
1905 			goto err;
1906 	}
1907 
1908 	ret = lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
1909 	if (ret && !bch2_err_matches(ret, ENOENT))
1910 		return ret;
1911 
1912 	if (mustfix_fsck_err_on(ret, c, root_dir_missing,
1913 				"root directory missing") ||
1914 	    mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode),
1915 				c, root_inode_not_dir,
1916 				"root inode not a directory")) {
1917 		bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
1918 				0, NULL);
1919 		root_inode.bi_inum = inum;
1920 
1921 		ret = __write_inode(trans, &root_inode, snapshot);
1922 		bch_err_msg(c, ret, "writing root inode");
1923 	}
1924 err:
1925 fsck_err:
1926 	return ret;
1927 }
1928 
1929 /* Get root directory, create if it doesn't exist: */
1930 int bch2_check_root(struct bch_fs *c)
1931 {
1932 	int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1933 		check_root_trans(trans));
1934 	bch_err_fn(c, ret);
1935 	return ret;
1936 }
1937 
1938 struct pathbuf_entry {
1939 	u64	inum;
1940 	u32	snapshot;
1941 };
1942 
1943 typedef DARRAY(struct pathbuf_entry) pathbuf;
1944 
1945 static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
1946 {
1947 	darray_for_each(*p, i)
1948 		if (i->inum	== inum &&
1949 		    i->snapshot	== snapshot)
1950 			return true;
1951 	return false;
1952 }
1953 
1954 static int path_down(struct bch_fs *c, pathbuf *p,
1955 		     u64 inum, u32 snapshot)
1956 {
1957 	int ret = darray_push(p, ((struct pathbuf_entry) {
1958 		.inum		= inum,
1959 		.snapshot	= snapshot,
1960 	}));
1961 
1962 	if (ret)
1963 		bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
1964 			p->size);
1965 	return ret;
1966 }
1967 
1968 /*
1969  * Check that a given inode is reachable from the root:
1970  *
1971  * XXX: we should also be verifying that inodes are in the right subvolumes
1972  */
1973 static int check_path(struct btree_trans *trans,
1974 		      pathbuf *p,
1975 		      struct bch_inode_unpacked *inode,
1976 		      u32 snapshot)
1977 {
1978 	struct bch_fs *c = trans->c;
1979 	int ret = 0;
1980 
1981 	snapshot = bch2_snapshot_equiv(c, snapshot);
1982 	p->nr = 0;
1983 
1984 	while (!(inode->bi_inum == BCACHEFS_ROOT_INO &&
1985 		 inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)) {
1986 		struct btree_iter dirent_iter;
1987 		struct bkey_s_c_dirent d;
1988 		u32 parent_snapshot = snapshot;
1989 
1990 		if (inode->bi_subvol) {
1991 			u64 inum;
1992 
1993 			ret = subvol_lookup(trans, inode->bi_parent_subvol,
1994 					    &parent_snapshot, &inum);
1995 			if (ret)
1996 				break;
1997 		}
1998 
1999 		d = dirent_get_by_pos(trans, &dirent_iter,
2000 				      SPOS(inode->bi_dir, inode->bi_dir_offset,
2001 					   parent_snapshot));
2002 		ret = bkey_err(d.s_c);
2003 		if (ret && !bch2_err_matches(ret, ENOENT))
2004 			break;
2005 
2006 		if (!ret && !dirent_points_to_inode(d, inode)) {
2007 			bch2_trans_iter_exit(trans, &dirent_iter);
2008 			ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
2009 		}
2010 
2011 		if (bch2_err_matches(ret, ENOENT)) {
2012 			if (fsck_err(c,  inode_unreachable,
2013 				     "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu",
2014 				     inode->bi_inum, snapshot,
2015 				     bch2_d_type_str(inode_d_type(inode)),
2016 				     inode->bi_nlink,
2017 				     inode->bi_dir,
2018 				     inode->bi_dir_offset))
2019 				ret = reattach_inode(trans, inode, snapshot);
2020 			break;
2021 		}
2022 
2023 		bch2_trans_iter_exit(trans, &dirent_iter);
2024 
2025 		if (!S_ISDIR(inode->bi_mode))
2026 			break;
2027 
2028 		ret = path_down(c, p, inode->bi_inum, snapshot);
2029 		if (ret) {
2030 			bch_err(c, "memory allocation failure");
2031 			return ret;
2032 		}
2033 
2034 		snapshot = parent_snapshot;
2035 
2036 		ret = lookup_inode(trans, inode->bi_dir, inode, &snapshot);
2037 		if (ret) {
2038 			/* Should have been caught in dirents pass */
2039 			if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
2040 				bch_err(c, "error looking up parent directory: %i", ret);
2041 			break;
2042 		}
2043 
2044 		if (path_is_dup(p, inode->bi_inum, snapshot)) {
2045 			/* XXX print path */
2046 			bch_err(c, "directory structure loop");
2047 
2048 			darray_for_each(*p, i)
2049 				pr_err("%llu:%u", i->inum, i->snapshot);
2050 			pr_err("%llu:%u", inode->bi_inum, snapshot);
2051 
2052 			if (!fsck_err(c, dir_loop, "directory structure loop"))
2053 				return 0;
2054 
2055 			ret = remove_backpointer(trans, inode);
2056 			if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
2057 				bch_err_msg(c, ret, "removing dirent");
2058 			if (ret)
2059 				break;
2060 
2061 			ret = reattach_inode(trans, inode, snapshot);
2062 			if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
2063 				bch_err_msg(c, ret, "reattaching inode %llu", inode->bi_inum);
2064 			break;
2065 		}
2066 	}
2067 fsck_err:
2068 	bch_err_fn(c, ret);
2069 	return ret;
2070 }
2071 
2072 /*
2073  * Check for unreachable inodes, as well as loops in the directory structure:
2074  * After bch2_check_dirents(), if an inode backpointer doesn't exist that means it's
2075  * unreachable:
2076  */
2077 int bch2_check_directory_structure(struct bch_fs *c)
2078 {
2079 	struct bch_inode_unpacked u;
2080 	pathbuf path = { 0, };
2081 	int ret;
2082 
2083 	ret = bch2_trans_run(c,
2084 		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
2085 					  BTREE_ITER_INTENT|
2086 					  BTREE_ITER_PREFETCH|
2087 					  BTREE_ITER_ALL_SNAPSHOTS, k,
2088 					  NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
2089 			if (!bkey_is_inode(k.k))
2090 				continue;
2091 
2092 			BUG_ON(bch2_inode_unpack(k, &u));
2093 
2094 			if (u.bi_flags & BCH_INODE_unlinked)
2095 				continue;
2096 
2097 			check_path(trans, &path, &u, iter.pos.snapshot);
2098 		})));
2099 	darray_exit(&path);
2100 
2101 	bch_err_fn(c, ret);
2102 	return ret;
2103 }
2104 
2105 struct nlink_table {
2106 	size_t		nr;
2107 	size_t		size;
2108 
2109 	struct nlink {
2110 		u64	inum;
2111 		u32	snapshot;
2112 		u32	count;
2113 	}		*d;
2114 };
2115 
2116 static int add_nlink(struct bch_fs *c, struct nlink_table *t,
2117 		     u64 inum, u32 snapshot)
2118 {
2119 	if (t->nr == t->size) {
2120 		size_t new_size = max_t(size_t, 128UL, t->size * 2);
2121 		void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
2122 
2123 		if (!d) {
2124 			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
2125 				new_size);
2126 			return -BCH_ERR_ENOMEM_fsck_add_nlink;
2127 		}
2128 
2129 		if (t->d)
2130 			memcpy(d, t->d, t->size * sizeof(t->d[0]));
2131 		kvfree(t->d);
2132 
2133 		t->d = d;
2134 		t->size = new_size;
2135 	}
2136 
2137 
2138 	t->d[t->nr++] = (struct nlink) {
2139 		.inum		= inum,
2140 		.snapshot	= snapshot,
2141 	};
2142 
2143 	return 0;
2144 }
2145 
2146 static int nlink_cmp(const void *_l, const void *_r)
2147 {
2148 	const struct nlink *l = _l;
2149 	const struct nlink *r = _r;
2150 
2151 	return cmp_int(l->inum, r->inum);
2152 }
2153 
2154 static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
2155 		     struct nlink_table *links,
2156 		     u64 range_start, u64 range_end, u64 inum, u32 snapshot)
2157 {
2158 	struct nlink *link, key = {
2159 		.inum = inum, .snapshot = U32_MAX,
2160 	};
2161 
2162 	if (inum < range_start || inum >= range_end)
2163 		return;
2164 
2165 	link = __inline_bsearch(&key, links->d, links->nr,
2166 				sizeof(links->d[0]), nlink_cmp);
2167 	if (!link)
2168 		return;
2169 
2170 	while (link > links->d && link[0].inum == link[-1].inum)
2171 		--link;
2172 
2173 	for (; link < links->d + links->nr && link->inum == inum; link++)
2174 		if (ref_visible(c, s, snapshot, link->snapshot)) {
2175 			link->count++;
2176 			if (link->snapshot >= snapshot)
2177 				break;
2178 		}
2179 }
2180 
2181 noinline_for_stack
2182 static int check_nlinks_find_hardlinks(struct bch_fs *c,
2183 				       struct nlink_table *t,
2184 				       u64 start, u64 *end)
2185 {
2186 	int ret = bch2_trans_run(c,
2187 		for_each_btree_key(trans, iter, BTREE_ID_inodes,
2188 				   POS(0, start),
2189 				   BTREE_ITER_INTENT|
2190 				   BTREE_ITER_PREFETCH|
2191 				   BTREE_ITER_ALL_SNAPSHOTS, k, ({
2192 			if (!bkey_is_inode(k.k))
2193 				continue;
2194 
2195 			/* Should never fail, checked by bch2_inode_invalid: */
2196 			struct bch_inode_unpacked u;
2197 			BUG_ON(bch2_inode_unpack(k, &u));
2198 
2199 			/*
2200 			 * Backpointer and directory structure checks are sufficient for
2201 			 * directories, since they can't have hardlinks:
2202 			 */
2203 			if (S_ISDIR(u.bi_mode))
2204 				continue;
2205 
2206 			if (!u.bi_nlink)
2207 				continue;
2208 
2209 			ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
2210 			if (ret) {
2211 				*end = k.k->p.offset;
2212 				ret = 0;
2213 				break;
2214 			}
2215 			0;
2216 		})));
2217 
2218 	bch_err_fn(c, ret);
2219 	return ret;
2220 }
2221 
2222 noinline_for_stack
2223 static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
2224 				     u64 range_start, u64 range_end)
2225 {
2226 	struct snapshots_seen s;
2227 
2228 	snapshots_seen_init(&s);
2229 
2230 	int ret = bch2_trans_run(c,
2231 		for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
2232 				   BTREE_ITER_INTENT|
2233 				   BTREE_ITER_PREFETCH|
2234 				   BTREE_ITER_ALL_SNAPSHOTS, k, ({
2235 			ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
2236 			if (ret)
2237 				break;
2238 
2239 			if (k.k->type == KEY_TYPE_dirent) {
2240 				struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
2241 
2242 				if (d.v->d_type != DT_DIR &&
2243 				    d.v->d_type != DT_SUBVOL)
2244 					inc_link(c, &s, links, range_start, range_end,
2245 						 le64_to_cpu(d.v->d_inum),
2246 						 bch2_snapshot_equiv(c, d.k->p.snapshot));
2247 			}
2248 			0;
2249 		})));
2250 
2251 	snapshots_seen_exit(&s);
2252 
2253 	bch_err_fn(c, ret);
2254 	return ret;
2255 }
2256 
2257 static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
2258 				     struct bkey_s_c k,
2259 				     struct nlink_table *links,
2260 				     size_t *idx, u64 range_end)
2261 {
2262 	struct bch_fs *c = trans->c;
2263 	struct bch_inode_unpacked u;
2264 	struct nlink *link = &links->d[*idx];
2265 	int ret = 0;
2266 
2267 	if (k.k->p.offset >= range_end)
2268 		return 1;
2269 
2270 	if (!bkey_is_inode(k.k))
2271 		return 0;
2272 
2273 	BUG_ON(bch2_inode_unpack(k, &u));
2274 
2275 	if (S_ISDIR(u.bi_mode))
2276 		return 0;
2277 
2278 	if (!u.bi_nlink)
2279 		return 0;
2280 
2281 	while ((cmp_int(link->inum, k.k->p.offset) ?:
2282 		cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
2283 		BUG_ON(*idx == links->nr);
2284 		link = &links->d[++*idx];
2285 	}
2286 
2287 	if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count,
2288 			c, inode_wrong_nlink,
2289 			"inode %llu type %s has wrong i_nlink (%u, should be %u)",
2290 			u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
2291 			bch2_inode_nlink_get(&u), link->count)) {
2292 		bch2_inode_nlink_set(&u, link->count);
2293 		ret = __write_inode(trans, &u, k.k->p.snapshot);
2294 	}
2295 fsck_err:
2296 	return ret;
2297 }
2298 
2299 noinline_for_stack
2300 static int check_nlinks_update_hardlinks(struct bch_fs *c,
2301 			       struct nlink_table *links,
2302 			       u64 range_start, u64 range_end)
2303 {
2304 	size_t idx = 0;
2305 
2306 	int ret = bch2_trans_run(c,
2307 		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
2308 				POS(0, range_start),
2309 				BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
2310 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2311 			check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
2312 	if (ret < 0) {
2313 		bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret));
2314 		return ret;
2315 	}
2316 
2317 	return 0;
2318 }
2319 
2320 int bch2_check_nlinks(struct bch_fs *c)
2321 {
2322 	struct nlink_table links = { 0 };
2323 	u64 this_iter_range_start, next_iter_range_start = 0;
2324 	int ret = 0;
2325 
2326 	do {
2327 		this_iter_range_start = next_iter_range_start;
2328 		next_iter_range_start = U64_MAX;
2329 
2330 		ret = check_nlinks_find_hardlinks(c, &links,
2331 						  this_iter_range_start,
2332 						  &next_iter_range_start);
2333 
2334 		ret = check_nlinks_walk_dirents(c, &links,
2335 					  this_iter_range_start,
2336 					  next_iter_range_start);
2337 		if (ret)
2338 			break;
2339 
2340 		ret = check_nlinks_update_hardlinks(c, &links,
2341 					 this_iter_range_start,
2342 					 next_iter_range_start);
2343 		if (ret)
2344 			break;
2345 
2346 		links.nr = 0;
2347 	} while (next_iter_range_start != U64_MAX);
2348 
2349 	kvfree(links.d);
2350 	bch_err_fn(c, ret);
2351 	return ret;
2352 }
2353 
2354 static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
2355 			     struct bkey_s_c k)
2356 {
2357 	struct bkey_s_c_reflink_p p;
2358 	struct bkey_i_reflink_p *u;
2359 
2360 	if (k.k->type != KEY_TYPE_reflink_p)
2361 		return 0;
2362 
2363 	p = bkey_s_c_to_reflink_p(k);
2364 
2365 	if (!p.v->front_pad && !p.v->back_pad)
2366 		return 0;
2367 
2368 	u = bch2_trans_kmalloc(trans, sizeof(*u));
2369 	int ret = PTR_ERR_OR_ZERO(u);
2370 	if (ret)
2371 		return ret;
2372 
2373 	bkey_reassemble(&u->k_i, k);
2374 	u->v.front_pad	= 0;
2375 	u->v.back_pad	= 0;
2376 
2377 	return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
2378 }
2379 
2380 int bch2_fix_reflink_p(struct bch_fs *c)
2381 {
2382 	if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
2383 		return 0;
2384 
2385 	int ret = bch2_trans_run(c,
2386 		for_each_btree_key_commit(trans, iter,
2387 				BTREE_ID_extents, POS_MIN,
2388 				BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
2389 				BTREE_ITER_ALL_SNAPSHOTS, k,
2390 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2391 			fix_reflink_p_key(trans, &iter, k)));
2392 	bch_err_fn(c, ret);
2393 	return ret;
2394 }
2395