xref: /linux/fs/bcachefs/fsck.c (revision 7a9b709e7cc5ce1ffb84ce07bf6d157e1de758df)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bcachefs_ioctl.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "darray.h"
10 #include "dirent.h"
11 #include "error.h"
12 #include "fs.h"
13 #include "fsck.h"
14 #include "inode.h"
15 #include "keylist.h"
16 #include "namei.h"
17 #include "recovery_passes.h"
18 #include "snapshot.h"
19 #include "super.h"
20 #include "thread_with_file.h"
21 #include "xattr.h"
22 
23 #include <linux/bsearch.h>
24 #include <linux/dcache.h> /* struct qstr */
25 
26 static int dirent_points_to_inode_nowarn(struct bkey_s_c_dirent d,
27 					 struct bch_inode_unpacked *inode)
28 {
29 	if (d.v->d_type == DT_SUBVOL
30 	    ? le32_to_cpu(d.v->d_child_subvol)	== inode->bi_subvol
31 	    : le64_to_cpu(d.v->d_inum)		== inode->bi_inum)
32 		return 0;
33 	return -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
34 }
35 
/*
 * Format a diagnostic describing a dirent <-> inode backpointer mismatch:
 * prints a one-line summary followed by the dirent key and the unpacked
 * inode, each on its own line.
 */
static void dirent_inode_mismatch_msg(struct printbuf *out,
				      struct bch_fs *c,
				      struct bkey_s_c_dirent dirent,
				      struct bch_inode_unpacked *inode)
{
	prt_str(out, "inode points to dirent that does not point back:");
	prt_newline(out);
	bch2_bkey_val_to_text(out, c, dirent.s_c);
	prt_newline(out);
	bch2_inode_unpacked_to_text(out, inode);
}
47 
48 static int dirent_points_to_inode(struct bch_fs *c,
49 				  struct bkey_s_c_dirent dirent,
50 				  struct bch_inode_unpacked *inode)
51 {
52 	int ret = dirent_points_to_inode_nowarn(dirent, inode);
53 	if (ret) {
54 		struct printbuf buf = PRINTBUF;
55 		dirent_inode_mismatch_msg(&buf, c, dirent, inode);
56 		bch_warn(c, "%s", buf.buf);
57 		printbuf_exit(&buf);
58 	}
59 	return ret;
60 }
61 
/*
 * XXX: this is handling transaction restarts without returning
 * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
 */
/*
 * Sum the sizes of all allocated extents belonging to inode @inum in
 * @snapshot; used to validate/recompute an inode's sector count.
 *
 * Returns the total sector count, or a negative error code.
 */
static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
				    u32 snapshot)
{
	u64 sectors = 0;

	int ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents,
				SPOS(inum, 0, snapshot),
				POS(inum, U64_MAX),
				0, k, ({
		if (bkey_extent_is_allocation(k.k))
			sectors += k.k->size;
		0;
	}));

	return ret ?: sectors;
}
82 
/*
 * Count the subdirectory dirents of directory @inum in @snapshot; used to
 * validate/recompute a directory inode's link count.
 *
 * Returns the subdirectory count, or a negative error code.
 */
static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
				    u32 snapshot)
{
	u64 subdirs = 0;

	int ret = for_each_btree_key_max(trans, iter, BTREE_ID_dirents,
				    SPOS(inum, 0, snapshot),
				    POS(inum, U64_MAX),
				    0, k, ({
		if (k.k->type == KEY_TYPE_dirent &&
		    bkey_s_c_to_dirent(k).v->d_type == DT_DIR)
			subdirs++;
		0;
	}));

	return ret ?: subdirs;
}
100 
101 static int subvol_lookup(struct btree_trans *trans, u32 subvol,
102 			 u32 *snapshot, u64 *inum)
103 {
104 	struct bch_subvolume s;
105 	int ret = bch2_subvolume_get(trans, subvol, false, &s);
106 
107 	*snapshot = le32_to_cpu(s.snapshot);
108 	*inum = le64_to_cpu(s.inode);
109 	return ret;
110 }
111 
112 static int lookup_inode(struct btree_trans *trans, u64 inode_nr, u32 snapshot,
113 			struct bch_inode_unpacked *inode)
114 {
115 	struct btree_iter iter;
116 	struct bkey_s_c k;
117 	int ret;
118 
119 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
120 			       SPOS(0, inode_nr, snapshot), 0);
121 	ret = bkey_err(k);
122 	if (ret)
123 		goto err;
124 
125 	ret = bkey_is_inode(k.k)
126 		? bch2_inode_unpack(k, inode)
127 		: -BCH_ERR_ENOENT_inode;
128 err:
129 	bch2_trans_iter_exit(trans, &iter);
130 	return ret;
131 }
132 
/*
 * Look up dirent @name in directory @dir within @snapshot, returning the
 * target inode number and d_type in @target and @type.
 *
 * NOTE(review): the iterator is only exited on the success path here;
 * presumably bch2_hash_lookup_in_snapshot() exits it itself on error —
 * confirm against the str_hash implementation.
 */
static int lookup_dirent_in_snapshot(struct btree_trans *trans,
			   struct bch_hash_info hash_info,
			   subvol_inum dir, struct qstr *name,
			   u64 *target, unsigned *type, u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
							 &hash_info, dir, name, 0, snapshot);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	/* Read the dirent fields before releasing the iterator: */
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	*target = le64_to_cpu(d.v->d_inum);
	*type = d.v->d_type;
	bch2_trans_iter_exit(trans, &iter);
	return 0;
}
151 
152 /*
153  * Find any subvolume associated with a tree of snapshots
154  * We can't rely on master_subvol - it might have been deleted.
155  */
156 static int find_snapshot_tree_subvol(struct btree_trans *trans,
157 				     u32 tree_id, u32 *subvol)
158 {
159 	struct btree_iter iter;
160 	struct bkey_s_c k;
161 	int ret;
162 
163 	for_each_btree_key_norestart(trans, iter, BTREE_ID_snapshots, POS_MIN, 0, k, ret) {
164 		if (k.k->type != KEY_TYPE_snapshot)
165 			continue;
166 
167 		struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
168 		if (le32_to_cpu(s.v->tree) != tree_id)
169 			continue;
170 
171 		if (s.v->subvol) {
172 			*subvol = le32_to_cpu(s.v->subvol);
173 			goto found;
174 		}
175 	}
176 	ret = -BCH_ERR_ENOENT_no_snapshot_tree_subvol;
177 found:
178 	bch2_trans_iter_exit(trans, &iter);
179 	return ret;
180 }
181 
/* Get lost+found, create if it doesn't exist: */
/*
 * Look up (or create) the lost+found directory for the snapshot tree
 * containing @snapshot, returning its unpacked inode in @lostfound.
 *
 * @reattaching_inum is the inode being reattached; it's used to repair the
 * subvolume's root inode field if that's unset.
 *
 * Returns 0 on success or a negative error code.
 */
static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
			    struct bch_inode_unpacked *lostfound,
			    u64 reattaching_inum)
{
	struct bch_fs *c = trans->c;
	struct qstr lostfound_str = QSTR("lost+found");
	struct btree_iter lostfound_iter = {};
	u64 inum = 0;
	unsigned d_type = 0;
	int ret;

	struct bch_snapshot_tree st;
	ret = bch2_snapshot_tree_lookup(trans,
			bch2_snapshot_tree(c, snapshot), &st);
	if (ret)
		return ret;

	/* Find some subvolume in this snapshot tree to anchor lost+found: */
	u32 subvolid;
	ret = find_snapshot_tree_subvol(trans,
				bch2_snapshot_tree(c, snapshot), &subvolid);
	bch_err_msg(c, ret, "finding subvol associated with snapshot tree %u",
		    bch2_snapshot_tree(c, snapshot));
	if (ret)
		return ret;

	struct bch_subvolume subvol;
	ret = bch2_subvolume_get(trans, subvolid, false, &subvol);
	bch_err_msg(c, ret, "looking up subvol %u for snapshot %u", subvolid, snapshot);
	if (ret)
		return ret;

	/* Subvolume missing its root inode? Point it at the inode being reattached: */
	if (!subvol.inode) {
		struct btree_iter iter;
		struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter,
				BTREE_ID_subvolumes, POS(0, subvolid),
				0, subvolume);
		ret = PTR_ERR_OR_ZERO(subvol);
		if (ret)
			return ret;

		subvol->v.inode = cpu_to_le64(reattaching_inum);
		bch2_trans_iter_exit(trans, &iter);
	}

	subvol_inum root_inum = {
		.subvol = subvolid,
		.inum = le64_to_cpu(subvol.inode)
	};

	struct bch_inode_unpacked root_inode;
	struct bch_hash_info root_hash_info;
	ret = lookup_inode(trans, root_inum.inum, snapshot, &root_inode);
	bch_err_msg(c, ret, "looking up root inode %llu for subvol %u",
		    root_inum.inum, subvolid);
	if (ret)
		return ret;

	root_hash_info = bch2_hash_info_init(c, &root_inode);

	ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
			      &lostfound_str, &inum, &d_type, snapshot);
	if (bch2_err_matches(ret, ENOENT))
		goto create_lostfound;

	bch_err_fn(c, ret);
	if (ret)
		return ret;

	if (d_type != DT_DIR) {
		bch_err(c, "error looking up lost+found: not a directory");
		return -BCH_ERR_ENOENT_not_directory;
	}

	/*
	 * The bch2_check_dirents pass has already run, dangling dirents
	 * shouldn't exist here:
	 */
	ret = lookup_inode(trans, inum, snapshot, lostfound);
	bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)",
		    inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot));
	return ret;

create_lostfound:
	/*
	 * we always create lost+found in the root snapshot; we don't want
	 * different branches of the snapshot tree to have different lost+found
	 */
	snapshot = le32_to_cpu(st.root_snapshot);
	/*
	 * XXX: we could have a nicer log message here  if we had a nice way to
	 * walk backpointers to print a path
	 */
	struct printbuf path = PRINTBUF;
	ret = bch2_inum_to_path(trans, root_inum, &path);
	if (ret)
		goto err;

	bch_notice(c, "creating %s/lost+found in subvol %llu snapshot %u",
		   path.buf, root_inum.subvol, snapshot);
	printbuf_exit(&path);

	u64 now = bch2_current_time(c);
	u64 cpu = raw_smp_processor_id();

	bch2_inode_init_early(c, lostfound);
	bch2_inode_init_late(lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode);
	lostfound->bi_dir = root_inode.bi_inum;
	lostfound->bi_snapshot = le32_to_cpu(st.root_snapshot);

	/* The new subdirectory bumps the root's link count: */
	root_inode.bi_nlink++;

	ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu);
	if (ret)
		goto err;

	bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot);
	ret = bch2_btree_iter_traverse(trans, &lostfound_iter);
	if (ret)
		goto err;

	/* Create the dirent and write the new lost+found inode: */
	ret =   bch2_dirent_create_snapshot(trans,
				0, root_inode.bi_inum, snapshot, &root_hash_info,
				mode_to_type(lostfound->bi_mode),
				&lostfound_str,
				lostfound->bi_inum,
				&lostfound->bi_dir_offset,
				STR_HASH_must_create) ?:
		bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
				       BTREE_UPDATE_internal_snapshot_node);
err:
	bch_err_msg(c, ret, "creating lost+found");
	bch2_trans_iter_exit(trans, &lostfound_iter);
	return ret;
}
317 
318 static inline bool inode_should_reattach(struct bch_inode_unpacked *inode)
319 {
320 	if (inode->bi_inum == BCACHEFS_ROOT_INO &&
321 	    inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)
322 		return false;
323 
324 	/*
325 	 * Subvolume roots are special: older versions of subvolume roots may be
326 	 * disconnected, it's only the newest version that matters.
327 	 *
328 	 * We only keep a single dirent pointing to a subvolume root, i.e.
329 	 * older versions of snapshots will not have a different dirent pointing
330 	 * to the same subvolume root.
331 	 *
332 	 * This is because dirents that point to subvolumes are only visible in
333 	 * the parent subvolume - versioning is not needed - and keeping them
334 	 * around would break fsck, because when we're crossing subvolumes we
335 	 * don't have a consistent snapshot ID to do check the inode <-> dirent
336 	 * relationships.
337 	 *
338 	 * Thus, a subvolume root that's been renamed after a snapshot will have
339 	 * a disconnected older version - that's expected.
340 	 *
341 	 * Note that taking a snapshot always updates the root inode (to update
342 	 * the dirent backpointer), so a subvolume root inode with
343 	 * BCH_INODE_has_child_snapshot is never visible.
344 	 */
345 	if (inode->bi_subvol &&
346 	    (inode->bi_flags & BCH_INODE_has_child_snapshot))
347 		return false;
348 
349 	return !inode->bi_dir && !(inode->bi_flags & BCH_INODE_unlinked);
350 }
351 
/*
 * Whiteout the dirent at @d_pos in @snapshot, if one exists at exactly that
 * position; used to hide a newly created dirent from child snapshots that
 * shouldn't see it.
 */
static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_dirents,
					SPOS(d_pos.inode, d_pos.offset, snapshot),
					BTREE_ITER_intent|
					BTREE_ITER_with_updates);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	if (bpos_eq(k.k->p, d_pos)) {
		/*
		 * bch2_btree_delete_at() doesn't work because the update path
		 * doesn't internally use BTREE_ITER_with_updates yet; insert
		 * an explicit whiteout instead:
		 */
		struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
		ret = PTR_ERR_OR_ZERO(k);
		if (ret)
			goto err;

		bkey_init(&k->k);
		k->k.type = KEY_TYPE_whiteout;
		k->k.p = iter.pos;
		ret = bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
382 
/*
 * Reattach a disconnected @inode by creating a dirent for it in lost+found
 * (named "subvol-<N>" for subvolume roots, "<inum>" otherwise), updating the
 * inode's backpointer fields, and fixing up versions of the inode in child
 * snapshots.
 *
 * Returns 0 on success or a negative error code.
 */
static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked lostfound;
	char name_buf[20];
	int ret;

	u32 dirent_snapshot = inode->bi_snapshot;
	if (inode->bi_subvol) {
		/* Subvolume roots get reattached under the root subvolume: */
		inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL;

		u64 root_inum;
		ret = subvol_lookup(trans, inode->bi_parent_subvol,
				    &dirent_snapshot, &root_inum);
		if (ret)
			return ret;

		snprintf(name_buf, sizeof(name_buf), "subvol-%u", inode->bi_subvol);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
	}

	ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum);
	if (ret)
		return ret;

	/* Directories add a ".." link to their new parent: */
	lostfound.bi_nlink += S_ISDIR(inode->bi_mode);

	/* ensure lost+found inode is also present in inode snapshot */
	if (!inode->bi_subvol) {
		BUG_ON(!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, lostfound.bi_snapshot));
		lostfound.bi_snapshot = inode->bi_snapshot;
	}

	ret = __bch2_fsck_write_inode(trans, &lostfound);
	if (ret)
		return ret;

	struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound);
	struct qstr name = QSTR(name_buf);

	inode->bi_dir = lostfound.bi_inum;

	ret = bch2_dirent_create_snapshot(trans,
				inode->bi_parent_subvol, lostfound.bi_inum,
				dirent_snapshot,
				&dir_hash,
				inode_d_type(inode),
				&name,
				inode->bi_subvol ?: inode->bi_inum,
				&inode->bi_dir_offset,
				STR_HASH_must_create);
	if (ret) {
		bch_err_msg(c, ret, "error creating dirent");
		return ret;
	}

	ret = __bch2_fsck_write_inode(trans, inode);
	if (ret)
		return ret;

	/*
	 * Fix up inodes in child snapshots: if they should also be reattached
	 * update the backpointer field, if they should not be we need to emit
	 * whiteouts for the dirent we just created.
	 */
	if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) {
		snapshot_id_list whiteouts_done;
		struct btree_iter iter;
		struct bkey_s_c k;

		darray_init(&whiteouts_done);

		/* Walk older snapshot IDs at this inode number, newest first: */
		for_each_btree_key_reverse_norestart(trans, iter,
				BTREE_ID_inodes, SPOS(0, inode->bi_inum, inode->bi_snapshot - 1),
				BTREE_ITER_all_snapshots|BTREE_ITER_intent, k, ret) {
			if (k.k->p.offset != inode->bi_inum)
				break;

			/* Skip non-descendants and snapshots already whited out: */
			if (!bkey_is_inode(k.k) ||
			    !bch2_snapshot_is_ancestor(c, k.k->p.snapshot, inode->bi_snapshot) ||
			    snapshot_list_has_ancestor(c, &whiteouts_done, k.k->p.snapshot))
				continue;

			struct bch_inode_unpacked child_inode;
			ret = bch2_inode_unpack(k, &child_inode);
			if (ret)
				break;

			if (!inode_should_reattach(&child_inode)) {
				/* Hide the new dirent from this child snapshot: */
				ret = maybe_delete_dirent(trans,
							  SPOS(lostfound.bi_inum, inode->bi_dir_offset,
							       dirent_snapshot),
							  k.k->p.snapshot);
				if (ret)
					break;

				ret = snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot);
				if (ret)
					break;
			} else {
				/* Share the new dirent: update the child's backpointer: */
				iter.snapshot = k.k->p.snapshot;
				child_inode.bi_dir = inode->bi_dir;
				child_inode.bi_dir_offset = inode->bi_dir_offset;

				ret = bch2_inode_write_flags(trans, &iter, &child_inode,
							     BTREE_UPDATE_internal_snapshot_node);
				if (ret)
					break;
			}
		}
		darray_exit(&whiteouts_done);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}
500 
/*
 * Fetch the dirent key at @pos; caller must check bkey_err() on the result
 * and exit @iter when done.
 */
static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
						struct btree_iter *iter,
						struct bpos pos)
{
	return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
}
507 
508 static int remove_backpointer(struct btree_trans *trans,
509 			      struct bch_inode_unpacked *inode)
510 {
511 	if (!inode->bi_dir)
512 		return 0;
513 
514 	struct bch_fs *c = trans->c;
515 	struct btree_iter iter;
516 	struct bkey_s_c_dirent d = dirent_get_by_pos(trans, &iter,
517 				     SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot));
518 	int ret = bkey_err(d) ?:
519 		  dirent_points_to_inode(c, d, inode) ?:
520 		  bch2_fsck_remove_dirent(trans, d.k->p);
521 	bch2_trans_iter_exit(trans, &iter);
522 	return ret;
523 }
524 
/*
 * Reattach a disconnected subvolume @s: remove any stale dirent its root
 * inode points at, then reattach the root inode to lost+found.
 */
static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume s)
{
	struct bch_fs *c = trans->c;

	struct bch_inode_unpacked inode;
	int ret = bch2_inode_find_by_inum_trans(trans,
				(subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
				&inode);
	if (ret)
		return ret;

	/* ENOENT is still returned below, just not logged as an error: */
	ret = remove_backpointer(trans, &inode);
	if (!bch2_err_matches(ret, ENOENT))
		bch_err_msg(c, ret, "removing dirent");
	if (ret)
		return ret;

	ret = reattach_inode(trans, &inode);
	bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
	return ret;
}
546 
/*
 * Recreate a missing subvolume key for @subvolid at @snapshotid, with root
 * inode @inum (creating a new root inode if @inum is 0), and repair the
 * corresponding snapshot and snapshot tree keys to reference it.
 */
static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 subvolid, u64 inum)
{
	struct bch_fs *c = trans->c;

	/* A subvolume's snapshot must be a leaf: */
	if (!bch2_snapshot_is_leaf(c, snapshotid)) {
		bch_err(c, "need to reconstruct subvol, but have interior node snapshot");
		return -BCH_ERR_fsck_repair_unimplemented;
	}

	/*
	 * If inum isn't set, that means we're being called from check_dirents,
	 * not check_inodes - the root of this subvolume doesn't exist or we
	 * would have found it there:
	 */
	if (!inum) {
		struct btree_iter inode_iter = {};
		struct bch_inode_unpacked new_inode;
		u64 cpu = raw_smp_processor_id();

		bch2_inode_init_early(c, &new_inode);
		bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL);

		new_inode.bi_subvol = subvolid;

		int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
			  bch2_btree_iter_traverse(trans, &inode_iter) ?:
			  bch2_inode_write(trans, &inode_iter, &new_inode);
		bch2_trans_iter_exit(trans, &inode_iter);
		if (ret)
			return ret;

		inum = new_inode.bi_inum;
	}

	bch_info(c, "reconstructing subvol %u with root inode %llu", subvolid, inum);

	/* Insert the new subvolume key: */
	struct bkey_i_subvolume *new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
	int ret = PTR_ERR_OR_ZERO(new_subvol);
	if (ret)
		return ret;

	bkey_subvolume_init(&new_subvol->k_i);
	new_subvol->k.p.offset	= subvolid;
	new_subvol->v.snapshot	= cpu_to_le32(snapshotid);
	new_subvol->v.inode	= cpu_to_le64(inum);
	ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0);
	if (ret)
		return ret;

	/* Point the snapshot key back at the subvolume: */
	struct btree_iter iter;
	struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter,
			BTREE_ID_snapshots, POS(0, snapshotid),
			0, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	bch_err_msg(c, ret, "getting snapshot %u", snapshotid);
	if (ret)
		return ret;

	u32 snapshot_tree = le32_to_cpu(s->v.tree);

	s->v.subvol = cpu_to_le32(subvolid);
	SET_BCH_SNAPSHOT_SUBVOL(&s->v, true);
	bch2_trans_iter_exit(trans, &iter);

	/* And make this the tree's master subvol if it doesn't have one: */
	struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter,
			BTREE_ID_snapshot_trees, POS(0, snapshot_tree),
			0, snapshot_tree);
	ret = PTR_ERR_OR_ZERO(st);
	bch_err_msg(c, ret, "getting snapshot tree %u", snapshot_tree);
	if (ret)
		return ret;

	if (!st->v.master_subvol)
		st->v.master_subvol = cpu_to_le32(subvolid);

	bch2_trans_iter_exit(trans, &iter);
	return 0;
}
625 
/*
 * Recreate a missing inode @inum in @snapshot, inferring its type from the
 * @btree where keys referencing it were found: extents -> regular file (with
 * i_size derived from the last extent), dirents -> directory, xattrs ->
 * regular file.
 */
static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 snapshot, u64 inum)
{
	struct bch_fs *c = trans->c;
	unsigned i_mode = S_IFREG;
	u64 i_size = 0;

	switch (btree) {
	case BTREE_ID_extents: {
		struct btree_iter iter = {};

		/* Find the last extent to derive the file size: */
		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
		struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0));
		bch2_trans_iter_exit(trans, &iter);
		int ret = bkey_err(k);
		if (ret)
			return ret;

		/* Extent offsets are in 512-byte sectors: */
		i_size = k.k->p.offset << 9;
		break;
	}
	case BTREE_ID_dirents:
		i_mode = S_IFDIR;
		break;
	case BTREE_ID_xattrs:
		break;
	default:
		BUG();
	}

	struct bch_inode_unpacked new_inode;
	bch2_inode_init_early(c, &new_inode);
	bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, i_mode|0600, 0, NULL);
	new_inode.bi_size = i_size;
	new_inode.bi_inum = inum;
	new_inode.bi_snapshot = snapshot;

	return __bch2_fsck_write_inode(trans, &new_inode);
}
664 
/*
 * Tracks the snapshot IDs already seen while scanning keys at a given
 * position, in order; used to decide key visibility across snapshots.
 */
struct snapshots_seen {
	struct bpos			pos;	/* position the id list applies to */
	snapshot_id_list		ids;	/* sorted list of snapshot IDs seen */
};

/* Free the snapshot ID list. */
static inline void snapshots_seen_exit(struct snapshots_seen *s)
{
	darray_exit(&s->ids);
}

/* Zero-initialize; safe to pass to snapshots_seen_exit() afterwards. */
static inline void snapshots_seen_init(struct snapshots_seen *s)
{
	memset(s, 0, sizeof(*s));
}
679 
/*
 * Insert @id into the sorted snapshot ID list, keeping it sorted and
 * duplicate-free. Returns 0 on success (or if already present), or an
 * allocation error.
 */
static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
{
	u32 *i;
	/* Find the insertion point (list is kept sorted ascending): */
	__darray_for_each(s->ids, i) {
		if (*i == id)
			return 0;
		if (*i > id)
			break;
	}

	int ret = darray_insert_item(&s->ids, i - s->ids.data, id);
	if (ret)
		bch_err(c, "error reallocating snapshots_seen table (size %zu)",
			s->ids.size);
	return ret;
}
696 
/*
 * Record that we've seen a key at @pos: reset the seen-ID list when moving to
 * a new position, then add @pos.snapshot to it.
 */
static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
				 enum btree_id btree_id, struct bpos pos)
{
	if (!bkey_eq(s->pos, pos))
		s->ids.nr = 0;
	s->pos = pos;

	return snapshot_list_add_nodup(c, &s->ids, pos.snapshot);
}
706 
/**
 * key_visible_in_snapshot - returns true if @id is a descendant of @ancestor,
 * and @ancestor hasn't been overwritten in @seen
 *
 * @c:		filesystem handle
 * @seen:	list of snapshot ids already seen at current position
 * @id:		descendant snapshot id
 * @ancestor:	ancestor snapshot id
 *
 * Returns:	whether key in @ancestor snapshot is visible in @id snapshot
 */
static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
				    u32 id, u32 ancestor)
{
	ssize_t i;

	EBUG_ON(id > ancestor);

	/* @ancestor should be the snapshot most recently added to @seen */
	EBUG_ON(ancestor != seen->pos.snapshot);
	EBUG_ON(ancestor != darray_last(seen->ids));

	if (id == ancestor)
		return true;

	if (!bch2_snapshot_is_ancestor(c, id, ancestor))
		return false;

	/*
	 * We know that @id is a descendant of @ancestor, we're checking if
	 * we've seen a key that overwrote @ancestor - i.e. also a descendant of
	 * @ancestor and with @id as a descendant.
	 *
	 * But we already know that we're scanning IDs between @id and @ancestor
	 * numerically, since snapshot ID lists are kept sorted, so if we find
	 * an id that's an ancestor of @id we're done:
	 */

	/* Start from the second-to-last entry (the last is @ancestor itself): */
	for (i = seen->ids.nr - 2;
	     i >= 0 && seen->ids.data[i] >= id;
	     --i)
		if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i]))
			return false;

	return true;
}
753 
754 /**
755  * ref_visible - given a key with snapshot id @src that points to a key with
756  * snapshot id @dst, test whether there is some snapshot in which @dst is
757  * visible.
758  *
759  * @c:		filesystem handle
760  * @s:		list of snapshot IDs already seen at @src
761  * @src:	snapshot ID of src key
762  * @dst:	snapshot ID of dst key
763  * Returns:	true if there is some snapshot in which @dst is visible
764  *
765  * Assumes we're visiting @src keys in natural key order
766  */
767 static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
768 			u32 src, u32 dst)
769 {
770 	return dst <= src
771 		? key_visible_in_snapshot(c, s, dst, src)
772 		: bch2_snapshot_is_ancestor(c, src, dst);
773 }
774 
775 static int ref_visible2(struct bch_fs *c,
776 			u32 src, struct snapshots_seen *src_seen,
777 			u32 dst, struct snapshots_seen *dst_seen)
778 {
779 	if (dst > src) {
780 		swap(dst, src);
781 		swap(dst_seen, src_seen);
782 	}
783 	return key_visible_in_snapshot(c, src_seen, dst, src);
784 }
785 
/*
 * Iterate @_i over the inode versions in walker @_w that are visible in
 * snapshot @_snapshot, given the seen-snapshots list @_s. Relies on
 * (_w)->inodes being sorted by snapshot ID ascending.
 */
#define for_each_visible_inode(_c, _s, _w, _snapshot, _i)				\
	for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr &&	\
	     (_i)->snapshot <= (_snapshot); _i++)					\
		if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
790 
/* One version of an inode, plus per-version accounting accumulated by fsck. */
struct inode_walker_entry {
	struct bch_inode_unpacked inode;
	u32			snapshot;	/* snapshot this version lives in */
	u64			count;		/* running sector/subdir count */
	u64			i_size;		/* running size seen in extents */
};

/*
 * Walks all snapshot versions of each inode while scanning another btree
 * (extents/dirents/xattrs) in key order.
 */
struct inode_walker {
	bool				first_this_inode;
	bool				have_inodes;	/* inodes[] valid for last_pos */
	bool				recalculate_sums;
	struct bpos			last_pos;	/* last key position walked */

	DARRAY(struct inode_walker_entry) inodes;
	snapshot_id_list		deletes;	/* snapshots with whiteouts */
};
807 
/* Free the walker's dynamic arrays. */
static void inode_walker_exit(struct inode_walker *w)
{
	darray_exit(&w->inodes);
	darray_exit(&w->deletes);
}

/* Return an empty, zero-initialized walker. */
static struct inode_walker inode_walker_init(void)
{
	return (struct inode_walker) { 0, };
}
818 
819 static int add_inode(struct bch_fs *c, struct inode_walker *w,
820 		     struct bkey_s_c inode)
821 {
822 	struct bch_inode_unpacked u;
823 
824 	return bch2_inode_unpack(inode, &u) ?:
825 		darray_push(&w->inodes, ((struct inode_walker_entry) {
826 		.inode		= u,
827 		.snapshot	= inode.k->p.snapshot,
828 	}));
829 }
830 
831 static int get_inodes_all_snapshots(struct btree_trans *trans,
832 				    struct inode_walker *w, u64 inum)
833 {
834 	struct bch_fs *c = trans->c;
835 	struct btree_iter iter;
836 	struct bkey_s_c k;
837 	int ret;
838 
839 	/*
840 	 * We no longer have inodes for w->last_pos; clear this to avoid
841 	 * screwing up check_i_sectors/check_subdir_count if we take a
842 	 * transaction restart here:
843 	 */
844 	w->have_inodes = false;
845 	w->recalculate_sums = false;
846 	w->inodes.nr = 0;
847 
848 	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
849 				     BTREE_ITER_all_snapshots, k, ret) {
850 		if (k.k->p.offset != inum)
851 			break;
852 
853 		if (bkey_is_inode(k.k))
854 			add_inode(c, w, k);
855 	}
856 	bch2_trans_iter_exit(trans, &iter);
857 
858 	if (ret)
859 		return ret;
860 
861 	w->first_this_inode = true;
862 	w->have_inodes = true;
863 	return 0;
864 }
865 
/*
 * Find the walker entry for the inode version visible from @k's snapshot.
 * If the key is in a strictly newer snapshot than the inode version found
 * (which shouldn't normally happen), synthesize and insert a new entry for
 * that snapshot. Returns NULL if no ancestor version exists, or an ERR_PTR
 * on allocation failure.
 */
static struct inode_walker_entry *
lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c k)
{
	bool is_whiteout = k.k->type == KEY_TYPE_whiteout;

	/* inodes[] is sorted by snapshot; find the first ancestor version: */
	struct inode_walker_entry *i;
	__darray_for_each(w->inodes, i)
		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->snapshot))
			goto found;

	return NULL;
found:
	BUG_ON(k.k->p.snapshot > i->snapshot);

	if (k.k->p.snapshot != i->snapshot && !is_whiteout) {
		struct inode_walker_entry new = *i;

		new.snapshot	= k.k->p.snapshot;
		new.count	= 0;
		new.i_size	= 0;

		struct printbuf buf = PRINTBUF;
		bch2_bkey_val_to_text(&buf, c, k);

		bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u\n"
			 "unexpected because we should always update the inode when we update a key in that inode\n"
			 "%s",
			 w->last_pos.inode, k.k->p.snapshot, i->snapshot, buf.buf);
		printbuf_exit(&buf);

		/* Keep inodes[] sorted: walk back to the insertion point: */
		while (i > w->inodes.data && i[-1].snapshot > k.k->p.snapshot)
			--i;

		/* Insertion may reallocate; recompute @i from the index: */
		size_t pos = i - w->inodes.data;
		int ret = darray_insert_item(&w->inodes, pos, new);
		if (ret)
			return ERR_PTR(ret);

		i = w->inodes.data + pos;
	}

	return i;
}
909 
/*
 * Advance walker @w to key @k: reload all snapshot versions if the key is for
 * a different inode number, then return the entry visible from @k's snapshot
 * (NULL or ERR_PTR on failure, as lookup_inode_for_snapshot()).
 */
static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
					     struct inode_walker *w,
					     struct bkey_s_c k)
{
	if (w->last_pos.inode != k.k->p.inode) {
		int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode);
		if (ret)
			return ERR_PTR(ret);
	}

	w->last_pos = k.k->p;

	return lookup_inode_for_snapshot(trans->c, w, k);
}
924 
/*
 * Populate walker @w with the versions of inode @inum that are visible from
 * the snapshots recorded in @s, and record in w->deletes the snapshots where
 * the inode has been whited out.
 */
static int get_visible_inodes(struct btree_trans *trans,
			      struct inode_walker *w,
			      struct snapshots_seen *s,
			      u64 inum)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	w->inodes.nr = 0;
	w->deletes.nr = 0;

	for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, s->pos.snapshot),
			   BTREE_ITER_all_snapshots, k, ret) {
		if (k.k->p.offset != inum)
			break;

		if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot))
			continue;

		/* An ancestor whiteout hides this version too: */
		if (snapshot_list_has_ancestor(c, &w->deletes, k.k->p.snapshot))
			continue;

		ret = bkey_is_inode(k.k)
			? add_inode(c, w, k)
			: snapshot_list_add(c, &w->deletes, k.k->p.snapshot);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
959 
/*
 * Prefer to delete the first one, since that will be the one at the wrong
 * offset:
 * return value: 0 -> delete k1, 1 -> delete k2
 *
 * NOTE(review): the comment above appears to describe a hash-duplicate
 * winner-picking helper, not this function — verify and relocate it.
 */
/*
 * After a dirent @new has been (re)created at a new offset, update the
 * bi_dir_offset backpointer of every visible version of its target inode.
 * No-op for non-dirent keys.
 */
int bch2_fsck_update_backpointers(struct btree_trans *trans,
				  struct snapshots_seen *s,
				  const struct bch_hash_desc desc,
				  struct bch_hash_info *hash_info,
				  struct bkey_i *new)
{
	if (new->k.type != KEY_TYPE_dirent)
		return 0;

	struct bkey_i_dirent *d = bkey_i_to_dirent(new);
	struct inode_walker target = inode_walker_init();
	int ret = 0;

	if (d->v.d_type == DT_SUBVOL) {
		/* not handled here; callers must not pass subvol dirents */
		BUG();
	} else {
		ret = get_visible_inodes(trans, &target, s, le64_to_cpu(d->v.d_inum));
		if (ret)
			goto err;

		darray_for_each(target.inodes, i) {
			i->inode.bi_dir_offset = d->k.p.offset;
			ret = __bch2_fsck_write_inode(trans, &i->inode);
			if (ret)
				goto err;
		}
	}
err:
	inode_walker_exit(&target);
	return ret;
}
996 
/*
 * Fetch the dirent that @inode's backpointer fields refer to. For subvolume
 * roots, *snapshot is first remapped to the parent subvolume's snapshot,
 * since that's where the dirent lives. Returns a key whose .k is an ERR_PTR
 * on failure.
 */
static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans,
					       struct btree_iter *iter,
					       struct bch_inode_unpacked *inode,
					       u32 *snapshot)
{
	if (inode->bi_subvol) {
		u64 inum;
		int ret = subvol_lookup(trans, inode->bi_parent_subvol, snapshot, &inum);
		if (ret)
			return ((struct bkey_s_c_dirent) { .k = ERR_PTR(ret) });
	}

	return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot));
}
1011 
1012 static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
1013 {
1014 	struct btree_iter iter;
1015 	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0);
1016 	int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set;
1017 	bch2_trans_iter_exit(trans, &iter);
1018 	return ret;
1019 }
1020 
/*
 * Verify @inode's dirent backpointer: if the dirent is missing or doesn't
 * point back at this inode, clear bi_dir/bi_dir_offset (to be repaired later
 * by check_dirents/check_path) and set *write_inode. Stale subvolume-root
 * versions are silently cleaned up without an fsck error.
 */
static int check_inode_dirent_inode(struct btree_trans *trans,
				    struct bch_inode_unpacked *inode,
				    bool *write_inode)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;

	u32 inode_snapshot = inode->bi_snapshot;
	struct btree_iter dirent_iter = {};
	struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
	int ret = bkey_err(d);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if ((ret || dirent_points_to_inode_nowarn(d, inode)) &&
	    inode->bi_subvol &&
	    (inode->bi_flags & BCH_INODE_has_child_snapshot)) {
		/* Older version of a renamed subvolume root: we won't have a
		 * correct dirent for it. That's expected, see
		 * inode_should_reattach().
		 *
		 * We don't clear the backpointer field when doing the rename
		 * because there might be arbitrarily many versions in older
		 * snapshots.
		 */
		inode->bi_dir = 0;
		inode->bi_dir_offset = 0;
		*write_inode = true;
		goto out;
	}

	if (fsck_err_on(ret,
			trans, inode_points_to_missing_dirent,
			"inode points to missing dirent\n%s",
			(bch2_inode_unpacked_to_text(&buf, inode), buf.buf)) ||
	    fsck_err_on(!ret && dirent_points_to_inode_nowarn(d, inode),
			trans, inode_points_to_wrong_dirent,
			"%s",
			(printbuf_reset(&buf),
			 dirent_inode_mismatch_msg(&buf, c, d, inode),
			 buf.buf))) {
		/*
		 * We just clear the backpointer fields for now. If we find a
		 * dirent that points to this inode in check_dirents(), we'll
		 * update it then; then when we get to check_path() if the
		 * backpointer is still 0 we'll reattach it.
		 */
		inode->bi_dir = 0;
		inode->bi_dir_offset = 0;
		*write_inode = true;
	}
out:
	ret = 0;
fsck_err:
	bch2_trans_iter_exit(trans, &dirent_iter);
	printbuf_exit(&buf);
	bch_err_fn(c, ret);
	return ret;
}
1080 
/*
 * Find and unpack the snapshot-root version of inode @inum, i.e. the version
 * at the highest snapshot ID (iterating in reverse from U32_MAX).
 *
 * BUG()s if no inode key exists at @inum: callers only invoke this for inode
 * numbers they just saw a live inode key for.
 */
static int get_snapshot_root_inode(struct btree_trans *trans,
				   struct bch_inode_unpacked *root,
				   u64 inum)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes,
					     SPOS(0, inum, U32_MAX),
					     BTREE_ITER_all_snapshots, k, ret) {
		if (k.k->p.offset != inum)
			break;
		if (bkey_is_inode(k.k))
			goto found_root;
	}
	if (ret)
		goto err;
	BUG();
found_root:
	ret = bch2_inode_unpack(k, root);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
1106 
/*
 * Check/repair a single inode key: snapshot validity, hash info consistency
 * with the snapshot root, dirent backpointer, unlinked/has_child_snapshot
 * flags, deleted-list membership, subvolume backpointers, and journal seq.
 *
 * @snapshot_root caches the snapshot-root version of the current inode number
 * so it isn't re-looked-up for every snapshot version of the same inode.
 * Accumulated repairs are written once at the end if do_update is set.
 */
static int check_inode(struct btree_trans *trans,
		       struct btree_iter *iter,
		       struct bkey_s_c k,
		       struct bch_inode_unpacked *snapshot_root,
		       struct snapshots_seen *s)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	struct bch_inode_unpacked u;
	bool do_update = false;
	int ret;

	ret = bch2_check_key_has_snapshot(trans, iter, k);
	if (ret < 0)
		goto err;
	if (ret)
		return 0;

	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
	if (ret)
		goto err;

	if (!bkey_is_inode(k.k))
		return 0;

	ret = bch2_inode_unpack(k, &u);
	if (ret)
		goto err;

	/* Refresh the cached snapshot root when we move to a new inode number */
	if (snapshot_root->bi_inum != u.bi_inum) {
		ret = get_snapshot_root_inode(trans, snapshot_root, u.bi_inum);
		if (ret)
			goto err;
	}

	/* Hash seed/type must agree across all snapshot versions of an inode */
	if (fsck_err_on(u.bi_hash_seed		!= snapshot_root->bi_hash_seed ||
			INODE_STR_HASH(&u)	!= INODE_STR_HASH(snapshot_root),
			trans, inode_snapshot_mismatch,
			"inode hash info in different snapshots don't match")) {
		u.bi_hash_seed = snapshot_root->bi_hash_seed;
		SET_INODE_STR_HASH(&u, INODE_STR_HASH(snapshot_root));
		do_update = true;
	}

	if (u.bi_dir || u.bi_dir_offset) {
		ret = check_inode_dirent_inode(trans, &u, &do_update);
		if (ret)
			goto err;
	}

	if (fsck_err_on(u.bi_dir && (u.bi_flags & BCH_INODE_unlinked),
			trans, inode_unlinked_but_has_dirent,
			"inode unlinked but has dirent\n%s",
			(printbuf_reset(&buf),
			 bch2_inode_unpacked_to_text(&buf, &u),
			 buf.buf))) {
		u.bi_flags &= ~BCH_INODE_unlinked;
		do_update = true;
	}

	if (S_ISDIR(u.bi_mode) && (u.bi_flags & BCH_INODE_unlinked)) {
		/* Check for this early so that check_unreachable_inode() will reattach it */

		ret = bch2_empty_dir_snapshot(trans, k.k->p.offset, 0, k.k->p.snapshot);
		if (ret && ret != -BCH_ERR_ENOTEMPTY_dir_not_empty)
			goto err;

		fsck_err_on(ret, trans, inode_dir_unlinked_but_not_empty,
			    "dir unlinked but not empty\n%s",
			    (printbuf_reset(&buf),
			     bch2_inode_unpacked_to_text(&buf, &u),
			     buf.buf));
		u.bi_flags &= ~BCH_INODE_unlinked;
		do_update = true;
		ret = 0;
	}

	/* Returns > 0 if child snapshots exist, 0 if not, < 0 on error */
	ret = bch2_inode_has_child_snapshots(trans, k.k->p);
	if (ret < 0)
		goto err;

	if (fsck_err_on(ret != !!(u.bi_flags & BCH_INODE_has_child_snapshot),
			trans, inode_has_child_snapshots_wrong,
			"inode has_child_snapshots flag wrong (should be %u)\n%s",
			ret,
			(printbuf_reset(&buf),
			 bch2_inode_unpacked_to_text(&buf, &u),
			 buf.buf))) {
		if (ret)
			u.bi_flags |= BCH_INODE_has_child_snapshot;
		else
			u.bi_flags &= ~BCH_INODE_has_child_snapshot;
		do_update = true;
	}
	ret = 0;

	if ((u.bi_flags & BCH_INODE_unlinked) &&
	    !(u.bi_flags & BCH_INODE_has_child_snapshot)) {
		if (!test_bit(BCH_FS_started, &c->flags)) {
			/*
			 * If we're not in online fsck, don't delete unlinked
			 * inodes, just make sure they're on the deleted list.
			 *
			 * They might be referred to by a logged operation -
			 * i.e. we might have crashed in the middle of a
			 * truncate on an unlinked but open file - so we want to
			 * let the delete_dead_inodes kill it after resuming
			 * logged ops.
			 */
			ret = check_inode_deleted_list(trans, k.k->p);
			if (ret < 0)
				goto err_noprint;

			fsck_err_on(!ret,
				    trans, unlinked_inode_not_on_deleted_list,
				    "inode %llu:%u unlinked, but not on deleted list",
				    u.bi_inum, k.k->p.snapshot);

			ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, k.k->p, 1);
			if (ret)
				goto err;
		} else {
			/* Online fsck: safe to delete if nothing has it open */
			ret = bch2_inode_or_descendents_is_open(trans, k.k->p);
			if (ret < 0)
				goto err;

			if (fsck_err_on(!ret,
					trans, inode_unlinked_and_not_open,
				      "inode %llu:%u unlinked and not open",
				      u.bi_inum, u.bi_snapshot)) {
				ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
				bch_err_msg(c, ret, "in fsck deleting inode");
				goto err_noprint;
			}
			ret = 0;
		}
	}

	/* Only subvolume roots (other than the fs root) have a parent subvol */
	if (fsck_err_on(u.bi_parent_subvol &&
			(u.bi_subvol == 0 ||
			 u.bi_subvol == BCACHEFS_ROOT_SUBVOL),
			trans, inode_bi_parent_nonzero,
			"inode %llu:%u has subvol %u but nonzero parent subvol %u",
			u.bi_inum, k.k->p.snapshot, u.bi_subvol, u.bi_parent_subvol)) {
		u.bi_parent_subvol = 0;
		do_update = true;
	}

	if (u.bi_subvol) {
		/* NOTE: shadows the snapshots_seen parameter @s in this scope */
		struct bch_subvolume s;

		ret = bch2_subvolume_get(trans, u.bi_subvol, false, &s);
		if (ret && !bch2_err_matches(ret, ENOENT))
			goto err;

		if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
			/* Subvolumes btree was lost - reconstruct this entry */
			ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum);
			goto do_update;
		}

		if (fsck_err_on(ret,
				trans, inode_bi_subvol_missing,
				"inode %llu:%u bi_subvol points to missing subvolume %u",
				u.bi_inum, k.k->p.snapshot, u.bi_subvol) ||
		    fsck_err_on(le64_to_cpu(s.inode) != u.bi_inum ||
				!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.snapshot),
							   k.k->p.snapshot),
				trans, inode_bi_subvol_wrong,
				"inode %llu:%u points to subvol %u, but subvol points to %llu:%u",
				u.bi_inum, k.k->p.snapshot, u.bi_subvol,
				le64_to_cpu(s.inode),
				le32_to_cpu(s.snapshot))) {
			u.bi_subvol = 0;
			u.bi_parent_subvol = 0;
			do_update = true;
		}
	}

	if (fsck_err_on(u.bi_journal_seq > journal_cur_seq(&c->journal),
			trans, inode_journal_seq_in_future,
			"inode journal seq in future (currently at %llu)\n%s",
			journal_cur_seq(&c->journal),
			(printbuf_reset(&buf),
			 bch2_inode_unpacked_to_text(&buf, &u),
			buf.buf))) {
		u.bi_journal_seq = journal_cur_seq(&c->journal);
		do_update = true;
	}
do_update:
	if (do_update) {
		ret = __bch2_fsck_write_inode(trans, &u);
		bch_err_msg(c, ret, "in fsck updating inode");
		if (ret)
			goto err_noprint;
	}
err:
fsck_err:
	bch_err_fn(c, ret);
err_noprint:
	printbuf_exit(&buf);
	return ret;
}
1309 
/*
 * Walk every inode key, in all snapshots, running check_inode() on each and
 * committing repairs as we go.
 */
int bch2_check_inodes(struct bch_fs *c)
{
	/* Cache of the snapshot-root inode, shared across iterations */
	struct bch_inode_unpacked snapshot_root = {};
	struct snapshots_seen s;

	snapshots_seen_init(&s);

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
				POS_MIN,
				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_inode(trans, &iter, k, &snapshot_root, &s)));

	snapshots_seen_exit(&s);
	bch_err_fn(c, ret);
	return ret;
}
1328 
/*
 * Walk ancestor-snapshot versions of *inode (snapshot IDs above
 * inode->bi_snapshot) and replace *inode with the oldest version that still
 * needs reattaching, so the reattach happens at the right snapshot.
 */
static int find_oldest_inode_needs_reattach(struct btree_trans *trans,
					    struct bch_inode_unpacked *inode)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	/*
	 * We look for inodes to reattach in natural key order, leaves first,
	 * but we should do the reattach at the oldest version that needs to be
	 * reattached:
	 */
	for_each_btree_key_norestart(trans, iter,
				     BTREE_ID_inodes,
				     SPOS(0, inode->bi_inum, inode->bi_snapshot + 1),
				     BTREE_ITER_all_snapshots, k, ret) {
		if (k.k->p.offset != inode->bi_inum)
			break;

		/* Skip versions in unrelated snapshot subtrees */
		if (!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, k.k->p.snapshot))
			continue;

		if (!bkey_is_inode(k.k))
			break;

		struct bch_inode_unpacked parent_inode;
		ret = bch2_inode_unpack(k, &parent_inode);
		if (ret)
			break;

		if (!inode_should_reattach(&parent_inode))
			break;

		/* This older version also needs reattaching - prefer it */
		*inode = parent_inode;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
1369 
/*
 * If this inode key needs reattaching (see inode_should_reattach()), reattach
 * it at the oldest snapshot version that needs it.
 */
static int check_unreachable_inode(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct bkey_s_c k)
{
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (!bkey_is_inode(k.k))
		return 0;

	struct bch_inode_unpacked inode;
	ret = bch2_inode_unpack(k, &inode);
	if (ret)
		return ret;

	if (!inode_should_reattach(&inode))
		return 0;

	/* May rewrite 'inode' to an older snapshot version */
	ret = find_oldest_inode_needs_reattach(trans, &inode);
	if (ret)
		return ret;

	if (fsck_err(trans, inode_unreachable,
		     "unreachable inode:\n%s",
		     (bch2_inode_unpacked_to_text(&buf, &inode),
		      buf.buf)))
		ret = reattach_inode(trans, &inode);
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
1401 
1402 /*
1403  * Reattach unreachable (but not unlinked) inodes
1404  *
 * Run after check_inodes() and check_dirents(), so we know that inode
1406  * backpointer fields point to valid dirents, and every inode that has a dirent
1407  * that points to it has its backpointer field set - so we're just looking for
1408  * non-unlinked inodes without backpointers:
1409  *
1410  * XXX: this is racy w.r.t. hardlink removal in online fsck
1411  */
int bch2_check_unreachable_inodes(struct bch_fs *c)
{
	/* See comment above: reattach unreachable, non-unlinked inodes */
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
				POS_MIN,
				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_unreachable_inode(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}
1423 
1424 static inline bool btree_matches_i_mode(enum btree_id btree, unsigned mode)
1425 {
1426 	switch (btree) {
1427 	case BTREE_ID_extents:
1428 		return S_ISREG(mode) || S_ISLNK(mode);
1429 	case BTREE_ID_dirents:
1430 		return S_ISDIR(mode);
1431 	case BTREE_ID_xattrs:
1432 		return true;
1433 	default:
1434 		BUG();
1435 	}
1436 }
1437 
/*
 * Verify that a key in the extents/dirents/xattrs btree belongs to an existing
 * inode (@i, as returned by walk_inode(); NULL or an error pointer if none was
 * found) of a compatible file type; deletes the key, or reconstructs the inode
 * if the inodes btree was lost.
 */
static int check_key_has_inode(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct inode_walker *inode,
			       struct inode_walker_entry *i,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = PTR_ERR_OR_ZERO(i);
	if (ret)
		return ret;

	if (k.k->type == KEY_TYPE_whiteout)
		goto out;

	if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) {
		/* Inodes btree was lost - reconstruct the missing inode */
		ret =   reconstruct_inode(trans, iter->btree_id, k.k->p.snapshot, k.k->p.inode) ?:
			bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto err;

		/* Step the walker back so it picks up the new inode */
		inode->last_pos.inode--;
		ret = -BCH_ERR_transaction_restart_nested;
		goto err;
	}

	if (fsck_err_on(!i,
			trans, key_in_missing_inode,
			"key in missing inode:\n%s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		goto delete;

	if (fsck_err_on(i && !btree_matches_i_mode(iter->btree_id, i->inode.bi_mode),
			trans, key_in_wrong_inode_type,
			"key for wrong inode mode %o:\n%s",
			i->inode.bi_mode,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		goto delete;
out:
err:
fsck_err:
	printbuf_exit(&buf);
	bch_err_fn(c, ret);
	return ret;
delete:
	ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node);
	goto out;
}
1488 
/*
 * After walking all extents for an inode, compare each snapshot version's
 * accumulated sector count (i->count) against bi_sectors, and repair
 * mismatches.  "notnested" variant: may be run with transaction restarts
 * handled by the caller.
 */
static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_walker *w)
{
	struct bch_fs *c = trans->c;
	int ret = 0;
	s64 count2;

	darray_for_each(w->inodes, i) {
		if (i->inode.bi_sectors == i->count)
			continue;

		/* Recount to cross-check the walker's accumulated total */
		count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);

		if (w->recalculate_sums)
			i->count = count2;

		if (i->count != count2) {
			bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
					    w->last_pos.inode, i->snapshot, i->count, count2);
			i->count = count2;
		}

		if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty),
				trans, inode_i_sectors_wrong,
				"inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
				w->last_pos.inode, i->snapshot,
				i->inode.bi_sectors, i->count)) {
			i->inode.bi_sectors = i->count;
			ret = bch2_fsck_write_inode(trans, &i->inode);
			if (ret)
				break;
		}
	}
fsck_err:
	bch_err_fn(c, ret);
	return ret;
}
1525 
/*
 * Nested variant: reports a transaction restart to the caller if one occurred
 * while fixing up i_sectors.
 */
static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
{
	u32 restart_count = trans->restart_count;
	return check_i_sectors_notnested(trans, w) ?:
		trans_was_restarted(trans, restart_count);
}
1532 
/* End position of a previously-seen extent, for overlap detection */
struct extent_end {
	u32			snapshot;	/* snapshot ID the extent was in */
	u64			offset;		/* end offset of the extent */
	struct snapshots_seen	seen;		/* snapshots seen when it was walked */
};
1538 
/* Per-inode set of extent end positions, one per snapshot, sorted by snapshot */
struct extent_ends {
	struct bpos			last_pos;	/* last extent pos processed */
	DARRAY(struct extent_end)	e;
};
1543 
/* Drop all tracked extent ends (freeing their snapshots_seen copies) */
static void extent_ends_reset(struct extent_ends *extent_ends)
{
	darray_for_each(extent_ends->e, i)
		snapshots_seen_exit(&i->seen);
	extent_ends->e.nr = 0;
}
1550 
/* Free everything owned by @extent_ends */
static void extent_ends_exit(struct extent_ends *extent_ends)
{
	extent_ends_reset(extent_ends);
	darray_exit(&extent_ends->e);
}
1556 
1557 static void extent_ends_init(struct extent_ends *extent_ends)
1558 {
1559 	memset(extent_ends, 0, sizeof(*extent_ends));
1560 }
1561 
/*
 * Record the end position of extent @k for later overlap checks, replacing any
 * existing entry for the same snapshot and keeping the array sorted by
 * snapshot ID.  Takes a private copy of @seen's snapshot ID list; the copy is
 * owned by the entry and freed by extent_ends_reset()/exit().
 */
static int extent_ends_at(struct bch_fs *c,
			  struct extent_ends *extent_ends,
			  struct snapshots_seen *seen,
			  struct bkey_s_c k)
{
	struct extent_end *i, n = (struct extent_end) {
		.offset		= k.k->p.offset,
		.snapshot	= k.k->p.snapshot,
		.seen		= *seen,
	};

	/* Duplicate the ids array so the entry outlives the caller's @seen */
	n.seen.ids.data = kmemdup(seen->ids.data,
			      sizeof(seen->ids.data[0]) * seen->ids.size,
			      GFP_KERNEL);
	if (!n.seen.ids.data)
		return -BCH_ERR_ENOMEM_fsck_extent_ends_at;

	__darray_for_each(extent_ends->e, i) {
		if (i->snapshot == k.k->p.snapshot) {
			/* Same snapshot: replace in place, freeing the old copy */
			snapshots_seen_exit(&i->seen);
			*i = n;
			return 0;
		}

		if (i->snapshot >= k.k->p.snapshot)
			break;
	}

	return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
}
1592 
1593 static int overlapping_extents_found(struct btree_trans *trans,
1594 				     enum btree_id btree,
1595 				     struct bpos pos1, struct snapshots_seen *pos1_seen,
1596 				     struct bkey pos2,
1597 				     bool *fixed,
1598 				     struct extent_end *extent_end)
1599 {
1600 	struct bch_fs *c = trans->c;
1601 	struct printbuf buf = PRINTBUF;
1602 	struct btree_iter iter1, iter2 = {};
1603 	struct bkey_s_c k1, k2;
1604 	int ret;
1605 
1606 	BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
1607 
1608 	bch2_trans_iter_init(trans, &iter1, btree, pos1,
1609 			     BTREE_ITER_all_snapshots|
1610 			     BTREE_ITER_not_extents);
1611 	k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX));
1612 	ret = bkey_err(k1);
1613 	if (ret)
1614 		goto err;
1615 
1616 	prt_newline(&buf);
1617 	bch2_bkey_val_to_text(&buf, c, k1);
1618 
1619 	if (!bpos_eq(pos1, k1.k->p)) {
1620 		prt_str(&buf, "\nwanted\n  ");
1621 		bch2_bpos_to_text(&buf, pos1);
1622 		prt_str(&buf, "\n");
1623 		bch2_bkey_to_text(&buf, &pos2);
1624 
1625 		bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
1626 			__func__, buf.buf);
1627 		ret = -BCH_ERR_internal_fsck_err;
1628 		goto err;
1629 	}
1630 
1631 	bch2_trans_copy_iter(trans, &iter2, &iter1);
1632 
1633 	while (1) {
1634 		bch2_btree_iter_advance(trans, &iter2);
1635 
1636 		k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX));
1637 		ret = bkey_err(k2);
1638 		if (ret)
1639 			goto err;
1640 
1641 		if (bpos_ge(k2.k->p, pos2.p))
1642 			break;
1643 	}
1644 
1645 	prt_newline(&buf);
1646 	bch2_bkey_val_to_text(&buf, c, k2);
1647 
1648 	if (bpos_gt(k2.k->p, pos2.p) ||
1649 	    pos2.size != k2.k->size) {
1650 		bch_err(c, "%s: error finding seconding overlapping extent when repairing%s",
1651 			__func__, buf.buf);
1652 		ret = -BCH_ERR_internal_fsck_err;
1653 		goto err;
1654 	}
1655 
1656 	prt_printf(&buf, "\noverwriting %s extent",
1657 		   pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
1658 
1659 	if (fsck_err(trans, extent_overlapping,
1660 		     "overlapping extents%s", buf.buf)) {
1661 		struct btree_iter *old_iter = &iter1;
1662 		struct disk_reservation res = { 0 };
1663 
1664 		if (pos1.snapshot < pos2.p.snapshot) {
1665 			old_iter = &iter2;
1666 			swap(k1, k2);
1667 		}
1668 
1669 		trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);
1670 
1671 		ret =   bch2_trans_update_extent_overwrite(trans, old_iter,
1672 				BTREE_UPDATE_internal_snapshot_node,
1673 				k1, k2) ?:
1674 			bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
1675 		bch2_disk_reservation_put(c, &res);
1676 
1677 		bch_info(c, "repair ret %s", bch2_err_str(ret));
1678 
1679 		if (ret)
1680 			goto err;
1681 
1682 		*fixed = true;
1683 
1684 		if (pos1.snapshot == pos2.p.snapshot) {
1685 			/*
1686 			 * We overwrote the first extent, and did the overwrite
1687 			 * in the same snapshot:
1688 			 */
1689 			extent_end->offset = bkey_start_offset(&pos2);
1690 		} else if (pos1.snapshot > pos2.p.snapshot) {
1691 			/*
1692 			 * We overwrote the first extent in pos2's snapshot:
1693 			 */
1694 			ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot);
1695 		} else {
1696 			/*
1697 			 * We overwrote the second extent - restart
1698 			 * check_extent() from the top:
1699 			 */
1700 			ret = -BCH_ERR_transaction_restart_nested;
1701 		}
1702 	}
1703 fsck_err:
1704 err:
1705 	bch2_trans_iter_exit(trans, &iter2);
1706 	bch2_trans_iter_exit(trans, &iter1);
1707 	printbuf_exit(&buf);
1708 	return ret;
1709 }
1710 
/*
 * Check extent @k against the recorded end positions of previously seen
 * extents in this inode; repair any overlap visible in the same snapshot.
 * Sets *fixed when a repair happened, so the caller recalculates i_sectors.
 */
static int check_overlapping_extents(struct btree_trans *trans,
			      struct snapshots_seen *seen,
			      struct extent_ends *extent_ends,
			      struct bkey_s_c k,
			      struct btree_iter *iter,
			      bool *fixed)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	/* transaction restart, running again */
	if (bpos_eq(extent_ends->last_pos, k.k->p))
		return 0;

	/* New inode: previously recorded ends no longer apply */
	if (extent_ends->last_pos.inode != k.k->p.inode)
		extent_ends_reset(extent_ends);

	darray_for_each(extent_ends->e, i) {
		/* No overlap if the recorded end is at or before our start */
		if (i->offset <= bkey_start_offset(k.k))
			continue;

		/* Only an overlap if both extents are visible in some snapshot */
		if (!ref_visible2(c,
				  k.k->p.snapshot, seen,
				  i->snapshot, &i->seen))
			continue;

		ret = overlapping_extents_found(trans, iter->btree_id,
						SPOS(iter->pos.inode,
						     i->offset,
						     i->snapshot),
						&i->seen,
						*k.k, fixed, i);
		if (ret)
			goto err;
	}

	extent_ends->last_pos = k.k->p;
err:
	return ret;
}
1751 
/*
 * Report (but don't repair) encoded extents whose uncompressed size exceeds
 * the encoded_extent_max limit.  Always returns 0.
 */
static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter,
				struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;
	/* option is in bytes; convert to 512-byte sectors */
	unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc_is_encoded(crc) &&
		    crc.uncompressed_size > encoded_extent_max_sectors) {
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, k);
			bch_err(c, "overbig encoded extent, please report this:\n  %s", buf.buf);
			printbuf_exit(&buf);
		}

	return 0;
}
1773 
/*
 * Check a single extent key: that it has a valid snapshot and an owning inode
 * of the right type, that it doesn't overlap other visible extents, and that
 * it doesn't extend past i_size; accumulate sector counts into the inode
 * walker for the later i_sectors check.
 */
static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
			struct bkey_s_c k,
			struct inode_walker *inode,
			struct snapshots_seen *s,
			struct extent_ends *extent_ends,
			struct disk_reservation *res)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	ret = bch2_check_key_has_snapshot(trans, iter, k);
	if (ret) {
		ret = ret < 0 ? ret : 0;
		goto out;
	}

	/* Moved on to a new inode: finish i_sectors checks for the previous one */
	if (inode->last_pos.inode != k.k->p.inode && inode->have_inodes) {
		ret = check_i_sectors(trans, inode);
		if (ret)
			goto err;
	}

	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
	if (ret)
		goto err;

	struct inode_walker_entry *extent_i = walk_inode(trans, inode, k);
	ret = PTR_ERR_OR_ZERO(extent_i);
	if (ret)
		goto err;

	ret = check_key_has_inode(trans, iter, inode, extent_i, k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_whiteout) {
		ret = check_overlapping_extents(trans, s, extent_ends, k, iter,
						&inode->recalculate_sums);
		if (ret)
			goto err;

		/*
		 * Check inodes in reverse order, from oldest snapshots to
		 * newest, starting from the inode that matches this extent's
		 * snapshot. If we didn't have one, iterate over all inodes:
		 */
		for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
		     inode->inodes.data && i >= inode->inodes.data;
		     --i) {
			if (i->snapshot > k.k->p.snapshot ||
			    !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
				continue;

			/* Extent past i_size (and not a reservation): delete it */
			if (fsck_err_on(k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
					!bkey_extent_is_reservation(k),
					trans, extent_past_end_of_inode,
					"extent type past end of inode %llu:%u, i_size %llu\n%s",
					i->inode.bi_inum, i->snapshot, i->inode.bi_size,
					(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
				struct btree_iter iter2;

				bch2_trans_copy_iter(trans, &iter2, iter);
				bch2_btree_iter_set_snapshot(trans, &iter2, i->snapshot);
				ret =   bch2_btree_iter_traverse(trans, &iter2) ?:
					bch2_btree_delete_at(trans, &iter2,
						BTREE_UPDATE_internal_snapshot_node);
				bch2_trans_iter_exit(trans, &iter2);
				if (ret)
					goto err;

				/* Treat the deleted key as a whiteout from here on */
				iter->k.type = KEY_TYPE_whiteout;
				break;
			}
		}
	}

	ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto err;

	/* Accumulate sectors into every inode version this extent is visible in */
	if (bkey_extent_is_allocation(k.k)) {
		for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
		     inode->inodes.data && i >= inode->inodes.data;
		     --i) {
			if (i->snapshot > k.k->p.snapshot ||
			    !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
				continue;

			i->count += k.k->size;
		}
	}

	if (k.k->type != KEY_TYPE_whiteout) {
		ret = extent_ends_at(c, extent_ends, s, k);
		if (ret)
			goto err;
	}
out:
err:
fsck_err:
	printbuf_exit(&buf);
	bch_err_fn(c, ret);
	return ret;
}
1879 
1880 /*
1881  * Walk extents: verify that extents have a corresponding S_ISREG inode, and
 * that i_size and i_sectors are consistent
1883  */
int bch2_check_extents(struct bch_fs *c)
{
	struct inode_walker w = inode_walker_init();
	struct snapshots_seen s;
	struct extent_ends extent_ends;
	struct disk_reservation res = { 0 };

	snapshots_seen_init(&s);
	extent_ends_init(&extent_ends);

	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_extents,
				POS(BCACHEFS_ROOT_INO, 0),
				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
			/* Drop the previous iteration's reservation before reuse */
			bch2_disk_reservation_put(c, &res);
			check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?:
			check_extent_overbig(trans, &iter, k);
		})) ?:
		/* Final i_sectors check for the last inode walked */
		check_i_sectors_notnested(trans, &w));

	bch2_disk_reservation_put(c, &res);
	extent_ends_exit(&extent_ends);
	inode_walker_exit(&w);
	snapshots_seen_exit(&s);

	bch_err_fn(c, ret);
	return ret;
}
1912 
/*
 * Walk the reflink btree checking for overbig encoded extents; indirect
 * extents have no owning inode, so only the overbig check applies.
 */
int bch2_check_indirect_extents(struct bch_fs *c)
{
	struct disk_reservation res = { 0 };

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
				POS_MIN,
				BTREE_ITER_prefetch, k,
				&res, NULL,
				BCH_TRANS_COMMIT_no_enospc, ({
			/* Drop the previous iteration's reservation before reuse */
			bch2_disk_reservation_put(c, &res);
			check_extent_overbig(trans, &iter, k);
		})));

	bch2_disk_reservation_put(c, &res);
	bch_err_fn(c, ret);
	return ret;
}
1931 
/*
 * After walking a directory's dirents, compare each snapshot version's
 * counted subdirectories (i->count) against bi_nlink, and repair mismatches.
 * "notnested" variant: transaction restarts are handled by the caller.
 */
static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_walker *w)
{
	struct bch_fs *c = trans->c;
	int ret = 0;
	s64 count2;

	darray_for_each(w->inodes, i) {
		if (i->inode.bi_nlink == i->count)
			continue;

		/* Recount to cross-check the walker's accumulated total */
		count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
		if (count2 < 0)
			return count2;

		if (i->count != count2) {
			bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu",
					    w->last_pos.inode, i->snapshot, i->count, count2);
			i->count = count2;
			if (i->inode.bi_nlink == i->count)
				continue;
		}

		if (fsck_err_on(i->inode.bi_nlink != i->count,
				trans, inode_dir_wrong_nlink,
				"directory %llu:%u with wrong i_nlink: got %u, should be %llu",
				w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
			i->inode.bi_nlink = i->count;
			ret = bch2_fsck_write_inode(trans, &i->inode);
			if (ret)
				break;
		}
	}
fsck_err:
	bch_err_fn(c, ret);
	return ret;
}
1968 
/*
 * Nested variant: reports a transaction restart to the caller if one occurred
 * while fixing up subdirectory counts.
 */
static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_walker *w)
{
	u32 restart_count = trans->restart_count;
	return check_subdir_count_notnested(trans, w) ?:
		trans_was_restarted(trans, restart_count);
}
1975 
/* find a subvolume that's a descendant of @snapshot: */
1977 static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid)
1978 {
1979 	struct btree_iter iter;
1980 	struct bkey_s_c k;
1981 	int ret;
1982 
1983 	for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN, 0, k, ret) {
1984 		if (k.k->type != KEY_TYPE_subvolume)
1985 			continue;
1986 
1987 		struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
1988 		if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) {
1989 			bch2_trans_iter_exit(trans, &iter);
1990 			*subvolid = k.k->p.offset;
1991 			goto found;
1992 		}
1993 	}
1994 	if (!ret)
1995 		ret = -ENOENT;
1996 found:
1997 	bch2_trans_iter_exit(trans, &iter);
1998 	return ret;
1999 }
2000 
2001 noinline_for_stack
2002 static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter,
2003 				  struct bkey_s_c_dirent d)
2004 {
2005 	struct bch_fs *c = trans->c;
2006 	struct btree_iter subvol_iter = {};
2007 	struct bch_inode_unpacked subvol_root;
2008 	u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol);
2009 	u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
2010 	u32 parent_snapshot;
2011 	u32 new_parent_subvol = 0;
2012 	u64 parent_inum;
2013 	struct printbuf buf = PRINTBUF;
2014 	int ret = 0;
2015 
2016 	ret = subvol_lookup(trans, parent_subvol, &parent_snapshot, &parent_inum);
2017 	if (ret && !bch2_err_matches(ret, ENOENT))
2018 		return ret;
2019 
2020 	if (ret ||
2021 	    (!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot))) {
2022 		int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
2023 		if (ret2 && !bch2_err_matches(ret, ENOENT))
2024 			return ret2;
2025 	}
2026 
2027 	if (ret &&
2028 	    !new_parent_subvol &&
2029 	    (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
2030 		/*
2031 		 * Couldn't find a subvol for dirent's snapshot - but we lost
2032 		 * subvols, so we need to reconstruct:
2033 		 */
2034 		ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0);
2035 		if (ret)
2036 			return ret;
2037 
2038 		parent_snapshot = d.k->p.snapshot;
2039 	}
2040 
2041 	if (fsck_err_on(ret,
2042 			trans, dirent_to_missing_parent_subvol,
2043 			"dirent parent_subvol points to missing subvolume\n%s",
2044 			(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) ||
2045 	    fsck_err_on(!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot),
2046 			trans, dirent_not_visible_in_parent_subvol,
2047 			"dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s",
2048 			parent_snapshot,
2049 			(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
2050 		if (!new_parent_subvol) {
2051 			bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot);
2052 			return -BCH_ERR_fsck_repair_unimplemented;
2053 		}
2054 
2055 		struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent);
2056 		ret = PTR_ERR_OR_ZERO(new_dirent);
2057 		if (ret)
2058 			goto err;
2059 
2060 		new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol);
2061 	}
2062 
2063 	struct bkey_s_c_subvolume s =
2064 		bch2_bkey_get_iter_typed(trans, &subvol_iter,
2065 					 BTREE_ID_subvolumes, POS(0, target_subvol),
2066 					 0, subvolume);
2067 	ret = bkey_err(s.s_c);
2068 	if (ret && !bch2_err_matches(ret, ENOENT))
2069 		return ret;
2070 
2071 	if (ret) {
2072 		if (fsck_err(trans, dirent_to_missing_subvol,
2073 			     "dirent points to missing subvolume\n%s",
2074 			     (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)))
2075 			return bch2_fsck_remove_dirent(trans, d.k->p);
2076 		ret = 0;
2077 		goto out;
2078 	}
2079 
2080 	if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol,
2081 			trans, subvol_fs_path_parent_wrong,
2082 			"subvol with wrong fs_path_parent, should be be %u\n%s",
2083 			parent_subvol,
2084 			(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
2085 		struct bkey_i_subvolume *n =
2086 			bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume);
2087 		ret = PTR_ERR_OR_ZERO(n);
2088 		if (ret)
2089 			goto err;
2090 
2091 		n->v.fs_path_parent = cpu_to_le32(parent_subvol);
2092 	}
2093 
2094 	u64 target_inum = le64_to_cpu(s.v->inode);
2095 	u32 target_snapshot = le32_to_cpu(s.v->snapshot);
2096 
2097 	ret = lookup_inode(trans, target_inum, target_snapshot, &subvol_root);
2098 	if (ret && !bch2_err_matches(ret, ENOENT))
2099 		goto err;
2100 
2101 	if (ret) {
2102 		bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum);
2103 		ret = -BCH_ERR_fsck_repair_unimplemented;
2104 		goto err;
2105 	}
2106 
2107 	if (fsck_err_on(!ret && parent_subvol != subvol_root.bi_parent_subvol,
2108 			trans, inode_bi_parent_wrong,
2109 			"subvol root %llu has wrong bi_parent_subvol: got %u, should be %u",
2110 			target_inum,
2111 			subvol_root.bi_parent_subvol, parent_subvol)) {
2112 		subvol_root.bi_parent_subvol = parent_subvol;
2113 		subvol_root.bi_snapshot = le32_to_cpu(s.v->snapshot);
2114 		ret = __bch2_fsck_write_inode(trans, &subvol_root);
2115 		if (ret)
2116 			goto err;
2117 	}
2118 
2119 	ret = bch2_check_dirent_target(trans, iter, d, &subvol_root, true);
2120 	if (ret)
2121 		goto err;
2122 out:
2123 err:
2124 fsck_err:
2125 	bch2_trans_iter_exit(trans, &subvol_iter);
2126 	printbuf_exit(&buf);
2127 	return ret;
2128 }
2129 
/*
 * Check a single key in the dirents btree: verify its snapshot, that it's
 * in the correct hash slot, and that whatever it points to (inode or
 * subvolume) actually exists and points back at it.
 *
 * Returns 0 on success (including after a successful repair), or a
 * negative error code.
 */
static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
			struct bkey_s_c k,
			struct bch_hash_info *hash_info,
			struct inode_walker *dir,
			struct inode_walker *target,
			struct snapshots_seen *s)
{
	struct bch_fs *c = trans->c;
	struct inode_walker_entry *i;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	ret = bch2_check_key_has_snapshot(trans, iter, k);
	if (ret) {
		/* positive return: key was deleted, not an error */
		ret = ret < 0 ? ret : 0;
		goto out;
	}

	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
	if (ret)
		goto err;

	if (k.k->type == KEY_TYPE_whiteout)
		goto out;

	/* crossed into a new directory: verify the previous dir's dirent count */
	if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) {
		ret = check_subdir_dirents_count(trans, dir);
		if (ret)
			goto err;
	}

	i = walk_inode(trans, dir, k);
	ret = PTR_ERR_OR_ZERO(i);
	if (ret < 0)
		goto err;

	ret = check_key_has_inode(trans, iter, dir, i, k);
	if (ret)
		goto err;

	/* no directory inode visible in this snapshot: nothing more to check */
	if (!i)
		goto out;

	/* hash seed/type come from the directory inode */
	if (dir->first_this_inode)
		*hash_info = bch2_hash_info_init(c, &i->inode);
	dir->first_this_inode = false;

	ret = bch2_str_hash_check_key(trans, s, &bch2_dirent_hash_desc, hash_info, iter, k);
	if (ret < 0)
		goto err;
	if (ret) {
		/* dirent has been deleted */
		ret = 0;
		goto out;
	}

	if (k.k->type != KEY_TYPE_dirent)
		goto out;

	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);

	if (d.v->d_type == DT_SUBVOL) {
		ret = check_dirent_to_subvol(trans, iter, d);
		if (ret)
			goto err;
	} else {
		ret = get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
		if (ret)
			goto err;

		if (fsck_err_on(!target->inodes.nr,
				trans, dirent_to_missing_inode,
				"dirent points to missing inode:\n%s",
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k),
				 buf.buf))) {
			ret = bch2_fsck_remove_dirent(trans, d.k->p);
			if (ret)
				goto err;
		}

		/* verify the backpointer of every visible version of the target */
		darray_for_each(target->inodes, i) {
			ret = bch2_check_dirent_target(trans, iter, d, &i->inode, true);
			if (ret)
				goto err;
		}

		/* delete dirents pointing to inodes overwritten in a snapshot */
		darray_for_each(target->deletes, i)
			if (fsck_err_on(!snapshot_list_has_id(&s->ids, *i),
					trans, dirent_to_overwritten_inode,
					"dirent points to inode overwritten in snapshot %u:\n%s",
					*i,
					(printbuf_reset(&buf),
					 bch2_bkey_val_to_text(&buf, c, k),
					 buf.buf))) {
				struct btree_iter delete_iter;
				bch2_trans_iter_init(trans, &delete_iter,
						     BTREE_ID_dirents,
						     SPOS(k.k->p.inode, k.k->p.offset, *i),
						     BTREE_ITER_intent);
				ret =   bch2_btree_iter_traverse(trans, &delete_iter) ?:
					bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
							  hash_info,
							  &delete_iter,
							  BTREE_UPDATE_internal_snapshot_node);
				bch2_trans_iter_exit(trans, &delete_iter);
				if (ret)
					goto err;

			}
	}

	ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto err;

	/* accounting: subdirectory count and i_size of the parent directory */
	for_each_visible_inode(c, s, dir, d.k->p.snapshot, i) {
		if (d.v->d_type == DT_DIR)
			i->count++;
		i->i_size += bkey_bytes(d.k);
	}
out:
err:
fsck_err:
	printbuf_exit(&buf);
	bch_err_fn(c, ret);
	return ret;
}
2258 
/*
 * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
 * validate d_type, and fix up backpointers/counts as needed.
 *
 * Returns 0 on success or a negative error code.
 */
int bch2_check_dirents(struct bch_fs *c)
{
	struct inode_walker dir = inode_walker_init();
	struct inode_walker target = inode_walker_init();
	struct snapshots_seen s;
	struct bch_hash_info hash_info;

	snapshots_seen_init(&s);

	/* the trailing call checks the dirent count of the final directory */
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_dirents,
				POS(BCACHEFS_ROOT_INO, 0),
				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
			check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)) ?:
		check_subdir_count_notnested(trans, &dir));

	snapshots_seen_exit(&s);
	inode_walker_exit(&dir);
	inode_walker_exit(&target);
	bch_err_fn(c, ret);
	return ret;
}
2285 
/*
 * Check a single xattr key: verify its snapshot is valid, that the owning
 * inode exists, and that the key is in the correct hash table slot.
 */
static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
		       struct bkey_s_c k,
		       struct bch_hash_info *hash_info,
		       struct inode_walker *inode)
{
	struct bch_fs *c = trans->c;
	struct inode_walker_entry *i;
	int ret;

	/* positive return: key was deleted, nothing further to check */
	ret = bch2_check_key_has_snapshot(trans, iter, k);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	i = walk_inode(trans, inode, k);
	ret = PTR_ERR_OR_ZERO(i);
	if (ret)
		return ret;

	ret = check_key_has_inode(trans, iter, inode, i, k);
	if (ret)
		return ret;

	/* no inode visible in this snapshot */
	if (!i)
		return 0;

	/* hash seed/type come from the owning inode */
	if (inode->first_this_inode)
		*hash_info = bch2_hash_info_init(c, &i->inode);
	inode->first_this_inode = false;

	ret = bch2_str_hash_check_key(trans, NULL, &bch2_xattr_hash_desc, hash_info, iter, k);
	bch_err_fn(c, ret);
	return ret;
}
2321 
/*
 * Walk xattrs: verify that they all have a corresponding inode and are in
 * the correct hash slot.
 */
int bch2_check_xattrs(struct bch_fs *c)
{
	struct inode_walker inode = inode_walker_init();
	struct bch_hash_info hash_info;
	int ret = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
			POS(BCACHEFS_ROOT_INO, 0),
			BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
			k,
			NULL, NULL,
			BCH_TRANS_COMMIT_no_enospc,
		check_xattr(trans, &iter, k, &hash_info, &inode)));

	inode_walker_exit(&inode);
	bch_err_fn(c, ret);
	return ret;
}
2344 
/*
 * Check that the root subvolume and root directory inode exist and are
 * sane; recreate them with defaults if missing, since the filesystem is
 * unusable without them.
 */
static int check_root_trans(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked root_inode;
	u32 snapshot;
	u64 inum;
	int ret;

	ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (mustfix_fsck_err_on(ret, trans, root_subvol_missing,
				"root subvol missing")) {
		struct bkey_i_subvolume *root_subvol =
			bch2_trans_kmalloc(trans, sizeof(*root_subvol));
		ret = PTR_ERR_OR_ZERO(root_subvol);
		if (ret)
			goto err;

		/* defaults for a recreated root subvolume */
		snapshot	= U32_MAX;
		inum		= BCACHEFS_ROOT_INO;

		bkey_subvolume_init(&root_subvol->k_i);
		root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL;
		root_subvol->v.flags	= 0;
		root_subvol->v.snapshot	= cpu_to_le32(snapshot);
		root_subvol->v.inode	= cpu_to_le64(inum);
		ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0);
		bch_err_msg(c, ret, "writing root subvol");
		if (ret)
			goto err;
	}

	ret = lookup_inode(trans, BCACHEFS_ROOT_INO, snapshot, &root_inode);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (mustfix_fsck_err_on(ret,
				trans, root_dir_missing,
				"root directory missing") ||
	    mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode),
				trans, root_inode_not_dir,
				"root inode not a directory")) {
		/* recreate the root directory as an empty 0755 directory */
		bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
				0, NULL);
		root_inode.bi_inum = inum;
		root_inode.bi_snapshot = snapshot;

		ret = __bch2_fsck_write_inode(trans, &root_inode);
		bch_err_msg(c, ret, "writing root inode");
	}
err:
fsck_err:
	return ret;
}
2401 
/*
 * Get root directory, create if it doesn't exist: runs check_root_trans()
 * in a single committing transaction.  Returns 0 or a negative error code.
 */
int bch2_check_root(struct bch_fs *c)
{
	int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		check_root_trans(trans));
	bch_err_fn(c, ret);
	return ret;
}
2410 
2411 typedef DARRAY(u32) darray_u32;
2412 
2413 static bool darray_u32_has(darray_u32 *d, u32 v)
2414 {
2415 	darray_for_each(*d, i)
2416 		if (*i == v)
2417 			return true;
2418 	return false;
2419 }
2420 
2421 static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k)
2422 {
2423 	struct bch_fs *c = trans->c;
2424 	struct btree_iter parent_iter = {};
2425 	darray_u32 subvol_path = {};
2426 	struct printbuf buf = PRINTBUF;
2427 	int ret = 0;
2428 
2429 	if (k.k->type != KEY_TYPE_subvolume)
2430 		return 0;
2431 
2432 	while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) {
2433 		ret = darray_push(&subvol_path, k.k->p.offset);
2434 		if (ret)
2435 			goto err;
2436 
2437 		struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
2438 
2439 		struct bch_inode_unpacked subvol_root;
2440 		ret = bch2_inode_find_by_inum_trans(trans,
2441 					(subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
2442 					&subvol_root);
2443 		if (ret)
2444 			break;
2445 
2446 		u32 parent = le32_to_cpu(s.v->fs_path_parent);
2447 
2448 		if (darray_u32_has(&subvol_path, parent)) {
2449 			if (fsck_err(c, subvol_loop, "subvolume loop"))
2450 				ret = reattach_subvol(trans, s);
2451 			break;
2452 		}
2453 
2454 		bch2_trans_iter_exit(trans, &parent_iter);
2455 		bch2_trans_iter_init(trans, &parent_iter,
2456 				     BTREE_ID_subvolumes, POS(0, parent), 0);
2457 		k = bch2_btree_iter_peek_slot(trans, &parent_iter);
2458 		ret = bkey_err(k);
2459 		if (ret)
2460 			goto err;
2461 
2462 		if (fsck_err_on(k.k->type != KEY_TYPE_subvolume,
2463 				trans, subvol_unreachable,
2464 				"unreachable subvolume %s",
2465 				(bch2_bkey_val_to_text(&buf, c, s.s_c),
2466 				 buf.buf))) {
2467 			ret = reattach_subvol(trans, s);
2468 			break;
2469 		}
2470 	}
2471 fsck_err:
2472 err:
2473 	printbuf_exit(&buf);
2474 	darray_exit(&subvol_path);
2475 	bch2_trans_iter_exit(trans, &parent_iter);
2476 	return ret;
2477 }
2478 
/* Walk all subvolumes, checking each one's path to the root subvolume. */
int bch2_check_subvolume_structure(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
				BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_subvol_path(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}
2489 
/* One ancestor in a directory path walk: inode number + snapshot ID */
struct pathbuf_entry {
	u64	inum;
	u32	snapshot;
};

typedef DARRAY(struct pathbuf_entry) pathbuf;
2496 
/*
 * Set bi_depth of the inode at (p->inum, p->snapshot) to @new_depth,
 * writing and committing only if it actually changes.
 */
static int bch2_bi_depth_renumber_one(struct btree_trans *trans, struct pathbuf_entry *p,
				      u32 new_depth)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
					       SPOS(0, p->inum, p->snapshot), 0);

	struct bch_inode_unpacked inode;
	int ret = bkey_err(k) ?:
		!bkey_is_inode(k.k) ? -BCH_ERR_ENOENT_inode
		: bch2_inode_unpack(k, &inode);
	if (ret)
		goto err;

	if (inode.bi_depth != new_depth) {
		inode.bi_depth = new_depth;
		ret = __bch2_fsck_write_inode(trans, &inode) ?:
			bch2_trans_commit(trans, NULL, NULL, 0);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
2520 
/*
 * Renumber bi_depth along @path: entries were pushed walking child to
 * root, so iterate in reverse, assigning @new_bi_depth at the top and
 * incrementing it for each level back down.
 */
static int bch2_bi_depth_renumber(struct btree_trans *trans, pathbuf *path, u32 new_bi_depth)
{
	u32 restart_count = trans->restart_count;
	int ret = 0;

	darray_for_each_reverse(*path, i) {
		ret = nested_lockrestart_do(trans,
				bch2_bi_depth_renumber_one(trans, i, new_bi_depth));
		bch_err_fn(trans->c, ret);
		if (ret)
			break;

		new_bi_depth++;
	}

	return ret ?: trans_was_restarted(trans, restart_count);
}
2538 
2539 static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
2540 {
2541 	darray_for_each(*p, i)
2542 		if (i->inum	== inum &&
2543 		    i->snapshot	== snapshot)
2544 			return true;
2545 	return false;
2546 }
2547 
2548 static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k)
2549 {
2550 	struct bch_fs *c = trans->c;
2551 	struct btree_iter inode_iter = {};
2552 	pathbuf path = {};
2553 	struct printbuf buf = PRINTBUF;
2554 	u32 snapshot = inode_k.k->p.snapshot;
2555 	bool redo_bi_depth = false;
2556 	u32 min_bi_depth = U32_MAX;
2557 	int ret = 0;
2558 
2559 	struct bch_inode_unpacked inode;
2560 	ret = bch2_inode_unpack(inode_k, &inode);
2561 	if (ret)
2562 		return ret;
2563 
2564 	while (!inode.bi_subvol) {
2565 		struct btree_iter dirent_iter;
2566 		struct bkey_s_c_dirent d;
2567 		u32 parent_snapshot = snapshot;
2568 
2569 		d = inode_get_dirent(trans, &dirent_iter, &inode, &parent_snapshot);
2570 		ret = bkey_err(d.s_c);
2571 		if (ret && !bch2_err_matches(ret, ENOENT))
2572 			goto out;
2573 
2574 		if (!ret && (ret = dirent_points_to_inode(c, d, &inode)))
2575 			bch2_trans_iter_exit(trans, &dirent_iter);
2576 
2577 		if (bch2_err_matches(ret, ENOENT)) {
2578 			printbuf_reset(&buf);
2579 			bch2_bkey_val_to_text(&buf, c, inode_k);
2580 			bch_err(c, "unreachable inode in check_directory_structure: %s\n%s",
2581 				bch2_err_str(ret), buf.buf);
2582 			goto out;
2583 		}
2584 
2585 		bch2_trans_iter_exit(trans, &dirent_iter);
2586 
2587 		ret = darray_push(&path, ((struct pathbuf_entry) {
2588 			.inum		= inode.bi_inum,
2589 			.snapshot	= snapshot,
2590 		}));
2591 		if (ret)
2592 			return ret;
2593 
2594 		snapshot = parent_snapshot;
2595 
2596 		bch2_trans_iter_exit(trans, &inode_iter);
2597 		inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes,
2598 					     SPOS(0, inode.bi_dir, snapshot), 0);
2599 
2600 		struct bch_inode_unpacked parent_inode;
2601 		ret = bkey_err(inode_k) ?:
2602 			!bkey_is_inode(inode_k.k) ? -BCH_ERR_ENOENT_inode
2603 			: bch2_inode_unpack(inode_k, &parent_inode);
2604 		if (ret) {
2605 			/* Should have been caught in dirents pass */
2606 			bch_err_msg(c, ret, "error looking up parent directory");
2607 			goto out;
2608 		}
2609 
2610 		min_bi_depth = parent_inode.bi_depth;
2611 
2612 		if (parent_inode.bi_depth < inode.bi_depth &&
2613 		    min_bi_depth < U16_MAX)
2614 			break;
2615 
2616 		inode = parent_inode;
2617 		snapshot = inode_k.k->p.snapshot;
2618 		redo_bi_depth = true;
2619 
2620 		if (path_is_dup(&path, inode.bi_inum, snapshot)) {
2621 			/* XXX print path */
2622 			bch_err(c, "directory structure loop");
2623 
2624 			darray_for_each(path, i)
2625 				pr_err("%llu:%u", i->inum, i->snapshot);
2626 			pr_err("%llu:%u", inode.bi_inum, snapshot);
2627 
2628 			if (fsck_err(trans, dir_loop, "directory structure loop")) {
2629 				ret = remove_backpointer(trans, &inode);
2630 				bch_err_msg(c, ret, "removing dirent");
2631 				if (ret)
2632 					break;
2633 
2634 				ret = reattach_inode(trans, &inode);
2635 				bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
2636 			}
2637 
2638 			goto out;
2639 		}
2640 	}
2641 
2642 	if (inode.bi_subvol)
2643 		min_bi_depth = 0;
2644 
2645 	if (redo_bi_depth)
2646 		ret = bch2_bi_depth_renumber(trans, &path, min_bi_depth);
2647 out:
2648 fsck_err:
2649 	bch2_trans_iter_exit(trans, &inode_iter);
2650 	darray_exit(&path);
2651 	printbuf_exit(&buf);
2652 	bch_err_fn(c, ret);
2653 	return ret;
2654 }
2655 
/*
 * Check for loops in the directory structure: all other connectivity issues
 * have been fixed by prior passes
 */
int bch2_check_directory_structure(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
					  BTREE_ITER_intent|
					  BTREE_ITER_prefetch|
					  BTREE_ITER_all_snapshots, k,
					  NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
			/* only directories can participate in a loop */
			if (!S_ISDIR(bkey_inode_mode(k)))
				continue;

			/* skip unlinked inodes */
			if (bch2_inode_flags(k) & BCH_INODE_unlinked)
				continue;

			check_path_loop(trans, k);
		})));

	bch_err_fn(c, ret);
	return ret;
}
2680 
/*
 * Table of (inode, snapshot) -> link count for one batch of inode
 * numbers; kept sorted by inode number so inc_link() can bsearch it.
 */
struct nlink_table {
	size_t		nr;	/* entries in use */
	size_t		size;	/* allocated capacity */

	struct nlink {
		u64	inum;
		u32	snapshot;
		u32	count;	/* links counted from dirents */
	}		*d;
};
2691 
/*
 * Append (inum, snapshot) to the table, doubling its allocation as
 * needed.  The link count starts at zero; it's filled in later by the
 * dirent walk.  Returns -BCH_ERR_ENOMEM_fsck_add_nlink on allocation
 * failure, which the caller uses to split the work into batches.
 */
static int add_nlink(struct bch_fs *c, struct nlink_table *t,
		     u64 inum, u32 snapshot)
{
	if (t->nr == t->size) {
		size_t new_size = max_t(size_t, 128UL, t->size * 2);
		void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);

		if (!d) {
			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
				new_size);
			return -BCH_ERR_ENOMEM_fsck_add_nlink;
		}

		if (t->d)
			memcpy(d, t->d, t->size * sizeof(t->d[0]));
		kvfree(t->d);

		t->d = d;
		t->size = new_size;
	}


	t->d[t->nr++] = (struct nlink) {
		.inum		= inum,
		.snapshot	= snapshot,
	};

	return 0;
}
2721 
2722 static int nlink_cmp(const void *_l, const void *_r)
2723 {
2724 	const struct nlink *l = _l;
2725 	const struct nlink *r = _r;
2726 
2727 	return cmp_int(l->inum, r->inum);
2728 }
2729 
/*
 * For a dirent pointing at @inum: if @inum falls in the current table
 * range, bump the link count of the table entry whose snapshot can see
 * this dirent.
 */
static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
		     struct nlink_table *links,
		     u64 range_start, u64 range_end, u64 inum, u32 snapshot)
{
	struct nlink *link, key = {
		.inum = inum, .snapshot = U32_MAX,
	};

	if (inum < range_start || inum >= range_end)
		return;

	link = __inline_bsearch(&key, links->d, links->nr,
				sizeof(links->d[0]), nlink_cmp);
	if (!link)
		return;

	/* bsearch may land on any entry with this inum; rewind to the first */
	while (link > links->d && link[0].inum == link[-1].inum)
		--link;

	for (; link < links->d + links->nr && link->inum == inum; link++)
		if (ref_visible(c, s, snapshot, link->snapshot)) {
			link->count++;
			if (link->snapshot >= snapshot)
				break;
		}
}
2756 
/*
 * First pass of bch2_check_nlinks(): starting at inode @start, record
 * every inode that could be hardlinked (non-directory, nonzero bi_nlink).
 * If the table can't be grown, *end is set to where we stopped so the
 * caller can process the remainder in another batch.
 */
noinline_for_stack
static int check_nlinks_find_hardlinks(struct bch_fs *c,
				       struct nlink_table *t,
				       u64 start, u64 *end)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_inodes,
				   POS(0, start),
				   BTREE_ITER_intent|
				   BTREE_ITER_prefetch|
				   BTREE_ITER_all_snapshots, k, ({
			if (!bkey_is_inode(k.k))
				continue;

			/* Should never fail, checked by bch2_inode_invalid: */
			struct bch_inode_unpacked u;
			_ret3 = bch2_inode_unpack(k, &u);
			if (_ret3)
				break;

			/*
			 * Backpointer and directory structure checks are sufficient for
			 * directories, since they can't have hardlinks:
			 */
			if (S_ISDIR(u.bi_mode))
				continue;

			/*
			 * Previous passes ensured that bi_nlink is nonzero if
			 * it had multiple hardlinks:
			 */
			if (!u.bi_nlink)
				continue;

			/* out of memory: end this batch here, continue later */
			ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
			if (ret) {
				*end = k.k->p.offset;
				ret = 0;
				break;
			}
			0;
		})));

	bch_err_fn(c, ret);
	return ret;
}
2803 
/*
 * Second pass of bch2_check_nlinks(): walk all dirents and, for each one
 * that isn't a directory or subvolume (those can't be hardlinked), bump
 * the link count of the target inode if it's in the current batch range.
 */
noinline_for_stack
static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
				     u64 range_start, u64 range_end)
{
	struct snapshots_seen s;

	snapshots_seen_init(&s);

	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
				   BTREE_ITER_intent|
				   BTREE_ITER_prefetch|
				   BTREE_ITER_all_snapshots, k, ({
			ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
			if (ret)
				break;

			if (k.k->type == KEY_TYPE_dirent) {
				struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);

				if (d.v->d_type != DT_DIR &&
				    d.v->d_type != DT_SUBVOL)
					inc_link(c, &s, links, range_start, range_end,
						 le64_to_cpu(d.v->d_inum), d.k->p.snapshot);
			}
			0;
		})));

	snapshots_seen_exit(&s);

	bch_err_fn(c, ret);
	return ret;
}
2837 
/*
 * For each inode key, advance *idx to the matching table entry and fix
 * the inode's link count if it disagrees with the count from dirents.
 * Returns 1 once iteration passes @range_end (ends this batch).
 */
static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
				     struct bkey_s_c k,
				     struct nlink_table *links,
				     size_t *idx, u64 range_end)
{
	struct bch_inode_unpacked u;
	struct nlink *link = &links->d[*idx];
	int ret = 0;

	if (k.k->p.offset >= range_end)
		return 1;

	if (!bkey_is_inode(k.k))
		return 0;

	ret = bch2_inode_unpack(k, &u);
	if (ret)
		return ret;

	/* directories can't be hardlinked; they weren't recorded in pass one */
	if (S_ISDIR(u.bi_mode))
		return 0;

	if (!u.bi_nlink)
		return 0;

	/*
	 * Both the inodes btree and the table are sorted by (inum, snapshot);
	 * advance past table entries that sort before this key.
	 */
	while ((cmp_int(link->inum, k.k->p.offset) ?:
		cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
		BUG_ON(*idx == links->nr);
		link = &links->d[++*idx];
	}

	if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count,
			trans, inode_wrong_nlink,
			"inode %llu type %s has wrong i_nlink (%u, should be %u)",
			u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
			bch2_inode_nlink_get(&u), link->count)) {
		bch2_inode_nlink_set(&u, link->count);
		ret = __bch2_fsck_write_inode(trans, &u);
	}
fsck_err:
	return ret;
}
2880 
/*
 * Third pass of bch2_check_nlinks(): walk inodes in the batch range and
 * rewrite any whose stored link count disagrees with the table.
 */
noinline_for_stack
static int check_nlinks_update_hardlinks(struct bch_fs *c,
			       struct nlink_table *links,
			       u64 range_start, u64 range_end)
{
	size_t idx = 0;

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
				POS(0, range_start),
				BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
	if (ret < 0) {
		bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret));
		return ret;
	}

	/* a positive return (end of batch range) is not an error */
	return 0;
}
2901 
2902 int bch2_check_nlinks(struct bch_fs *c)
2903 {
2904 	struct nlink_table links = { 0 };
2905 	u64 this_iter_range_start, next_iter_range_start = 0;
2906 	int ret = 0;
2907 
2908 	do {
2909 		this_iter_range_start = next_iter_range_start;
2910 		next_iter_range_start = U64_MAX;
2911 
2912 		ret = check_nlinks_find_hardlinks(c, &links,
2913 						  this_iter_range_start,
2914 						  &next_iter_range_start);
2915 
2916 		ret = check_nlinks_walk_dirents(c, &links,
2917 					  this_iter_range_start,
2918 					  next_iter_range_start);
2919 		if (ret)
2920 			break;
2921 
2922 		ret = check_nlinks_update_hardlinks(c, &links,
2923 					 this_iter_range_start,
2924 					 next_iter_range_start);
2925 		if (ret)
2926 			break;
2927 
2928 		links.nr = 0;
2929 	} while (next_iter_range_start != U64_MAX);
2930 
2931 	kvfree(links.d);
2932 	bch_err_fn(c, ret);
2933 	return ret;
2934 }
2935 
2936 static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
2937 			     struct bkey_s_c k)
2938 {
2939 	struct bkey_s_c_reflink_p p;
2940 	struct bkey_i_reflink_p *u;
2941 
2942 	if (k.k->type != KEY_TYPE_reflink_p)
2943 		return 0;
2944 
2945 	p = bkey_s_c_to_reflink_p(k);
2946 
2947 	if (!p.v->front_pad && !p.v->back_pad)
2948 		return 0;
2949 
2950 	u = bch2_trans_kmalloc(trans, sizeof(*u));
2951 	int ret = PTR_ERR_OR_ZERO(u);
2952 	if (ret)
2953 		return ret;
2954 
2955 	bkey_reassemble(&u->k_i, k);
2956 	u->v.front_pad	= 0;
2957 	u->v.back_pad	= 0;
2958 
2959 	return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun);
2960 }
2961 
/*
 * Clear stale padding in all reflink pointers; only needed on filesystems
 * from before the reflink_p_fix metadata version.
 */
int bch2_fix_reflink_p(struct bch_fs *c)
{
	if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
		return 0;

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
				BTREE_ID_extents, POS_MIN,
				BTREE_ITER_intent|BTREE_ITER_prefetch|
				BTREE_ITER_all_snapshots, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			fix_reflink_p_key(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}
2977 
2978 #ifndef NO_BCACHEFS_CHARDEV
2979 
/* State for an offline/online fsck run driven from the chardev ioctls */
struct fsck_thread {
	struct thread_with_stdio thr;
	struct bch_fs		*c;
	struct bch_opts		opts;
};
2985 
/* Free the fsck thread state once the thread_with_stdio is done with it */
static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr)
{
	struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr);
	kfree(thr);
}
2991 
/*
 * Offline fsck worker: start the (already-opened) filesystem, then report
 * results via fsck(8)-style exit bits (1 = errors fixed, 4 = errors
 * remain), matching the messages printed below.
 */
static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	/* bch2_fs_open() may have stored an ERR_PTR in thr->c */
	int ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		return ret;

	ret = bch2_fs_start(thr->c);
	if (ret)
		goto err;

	if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
		ret |= 1;
	}
	if (test_bit(BCH_FS_error, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
		ret |= 4;
	}
err:
	bch2_fs_stop(c);
	return ret;
}
3017 
/* thread_with_stdio callbacks for offline fsck */
static const struct thread_with_stdio_ops bch2_offline_fsck_ops = {
	.exit		= bch2_fsck_thread_exit,
	.fn		= bch2_fsck_offline_thread_fn,
};
3022 
/*
 * BCH_IOCTL_FSCK_OFFLINE: open the given devices read-only and run fsck
 * in a kernel thread, with its stdio redirected to the calling process.
 */
long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
{
	struct bch_ioctl_fsck_offline arg;
	struct fsck_thread *thr = NULL;
	darray_str(devs) = {};
	long ret = 0;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy the device path strings in from userspace */
	for (size_t i = 0; i < arg.nr_devs; i++) {
		u64 dev_u64;
		ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));
		if (ret)
			goto err;

		char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
		ret = PTR_ERR_OR_ZERO(dev_str);
		if (ret)
			goto err;

		ret = darray_push(&devs, dev_str);
		if (ret) {
			kfree(dev_str);
			goto err;
		}
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);
	if (!thr) {
		ret = -ENOMEM;
		goto err;
	}

	thr->opts = bch2_opts_empty();

	/* optional mount-options string from userspace */
	if (arg.opts) {
		char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
		ret =   PTR_ERR_OR_ZERO(optstr) ?:
			bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr, false);
		if (!IS_ERR(optstr))
			kfree(optstr);

		if (ret)
			goto err;
	}

	opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);
	opt_set(thr->opts, read_only, 1);
	opt_set(thr->opts, ratelimit_errors, 0);

	/* We need request_key() to be called before we punt to kthread: */
	opt_set(thr->opts, nostart, true);

	bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);

	thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);

	/* don't panic out of fsck, even if the fs is configured to */
	if (!IS_ERR(thr->c) &&
	    thr->c->opts.errors == BCH_ON_ERROR_panic)
		thr->c->opts.errors = BCH_ON_ERROR_ro;

	/* open errors (ERR_PTR in thr->c) are reported by the thread fn */
	ret = __bch2_run_thread_with_stdio(&thr->thr);
out:
	darray_for_each(devs, i)
		kfree(*i);
	darray_exit(&devs);
	return ret;
err:
	if (thr)
		bch2_fsck_thread_exit(&thr->thr);
	pr_err("ret %s", bch2_err_str(ret));
	goto out;
}
3103 
/*
 * Online fsck worker: temporarily point the filesystem's stdio at this
 * thread, run the online recovery passes starting at check_alloc_info,
 * then restore state and release the refs taken by the ioctl.
 */
static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	c->stdio_filter = current;
	c->stdio = &thr->thr.stdio;

	/*
	 * XXX: can we figure out a way to do this without mucking with c->opts?
	 */
	unsigned old_fix_errors = c->opts.fix_errors;
	if (opt_defined(thr->opts, fix_errors))
		c->opts.fix_errors = thr->opts.fix_errors;
	else
		c->opts.fix_errors = FSCK_FIX_ask;

	c->opts.fsck = true;
	set_bit(BCH_FS_fsck_running, &c->flags);

	c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
	int ret = bch2_run_online_recovery_passes(c);

	clear_bit(BCH_FS_fsck_running, &c->flags);
	bch_err_fn(c, ret);

	c->stdio = NULL;
	c->stdio_filter = NULL;
	c->opts.fix_errors = old_fix_errors;

	/* release the lock and ref taken by bch2_ioctl_fsck_online() */
	up(&c->online_fsck_mutex);
	bch2_ro_ref_put(c);
	return ret;
}
3138 
/* thread_with_stdio callbacks for online fsck */
static const struct thread_with_stdio_ops bch2_online_fsck_ops = {
	.exit		= bch2_fsck_thread_exit,
	.fn		= bch2_fsck_online_thread_fn,
};
3143 
/*
 * BCH_IOCTL_FSCK_ONLINE: run fsck on a mounted filesystem, in a kernel
 * thread with stdio redirected to the caller.  Only one online fsck may
 * run at a time (serialized by online_fsck_mutex).
 */
long bch2_ioctl_fsck_online(struct bch_fs *c, struct bch_ioctl_fsck_online arg)
{
	struct fsck_thread *thr = NULL;
	long ret = 0;

	if (arg.flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* hold a ro ref so the fs can't go away under the fsck thread */
	if (!bch2_ro_ref_tryget(c))
		return -EROFS;

	if (down_trylock(&c->online_fsck_mutex)) {
		bch2_ro_ref_put(c);
		return -EAGAIN;
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);
	if (!thr) {
		ret = -ENOMEM;
		goto err;
	}

	thr->c = c;
	thr->opts = bch2_opts_empty();

	/* optional mount-options string from userspace */
	if (arg.opts) {
		char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);

		ret =   PTR_ERR_OR_ZERO(optstr) ?:
			bch2_parse_mount_opts(c, &thr->opts, NULL, optstr, false);
		if (!IS_ERR(optstr))
			kfree(optstr);

		if (ret)
			goto err;
	}

	ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops);
err:
	/* on success, the thread fn releases the mutex and the ro ref */
	if (ret < 0) {
		bch_err_fn(c, ret);
		if (thr)
			bch2_fsck_thread_exit(&thr->thr);
		up(&c->online_fsck_mutex);
		bch2_ro_ref_put(c);
	}
	return ret;
}
3195 
3196 #endif /* NO_BCACHEFS_CHARDEV */
3197