Lines Matching +full:fixed +full:- +full:links (bcachefs fsck source)
1 // SPDX-License-Identifier: GPL-2.0
13 #include "fs-common.h"
29 return inode->bi_dir == d.k->p.inode && in inode_points_to_dirent()
30 inode->bi_dir_offset == d.k->p.offset; in inode_points_to_dirent()
36 if (d.v->d_type == DT_SUBVOL in dirent_points_to_inode_nowarn()
37 ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol in dirent_points_to_inode_nowarn()
38 : le64_to_cpu(d.v->d_inum) == inode->bi_inum) in dirent_points_to_inode_nowarn()
40 return -BCH_ERR_ENOENT_dirent_doesnt_match_inode; in dirent_points_to_inode_nowarn()
71 * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
83 sectors += k.k->size; in bch2_count_inode_sectors()
99 if (k.k->type == KEY_TYPE_dirent && in bch2_count_subdirs()
100 bkey_s_c_to_dirent(k).v->d_type == DT_DIR) in bch2_count_subdirs()
128 if (k.k->p.offset != inode_nr) in lookup_first_inode()
135 ret = -BCH_ERR_ENOENT_inode; in lookup_first_inode()
137 bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr); in lookup_first_inode()
157 : -BCH_ERR_ENOENT_inode; in lookup_inode()
176 *target = le64_to_cpu(d.v->d_inum); in lookup_dirent_in_snapshot()
177 *type = d.v->d_type; in lookup_dirent_in_snapshot()
184 struct bch_fs *c = trans->c; in __remove_dirent()
210 * We can't rely on master_subvol - it might have been deleted.
220 if (k.k->type != KEY_TYPE_snapshot) in find_snapshot_tree_subvol()
224 if (le32_to_cpu(s.v->tree) != tree_id) in find_snapshot_tree_subvol()
227 if (s.v->subvol) { in find_snapshot_tree_subvol()
228 *subvol = le32_to_cpu(s.v->subvol); in find_snapshot_tree_subvol()
232 ret = -BCH_ERR_ENOENT_no_snapshot_tree_subvol; in find_snapshot_tree_subvol()
243 struct bch_fs *c = trans->c; in lookup_lostfound()
279 subvol->v.inode = cpu_to_le64(reattaching_inum); in lookup_lostfound()
309 return -BCH_ERR_ENOENT_not_directory; in lookup_lostfound()
345 lostfound->bi_dir = root_inode.bi_inum; in lookup_lostfound()
346 lostfound->bi_snapshot = le32_to_cpu(st.root_snapshot); in lookup_lostfound()
361 mode_to_type(lostfound->bi_mode), in lookup_lostfound()
363 lostfound->bi_inum, in lookup_lostfound()
364 &lostfound->bi_dir_offset, in lookup_lostfound()
376 if (inode->bi_inum == BCACHEFS_ROOT_INO && in inode_should_reattach()
377 inode->bi_subvol == BCACHEFS_ROOT_SUBVOL) in inode_should_reattach()
380 return !inode->bi_dir && !(inode->bi_flags & BCH_INODE_unlinked); in inode_should_reattach()
394 if (bpos_eq(k.k->p, d_pos)) { in maybe_delete_dirent()
404 bkey_init(&k->k); in maybe_delete_dirent()
405 k->k.type = KEY_TYPE_whiteout; in maybe_delete_dirent()
406 k->k.p = iter.pos; in maybe_delete_dirent()
416 struct bch_fs *c = trans->c; in reattach_inode()
421 u32 dirent_snapshot = inode->bi_snapshot; in reattach_inode()
422 if (inode->bi_subvol) { in reattach_inode()
423 inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL; in reattach_inode()
426 ret = subvol_lookup(trans, inode->bi_parent_subvol, in reattach_inode()
431 snprintf(name_buf, sizeof(name_buf), "subvol-%u", inode->bi_subvol); in reattach_inode()
433 snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum); in reattach_inode()
436 ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum); in reattach_inode()
440 lostfound.bi_nlink += S_ISDIR(inode->bi_mode); in reattach_inode()
443 if (!inode->bi_subvol) { in reattach_inode()
444 BUG_ON(!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, lostfound.bi_snapshot)); in reattach_inode()
445 lostfound.bi_snapshot = inode->bi_snapshot; in reattach_inode()
455 inode->bi_dir = lostfound.bi_inum; in reattach_inode()
458 inode->bi_parent_subvol, lostfound.bi_inum, in reattach_inode()
463 inode->bi_subvol ?: inode->bi_inum, in reattach_inode()
464 &inode->bi_dir_offset, in reattach_inode()
480 if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) { in reattach_inode()
488 BTREE_ID_inodes, SPOS(0, inode->bi_inum, inode->bi_snapshot - 1), in reattach_inode()
490 if (k.k->p.offset != inode->bi_inum) in reattach_inode()
494 !bch2_snapshot_is_ancestor(c, k.k->p.snapshot, inode->bi_snapshot) || in reattach_inode()
495 snapshot_list_has_ancestor(c, &whiteouts_done, k.k->p.snapshot)) in reattach_inode()
505 SPOS(lostfound.bi_inum, inode->bi_dir_offset, in reattach_inode()
507 k.k->p.snapshot); in reattach_inode()
511 ret = snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot); in reattach_inode()
515 iter.snapshot = k.k->p.snapshot; in reattach_inode()
516 child_inode.bi_dir = inode->bi_dir; in reattach_inode()
517 child_inode.bi_dir_offset = inode->bi_dir_offset; in reattach_inode()
542 if (!inode->bi_dir) in remove_backpointer()
545 struct bch_fs *c = trans->c; in remove_backpointer()
548 SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot)); in remove_backpointer()
551 __remove_dirent(trans, d.k->p); in remove_backpointer()
558 struct bch_fs *c = trans->c; in reattach_subvol()
562 (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) }, in reattach_subvol()
580 struct bch_fs *c = trans->c; in reconstruct_subvol()
584 return -BCH_ERR_fsck_repair_unimplemented; in reconstruct_subvol()
589 * not check_inodes - the root of this subvolume doesn't exist or we in reconstruct_subvol()
619 bkey_subvolume_init(&new_subvol->k_i); in reconstruct_subvol()
620 new_subvol->k.p.offset = subvolid; in reconstruct_subvol()
621 new_subvol->v.snapshot = cpu_to_le32(snapshotid); in reconstruct_subvol()
622 new_subvol->v.inode = cpu_to_le64(inum); in reconstruct_subvol()
623 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0); in reconstruct_subvol()
636 u32 snapshot_tree = le32_to_cpu(s->v.tree); in reconstruct_subvol()
638 s->v.subvol = cpu_to_le32(subvolid); in reconstruct_subvol()
639 SET_BCH_SNAPSHOT_SUBVOL(&s->v, true); in reconstruct_subvol()
650 if (!st->v.master_subvol) in reconstruct_subvol()
651 st->v.master_subvol = cpu_to_le32(subvolid); in reconstruct_subvol()
659 struct bch_fs *c = trans->c; in reconstruct_inode()
674 i_size = k.k->p.offset << 9; in reconstruct_inode()
703 darray_exit(&s->ids); in snapshots_seen_exit()
714 __darray_for_each(s->ids, i) { in snapshots_seen_add_inorder()
721 int ret = darray_insert_item(&s->ids, i - s->ids.data, id); in snapshots_seen_add_inorder()
724 s->ids.size); in snapshots_seen_add_inorder()
731 if (!bkey_eq(s->pos, pos)) in snapshots_seen_update()
732 s->ids.nr = 0; in snapshots_seen_update()
733 s->pos = pos; in snapshots_seen_update()
735 return snapshot_list_add_nodup(c, &s->ids, pos.snapshot); in snapshots_seen_update()
739 * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor,
757 EBUG_ON(ancestor != seen->pos.snapshot); in key_visible_in_snapshot()
758 EBUG_ON(ancestor != darray_last(seen->ids)); in key_visible_in_snapshot()
768 * we've seen a key that overwrote @ancestor - i.e. also a descendent of in key_visible_in_snapshot()
776 for (i = seen->ids.nr - 2; in key_visible_in_snapshot()
777 i >= 0 && seen->ids.data[i] >= id; in key_visible_in_snapshot()
778 --i) in key_visible_in_snapshot()
779 if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i])) in key_visible_in_snapshot()
786 * ref_visible - given a key with snapshot id @src that points to a key with
818 for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr && \
819 (_i)->snapshot <= (_snapshot); _i++) \
820 if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
841 darray_exit(&w->inodes); in inode_walker_exit()
842 darray_exit(&w->deletes); in inode_walker_exit()
856 darray_push(&w->inodes, ((struct inode_walker_entry) { in add_inode()
858 .snapshot = inode.k->p.snapshot, in add_inode()
865 struct bch_fs *c = trans->c; in get_inodes_all_snapshots()
871 * We no longer have inodes for w->last_pos; clear this to avoid in get_inodes_all_snapshots()
875 w->have_inodes = false; in get_inodes_all_snapshots()
876 w->recalculate_sums = false; in get_inodes_all_snapshots()
877 w->inodes.nr = 0; in get_inodes_all_snapshots()
881 if (k.k->p.offset != inum) in get_inodes_all_snapshots()
892 w->first_this_inode = true; in get_inodes_all_snapshots()
893 w->have_inodes = true; in get_inodes_all_snapshots()
900 bool is_whiteout = k.k->type == KEY_TYPE_whiteout; in lookup_inode_for_snapshot()
903 __darray_for_each(w->inodes, i) in lookup_inode_for_snapshot()
904 if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->snapshot)) in lookup_inode_for_snapshot()
909 BUG_ON(k.k->p.snapshot > i->snapshot); in lookup_inode_for_snapshot()
911 if (k.k->p.snapshot != i->snapshot && !is_whiteout) { in lookup_inode_for_snapshot()
914 new.snapshot = k.k->p.snapshot; in lookup_inode_for_snapshot()
924 w->last_pos.inode, k.k->p.snapshot, i->snapshot, buf.buf); in lookup_inode_for_snapshot()
927 while (i > w->inodes.data && i[-1].snapshot > k.k->p.snapshot) in lookup_inode_for_snapshot()
928 --i; in lookup_inode_for_snapshot()
930 size_t pos = i - w->inodes.data; in lookup_inode_for_snapshot()
931 int ret = darray_insert_item(&w->inodes, pos, new); in lookup_inode_for_snapshot()
935 i = w->inodes.data + pos; in lookup_inode_for_snapshot()
945 if (w->last_pos.inode != k.k->p.inode) { in walk_inode()
946 int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode); in walk_inode()
951 w->last_pos = k.k->p; in walk_inode()
953 return lookup_inode_for_snapshot(trans->c, w, k); in walk_inode()
961 struct bch_fs *c = trans->c; in get_visible_inodes()
966 w->inodes.nr = 0; in get_visible_inodes()
967 w->deletes.nr = 0; in get_visible_inodes()
969 for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, s->pos.snapshot), in get_visible_inodes()
971 if (k.k->p.offset != inum) in get_visible_inodes()
974 if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot)) in get_visible_inodes()
977 if (snapshot_list_has_ancestor(c, &w->deletes, k.k->p.snapshot)) in get_visible_inodes()
982 : snapshot_list_add(c, &w->deletes, k.k->p.snapshot); in get_visible_inodes()
994 * return value: 0 -> delete k1, 1 -> delete k2
1002 if (new->k.type != KEY_TYPE_dirent) in bch2_fsck_update_backpointers()
1009 if (d->v.d_type == DT_SUBVOL) { in bch2_fsck_update_backpointers()
1012 ret = get_visible_inodes(trans, &target, s, le64_to_cpu(d->v.d_inum)); in bch2_fsck_update_backpointers()
1017 i->inode.bi_dir_offset = d->k.p.offset; in bch2_fsck_update_backpointers()
1018 ret = __bch2_fsck_write_inode(trans, &i->inode); in bch2_fsck_update_backpointers()
1033 if (inode->bi_subvol) { in inode_get_dirent()
1035 int ret = subvol_lookup(trans, inode->bi_parent_subvol, snapshot, &inum); in inode_get_dirent()
1040 return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot)); in inode_get_dirent()
1047 int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set; in check_inode_deleted_list()
1056 struct bch_fs *c = trans->c; in check_inode_dirent_inode()
1059 u32 inode_snapshot = inode->bi_snapshot; in check_inode_dirent_inode()
1082 inode->bi_dir = 0; in check_inode_dirent_inode()
1083 inode->bi_dir_offset = 0; in check_inode_dirent_inode()
1106 if (k.k->p.offset != inum) in get_snapshot_root_inode()
1127 struct bch_fs *c = trans->c; in check_inode()
1139 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); in check_inode()
1150 if (snapshot_root->bi_inum != u.bi_inum) { in check_inode()
1156 if (fsck_err_on(u.bi_hash_seed != snapshot_root->bi_hash_seed || in check_inode()
1160 u.bi_hash_seed = snapshot_root->bi_hash_seed; in check_inode()
1184 ret = bch2_empty_dir_snapshot(trans, k.k->p.offset, 0, k.k->p.snapshot); in check_inode()
1185 if (ret && ret != -BCH_ERR_ENOTEMPTY_dir_not_empty) in check_inode()
1198 ret = bch2_inode_has_child_snapshots(trans, k.k->p); in check_inode()
1219 if (!test_bit(BCH_FS_started, &c->flags)) { in check_inode()
1224 * They might be referred to by a logged operation - in check_inode()
1226 * truncate on an unlinked but open file - so we want to in check_inode()
1230 ret = check_inode_deleted_list(trans, k.k->p); in check_inode()
1237 u.bi_inum, k.k->p.snapshot); in check_inode()
1239 ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, k.k->p, 1); in check_inode()
1243 ret = bch2_inode_or_descendents_is_open(trans, k.k->p); in check_inode()
1251 ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot); in check_inode()
1264 u.bi_inum, k.k->p.snapshot, u.bi_subvol, u.bi_parent_subvol)) { in check_inode()
1276 if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) { in check_inode()
1277 ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum); in check_inode()
1284 u.bi_inum, k.k->p.snapshot, u.bi_subvol) || in check_inode()
1287 k.k->p.snapshot), in check_inode()
1290 u.bi_inum, k.k->p.snapshot, u.bi_subvol, in check_inode()
1299 if (fsck_err_on(u.bi_journal_seq > journal_cur_seq(&c->journal), in check_inode()
1302 journal_cur_seq(&c->journal), in check_inode()
1306 u.bi_journal_seq = journal_cur_seq(&c->journal); in check_inode()
1346 struct bch_fs *c = trans->c; in find_oldest_inode_needs_reattach()
1358 SPOS(0, inode->bi_inum, inode->bi_snapshot + 1), in find_oldest_inode_needs_reattach()
1360 if (k.k->p.offset != inode->bi_inum) in find_oldest_inode_needs_reattach()
1363 if (!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, k.k->p.snapshot)) in find_oldest_inode_needs_reattach()
1421 * that points to it has its backpointer field set - so we're just looking for
1422 * non-unlinked inodes without backpointers:
1458 struct bch_fs *c = trans->c; in check_key_has_inode()
1464 if (k.k->type == KEY_TYPE_whiteout) in check_key_has_inode()
1467 if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) { in check_key_has_inode()
1468 ret = reconstruct_inode(trans, iter->btree_id, k.k->p.snapshot, k.k->p.inode) ?: in check_key_has_inode()
1473 inode->last_pos.inode--; in check_key_has_inode()
1474 ret = -BCH_ERR_transaction_restart_nested; in check_key_has_inode()
1485 if (fsck_err_on(i && !btree_matches_i_mode(iter->btree_id, i->inode.bi_mode), in check_key_has_inode()
1488 i->inode.bi_mode, in check_key_has_inode()
1505 struct bch_fs *c = trans->c; in check_i_sectors_notnested()
1509 darray_for_each(w->inodes, i) { in check_i_sectors_notnested()
1510 if (i->inode.bi_sectors == i->count) in check_i_sectors_notnested()
1513 count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot); in check_i_sectors_notnested()
1515 if (w->recalculate_sums) in check_i_sectors_notnested()
1516 i->count = count2; in check_i_sectors_notnested()
1518 if (i->count != count2) { in check_i_sectors_notnested()
1520 w->last_pos.inode, i->snapshot, i->count, count2); in check_i_sectors_notnested()
1521 i->count = count2; in check_i_sectors_notnested()
1524 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty), in check_i_sectors_notnested()
1527 w->last_pos.inode, i->snapshot, in check_i_sectors_notnested()
1528 i->inode.bi_sectors, i->count)) { in check_i_sectors_notnested()
1529 i->inode.bi_sectors = i->count; in check_i_sectors_notnested()
1530 ret = bch2_fsck_write_inode(trans, &i->inode); in check_i_sectors_notnested()
1542 u32 restart_count = trans->restart_count; in check_i_sectors()
1560 darray_for_each(extent_ends->e, i) in extent_ends_reset()
1561 snapshots_seen_exit(&i->seen); in extent_ends_reset()
1562 extent_ends->e.nr = 0; in extent_ends_reset()
1568 darray_exit(&extent_ends->e); in extent_ends_exit()
1582 .offset = k.k->p.offset, in extent_ends_at()
1583 .snapshot = k.k->p.snapshot, in extent_ends_at()
1587 n.seen.ids.data = kmemdup(seen->ids.data, in extent_ends_at()
1588 sizeof(seen->ids.data[0]) * seen->ids.size, in extent_ends_at()
1591 return -BCH_ERR_ENOMEM_fsck_extent_ends_at; in extent_ends_at()
1593 __darray_for_each(extent_ends->e, i) { in extent_ends_at()
1594 if (i->snapshot == k.k->p.snapshot) { in extent_ends_at()
1595 snapshots_seen_exit(&i->seen); in extent_ends_at()
1600 if (i->snapshot >= k.k->p.snapshot) in extent_ends_at()
1604 return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n); in extent_ends_at()
1611 bool *fixed, in overlapping_extents_found() argument
1614 struct bch_fs *c = trans->c; in overlapping_extents_found()
1633 if (!bpos_eq(pos1, k1.k->p)) { in overlapping_extents_found()
1641 ret = -BCH_ERR_internal_fsck_err; in overlapping_extents_found()
1655 if (bpos_ge(k2.k->p, pos2.p)) in overlapping_extents_found()
1662 if (bpos_gt(k2.k->p, pos2.p) || in overlapping_extents_found()
1663 pos2.size != k2.k->size) { in overlapping_extents_found()
1666 ret = -BCH_ERR_internal_fsck_err; in overlapping_extents_found()
1683 trans->extra_disk_res += bch2_bkey_sectors_compressed(k2); in overlapping_extents_found()
1694 *fixed = true; in overlapping_extents_found()
1701 extent_end->offset = bkey_start_offset(&pos2); in overlapping_extents_found()
1709 * We overwrote the second extent - restart in overlapping_extents_found()
1712 ret = -BCH_ERR_transaction_restart_nested; in overlapping_extents_found()
1728 bool *fixed) in check_overlapping_extents() argument
1730 struct bch_fs *c = trans->c; in check_overlapping_extents()
1734 if (bpos_eq(extent_ends->last_pos, k.k->p)) in check_overlapping_extents()
1737 if (extent_ends->last_pos.inode != k.k->p.inode) in check_overlapping_extents()
1740 darray_for_each(extent_ends->e, i) { in check_overlapping_extents()
1741 if (i->offset <= bkey_start_offset(k.k)) in check_overlapping_extents()
1745 k.k->p.snapshot, seen, in check_overlapping_extents()
1746 i->snapshot, &i->seen)) in check_overlapping_extents()
1749 ret = overlapping_extents_found(trans, iter->btree_id, in check_overlapping_extents()
1750 SPOS(iter->pos.inode, in check_overlapping_extents()
1751 i->offset, in check_overlapping_extents()
1752 i->snapshot), in check_overlapping_extents()
1753 &i->seen, in check_overlapping_extents()
1754 *k.k, fixed, i); in check_overlapping_extents()
1759 extent_ends->last_pos = k.k->p; in check_overlapping_extents()
1767 struct bch_fs *c = trans->c; in check_extent_overbig()
1771 unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9; in check_extent_overbig()
1793 struct bch_fs *c = trans->c; in check_extent()
1803 if (inode->last_pos.inode != k.k->p.inode && inode->have_inodes) { in check_extent()
1809 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); in check_extent()
1822 if (k.k->type != KEY_TYPE_whiteout) { in check_extent()
1824 &inode->recalculate_sums); in check_extent()
1833 for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes); in check_extent()
1834 inode->inodes.data && i >= inode->inodes.data; in check_extent()
1835 --i) { in check_extent()
1836 if (i->snapshot > k.k->p.snapshot || in check_extent()
1837 !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot)) in check_extent()
1840 if (fsck_err_on(k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 && in check_extent()
1844 i->inode.bi_inum, i->snapshot, i->inode.bi_size, in check_extent()
1849 bch2_btree_iter_set_snapshot(&iter2, i->snapshot); in check_extent()
1857 iter->k.type = KEY_TYPE_whiteout; in check_extent()
1868 for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes); in check_extent()
1869 inode->inodes.data && i >= inode->inodes.data; in check_extent()
1870 --i) { in check_extent()
1871 if (i->snapshot > k.k->p.snapshot || in check_extent()
1872 !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot)) in check_extent()
1875 i->count += k.k->size; in check_extent()
1879 if (k.k->type != KEY_TYPE_whiteout) { in check_extent()
1946 struct bch_fs *c = trans->c; in check_subdir_count_notnested()
1950 darray_for_each(w->inodes, i) { in check_subdir_count_notnested()
1951 if (i->inode.bi_nlink == i->count) in check_subdir_count_notnested()
1954 count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot); in check_subdir_count_notnested()
1958 if (i->count != count2) { in check_subdir_count_notnested()
1960 w->last_pos.inode, i->snapshot, i->count, count2); in check_subdir_count_notnested()
1961 i->count = count2; in check_subdir_count_notnested()
1962 if (i->inode.bi_nlink == i->count) in check_subdir_count_notnested()
1966 if (fsck_err_on(i->inode.bi_nlink != i->count, in check_subdir_count_notnested()
1969 w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) { in check_subdir_count_notnested()
1970 i->inode.bi_nlink = i->count; in check_subdir_count_notnested()
1971 ret = bch2_fsck_write_inode(trans, &i->inode); in check_subdir_count_notnested()
1983 u32 restart_count = trans->restart_count; in check_subdir_dirents_count()
1994 struct bch_fs *c = trans->c; in check_dirent_inode_dirent()
2002 if (!target->bi_dir && in check_dirent_inode_dirent()
2003 !target->bi_dir_offset) { in check_dirent_inode_dirent()
2004 fsck_err_on(S_ISDIR(target->bi_mode), in check_dirent_inode_dirent()
2013 fsck_err_on(target->bi_flags & BCH_INODE_unlinked, in check_dirent_inode_dirent()
2022 target->bi_flags &= ~BCH_INODE_unlinked; in check_dirent_inode_dirent()
2023 target->bi_dir = d.k->p.inode; in check_dirent_inode_dirent()
2024 target->bi_dir_offset = d.k->p.offset; in check_dirent_inode_dirent()
2038 SPOS(target->bi_dir, target->bi_dir_offset, target->bi_snapshot)); in check_dirent_inode_dirent()
2051 target->bi_inum, target->bi_snapshot, in check_dirent_inode_dirent()
2052 target->bi_dir, in check_dirent_inode_dirent()
2053 target->bi_dir_offset, in check_dirent_inode_dirent()
2054 d.k->p.inode, in check_dirent_inode_dirent()
2055 d.k->p.offset)) { in check_dirent_inode_dirent()
2056 target->bi_dir = d.k->p.inode; in check_dirent_inode_dirent()
2057 target->bi_dir_offset = d.k->p.offset; in check_dirent_inode_dirent()
2068 (S_ISDIR(target->bi_mode) || in check_dirent_inode_dirent()
2069 target->bi_subvol), in check_dirent_inode_dirent()
2071 "%s %llu:%u with multiple links\n%s", in check_dirent_inode_dirent()
2072 S_ISDIR(target->bi_mode) ? "directory" : "subvolume", in check_dirent_inode_dirent()
2073 target->bi_inum, target->bi_snapshot, buf.buf)) { in check_dirent_inode_dirent()
2074 ret = __remove_dirent(trans, d.k->p); in check_dirent_inode_dirent()
2083 if (fsck_err_on(backpointer_exists && !target->bi_nlink, in check_dirent_inode_dirent()
2085 "inode %llu:%u type %s has multiple links but i_nlink 0\n%s", in check_dirent_inode_dirent()
2086 target->bi_inum, target->bi_snapshot, bch2_d_types[d.v->d_type], buf.buf)) { in check_dirent_inode_dirent()
2087 target->bi_nlink++; in check_dirent_inode_dirent()
2088 target->bi_flags &= ~BCH_INODE_unlinked; in check_dirent_inode_dirent()
2108 struct bch_fs *c = trans->c; in check_dirent_target()
2117 if (fsck_err_on(d.v->d_type != inode_d_type(target), in check_dirent_target()
2120 bch2_d_type_str(d.v->d_type), in check_dirent_target()
2129 bkey_reassemble(&n->k_i, d.s_c); in check_dirent_target()
2130 n->v.d_type = inode_d_type(target); in check_dirent_target()
2131 if (n->v.d_type == DT_SUBVOL) { in check_dirent_target()
2132 n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol); in check_dirent_target()
2133 n->v.d_child_subvol = cpu_to_le32(target->bi_subvol); in check_dirent_target()
2135 n->v.d_inum = cpu_to_le64(target->bi_inum); in check_dirent_target()
2138 ret = bch2_trans_update(trans, iter, &n->k_i, 0); in check_dirent_target()
2159 if (k.k->type != KEY_TYPE_subvolume) in find_snapshot_subvol()
2163 if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) { in find_snapshot_subvol()
2165 *subvolid = k.k->p.offset; in find_snapshot_subvol()
2170 ret = -ENOENT; in find_snapshot_subvol()
2180 struct bch_fs *c = trans->c; in check_dirent_to_subvol()
2183 u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol); in check_dirent_to_subvol()
2184 u32 target_subvol = le32_to_cpu(d.v->d_child_subvol); in check_dirent_to_subvol()
2196 (!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot))) { in check_dirent_to_subvol()
2197 int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol); in check_dirent_to_subvol()
2204 (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) { in check_dirent_to_subvol()
2206 * Couldn't find a subvol for dirent's snapshot - but we lost in check_dirent_to_subvol()
2209 ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0); in check_dirent_to_subvol()
2213 parent_snapshot = d.k->p.snapshot; in check_dirent_to_subvol()
2220 fsck_err_on(!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot), in check_dirent_to_subvol()
2226 bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot); in check_dirent_to_subvol()
2227 return -BCH_ERR_fsck_repair_unimplemented; in check_dirent_to_subvol()
2235 new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol); in check_dirent_to_subvol()
2250 return __remove_dirent(trans, d.k->p); in check_dirent_to_subvol()
2255 if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol, in check_dirent_to_subvol()
2266 n->v.fs_path_parent = cpu_to_le32(parent_subvol); in check_dirent_to_subvol()
2269 u64 target_inum = le64_to_cpu(s.v->inode); in check_dirent_to_subvol()
2270 u32 target_snapshot = le32_to_cpu(s.v->snapshot); in check_dirent_to_subvol()
2278 ret = -BCH_ERR_fsck_repair_unimplemented; in check_dirent_to_subvol()
2288 subvol_root.bi_snapshot = le32_to_cpu(s.v->snapshot); in check_dirent_to_subvol()
2312 struct bch_fs *c = trans->c; in check_dirent()
2323 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); in check_dirent()
2327 if (k.k->type == KEY_TYPE_whiteout) in check_dirent()
2330 if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) { in check_dirent()
2348 if (dir->first_this_inode) in check_dirent()
2349 *hash_info = bch2_hash_info_init(c, &i->inode); in check_dirent()
2350 dir->first_this_inode = false; in check_dirent()
2361 if (k.k->type != KEY_TYPE_dirent) in check_dirent()
2366 if (d.v->d_type == DT_SUBVOL) { in check_dirent()
2371 ret = get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum)); in check_dirent()
2375 if (fsck_err_on(!target->inodes.nr, in check_dirent()
2381 ret = __remove_dirent(trans, d.k->p); in check_dirent()
2386 darray_for_each(target->inodes, i) { in check_dirent()
2387 ret = check_dirent_target(trans, iter, d, &i->inode); in check_dirent()
2392 darray_for_each(target->deletes, i) in check_dirent()
2393 if (fsck_err_on(!snapshot_list_has_id(&s->ids, *i), in check_dirent()
2403 SPOS(k.k->p.inode, k.k->p.offset, *i), in check_dirent()
2421 for_each_visible_inode(c, s, dir, d.k->p.snapshot, i) { in check_dirent()
2422 if (d.v->d_type == DT_DIR) in check_dirent()
2423 i->count++; in check_dirent()
2424 i->i_size += bkey_bytes(d.k); in check_dirent()
2466 struct bch_fs *c = trans->c; in check_xattr()
2488 if (inode->first_this_inode) in check_xattr()
2489 *hash_info = bch2_hash_info_init(c, &i->inode); in check_xattr()
2490 inode->first_this_inode = false; in check_xattr()
2522 struct bch_fs *c = trans->c; in check_root_trans()
2543 bkey_subvolume_init(&root_subvol->k_i); in check_root_trans()
2544 root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL; in check_root_trans()
2545 root_subvol->v.flags = 0; in check_root_trans()
2546 root_subvol->v.snapshot = cpu_to_le32(snapshot); in check_root_trans()
2547 root_subvol->v.inode = cpu_to_le64(inum); in check_root_trans()
2548 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0); in check_root_trans()
2598 struct bch_fs *c = trans->c; in check_subvol_path()
2604 if (k.k->type != KEY_TYPE_subvolume) in check_subvol_path()
2607 while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) { in check_subvol_path()
2608 ret = darray_push(&subvol_path, k.k->p.offset); in check_subvol_path()
2616 (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) }, in check_subvol_path()
2621 u32 parent = le32_to_cpu(s.v->fs_path_parent); in check_subvol_path()
2637 if (fsck_err_on(k.k->type != KEY_TYPE_subvolume, in check_subvol_path()
2677 SPOS(0, p->inum, p->snapshot), 0); in bch2_bi_depth_renumber_one()
2681 !bkey_is_inode(k.k) ? -BCH_ERR_ENOENT_inode in bch2_bi_depth_renumber_one()
2698 u32 restart_count = trans->restart_count; in bch2_bi_depth_renumber()
2704 bch_err_fn(trans->c, ret); in bch2_bi_depth_renumber()
2717 if (i->inum == inum && in path_is_dup()
2718 i->snapshot == snapshot) in path_is_dup()
2725 struct bch_fs *c = trans->c; in check_path_loop()
2729 u32 snapshot = inode_k.k->p.snapshot; in check_path_loop()
2777 !bkey_is_inode(inode_k.k) ? -BCH_ERR_ENOENT_inode in check_path_loop()
2792 snapshot = inode_k.k->p.snapshot; in check_path_loop()
2800 pr_err("%llu:%u", i->inum, i->snapshot); in check_path_loop()
2833 * have been fixed by prior passes
2870 if (t->nr == t->size) { in add_nlink()
2871 size_t new_size = max_t(size_t, 128UL, t->size * 2); in add_nlink()
2872 void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL); in add_nlink()
2877 return -BCH_ERR_ENOMEM_fsck_add_nlink; in add_nlink()
2880 if (t->d) in add_nlink()
2881 memcpy(d, t->d, t->size * sizeof(t->d[0])); in add_nlink()
2882 kvfree(t->d); in add_nlink()
2884 t->d = d; in add_nlink()
2885 t->size = new_size; in add_nlink()
2889 t->d[t->nr++] = (struct nlink) { in add_nlink()
2902 return cmp_int(l->inum, r->inum); in nlink_cmp()
2906 struct nlink_table *links, in inc_link() argument
2916 link = __inline_bsearch(&key, links->d, links->nr, in inc_link()
2917 sizeof(links->d[0]), nlink_cmp); in inc_link()
2921 while (link > links->d && link[0].inum == link[-1].inum) in inc_link()
2922 --link; in inc_link()
2924 for (; link < links->d + links->nr && link->inum == inum; link++) in inc_link()
2925 if (ref_visible(c, s, snapshot, link->snapshot)) { in inc_link()
2926 link->count++; in inc_link()
2927 if (link->snapshot >= snapshot) in inc_link()
2966 ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot); in check_nlinks_find_hardlinks()
2968 *end = k.k->p.offset; in check_nlinks_find_hardlinks()
2980 static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links, in check_nlinks_walk_dirents() argument
2992 ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p); in check_nlinks_walk_dirents()
2996 if (k.k->type == KEY_TYPE_dirent) { in check_nlinks_walk_dirents()
2999 if (d.v->d_type != DT_DIR && in check_nlinks_walk_dirents()
3000 d.v->d_type != DT_SUBVOL) in check_nlinks_walk_dirents()
3001 inc_link(c, &s, links, range_start, range_end, in check_nlinks_walk_dirents()
3002 le64_to_cpu(d.v->d_inum), d.k->p.snapshot); in check_nlinks_walk_dirents()
3015 struct nlink_table *links, in check_nlinks_update_inode() argument
3019 struct nlink *link = &links->d[*idx]; in check_nlinks_update_inode()
3022 if (k.k->p.offset >= range_end) in check_nlinks_update_inode()
3038 while ((cmp_int(link->inum, k.k->p.offset) ?: in check_nlinks_update_inode()
3039 cmp_int(link->snapshot, k.k->p.snapshot)) < 0) { in check_nlinks_update_inode()
3040 BUG_ON(*idx == links->nr); in check_nlinks_update_inode()
3041 link = &links->d[++*idx]; in check_nlinks_update_inode()
3044 if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, in check_nlinks_update_inode()
3048 bch2_inode_nlink_get(&u), link->count)) { in check_nlinks_update_inode()
3049 bch2_inode_nlink_set(&u, link->count); in check_nlinks_update_inode()
3058 struct nlink_table *links, in check_nlinks_update_hardlinks() argument
3068 check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end))); in check_nlinks_update_hardlinks()
3079 struct nlink_table links = { 0 }; in bch2_check_nlinks() local
3087 ret = check_nlinks_find_hardlinks(c, &links, in bch2_check_nlinks()
3091 ret = check_nlinks_walk_dirents(c, &links, in bch2_check_nlinks()
3097 ret = check_nlinks_update_hardlinks(c, &links, in bch2_check_nlinks()
3103 links.nr = 0; in bch2_check_nlinks()
3106 kvfree(links.d); in bch2_check_nlinks()
3117 if (k.k->type != KEY_TYPE_reflink_p) in fix_reflink_p_key()
3122 if (!p.v->front_pad && !p.v->back_pad) in fix_reflink_p_key()
3130 bkey_reassemble(&u->k_i, k); in fix_reflink_p_key()
3131 u->v.front_pad = 0; in fix_reflink_p_key()
3132 u->v.back_pad = 0; in fix_reflink_p_key()
3134 return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun); in fix_reflink_p_key()
3139 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) in bch2_fix_reflink_p()
3170 struct bch_fs *c = thr->c; in bch2_fsck_offline_thread_fn()
3176 ret = bch2_fs_start(thr->c); in bch2_fsck_offline_thread_fn()
3180 if (test_bit(BCH_FS_errors_fixed, &c->flags)) { in bch2_fsck_offline_thread_fn()
3181 bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name); in bch2_fsck_offline_thread_fn()
3184 if (test_bit(BCH_FS_error, &c->flags)) { in bch2_fsck_offline_thread_fn()
3185 bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name); in bch2_fsck_offline_thread_fn()
3206 return -EFAULT; in bch2_ioctl_fsck_offline()
3209 return -EINVAL; in bch2_ioctl_fsck_offline()
3212 return -EPERM; in bch2_ioctl_fsck_offline()
3216 ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64)); in bch2_ioctl_fsck_offline()
3234 ret = -ENOMEM; in bch2_ioctl_fsck_offline()
3238 thr->opts = bch2_opts_empty(); in bch2_ioctl_fsck_offline()
3243 bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr); in bch2_ioctl_fsck_offline()
3251 opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio); in bch2_ioctl_fsck_offline()
3252 opt_set(thr->opts, read_only, 1); in bch2_ioctl_fsck_offline()
3253 opt_set(thr->opts, ratelimit_errors, 0); in bch2_ioctl_fsck_offline()
3256 opt_set(thr->opts, nostart, true); in bch2_ioctl_fsck_offline()
3258 bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); in bch2_ioctl_fsck_offline()
3260 thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts); in bch2_ioctl_fsck_offline()
3262 if (!IS_ERR(thr->c) && in bch2_ioctl_fsck_offline()
3263 thr->c->opts.errors == BCH_ON_ERROR_panic) in bch2_ioctl_fsck_offline()
3264 thr->c->opts.errors = BCH_ON_ERROR_ro; in bch2_ioctl_fsck_offline()
3266 ret = __bch2_run_thread_with_stdio(&thr->thr); in bch2_ioctl_fsck_offline()
3274 bch2_fsck_thread_exit(&thr->thr); in bch2_ioctl_fsck_offline()
3282 struct bch_fs *c = thr->c; in bch2_fsck_online_thread_fn()
3284 c->stdio_filter = current; in bch2_fsck_online_thread_fn()
3285 c->stdio = &thr->thr.stdio; in bch2_fsck_online_thread_fn()
3288 * XXX: can we figure out a way to do this without mucking with c->opts? in bch2_fsck_online_thread_fn()
3290 unsigned old_fix_errors = c->opts.fix_errors; in bch2_fsck_online_thread_fn()
3291 if (opt_defined(thr->opts, fix_errors)) in bch2_fsck_online_thread_fn()
3292 c->opts.fix_errors = thr->opts.fix_errors; in bch2_fsck_online_thread_fn()
3294 c->opts.fix_errors = FSCK_FIX_ask; in bch2_fsck_online_thread_fn()
3296 c->opts.fsck = true; in bch2_fsck_online_thread_fn()
3297 set_bit(BCH_FS_fsck_running, &c->flags); in bch2_fsck_online_thread_fn()
3299 c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; in bch2_fsck_online_thread_fn()
3302 clear_bit(BCH_FS_fsck_running, &c->flags); in bch2_fsck_online_thread_fn()
3305 c->stdio = NULL; in bch2_fsck_online_thread_fn()
3306 c->stdio_filter = NULL; in bch2_fsck_online_thread_fn()
3307 c->opts.fix_errors = old_fix_errors; in bch2_fsck_online_thread_fn()
3309 up(&c->online_fsck_mutex); in bch2_fsck_online_thread_fn()
3325 return -EINVAL; in bch2_ioctl_fsck_online()
3328 return -EPERM; in bch2_ioctl_fsck_online()
3331 return -EROFS; in bch2_ioctl_fsck_online()
3333 if (down_trylock(&c->online_fsck_mutex)) { in bch2_ioctl_fsck_online()
3335 return -EAGAIN; in bch2_ioctl_fsck_online()
3340 ret = -ENOMEM; in bch2_ioctl_fsck_online()
3344 thr->c = c; in bch2_ioctl_fsck_online()
3345 thr->opts = bch2_opts_empty(); in bch2_ioctl_fsck_online()
3351 bch2_parse_mount_opts(c, &thr->opts, NULL, optstr); in bch2_ioctl_fsck_online()
3359 ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops); in bch2_ioctl_fsck_online()
3364 bch2_fsck_thread_exit(&thr->thr); in bch2_ioctl_fsck_online()
3365 up(&c->online_fsck_mutex); in bch2_ioctl_fsck_online()