// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bcachefs_ioctl.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "buckets.h"
#include "darray.h"
#include "dirent.h"
#include "error.h"
#include "fs.h"
#include "fsck.h"
#include "inode.h"
#include "keylist.h"
#include "namei.h"
#include "recovery_passes.h"
#include "snapshot.h"
#include "super.h"
#include "thread_with_file.h"
#include "xattr.h"

#include <linux/bsearch.h>
#include <linux/dcache.h> /* struct qstr */

/*
 * Does dirent @d point back at @inode?
 *
 * For DT_SUBVOL dirents we compare the child subvolume ID against
 * inode->bi_subvol; for everything else we compare the target inode number.
 *
 * Returns 0 on match, -BCH_ERR_ENOENT_dirent_doesnt_match_inode otherwise.
 * No warning is logged (hence _nowarn).
 */
static int dirent_points_to_inode_nowarn(struct bkey_s_c_dirent d,
					 struct bch_inode_unpacked *inode)
{
	if (d.v->d_type == DT_SUBVOL
	    ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
	    : le64_to_cpu(d.v->d_inum) == inode->bi_inum)
		return 0;
	return -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
}

/* Format a "dirent does not point back at inode" message into @out. */
static void dirent_inode_mismatch_msg(struct printbuf *out,
				      struct bch_fs *c,
				      struct bkey_s_c_dirent dirent,
				      struct bch_inode_unpacked *inode)
{
	prt_str(out, "inode points to dirent that does not point back:");
	prt_newline(out);
	bch2_bkey_val_to_text(out, c, dirent.s_c);
	prt_newline(out);
	bch2_inode_unpacked_to_text(out, inode);
}

/*
 * As dirent_points_to_inode_nowarn(), but logs a warning describing the
 * mismatch when @dirent does not point back at @inode.
 */
static int dirent_points_to_inode(struct bch_fs *c,
				  struct bkey_s_c_dirent dirent,
				  struct bch_inode_unpacked *inode)
{
	int ret = dirent_points_to_inode_nowarn(dirent, inode);
	if (ret) {
		struct printbuf buf = PRINTBUF;
		dirent_inode_mismatch_msg(&buf, c, dirent, inode);
		bch_warn(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}
	return ret;
}

/*
 * Sum the allocated sectors of all extents belonging to @inum in @snapshot.
 *
 * XXX: this is handling transaction restarts without returning
 * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
 */
static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
				    u32 snapshot)
{
	u64 sectors = 0;

	int ret = for_each_btree_key_max(trans, iter,
BTREE_ID_extents, 72 SPOS(inum, 0, snapshot), 73 POS(inum, U64_MAX), 74 0, k, ({ 75 if (bkey_extent_is_allocation(k.k)) 76 sectors += k.k->size; 77 0; 78 })); 79 80 return ret ?: sectors; 81 } 82 83 static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum, 84 u32 snapshot) 85 { 86 u64 subdirs = 0; 87 88 int ret = for_each_btree_key_max(trans, iter, BTREE_ID_dirents, 89 SPOS(inum, 0, snapshot), 90 POS(inum, U64_MAX), 91 0, k, ({ 92 if (k.k->type == KEY_TYPE_dirent && 93 bkey_s_c_to_dirent(k).v->d_type == DT_DIR) 94 subdirs++; 95 0; 96 })); 97 98 return ret ?: subdirs; 99 } 100 101 static int subvol_lookup(struct btree_trans *trans, u32 subvol, 102 u32 *snapshot, u64 *inum) 103 { 104 struct bch_subvolume s; 105 int ret = bch2_subvolume_get(trans, subvol, false, &s); 106 107 *snapshot = le32_to_cpu(s.snapshot); 108 *inum = le64_to_cpu(s.inode); 109 return ret; 110 } 111 112 static int lookup_inode(struct btree_trans *trans, u64 inode_nr, u32 snapshot, 113 struct bch_inode_unpacked *inode) 114 { 115 struct btree_iter iter; 116 struct bkey_s_c k; 117 int ret; 118 119 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, 120 SPOS(0, inode_nr, snapshot), 0); 121 ret = bkey_err(k); 122 if (ret) 123 goto err; 124 125 ret = bkey_is_inode(k.k) 126 ? 
bch2_inode_unpack(k, inode) 127 : -BCH_ERR_ENOENT_inode; 128 err: 129 bch2_trans_iter_exit(trans, &iter); 130 return ret; 131 } 132 133 static int lookup_dirent_in_snapshot(struct btree_trans *trans, 134 struct bch_hash_info hash_info, 135 subvol_inum dir, struct qstr *name, 136 u64 *target, unsigned *type, u32 snapshot) 137 { 138 struct btree_iter iter; 139 struct bkey_s_c k = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc, 140 &hash_info, dir, name, 0, snapshot); 141 int ret = bkey_err(k); 142 if (ret) 143 return ret; 144 145 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); 146 *target = le64_to_cpu(d.v->d_inum); 147 *type = d.v->d_type; 148 bch2_trans_iter_exit(trans, &iter); 149 return 0; 150 } 151 152 /* 153 * Find any subvolume associated with a tree of snapshots 154 * We can't rely on master_subvol - it might have been deleted. 155 */ 156 static int find_snapshot_tree_subvol(struct btree_trans *trans, 157 u32 tree_id, u32 *subvol) 158 { 159 struct btree_iter iter; 160 struct bkey_s_c k; 161 int ret; 162 163 for_each_btree_key_norestart(trans, iter, BTREE_ID_snapshots, POS_MIN, 0, k, ret) { 164 if (k.k->type != KEY_TYPE_snapshot) 165 continue; 166 167 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k); 168 if (le32_to_cpu(s.v->tree) != tree_id) 169 continue; 170 171 if (s.v->subvol) { 172 *subvol = le32_to_cpu(s.v->subvol); 173 goto found; 174 } 175 } 176 ret = -BCH_ERR_ENOENT_no_snapshot_tree_subvol; 177 found: 178 bch2_trans_iter_exit(trans, &iter); 179 return ret; 180 } 181 182 /* Get lost+found, create if it doesn't exist: */ 183 static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, 184 struct bch_inode_unpacked *lostfound, 185 u64 reattaching_inum) 186 { 187 struct bch_fs *c = trans->c; 188 struct qstr lostfound_str = QSTR("lost+found"); 189 struct btree_iter lostfound_iter = {}; 190 u64 inum = 0; 191 unsigned d_type = 0; 192 int ret; 193 194 struct bch_snapshot_tree st; 195 ret = bch2_snapshot_tree_lookup(trans, 196 
bch2_snapshot_tree(c, snapshot), &st); 197 if (ret) 198 return ret; 199 200 u32 subvolid; 201 ret = find_snapshot_tree_subvol(trans, 202 bch2_snapshot_tree(c, snapshot), &subvolid); 203 bch_err_msg(c, ret, "finding subvol associated with snapshot tree %u", 204 bch2_snapshot_tree(c, snapshot)); 205 if (ret) 206 return ret; 207 208 struct bch_subvolume subvol; 209 ret = bch2_subvolume_get(trans, subvolid, false, &subvol); 210 bch_err_msg(c, ret, "looking up subvol %u for snapshot %u", subvolid, snapshot); 211 if (ret) 212 return ret; 213 214 if (!subvol.inode) { 215 struct btree_iter iter; 216 struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter, 217 BTREE_ID_subvolumes, POS(0, subvolid), 218 0, subvolume); 219 ret = PTR_ERR_OR_ZERO(subvol); 220 if (ret) 221 return ret; 222 223 subvol->v.inode = cpu_to_le64(reattaching_inum); 224 bch2_trans_iter_exit(trans, &iter); 225 } 226 227 subvol_inum root_inum = { 228 .subvol = subvolid, 229 .inum = le64_to_cpu(subvol.inode) 230 }; 231 232 struct bch_inode_unpacked root_inode; 233 struct bch_hash_info root_hash_info; 234 ret = lookup_inode(trans, root_inum.inum, snapshot, &root_inode); 235 bch_err_msg(c, ret, "looking up root inode %llu for subvol %u", 236 root_inum.inum, subvolid); 237 if (ret) 238 return ret; 239 240 root_hash_info = bch2_hash_info_init(c, &root_inode); 241 242 ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum, 243 &lostfound_str, &inum, &d_type, snapshot); 244 if (bch2_err_matches(ret, ENOENT)) 245 goto create_lostfound; 246 247 bch_err_fn(c, ret); 248 if (ret) 249 return ret; 250 251 if (d_type != DT_DIR) { 252 bch_err(c, "error looking up lost+found: not a directory"); 253 return -BCH_ERR_ENOENT_not_directory; 254 } 255 256 /* 257 * The bch2_check_dirents pass has already run, dangling dirents 258 * shouldn't exist here: 259 */ 260 ret = lookup_inode(trans, inum, snapshot, lostfound); 261 bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root 
%u)", 262 inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot)); 263 return ret; 264 265 create_lostfound: 266 /* 267 * we always create lost+found in the root snapshot; we don't want 268 * different branches of the snapshot tree to have different lost+found 269 */ 270 snapshot = le32_to_cpu(st.root_snapshot); 271 /* 272 * XXX: we could have a nicer log message here if we had a nice way to 273 * walk backpointers to print a path 274 */ 275 struct printbuf path = PRINTBUF; 276 ret = bch2_inum_to_path(trans, root_inum, &path); 277 if (ret) 278 goto err; 279 280 bch_notice(c, "creating %s/lost+found in subvol %llu snapshot %u", 281 path.buf, root_inum.subvol, snapshot); 282 printbuf_exit(&path); 283 284 u64 now = bch2_current_time(c); 285 u64 cpu = raw_smp_processor_id(); 286 287 bch2_inode_init_early(c, lostfound); 288 bch2_inode_init_late(lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode); 289 lostfound->bi_dir = root_inode.bi_inum; 290 lostfound->bi_snapshot = le32_to_cpu(st.root_snapshot); 291 292 root_inode.bi_nlink++; 293 294 ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu); 295 if (ret) 296 goto err; 297 298 bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot); 299 ret = bch2_btree_iter_traverse(trans, &lostfound_iter); 300 if (ret) 301 goto err; 302 303 ret = bch2_dirent_create_snapshot(trans, 304 0, root_inode.bi_inum, snapshot, &root_hash_info, 305 mode_to_type(lostfound->bi_mode), 306 &lostfound_str, 307 lostfound->bi_inum, 308 &lostfound->bi_dir_offset, 309 STR_HASH_must_create) ?: 310 bch2_inode_write_flags(trans, &lostfound_iter, lostfound, 311 BTREE_UPDATE_internal_snapshot_node); 312 err: 313 bch_err_msg(c, ret, "creating lost+found"); 314 bch2_trans_iter_exit(trans, &lostfound_iter); 315 return ret; 316 } 317 318 static inline bool inode_should_reattach(struct bch_inode_unpacked *inode) 319 { 320 if (inode->bi_inum == BCACHEFS_ROOT_INO && 321 inode->bi_subvol == BCACHEFS_ROOT_SUBVOL) 322 return false; 323 
	/* reattach if the inode has no backpointer and isn't marked unlinked */
	return !inode->bi_dir && !(inode->bi_flags & BCH_INODE_unlinked);
}

/*
 * Whiteout the dirent at @d_pos in @snapshot, but only if a dirent still
 * exists at exactly that position - it may already have been overwritten
 * in this snapshot.
 */
static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_dirents,
			SPOS(d_pos.inode, d_pos.offset, snapshot),
			BTREE_ITER_intent|
			BTREE_ITER_with_updates);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	if (bpos_eq(k.k->p, d_pos)) {
		/*
		 * delete_at() doesn't work because the update path doesn't
		 * internally use BTREE_ITER_with_updates yet
		 */
		/* NOTE: this bkey_i *k deliberately shadows the bkey_s_c k above */
		struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
		ret = PTR_ERR_OR_ZERO(k);
		if (ret)
			goto err;

		bkey_init(&k->k);
		k->k.type = KEY_TYPE_whiteout;
		k->k.p = iter.pos;
		ret = bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * Reattach an orphaned @inode (no dirent pointing at it) under lost+found,
 * naming it "subvol-<id>" for subvolume roots and "<inum>" otherwise.
 */
static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked lostfound;
	char name_buf[20];
	int ret;

	u32 dirent_snapshot = inode->bi_snapshot;
	if (inode->bi_subvol) {
		/* subvolume roots get reattached in the root subvolume */
		inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL;

		u64 root_inum;
		ret = subvol_lookup(trans, inode->bi_parent_subvol,
				    &dirent_snapshot, &root_inum);
		if (ret)
			return ret;

		snprintf(name_buf, sizeof(name_buf), "subvol-%u", inode->bi_subvol);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
	}

	ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum);
	if (ret)
		return ret;

	lostfound.bi_nlink += S_ISDIR(inode->bi_mode);

	/* ensure lost+found inode is also present in inode snapshot */
	if (!inode->bi_subvol) {
		BUG_ON(!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, lostfound.bi_snapshot));
		lostfound.bi_snapshot =
inode->bi_snapshot; 390 } 391 392 ret = __bch2_fsck_write_inode(trans, &lostfound); 393 if (ret) 394 return ret; 395 396 struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound); 397 struct qstr name = QSTR(name_buf); 398 399 inode->bi_dir = lostfound.bi_inum; 400 401 ret = bch2_dirent_create_snapshot(trans, 402 inode->bi_parent_subvol, lostfound.bi_inum, 403 dirent_snapshot, 404 &dir_hash, 405 inode_d_type(inode), 406 &name, 407 inode->bi_subvol ?: inode->bi_inum, 408 &inode->bi_dir_offset, 409 STR_HASH_must_create); 410 if (ret) { 411 bch_err_msg(c, ret, "error creating dirent"); 412 return ret; 413 } 414 415 ret = __bch2_fsck_write_inode(trans, inode); 416 if (ret) 417 return ret; 418 419 /* 420 * Fix up inodes in child snapshots: if they should also be reattached 421 * update the backpointer field, if they should not be we need to emit 422 * whiteouts for the dirent we just created. 423 */ 424 if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) { 425 snapshot_id_list whiteouts_done; 426 struct btree_iter iter; 427 struct bkey_s_c k; 428 429 darray_init(&whiteouts_done); 430 431 for_each_btree_key_reverse_norestart(trans, iter, 432 BTREE_ID_inodes, SPOS(0, inode->bi_inum, inode->bi_snapshot - 1), 433 BTREE_ITER_all_snapshots|BTREE_ITER_intent, k, ret) { 434 if (k.k->p.offset != inode->bi_inum) 435 break; 436 437 if (!bkey_is_inode(k.k) || 438 !bch2_snapshot_is_ancestor(c, k.k->p.snapshot, inode->bi_snapshot) || 439 snapshot_list_has_ancestor(c, &whiteouts_done, k.k->p.snapshot)) 440 continue; 441 442 struct bch_inode_unpacked child_inode; 443 ret = bch2_inode_unpack(k, &child_inode); 444 if (ret) 445 break; 446 447 if (!inode_should_reattach(&child_inode)) { 448 ret = maybe_delete_dirent(trans, 449 SPOS(lostfound.bi_inum, inode->bi_dir_offset, 450 dirent_snapshot), 451 k.k->p.snapshot); 452 if (ret) 453 break; 454 455 ret = snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot); 456 if (ret) 457 break; 458 } else { 459 
iter.snapshot = k.k->p.snapshot; 460 child_inode.bi_dir = inode->bi_dir; 461 child_inode.bi_dir_offset = inode->bi_dir_offset; 462 463 ret = bch2_inode_write_flags(trans, &iter, &child_inode, 464 BTREE_UPDATE_internal_snapshot_node); 465 if (ret) 466 break; 467 } 468 } 469 darray_exit(&whiteouts_done); 470 bch2_trans_iter_exit(trans, &iter); 471 } 472 473 return ret; 474 } 475 476 static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans, 477 struct btree_iter *iter, 478 struct bpos pos) 479 { 480 return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent); 481 } 482 483 static int remove_backpointer(struct btree_trans *trans, 484 struct bch_inode_unpacked *inode) 485 { 486 if (!inode->bi_dir) 487 return 0; 488 489 struct bch_fs *c = trans->c; 490 struct btree_iter iter; 491 struct bkey_s_c_dirent d = dirent_get_by_pos(trans, &iter, 492 SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot)); 493 int ret = bkey_err(d) ?: 494 dirent_points_to_inode(c, d, inode) ?: 495 bch2_fsck_remove_dirent(trans, d.k->p); 496 bch2_trans_iter_exit(trans, &iter); 497 return ret; 498 } 499 500 static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume s) 501 { 502 struct bch_fs *c = trans->c; 503 504 struct bch_inode_unpacked inode; 505 int ret = bch2_inode_find_by_inum_trans(trans, 506 (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) }, 507 &inode); 508 if (ret) 509 return ret; 510 511 ret = remove_backpointer(trans, &inode); 512 if (!bch2_err_matches(ret, ENOENT)) 513 bch_err_msg(c, ret, "removing dirent"); 514 if (ret) 515 return ret; 516 517 ret = reattach_inode(trans, &inode); 518 bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum); 519 return ret; 520 } 521 522 static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 subvolid, u64 inum) 523 { 524 struct bch_fs *c = trans->c; 525 526 if (!bch2_snapshot_is_leaf(c, snapshotid)) { 527 bch_err(c, "need to reconstruct subvol, but have 
interior node snapshot"); 528 return -BCH_ERR_fsck_repair_unimplemented; 529 } 530 531 /* 532 * If inum isn't set, that means we're being called from check_dirents, 533 * not check_inodes - the root of this subvolume doesn't exist or we 534 * would have found it there: 535 */ 536 if (!inum) { 537 struct btree_iter inode_iter = {}; 538 struct bch_inode_unpacked new_inode; 539 u64 cpu = raw_smp_processor_id(); 540 541 bch2_inode_init_early(c, &new_inode); 542 bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL); 543 544 new_inode.bi_subvol = subvolid; 545 546 int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?: 547 bch2_btree_iter_traverse(trans, &inode_iter) ?: 548 bch2_inode_write(trans, &inode_iter, &new_inode); 549 bch2_trans_iter_exit(trans, &inode_iter); 550 if (ret) 551 return ret; 552 553 inum = new_inode.bi_inum; 554 } 555 556 bch_info(c, "reconstructing subvol %u with root inode %llu", subvolid, inum); 557 558 struct bkey_i_subvolume *new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol)); 559 int ret = PTR_ERR_OR_ZERO(new_subvol); 560 if (ret) 561 return ret; 562 563 bkey_subvolume_init(&new_subvol->k_i); 564 new_subvol->k.p.offset = subvolid; 565 new_subvol->v.snapshot = cpu_to_le32(snapshotid); 566 new_subvol->v.inode = cpu_to_le64(inum); 567 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0); 568 if (ret) 569 return ret; 570 571 struct btree_iter iter; 572 struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter, 573 BTREE_ID_snapshots, POS(0, snapshotid), 574 0, snapshot); 575 ret = PTR_ERR_OR_ZERO(s); 576 bch_err_msg(c, ret, "getting snapshot %u", snapshotid); 577 if (ret) 578 return ret; 579 580 u32 snapshot_tree = le32_to_cpu(s->v.tree); 581 582 s->v.subvol = cpu_to_le32(subvolid); 583 SET_BCH_SNAPSHOT_SUBVOL(&s->v, true); 584 bch2_trans_iter_exit(trans, &iter); 585 586 struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter, 587 
BTREE_ID_snapshot_trees, POS(0, snapshot_tree), 588 0, snapshot_tree); 589 ret = PTR_ERR_OR_ZERO(st); 590 bch_err_msg(c, ret, "getting snapshot tree %u", snapshot_tree); 591 if (ret) 592 return ret; 593 594 if (!st->v.master_subvol) 595 st->v.master_subvol = cpu_to_le32(subvolid); 596 597 bch2_trans_iter_exit(trans, &iter); 598 return 0; 599 } 600 601 static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 snapshot, u64 inum) 602 { 603 struct bch_fs *c = trans->c; 604 unsigned i_mode = S_IFREG; 605 u64 i_size = 0; 606 607 switch (btree) { 608 case BTREE_ID_extents: { 609 struct btree_iter iter = {}; 610 611 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0); 612 struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0)); 613 bch2_trans_iter_exit(trans, &iter); 614 int ret = bkey_err(k); 615 if (ret) 616 return ret; 617 618 i_size = k.k->p.offset << 9; 619 break; 620 } 621 case BTREE_ID_dirents: 622 i_mode = S_IFDIR; 623 break; 624 case BTREE_ID_xattrs: 625 break; 626 default: 627 BUG(); 628 } 629 630 struct bch_inode_unpacked new_inode; 631 bch2_inode_init_early(c, &new_inode); 632 bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, i_mode|0600, 0, NULL); 633 new_inode.bi_size = i_size; 634 new_inode.bi_inum = inum; 635 new_inode.bi_snapshot = snapshot; 636 637 return __bch2_fsck_write_inode(trans, &new_inode); 638 } 639 640 struct snapshots_seen { 641 struct bpos pos; 642 snapshot_id_list ids; 643 }; 644 645 static inline void snapshots_seen_exit(struct snapshots_seen *s) 646 { 647 darray_exit(&s->ids); 648 } 649 650 static inline void snapshots_seen_init(struct snapshots_seen *s) 651 { 652 memset(s, 0, sizeof(*s)); 653 } 654 655 static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id) 656 { 657 u32 *i; 658 __darray_for_each(s->ids, i) { 659 if (*i == id) 660 return 0; 661 if (*i > id) 662 break; 663 } 664 665 int ret = 
darray_insert_item(&s->ids, i - s->ids.data, id); 666 if (ret) 667 bch_err(c, "error reallocating snapshots_seen table (size %zu)", 668 s->ids.size); 669 return ret; 670 } 671 672 static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s, 673 enum btree_id btree_id, struct bpos pos) 674 { 675 if (!bkey_eq(s->pos, pos)) 676 s->ids.nr = 0; 677 s->pos = pos; 678 679 return snapshot_list_add_nodup(c, &s->ids, pos.snapshot); 680 } 681 682 /** 683 * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor, 684 * and @ancestor hasn't been overwritten in @seen 685 * 686 * @c: filesystem handle 687 * @seen: list of snapshot ids already seen at current position 688 * @id: descendent snapshot id 689 * @ancestor: ancestor snapshot id 690 * 691 * Returns: whether key in @ancestor snapshot is visible in @id snapshot 692 */ 693 static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen, 694 u32 id, u32 ancestor) 695 { 696 ssize_t i; 697 698 EBUG_ON(id > ancestor); 699 700 /* @ancestor should be the snapshot most recently added to @seen */ 701 EBUG_ON(ancestor != seen->pos.snapshot); 702 EBUG_ON(ancestor != darray_last(seen->ids)); 703 704 if (id == ancestor) 705 return true; 706 707 if (!bch2_snapshot_is_ancestor(c, id, ancestor)) 708 return false; 709 710 /* 711 * We know that @id is a descendant of @ancestor, we're checking if 712 * we've seen a key that overwrote @ancestor - i.e. also a descendent of 713 * @ascestor and with @id as a descendent. 
714 * 715 * But we already know that we're scanning IDs between @id and @ancestor 716 * numerically, since snapshot ID lists are kept sorted, so if we find 717 * an id that's an ancestor of @id we're done: 718 */ 719 720 for (i = seen->ids.nr - 2; 721 i >= 0 && seen->ids.data[i] >= id; 722 --i) 723 if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i])) 724 return false; 725 726 return true; 727 } 728 729 /** 730 * ref_visible - given a key with snapshot id @src that points to a key with 731 * snapshot id @dst, test whether there is some snapshot in which @dst is 732 * visible. 733 * 734 * @c: filesystem handle 735 * @s: list of snapshot IDs already seen at @src 736 * @src: snapshot ID of src key 737 * @dst: snapshot ID of dst key 738 * Returns: true if there is some snapshot in which @dst is visible 739 * 740 * Assumes we're visiting @src keys in natural key order 741 */ 742 static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s, 743 u32 src, u32 dst) 744 { 745 return dst <= src 746 ? 
		  key_visible_in_snapshot(c, s, dst, src)
		: bch2_snapshot_is_ancestor(c, src, dst);
}

/*
 * Like ref_visible(), but for two keys with their own seen-lists: swaps so
 * the numerically larger snapshot ID is treated as the ancestor side.
 */
static int ref_visible2(struct bch_fs *c,
			u32 src, struct snapshots_seen *src_seen,
			u32 dst, struct snapshots_seen *dst_seen)
{
	if (dst > src) {
		swap(dst, src);
		swap(dst_seen, src_seen);
	}
	return key_visible_in_snapshot(c, src_seen, dst, src);
}

/* Iterate inode_walker entries whose snapshot is visible in @_snapshot: */
#define for_each_visible_inode(_c, _s, _w, _snapshot, _i)				\
	for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr &&	\
	     (_i)->snapshot <= (_snapshot); _i++)					\
		if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))

struct inode_walker_entry {
	struct bch_inode_unpacked inode;
	u32		snapshot;
	u64		count;		/* running count accumulated by callers */
	u64		i_size;		/* running size accumulated by callers */
};

/* Tracks all snapshot versions of the inode at the current btree position. */
struct inode_walker {
	bool		first_this_inode;
	bool		have_inodes;
	bool		recalculate_sums;
	struct bpos	last_pos;

	DARRAY(struct inode_walker_entry) inodes;
	snapshot_id_list deletes;
};

static void inode_walker_exit(struct inode_walker *w)
{
	darray_exit(&w->inodes);
	darray_exit(&w->deletes);
}

static struct inode_walker inode_walker_init(void)
{
	return (struct inode_walker) { 0, };
}

/* Unpack the inode key @inode and append it to @w's entry list. */
static int add_inode(struct bch_fs *c, struct inode_walker *w,
		     struct bkey_s_c inode)
{
	struct bch_inode_unpacked u;

	return bch2_inode_unpack(inode, &u) ?:
		darray_push(&w->inodes, ((struct inode_walker_entry) {
			.inode		= u,
			.snapshot	= inode.k->p.snapshot,
		}));
}

/* Repopulate @w with every snapshot version of inode @inum. */
static int get_inodes_all_snapshots(struct btree_trans *trans,
				    struct inode_walker *w, u64 inum)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	/*
	 * We no longer have inodes for w->last_pos; clear this to avoid
	 * screwing up check_i_sectors/check_subdir_count if we take a
	 * transaction restart here:
	 */
	w->have_inodes = false;
	w->recalculate_sums = false;
821 w->inodes.nr = 0; 822 823 for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum), 824 BTREE_ITER_all_snapshots, k, ret) { 825 if (k.k->p.offset != inum) 826 break; 827 828 if (bkey_is_inode(k.k)) 829 add_inode(c, w, k); 830 } 831 bch2_trans_iter_exit(trans, &iter); 832 833 if (ret) 834 return ret; 835 836 w->first_this_inode = true; 837 w->have_inodes = true; 838 return 0; 839 } 840 841 static struct inode_walker_entry * 842 lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c k) 843 { 844 bool is_whiteout = k.k->type == KEY_TYPE_whiteout; 845 846 struct inode_walker_entry *i; 847 __darray_for_each(w->inodes, i) 848 if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->snapshot)) 849 goto found; 850 851 return NULL; 852 found: 853 BUG_ON(k.k->p.snapshot > i->snapshot); 854 855 if (k.k->p.snapshot != i->snapshot && !is_whiteout) { 856 struct inode_walker_entry new = *i; 857 858 new.snapshot = k.k->p.snapshot; 859 new.count = 0; 860 new.i_size = 0; 861 862 struct printbuf buf = PRINTBUF; 863 bch2_bkey_val_to_text(&buf, c, k); 864 865 bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u\n" 866 "unexpected because we should always update the inode when we update a key in that inode\n" 867 "%s", 868 w->last_pos.inode, k.k->p.snapshot, i->snapshot, buf.buf); 869 printbuf_exit(&buf); 870 871 while (i > w->inodes.data && i[-1].snapshot > k.k->p.snapshot) 872 --i; 873 874 size_t pos = i - w->inodes.data; 875 int ret = darray_insert_item(&w->inodes, pos, new); 876 if (ret) 877 return ERR_PTR(ret); 878 879 i = w->inodes.data + pos; 880 } 881 882 return i; 883 } 884 885 static struct inode_walker_entry *walk_inode(struct btree_trans *trans, 886 struct inode_walker *w, 887 struct bkey_s_c k) 888 { 889 if (w->last_pos.inode != k.k->p.inode) { 890 int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode); 891 if (ret) 892 return ERR_PTR(ret); 893 } 894 895 w->last_pos = k.k->p; 896 897 return 
lookup_inode_for_snapshot(trans->c, w, k); 898 } 899 900 static int get_visible_inodes(struct btree_trans *trans, 901 struct inode_walker *w, 902 struct snapshots_seen *s, 903 u64 inum) 904 { 905 struct bch_fs *c = trans->c; 906 struct btree_iter iter; 907 struct bkey_s_c k; 908 int ret; 909 910 w->inodes.nr = 0; 911 w->deletes.nr = 0; 912 913 for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, s->pos.snapshot), 914 BTREE_ITER_all_snapshots, k, ret) { 915 if (k.k->p.offset != inum) 916 break; 917 918 if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot)) 919 continue; 920 921 if (snapshot_list_has_ancestor(c, &w->deletes, k.k->p.snapshot)) 922 continue; 923 924 ret = bkey_is_inode(k.k) 925 ? add_inode(c, w, k) 926 : snapshot_list_add(c, &w->deletes, k.k->p.snapshot); 927 if (ret) 928 break; 929 } 930 bch2_trans_iter_exit(trans, &iter); 931 932 return ret; 933 } 934 935 /* 936 * Prefer to delete the first one, since that will be the one at the wrong 937 * offset: 938 * return value: 0 -> delete k1, 1 -> delete k2 939 */ 940 int bch2_fsck_update_backpointers(struct btree_trans *trans, 941 struct snapshots_seen *s, 942 const struct bch_hash_desc desc, 943 struct bch_hash_info *hash_info, 944 struct bkey_i *new) 945 { 946 if (new->k.type != KEY_TYPE_dirent) 947 return 0; 948 949 struct bkey_i_dirent *d = bkey_i_to_dirent(new); 950 struct inode_walker target = inode_walker_init(); 951 int ret = 0; 952 953 if (d->v.d_type == DT_SUBVOL) { 954 BUG(); 955 } else { 956 ret = get_visible_inodes(trans, &target, s, le64_to_cpu(d->v.d_inum)); 957 if (ret) 958 goto err; 959 960 darray_for_each(target.inodes, i) { 961 i->inode.bi_dir_offset = d->k.p.offset; 962 ret = __bch2_fsck_write_inode(trans, &i->inode); 963 if (ret) 964 goto err; 965 } 966 } 967 err: 968 inode_walker_exit(&target); 969 return ret; 970 } 971 972 static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans, 973 struct btree_iter *iter, 974 struct bch_inode_unpacked 
							*inode,
					       u32 *snapshot)
{
	/*
	 * For subvolume roots the backpointer lives in the parent subvolume;
	 * translate @snapshot accordingly before looking up the dirent:
	 */
	if (inode->bi_subvol) {
		u64 inum;
		int ret = subvol_lookup(trans, inode->bi_parent_subvol, snapshot, &inum);
		if (ret)
			return ((struct bkey_s_c_dirent) { .k = ERR_PTR(ret) });
	}

	return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot));
}

/*
 * Is inode @p on the deleted-inodes btree?
 *
 * Returns: 1 if present (KEY_TYPE_set), 0 if absent, negative error on
 * lookup failure.
 */
static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0);
	int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set;
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * Verify @inode's backpointer (bi_dir/bi_dir_offset): the dirent it names
 * must exist and point back at this inode. On mismatch (with user consent
 * via fsck_err) the backpointer fields are cleared and *write_inode is set
 * so the caller persists the change.
 */
static int check_inode_dirent_inode(struct btree_trans *trans,
				    struct bch_inode_unpacked *inode,
				    bool *write_inode)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;

	u32 inode_snapshot = inode->bi_snapshot;
	struct btree_iter dirent_iter = {};
	struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
	int ret = bkey_err(d);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (fsck_err_on(ret,
			trans, inode_points_to_missing_dirent,
			"inode points to missing dirent\n%s",
			(bch2_inode_unpacked_to_text(&buf, inode), buf.buf)) ||
	    fsck_err_on(!ret && dirent_points_to_inode_nowarn(d, inode),
			trans, inode_points_to_wrong_dirent,
			"%s",
			(printbuf_reset(&buf),
			 dirent_inode_mismatch_msg(&buf, c, d, inode),
			 buf.buf))) {
		/*
		 * We just clear the backpointer fields for now. If we find a
		 * dirent that points to this inode in check_dirents(), we'll
		 * update it then; then when we get to check_path() if the
		 * backpointer is still 0 we'll reattach it.
1025 */ 1026 inode->bi_dir = 0; 1027 inode->bi_dir_offset = 0; 1028 *write_inode = true; 1029 } 1030 1031 ret = 0; 1032 fsck_err: 1033 bch2_trans_iter_exit(trans, &dirent_iter); 1034 printbuf_exit(&buf); 1035 bch_err_fn(c, ret); 1036 return ret; 1037 } 1038 1039 static int get_snapshot_root_inode(struct btree_trans *trans, 1040 struct bch_inode_unpacked *root, 1041 u64 inum) 1042 { 1043 struct btree_iter iter; 1044 struct bkey_s_c k; 1045 int ret = 0; 1046 1047 for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, 1048 SPOS(0, inum, U32_MAX), 1049 BTREE_ITER_all_snapshots, k, ret) { 1050 if (k.k->p.offset != inum) 1051 break; 1052 if (bkey_is_inode(k.k)) 1053 goto found_root; 1054 } 1055 if (ret) 1056 goto err; 1057 BUG(); 1058 found_root: 1059 ret = bch2_inode_unpack(k, root); 1060 err: 1061 bch2_trans_iter_exit(trans, &iter); 1062 return ret; 1063 } 1064 1065 static int check_inode(struct btree_trans *trans, 1066 struct btree_iter *iter, 1067 struct bkey_s_c k, 1068 struct bch_inode_unpacked *snapshot_root, 1069 struct snapshots_seen *s) 1070 { 1071 struct bch_fs *c = trans->c; 1072 struct printbuf buf = PRINTBUF; 1073 struct bch_inode_unpacked u; 1074 bool do_update = false; 1075 int ret; 1076 1077 ret = bch2_check_key_has_snapshot(trans, iter, k); 1078 if (ret < 0) 1079 goto err; 1080 if (ret) 1081 return 0; 1082 1083 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); 1084 if (ret) 1085 goto err; 1086 1087 if (!bkey_is_inode(k.k)) 1088 return 0; 1089 1090 ret = bch2_inode_unpack(k, &u); 1091 if (ret) 1092 goto err; 1093 1094 if (snapshot_root->bi_inum != u.bi_inum) { 1095 ret = get_snapshot_root_inode(trans, snapshot_root, u.bi_inum); 1096 if (ret) 1097 goto err; 1098 } 1099 1100 if (fsck_err_on(u.bi_hash_seed != snapshot_root->bi_hash_seed || 1101 INODE_STR_HASH(&u) != INODE_STR_HASH(snapshot_root), 1102 trans, inode_snapshot_mismatch, 1103 "inode hash info in different snapshots don't match")) { 1104 u.bi_hash_seed = 
snapshot_root->bi_hash_seed; 1105 SET_INODE_STR_HASH(&u, INODE_STR_HASH(snapshot_root)); 1106 do_update = true; 1107 } 1108 1109 if (u.bi_dir || u.bi_dir_offset) { 1110 ret = check_inode_dirent_inode(trans, &u, &do_update); 1111 if (ret) 1112 goto err; 1113 } 1114 1115 if (fsck_err_on(u.bi_dir && (u.bi_flags & BCH_INODE_unlinked), 1116 trans, inode_unlinked_but_has_dirent, 1117 "inode unlinked but has dirent\n%s", 1118 (printbuf_reset(&buf), 1119 bch2_inode_unpacked_to_text(&buf, &u), 1120 buf.buf))) { 1121 u.bi_flags &= ~BCH_INODE_unlinked; 1122 do_update = true; 1123 } 1124 1125 if (S_ISDIR(u.bi_mode) && (u.bi_flags & BCH_INODE_unlinked)) { 1126 /* Check for this early so that check_unreachable_inode() will reattach it */ 1127 1128 ret = bch2_empty_dir_snapshot(trans, k.k->p.offset, 0, k.k->p.snapshot); 1129 if (ret && ret != -BCH_ERR_ENOTEMPTY_dir_not_empty) 1130 goto err; 1131 1132 fsck_err_on(ret, trans, inode_dir_unlinked_but_not_empty, 1133 "dir unlinked but not empty\n%s", 1134 (printbuf_reset(&buf), 1135 bch2_inode_unpacked_to_text(&buf, &u), 1136 buf.buf)); 1137 u.bi_flags &= ~BCH_INODE_unlinked; 1138 do_update = true; 1139 ret = 0; 1140 } 1141 1142 ret = bch2_inode_has_child_snapshots(trans, k.k->p); 1143 if (ret < 0) 1144 goto err; 1145 1146 if (fsck_err_on(ret != !!(u.bi_flags & BCH_INODE_has_child_snapshot), 1147 trans, inode_has_child_snapshots_wrong, 1148 "inode has_child_snapshots flag wrong (should be %u)\n%s", 1149 ret, 1150 (printbuf_reset(&buf), 1151 bch2_inode_unpacked_to_text(&buf, &u), 1152 buf.buf))) { 1153 if (ret) 1154 u.bi_flags |= BCH_INODE_has_child_snapshot; 1155 else 1156 u.bi_flags &= ~BCH_INODE_has_child_snapshot; 1157 do_update = true; 1158 } 1159 ret = 0; 1160 1161 if ((u.bi_flags & BCH_INODE_unlinked) && 1162 !(u.bi_flags & BCH_INODE_has_child_snapshot)) { 1163 if (!test_bit(BCH_FS_started, &c->flags)) { 1164 /* 1165 * If we're not in online fsck, don't delete unlinked 1166 * inodes, just make sure they're on the deleted list. 
1167 * 1168 * They might be referred to by a logged operation - 1169 * i.e. we might have crashed in the middle of a 1170 * truncate on an unlinked but open file - so we want to 1171 * let the delete_dead_inodes kill it after resuming 1172 * logged ops. 1173 */ 1174 ret = check_inode_deleted_list(trans, k.k->p); 1175 if (ret < 0) 1176 goto err_noprint; 1177 1178 fsck_err_on(!ret, 1179 trans, unlinked_inode_not_on_deleted_list, 1180 "inode %llu:%u unlinked, but not on deleted list", 1181 u.bi_inum, k.k->p.snapshot); 1182 1183 ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, k.k->p, 1); 1184 if (ret) 1185 goto err; 1186 } else { 1187 ret = bch2_inode_or_descendents_is_open(trans, k.k->p); 1188 if (ret < 0) 1189 goto err; 1190 1191 if (fsck_err_on(!ret, 1192 trans, inode_unlinked_and_not_open, 1193 "inode %llu:%u unlinked and not open", 1194 u.bi_inum, u.bi_snapshot)) { 1195 ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot); 1196 bch_err_msg(c, ret, "in fsck deleting inode"); 1197 goto err_noprint; 1198 } 1199 ret = 0; 1200 } 1201 } 1202 1203 if (fsck_err_on(u.bi_parent_subvol && 1204 (u.bi_subvol == 0 || 1205 u.bi_subvol == BCACHEFS_ROOT_SUBVOL), 1206 trans, inode_bi_parent_nonzero, 1207 "inode %llu:%u has subvol %u but nonzero parent subvol %u", 1208 u.bi_inum, k.k->p.snapshot, u.bi_subvol, u.bi_parent_subvol)) { 1209 u.bi_parent_subvol = 0; 1210 do_update = true; 1211 } 1212 1213 if (u.bi_subvol) { 1214 struct bch_subvolume s; 1215 1216 ret = bch2_subvolume_get(trans, u.bi_subvol, false, &s); 1217 if (ret && !bch2_err_matches(ret, ENOENT)) 1218 goto err; 1219 1220 if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) { 1221 ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum); 1222 goto do_update; 1223 } 1224 1225 if (fsck_err_on(ret, 1226 trans, inode_bi_subvol_missing, 1227 "inode %llu:%u bi_subvol points to missing subvolume %u", 1228 u.bi_inum, k.k->p.snapshot, u.bi_subvol) || 1229 
fsck_err_on(le64_to_cpu(s.inode) != u.bi_inum || 1230 !bch2_snapshot_is_ancestor(c, le32_to_cpu(s.snapshot), 1231 k.k->p.snapshot), 1232 trans, inode_bi_subvol_wrong, 1233 "inode %llu:%u points to subvol %u, but subvol points to %llu:%u", 1234 u.bi_inum, k.k->p.snapshot, u.bi_subvol, 1235 le64_to_cpu(s.inode), 1236 le32_to_cpu(s.snapshot))) { 1237 u.bi_subvol = 0; 1238 u.bi_parent_subvol = 0; 1239 do_update = true; 1240 } 1241 } 1242 1243 if (fsck_err_on(u.bi_journal_seq > journal_cur_seq(&c->journal), 1244 trans, inode_journal_seq_in_future, 1245 "inode journal seq in future (currently at %llu)\n%s", 1246 journal_cur_seq(&c->journal), 1247 (printbuf_reset(&buf), 1248 bch2_inode_unpacked_to_text(&buf, &u), 1249 buf.buf))) { 1250 u.bi_journal_seq = journal_cur_seq(&c->journal); 1251 do_update = true; 1252 } 1253 do_update: 1254 if (do_update) { 1255 ret = __bch2_fsck_write_inode(trans, &u); 1256 bch_err_msg(c, ret, "in fsck updating inode"); 1257 if (ret) 1258 goto err_noprint; 1259 } 1260 err: 1261 fsck_err: 1262 bch_err_fn(c, ret); 1263 err_noprint: 1264 printbuf_exit(&buf); 1265 return ret; 1266 } 1267 1268 int bch2_check_inodes(struct bch_fs *c) 1269 { 1270 struct bch_inode_unpacked snapshot_root = {}; 1271 struct snapshots_seen s; 1272 1273 snapshots_seen_init(&s); 1274 1275 int ret = bch2_trans_run(c, 1276 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, 1277 POS_MIN, 1278 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, 1279 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 1280 check_inode(trans, &iter, k, &snapshot_root, &s))); 1281 1282 snapshots_seen_exit(&s); 1283 bch_err_fn(c, ret); 1284 return ret; 1285 } 1286 1287 static int find_oldest_inode_needs_reattach(struct btree_trans *trans, 1288 struct bch_inode_unpacked *inode) 1289 { 1290 struct bch_fs *c = trans->c; 1291 struct btree_iter iter; 1292 struct bkey_s_c k; 1293 int ret = 0; 1294 1295 /* 1296 * We look for inodes to reattach in natural key order, leaves first, 1297 * but we should do the reattach 
at the oldest version that needs to be 1298 * reattached: 1299 */ 1300 for_each_btree_key_norestart(trans, iter, 1301 BTREE_ID_inodes, 1302 SPOS(0, inode->bi_inum, inode->bi_snapshot + 1), 1303 BTREE_ITER_all_snapshots, k, ret) { 1304 if (k.k->p.offset != inode->bi_inum) 1305 break; 1306 1307 if (!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, k.k->p.snapshot)) 1308 continue; 1309 1310 if (!bkey_is_inode(k.k)) 1311 break; 1312 1313 struct bch_inode_unpacked parent_inode; 1314 ret = bch2_inode_unpack(k, &parent_inode); 1315 if (ret) 1316 break; 1317 1318 if (!inode_should_reattach(&parent_inode)) 1319 break; 1320 1321 *inode = parent_inode; 1322 } 1323 bch2_trans_iter_exit(trans, &iter); 1324 1325 return ret; 1326 } 1327 1328 static int check_unreachable_inode(struct btree_trans *trans, 1329 struct btree_iter *iter, 1330 struct bkey_s_c k) 1331 { 1332 struct printbuf buf = PRINTBUF; 1333 int ret = 0; 1334 1335 if (!bkey_is_inode(k.k)) 1336 return 0; 1337 1338 struct bch_inode_unpacked inode; 1339 ret = bch2_inode_unpack(k, &inode); 1340 if (ret) 1341 return ret; 1342 1343 if (!inode_should_reattach(&inode)) 1344 return 0; 1345 1346 ret = find_oldest_inode_needs_reattach(trans, &inode); 1347 if (ret) 1348 return ret; 1349 1350 if (fsck_err(trans, inode_unreachable, 1351 "unreachable inode:\n%s", 1352 (bch2_inode_unpacked_to_text(&buf, &inode), 1353 buf.buf))) 1354 ret = reattach_inode(trans, &inode); 1355 fsck_err: 1356 printbuf_exit(&buf); 1357 return ret; 1358 } 1359 1360 /* 1361 * Reattach unreachable (but not unlinked) inodes 1362 * 1363 * Run after check_inodes() and check_dirents(), so we node that inode 1364 * backpointer fields point to valid dirents, and every inode that has a dirent 1365 * that points to it has its backpointer field set - so we're just looking for 1366 * non-unlinked inodes without backpointers: 1367 * 1368 * XXX: this is racy w.r.t. 
hardlink removal in online fsck 1369 */ 1370 int bch2_check_unreachable_inodes(struct bch_fs *c) 1371 { 1372 int ret = bch2_trans_run(c, 1373 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, 1374 POS_MIN, 1375 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, 1376 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 1377 check_unreachable_inode(trans, &iter, k))); 1378 bch_err_fn(c, ret); 1379 return ret; 1380 } 1381 1382 static inline bool btree_matches_i_mode(enum btree_id btree, unsigned mode) 1383 { 1384 switch (btree) { 1385 case BTREE_ID_extents: 1386 return S_ISREG(mode) || S_ISLNK(mode); 1387 case BTREE_ID_dirents: 1388 return S_ISDIR(mode); 1389 case BTREE_ID_xattrs: 1390 return true; 1391 default: 1392 BUG(); 1393 } 1394 } 1395 1396 static int check_key_has_inode(struct btree_trans *trans, 1397 struct btree_iter *iter, 1398 struct inode_walker *inode, 1399 struct inode_walker_entry *i, 1400 struct bkey_s_c k) 1401 { 1402 struct bch_fs *c = trans->c; 1403 struct printbuf buf = PRINTBUF; 1404 int ret = PTR_ERR_OR_ZERO(i); 1405 if (ret) 1406 return ret; 1407 1408 if (k.k->type == KEY_TYPE_whiteout) 1409 goto out; 1410 1411 if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) { 1412 ret = reconstruct_inode(trans, iter->btree_id, k.k->p.snapshot, k.k->p.inode) ?: 1413 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); 1414 if (ret) 1415 goto err; 1416 1417 inode->last_pos.inode--; 1418 ret = -BCH_ERR_transaction_restart_nested; 1419 goto err; 1420 } 1421 1422 if (fsck_err_on(!i, 1423 trans, key_in_missing_inode, 1424 "key in missing inode:\n%s", 1425 (printbuf_reset(&buf), 1426 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 1427 goto delete; 1428 1429 if (fsck_err_on(i && !btree_matches_i_mode(iter->btree_id, i->inode.bi_mode), 1430 trans, key_in_wrong_inode_type, 1431 "key for wrong inode mode %o:\n%s", 1432 i->inode.bi_mode, 1433 (printbuf_reset(&buf), 1434 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 1435 goto delete; 1436 out: 1437 err: 
1438 fsck_err: 1439 printbuf_exit(&buf); 1440 bch_err_fn(c, ret); 1441 return ret; 1442 delete: 1443 ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node); 1444 goto out; 1445 } 1446 1447 static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_walker *w) 1448 { 1449 struct bch_fs *c = trans->c; 1450 int ret = 0; 1451 s64 count2; 1452 1453 darray_for_each(w->inodes, i) { 1454 if (i->inode.bi_sectors == i->count) 1455 continue; 1456 1457 count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot); 1458 1459 if (w->recalculate_sums) 1460 i->count = count2; 1461 1462 if (i->count != count2) { 1463 bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu", 1464 w->last_pos.inode, i->snapshot, i->count, count2); 1465 i->count = count2; 1466 } 1467 1468 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty), 1469 trans, inode_i_sectors_wrong, 1470 "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu", 1471 w->last_pos.inode, i->snapshot, 1472 i->inode.bi_sectors, i->count)) { 1473 i->inode.bi_sectors = i->count; 1474 ret = bch2_fsck_write_inode(trans, &i->inode); 1475 if (ret) 1476 break; 1477 } 1478 } 1479 fsck_err: 1480 bch_err_fn(c, ret); 1481 return ret; 1482 } 1483 1484 static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w) 1485 { 1486 u32 restart_count = trans->restart_count; 1487 return check_i_sectors_notnested(trans, w) ?: 1488 trans_was_restarted(trans, restart_count); 1489 } 1490 1491 struct extent_end { 1492 u32 snapshot; 1493 u64 offset; 1494 struct snapshots_seen seen; 1495 }; 1496 1497 struct extent_ends { 1498 struct bpos last_pos; 1499 DARRAY(struct extent_end) e; 1500 }; 1501 1502 static void extent_ends_reset(struct extent_ends *extent_ends) 1503 { 1504 darray_for_each(extent_ends->e, i) 1505 snapshots_seen_exit(&i->seen); 1506 extent_ends->e.nr = 0; 1507 } 1508 1509 static void extent_ends_exit(struct extent_ends 
*extent_ends) 1510 { 1511 extent_ends_reset(extent_ends); 1512 darray_exit(&extent_ends->e); 1513 } 1514 1515 static void extent_ends_init(struct extent_ends *extent_ends) 1516 { 1517 memset(extent_ends, 0, sizeof(*extent_ends)); 1518 } 1519 1520 static int extent_ends_at(struct bch_fs *c, 1521 struct extent_ends *extent_ends, 1522 struct snapshots_seen *seen, 1523 struct bkey_s_c k) 1524 { 1525 struct extent_end *i, n = (struct extent_end) { 1526 .offset = k.k->p.offset, 1527 .snapshot = k.k->p.snapshot, 1528 .seen = *seen, 1529 }; 1530 1531 n.seen.ids.data = kmemdup(seen->ids.data, 1532 sizeof(seen->ids.data[0]) * seen->ids.size, 1533 GFP_KERNEL); 1534 if (!n.seen.ids.data) 1535 return -BCH_ERR_ENOMEM_fsck_extent_ends_at; 1536 1537 __darray_for_each(extent_ends->e, i) { 1538 if (i->snapshot == k.k->p.snapshot) { 1539 snapshots_seen_exit(&i->seen); 1540 *i = n; 1541 return 0; 1542 } 1543 1544 if (i->snapshot >= k.k->p.snapshot) 1545 break; 1546 } 1547 1548 return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n); 1549 } 1550 1551 static int overlapping_extents_found(struct btree_trans *trans, 1552 enum btree_id btree, 1553 struct bpos pos1, struct snapshots_seen *pos1_seen, 1554 struct bkey pos2, 1555 bool *fixed, 1556 struct extent_end *extent_end) 1557 { 1558 struct bch_fs *c = trans->c; 1559 struct printbuf buf = PRINTBUF; 1560 struct btree_iter iter1, iter2 = {}; 1561 struct bkey_s_c k1, k2; 1562 int ret; 1563 1564 BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2))); 1565 1566 bch2_trans_iter_init(trans, &iter1, btree, pos1, 1567 BTREE_ITER_all_snapshots| 1568 BTREE_ITER_not_extents); 1569 k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX)); 1570 ret = bkey_err(k1); 1571 if (ret) 1572 goto err; 1573 1574 prt_newline(&buf); 1575 bch2_bkey_val_to_text(&buf, c, k1); 1576 1577 if (!bpos_eq(pos1, k1.k->p)) { 1578 prt_str(&buf, "\nwanted\n "); 1579 bch2_bpos_to_text(&buf, pos1); 1580 prt_str(&buf, "\n"); 1581 bch2_bkey_to_text(&buf, &pos2); 
1582 1583 bch_err(c, "%s: error finding first overlapping extent when repairing, got%s", 1584 __func__, buf.buf); 1585 ret = -BCH_ERR_internal_fsck_err; 1586 goto err; 1587 } 1588 1589 bch2_trans_copy_iter(trans, &iter2, &iter1); 1590 1591 while (1) { 1592 bch2_btree_iter_advance(trans, &iter2); 1593 1594 k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX)); 1595 ret = bkey_err(k2); 1596 if (ret) 1597 goto err; 1598 1599 if (bpos_ge(k2.k->p, pos2.p)) 1600 break; 1601 } 1602 1603 prt_newline(&buf); 1604 bch2_bkey_val_to_text(&buf, c, k2); 1605 1606 if (bpos_gt(k2.k->p, pos2.p) || 1607 pos2.size != k2.k->size) { 1608 bch_err(c, "%s: error finding seconding overlapping extent when repairing%s", 1609 __func__, buf.buf); 1610 ret = -BCH_ERR_internal_fsck_err; 1611 goto err; 1612 } 1613 1614 prt_printf(&buf, "\noverwriting %s extent", 1615 pos1.snapshot >= pos2.p.snapshot ? "first" : "second"); 1616 1617 if (fsck_err(trans, extent_overlapping, 1618 "overlapping extents%s", buf.buf)) { 1619 struct btree_iter *old_iter = &iter1; 1620 struct disk_reservation res = { 0 }; 1621 1622 if (pos1.snapshot < pos2.p.snapshot) { 1623 old_iter = &iter2; 1624 swap(k1, k2); 1625 } 1626 1627 trans->extra_disk_res += bch2_bkey_sectors_compressed(k2); 1628 1629 ret = bch2_trans_update_extent_overwrite(trans, old_iter, 1630 BTREE_UPDATE_internal_snapshot_node, 1631 k1, k2) ?: 1632 bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc); 1633 bch2_disk_reservation_put(c, &res); 1634 1635 bch_info(c, "repair ret %s", bch2_err_str(ret)); 1636 1637 if (ret) 1638 goto err; 1639 1640 *fixed = true; 1641 1642 if (pos1.snapshot == pos2.p.snapshot) { 1643 /* 1644 * We overwrote the first extent, and did the overwrite 1645 * in the same snapshot: 1646 */ 1647 extent_end->offset = bkey_start_offset(&pos2); 1648 } else if (pos1.snapshot > pos2.p.snapshot) { 1649 /* 1650 * We overwrote the first extent in pos2's snapshot: 1651 */ 1652 ret = snapshots_seen_add_inorder(c, 
pos1_seen, pos2.p.snapshot); 1653 } else { 1654 /* 1655 * We overwrote the second extent - restart 1656 * check_extent() from the top: 1657 */ 1658 ret = -BCH_ERR_transaction_restart_nested; 1659 } 1660 } 1661 fsck_err: 1662 err: 1663 bch2_trans_iter_exit(trans, &iter2); 1664 bch2_trans_iter_exit(trans, &iter1); 1665 printbuf_exit(&buf); 1666 return ret; 1667 } 1668 1669 static int check_overlapping_extents(struct btree_trans *trans, 1670 struct snapshots_seen *seen, 1671 struct extent_ends *extent_ends, 1672 struct bkey_s_c k, 1673 struct btree_iter *iter, 1674 bool *fixed) 1675 { 1676 struct bch_fs *c = trans->c; 1677 int ret = 0; 1678 1679 /* transaction restart, running again */ 1680 if (bpos_eq(extent_ends->last_pos, k.k->p)) 1681 return 0; 1682 1683 if (extent_ends->last_pos.inode != k.k->p.inode) 1684 extent_ends_reset(extent_ends); 1685 1686 darray_for_each(extent_ends->e, i) { 1687 if (i->offset <= bkey_start_offset(k.k)) 1688 continue; 1689 1690 if (!ref_visible2(c, 1691 k.k->p.snapshot, seen, 1692 i->snapshot, &i->seen)) 1693 continue; 1694 1695 ret = overlapping_extents_found(trans, iter->btree_id, 1696 SPOS(iter->pos.inode, 1697 i->offset, 1698 i->snapshot), 1699 &i->seen, 1700 *k.k, fixed, i); 1701 if (ret) 1702 goto err; 1703 } 1704 1705 extent_ends->last_pos = k.k->p; 1706 err: 1707 return ret; 1708 } 1709 1710 static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter, 1711 struct bkey_s_c k) 1712 { 1713 struct bch_fs *c = trans->c; 1714 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1715 struct bch_extent_crc_unpacked crc; 1716 const union bch_extent_entry *i; 1717 unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9; 1718 1719 bkey_for_each_crc(k.k, ptrs, crc, i) 1720 if (crc_is_encoded(crc) && 1721 crc.uncompressed_size > encoded_extent_max_sectors) { 1722 struct printbuf buf = PRINTBUF; 1723 1724 bch2_bkey_val_to_text(&buf, c, k); 1725 bch_err(c, "overbig encoded extent, please report this:\n %s", 
buf.buf); 1726 printbuf_exit(&buf); 1727 } 1728 1729 return 0; 1730 } 1731 1732 static int check_extent(struct btree_trans *trans, struct btree_iter *iter, 1733 struct bkey_s_c k, 1734 struct inode_walker *inode, 1735 struct snapshots_seen *s, 1736 struct extent_ends *extent_ends, 1737 struct disk_reservation *res) 1738 { 1739 struct bch_fs *c = trans->c; 1740 struct printbuf buf = PRINTBUF; 1741 int ret = 0; 1742 1743 ret = bch2_check_key_has_snapshot(trans, iter, k); 1744 if (ret) { 1745 ret = ret < 0 ? ret : 0; 1746 goto out; 1747 } 1748 1749 if (inode->last_pos.inode != k.k->p.inode && inode->have_inodes) { 1750 ret = check_i_sectors(trans, inode); 1751 if (ret) 1752 goto err; 1753 } 1754 1755 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); 1756 if (ret) 1757 goto err; 1758 1759 struct inode_walker_entry *extent_i = walk_inode(trans, inode, k); 1760 ret = PTR_ERR_OR_ZERO(extent_i); 1761 if (ret) 1762 goto err; 1763 1764 ret = check_key_has_inode(trans, iter, inode, extent_i, k); 1765 if (ret) 1766 goto err; 1767 1768 if (k.k->type != KEY_TYPE_whiteout) { 1769 ret = check_overlapping_extents(trans, s, extent_ends, k, iter, 1770 &inode->recalculate_sums); 1771 if (ret) 1772 goto err; 1773 1774 /* 1775 * Check inodes in reverse order, from oldest snapshots to 1776 * newest, starting from the inode that matches this extent's 1777 * snapshot. 
If we didn't have one, iterate over all inodes: 1778 */ 1779 for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes); 1780 inode->inodes.data && i >= inode->inodes.data; 1781 --i) { 1782 if (i->snapshot > k.k->p.snapshot || 1783 !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot)) 1784 continue; 1785 1786 if (fsck_err_on(k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 && 1787 !bkey_extent_is_reservation(k), 1788 trans, extent_past_end_of_inode, 1789 "extent type past end of inode %llu:%u, i_size %llu\n%s", 1790 i->inode.bi_inum, i->snapshot, i->inode.bi_size, 1791 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 1792 struct btree_iter iter2; 1793 1794 bch2_trans_copy_iter(trans, &iter2, iter); 1795 bch2_btree_iter_set_snapshot(trans, &iter2, i->snapshot); 1796 ret = bch2_btree_iter_traverse(trans, &iter2) ?: 1797 bch2_btree_delete_at(trans, &iter2, 1798 BTREE_UPDATE_internal_snapshot_node); 1799 bch2_trans_iter_exit(trans, &iter2); 1800 if (ret) 1801 goto err; 1802 1803 iter->k.type = KEY_TYPE_whiteout; 1804 break; 1805 } 1806 } 1807 } 1808 1809 ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc); 1810 if (ret) 1811 goto err; 1812 1813 if (bkey_extent_is_allocation(k.k)) { 1814 for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes); 1815 inode->inodes.data && i >= inode->inodes.data; 1816 --i) { 1817 if (i->snapshot > k.k->p.snapshot || 1818 !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot)) 1819 continue; 1820 1821 i->count += k.k->size; 1822 } 1823 } 1824 1825 if (k.k->type != KEY_TYPE_whiteout) { 1826 ret = extent_ends_at(c, extent_ends, s, k); 1827 if (ret) 1828 goto err; 1829 } 1830 out: 1831 err: 1832 fsck_err: 1833 printbuf_exit(&buf); 1834 bch_err_fn(c, ret); 1835 return ret; 1836 } 1837 1838 /* 1839 * Walk extents: verify that extents have a corresponding S_ISREG inode, and 1840 * that i_size an i_sectors are consistent 1841 */ 1842 int bch2_check_extents(struct 
bch_fs *c) 1843 { 1844 struct inode_walker w = inode_walker_init(); 1845 struct snapshots_seen s; 1846 struct extent_ends extent_ends; 1847 struct disk_reservation res = { 0 }; 1848 1849 snapshots_seen_init(&s); 1850 extent_ends_init(&extent_ends); 1851 1852 int ret = bch2_trans_run(c, 1853 for_each_btree_key(trans, iter, BTREE_ID_extents, 1854 POS(BCACHEFS_ROOT_INO, 0), 1855 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({ 1856 bch2_disk_reservation_put(c, &res); 1857 check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?: 1858 check_extent_overbig(trans, &iter, k); 1859 })) ?: 1860 check_i_sectors_notnested(trans, &w)); 1861 1862 bch2_disk_reservation_put(c, &res); 1863 extent_ends_exit(&extent_ends); 1864 inode_walker_exit(&w); 1865 snapshots_seen_exit(&s); 1866 1867 bch_err_fn(c, ret); 1868 return ret; 1869 } 1870 1871 int bch2_check_indirect_extents(struct bch_fs *c) 1872 { 1873 struct disk_reservation res = { 0 }; 1874 1875 int ret = bch2_trans_run(c, 1876 for_each_btree_key_commit(trans, iter, BTREE_ID_reflink, 1877 POS_MIN, 1878 BTREE_ITER_prefetch, k, 1879 &res, NULL, 1880 BCH_TRANS_COMMIT_no_enospc, ({ 1881 bch2_disk_reservation_put(c, &res); 1882 check_extent_overbig(trans, &iter, k); 1883 }))); 1884 1885 bch2_disk_reservation_put(c, &res); 1886 bch_err_fn(c, ret); 1887 return ret; 1888 } 1889 1890 static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_walker *w) 1891 { 1892 struct bch_fs *c = trans->c; 1893 int ret = 0; 1894 s64 count2; 1895 1896 darray_for_each(w->inodes, i) { 1897 if (i->inode.bi_nlink == i->count) 1898 continue; 1899 1900 count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot); 1901 if (count2 < 0) 1902 return count2; 1903 1904 if (i->count != count2) { 1905 bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu", 1906 w->last_pos.inode, i->snapshot, i->count, count2); 1907 i->count = count2; 1908 if (i->inode.bi_nlink == i->count) 1909 continue; 
1910 } 1911 1912 if (fsck_err_on(i->inode.bi_nlink != i->count, 1913 trans, inode_dir_wrong_nlink, 1914 "directory %llu:%u with wrong i_nlink: got %u, should be %llu", 1915 w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) { 1916 i->inode.bi_nlink = i->count; 1917 ret = bch2_fsck_write_inode(trans, &i->inode); 1918 if (ret) 1919 break; 1920 } 1921 } 1922 fsck_err: 1923 bch_err_fn(c, ret); 1924 return ret; 1925 } 1926 1927 static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_walker *w) 1928 { 1929 u32 restart_count = trans->restart_count; 1930 return check_subdir_count_notnested(trans, w) ?: 1931 trans_was_restarted(trans, restart_count); 1932 } 1933 1934 /* find a subvolume that's a descendent of @snapshot: */ 1935 static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid) 1936 { 1937 struct btree_iter iter; 1938 struct bkey_s_c k; 1939 int ret; 1940 1941 for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN, 0, k, ret) { 1942 if (k.k->type != KEY_TYPE_subvolume) 1943 continue; 1944 1945 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k); 1946 if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) { 1947 bch2_trans_iter_exit(trans, &iter); 1948 *subvolid = k.k->p.offset; 1949 goto found; 1950 } 1951 } 1952 if (!ret) 1953 ret = -ENOENT; 1954 found: 1955 bch2_trans_iter_exit(trans, &iter); 1956 return ret; 1957 } 1958 1959 noinline_for_stack 1960 static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter, 1961 struct bkey_s_c_dirent d) 1962 { 1963 struct bch_fs *c = trans->c; 1964 struct btree_iter subvol_iter = {}; 1965 struct bch_inode_unpacked subvol_root; 1966 u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol); 1967 u32 target_subvol = le32_to_cpu(d.v->d_child_subvol); 1968 u32 parent_snapshot; 1969 u32 new_parent_subvol = 0; 1970 u64 parent_inum; 1971 struct printbuf buf = PRINTBUF; 1972 int ret = 0; 1973 1974 ret = 
subvol_lookup(trans, parent_subvol, &parent_snapshot, &parent_inum); 1975 if (ret && !bch2_err_matches(ret, ENOENT)) 1976 return ret; 1977 1978 if (ret || 1979 (!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot))) { 1980 int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol); 1981 if (ret2 && !bch2_err_matches(ret, ENOENT)) 1982 return ret2; 1983 } 1984 1985 if (ret && 1986 !new_parent_subvol && 1987 (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) { 1988 /* 1989 * Couldn't find a subvol for dirent's snapshot - but we lost 1990 * subvols, so we need to reconstruct: 1991 */ 1992 ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0); 1993 if (ret) 1994 return ret; 1995 1996 parent_snapshot = d.k->p.snapshot; 1997 } 1998 1999 if (fsck_err_on(ret, 2000 trans, dirent_to_missing_parent_subvol, 2001 "dirent parent_subvol points to missing subvolume\n%s", 2002 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) || 2003 fsck_err_on(!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot), 2004 trans, dirent_not_visible_in_parent_subvol, 2005 "dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s", 2006 parent_snapshot, 2007 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) { 2008 if (!new_parent_subvol) { 2009 bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot); 2010 return -BCH_ERR_fsck_repair_unimplemented; 2011 } 2012 2013 struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent); 2014 ret = PTR_ERR_OR_ZERO(new_dirent); 2015 if (ret) 2016 goto err; 2017 2018 new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol); 2019 } 2020 2021 struct bkey_s_c_subvolume s = 2022 bch2_bkey_get_iter_typed(trans, &subvol_iter, 2023 BTREE_ID_subvolumes, POS(0, target_subvol), 2024 0, subvolume); 2025 ret = bkey_err(s.s_c); 2026 if (ret && !bch2_err_matches(ret, ENOENT)) 2027 return ret; 2028 2029 if (ret) { 2030 if (fsck_err(trans, 
dirent_to_missing_subvol, 2031 "dirent points to missing subvolume\n%s", 2032 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) 2033 return bch2_fsck_remove_dirent(trans, d.k->p); 2034 ret = 0; 2035 goto out; 2036 } 2037 2038 if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol, 2039 trans, subvol_fs_path_parent_wrong, 2040 "subvol with wrong fs_path_parent, should be be %u\n%s", 2041 parent_subvol, 2042 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { 2043 struct bkey_i_subvolume *n = 2044 bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume); 2045 ret = PTR_ERR_OR_ZERO(n); 2046 if (ret) 2047 goto err; 2048 2049 n->v.fs_path_parent = cpu_to_le32(parent_subvol); 2050 } 2051 2052 u64 target_inum = le64_to_cpu(s.v->inode); 2053 u32 target_snapshot = le32_to_cpu(s.v->snapshot); 2054 2055 ret = lookup_inode(trans, target_inum, target_snapshot, &subvol_root); 2056 if (ret && !bch2_err_matches(ret, ENOENT)) 2057 goto err; 2058 2059 if (ret) { 2060 bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum); 2061 ret = -BCH_ERR_fsck_repair_unimplemented; 2062 goto err; 2063 } 2064 2065 if (fsck_err_on(!ret && parent_subvol != subvol_root.bi_parent_subvol, 2066 trans, inode_bi_parent_wrong, 2067 "subvol root %llu has wrong bi_parent_subvol: got %u, should be %u", 2068 target_inum, 2069 subvol_root.bi_parent_subvol, parent_subvol)) { 2070 subvol_root.bi_parent_subvol = parent_subvol; 2071 subvol_root.bi_snapshot = le32_to_cpu(s.v->snapshot); 2072 ret = __bch2_fsck_write_inode(trans, &subvol_root); 2073 if (ret) 2074 goto err; 2075 } 2076 2077 ret = bch2_check_dirent_target(trans, iter, d, &subvol_root, true); 2078 if (ret) 2079 goto err; 2080 out: 2081 err: 2082 fsck_err: 2083 bch2_trans_iter_exit(trans, &subvol_iter); 2084 printbuf_exit(&buf); 2085 return ret; 2086 } 2087 2088 static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, 2089 struct bkey_s_c k, 2090 struct bch_hash_info *hash_info, 
2091 struct inode_walker *dir, 2092 struct inode_walker *target, 2093 struct snapshots_seen *s) 2094 { 2095 struct bch_fs *c = trans->c; 2096 struct inode_walker_entry *i; 2097 struct printbuf buf = PRINTBUF; 2098 int ret = 0; 2099 2100 ret = bch2_check_key_has_snapshot(trans, iter, k); 2101 if (ret) { 2102 ret = ret < 0 ? ret : 0; 2103 goto out; 2104 } 2105 2106 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); 2107 if (ret) 2108 goto err; 2109 2110 if (k.k->type == KEY_TYPE_whiteout) 2111 goto out; 2112 2113 if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) { 2114 ret = check_subdir_dirents_count(trans, dir); 2115 if (ret) 2116 goto err; 2117 } 2118 2119 i = walk_inode(trans, dir, k); 2120 ret = PTR_ERR_OR_ZERO(i); 2121 if (ret < 0) 2122 goto err; 2123 2124 ret = check_key_has_inode(trans, iter, dir, i, k); 2125 if (ret) 2126 goto err; 2127 2128 if (!i) 2129 goto out; 2130 2131 if (dir->first_this_inode) 2132 *hash_info = bch2_hash_info_init(c, &i->inode); 2133 dir->first_this_inode = false; 2134 2135 ret = bch2_str_hash_check_key(trans, s, &bch2_dirent_hash_desc, hash_info, iter, k); 2136 if (ret < 0) 2137 goto err; 2138 if (ret) { 2139 /* dirent has been deleted */ 2140 ret = 0; 2141 goto out; 2142 } 2143 2144 if (k.k->type != KEY_TYPE_dirent) 2145 goto out; 2146 2147 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); 2148 2149 if (d.v->d_type == DT_SUBVOL) { 2150 ret = check_dirent_to_subvol(trans, iter, d); 2151 if (ret) 2152 goto err; 2153 } else { 2154 ret = get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum)); 2155 if (ret) 2156 goto err; 2157 2158 if (fsck_err_on(!target->inodes.nr, 2159 trans, dirent_to_missing_inode, 2160 "dirent points to missing inode:\n%s", 2161 (printbuf_reset(&buf), 2162 bch2_bkey_val_to_text(&buf, c, k), 2163 buf.buf))) { 2164 ret = bch2_fsck_remove_dirent(trans, d.k->p); 2165 if (ret) 2166 goto err; 2167 } 2168 2169 darray_for_each(target->inodes, i) { 2170 ret = bch2_check_dirent_target(trans, iter, 
d, &i->inode, true); 2171 if (ret) 2172 goto err; 2173 } 2174 2175 darray_for_each(target->deletes, i) 2176 if (fsck_err_on(!snapshot_list_has_id(&s->ids, *i), 2177 trans, dirent_to_overwritten_inode, 2178 "dirent points to inode overwritten in snapshot %u:\n%s", 2179 *i, 2180 (printbuf_reset(&buf), 2181 bch2_bkey_val_to_text(&buf, c, k), 2182 buf.buf))) { 2183 struct btree_iter delete_iter; 2184 bch2_trans_iter_init(trans, &delete_iter, 2185 BTREE_ID_dirents, 2186 SPOS(k.k->p.inode, k.k->p.offset, *i), 2187 BTREE_ITER_intent); 2188 ret = bch2_btree_iter_traverse(trans, &delete_iter) ?: 2189 bch2_hash_delete_at(trans, bch2_dirent_hash_desc, 2190 hash_info, 2191 &delete_iter, 2192 BTREE_UPDATE_internal_snapshot_node); 2193 bch2_trans_iter_exit(trans, &delete_iter); 2194 if (ret) 2195 goto err; 2196 2197 } 2198 } 2199 2200 ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); 2201 if (ret) 2202 goto err; 2203 2204 for_each_visible_inode(c, s, dir, d.k->p.snapshot, i) { 2205 if (d.v->d_type == DT_DIR) 2206 i->count++; 2207 i->i_size += bkey_bytes(d.k); 2208 } 2209 out: 2210 err: 2211 fsck_err: 2212 printbuf_exit(&buf); 2213 bch_err_fn(c, ret); 2214 return ret; 2215 } 2216 2217 /* 2218 * Walk dirents: verify that they all have a corresponding S_ISDIR inode, 2219 * validate d_type 2220 */ 2221 int bch2_check_dirents(struct bch_fs *c) 2222 { 2223 struct inode_walker dir = inode_walker_init(); 2224 struct inode_walker target = inode_walker_init(); 2225 struct snapshots_seen s; 2226 struct bch_hash_info hash_info; 2227 2228 snapshots_seen_init(&s); 2229 2230 int ret = bch2_trans_run(c, 2231 for_each_btree_key(trans, iter, BTREE_ID_dirents, 2232 POS(BCACHEFS_ROOT_INO, 0), 2233 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, 2234 check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)) ?: 2235 check_subdir_count_notnested(trans, &dir)); 2236 2237 snapshots_seen_exit(&s); 2238 inode_walker_exit(&dir); 2239 inode_walker_exit(&target); 2240 bch_err_fn(c, 
ret); 2241 return ret; 2242 } 2243 2244 static int check_xattr(struct btree_trans *trans, struct btree_iter *iter, 2245 struct bkey_s_c k, 2246 struct bch_hash_info *hash_info, 2247 struct inode_walker *inode) 2248 { 2249 struct bch_fs *c = trans->c; 2250 struct inode_walker_entry *i; 2251 int ret; 2252 2253 ret = bch2_check_key_has_snapshot(trans, iter, k); 2254 if (ret < 0) 2255 return ret; 2256 if (ret) 2257 return 0; 2258 2259 i = walk_inode(trans, inode, k); 2260 ret = PTR_ERR_OR_ZERO(i); 2261 if (ret) 2262 return ret; 2263 2264 ret = check_key_has_inode(trans, iter, inode, i, k); 2265 if (ret) 2266 return ret; 2267 2268 if (!i) 2269 return 0; 2270 2271 if (inode->first_this_inode) 2272 *hash_info = bch2_hash_info_init(c, &i->inode); 2273 inode->first_this_inode = false; 2274 2275 ret = bch2_str_hash_check_key(trans, NULL, &bch2_xattr_hash_desc, hash_info, iter, k); 2276 bch_err_fn(c, ret); 2277 return ret; 2278 } 2279 2280 /* 2281 * Walk xattrs: verify that they all have a corresponding inode 2282 */ 2283 int bch2_check_xattrs(struct bch_fs *c) 2284 { 2285 struct inode_walker inode = inode_walker_init(); 2286 struct bch_hash_info hash_info; 2287 int ret = 0; 2288 2289 ret = bch2_trans_run(c, 2290 for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, 2291 POS(BCACHEFS_ROOT_INO, 0), 2292 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, 2293 k, 2294 NULL, NULL, 2295 BCH_TRANS_COMMIT_no_enospc, 2296 check_xattr(trans, &iter, k, &hash_info, &inode))); 2297 2298 inode_walker_exit(&inode); 2299 bch_err_fn(c, ret); 2300 return ret; 2301 } 2302 2303 static int check_root_trans(struct btree_trans *trans) 2304 { 2305 struct bch_fs *c = trans->c; 2306 struct bch_inode_unpacked root_inode; 2307 u32 snapshot; 2308 u64 inum; 2309 int ret; 2310 2311 ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum); 2312 if (ret && !bch2_err_matches(ret, ENOENT)) 2313 return ret; 2314 2315 if (mustfix_fsck_err_on(ret, trans, root_subvol_missing, 2316 "root subvol missing")) { 
2317 struct bkey_i_subvolume *root_subvol = 2318 bch2_trans_kmalloc(trans, sizeof(*root_subvol)); 2319 ret = PTR_ERR_OR_ZERO(root_subvol); 2320 if (ret) 2321 goto err; 2322 2323 snapshot = U32_MAX; 2324 inum = BCACHEFS_ROOT_INO; 2325 2326 bkey_subvolume_init(&root_subvol->k_i); 2327 root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL; 2328 root_subvol->v.flags = 0; 2329 root_subvol->v.snapshot = cpu_to_le32(snapshot); 2330 root_subvol->v.inode = cpu_to_le64(inum); 2331 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0); 2332 bch_err_msg(c, ret, "writing root subvol"); 2333 if (ret) 2334 goto err; 2335 } 2336 2337 ret = lookup_inode(trans, BCACHEFS_ROOT_INO, snapshot, &root_inode); 2338 if (ret && !bch2_err_matches(ret, ENOENT)) 2339 return ret; 2340 2341 if (mustfix_fsck_err_on(ret, 2342 trans, root_dir_missing, 2343 "root directory missing") || 2344 mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), 2345 trans, root_inode_not_dir, 2346 "root inode not a directory")) { 2347 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 2348 0, NULL); 2349 root_inode.bi_inum = inum; 2350 root_inode.bi_snapshot = snapshot; 2351 2352 ret = __bch2_fsck_write_inode(trans, &root_inode); 2353 bch_err_msg(c, ret, "writing root inode"); 2354 } 2355 err: 2356 fsck_err: 2357 return ret; 2358 } 2359 2360 /* Get root directory, create if it doesn't exist: */ 2361 int bch2_check_root(struct bch_fs *c) 2362 { 2363 int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 2364 check_root_trans(trans)); 2365 bch_err_fn(c, ret); 2366 return ret; 2367 } 2368 2369 typedef DARRAY(u32) darray_u32; 2370 2371 static bool darray_u32_has(darray_u32 *d, u32 v) 2372 { 2373 darray_for_each(*d, i) 2374 if (*i == v) 2375 return true; 2376 return false; 2377 } 2378 2379 static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) 2380 { 2381 struct bch_fs *c = trans->c; 2382 struct btree_iter parent_iter = {}; 2383 darray_u32 
subvol_path = {}; 2384 struct printbuf buf = PRINTBUF; 2385 int ret = 0; 2386 2387 if (k.k->type != KEY_TYPE_subvolume) 2388 return 0; 2389 2390 while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) { 2391 ret = darray_push(&subvol_path, k.k->p.offset); 2392 if (ret) 2393 goto err; 2394 2395 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k); 2396 2397 struct bch_inode_unpacked subvol_root; 2398 ret = bch2_inode_find_by_inum_trans(trans, 2399 (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) }, 2400 &subvol_root); 2401 if (ret) 2402 break; 2403 2404 u32 parent = le32_to_cpu(s.v->fs_path_parent); 2405 2406 if (darray_u32_has(&subvol_path, parent)) { 2407 if (fsck_err(c, subvol_loop, "subvolume loop")) 2408 ret = reattach_subvol(trans, s); 2409 break; 2410 } 2411 2412 bch2_trans_iter_exit(trans, &parent_iter); 2413 bch2_trans_iter_init(trans, &parent_iter, 2414 BTREE_ID_subvolumes, POS(0, parent), 0); 2415 k = bch2_btree_iter_peek_slot(trans, &parent_iter); 2416 ret = bkey_err(k); 2417 if (ret) 2418 goto err; 2419 2420 if (fsck_err_on(k.k->type != KEY_TYPE_subvolume, 2421 trans, subvol_unreachable, 2422 "unreachable subvolume %s", 2423 (bch2_bkey_val_to_text(&buf, c, s.s_c), 2424 buf.buf))) { 2425 ret = reattach_subvol(trans, s); 2426 break; 2427 } 2428 } 2429 fsck_err: 2430 err: 2431 printbuf_exit(&buf); 2432 darray_exit(&subvol_path); 2433 bch2_trans_iter_exit(trans, &parent_iter); 2434 return ret; 2435 } 2436 2437 int bch2_check_subvolume_structure(struct bch_fs *c) 2438 { 2439 int ret = bch2_trans_run(c, 2440 for_each_btree_key_commit(trans, iter, 2441 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k, 2442 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 2443 check_subvol_path(trans, &iter, k))); 2444 bch_err_fn(c, ret); 2445 return ret; 2446 } 2447 2448 struct pathbuf_entry { 2449 u64 inum; 2450 u32 snapshot; 2451 }; 2452 2453 typedef DARRAY(struct pathbuf_entry) pathbuf; 2454 2455 static int bch2_bi_depth_renumber_one(struct btree_trans *trans, struct pathbuf_entry 
*p, 2456 u32 new_depth) 2457 { 2458 struct btree_iter iter; 2459 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, 2460 SPOS(0, p->inum, p->snapshot), 0); 2461 2462 struct bch_inode_unpacked inode; 2463 int ret = bkey_err(k) ?: 2464 !bkey_is_inode(k.k) ? -BCH_ERR_ENOENT_inode 2465 : bch2_inode_unpack(k, &inode); 2466 if (ret) 2467 goto err; 2468 2469 if (inode.bi_depth != new_depth) { 2470 inode.bi_depth = new_depth; 2471 ret = __bch2_fsck_write_inode(trans, &inode) ?: 2472 bch2_trans_commit(trans, NULL, NULL, 0); 2473 } 2474 err: 2475 bch2_trans_iter_exit(trans, &iter); 2476 return ret; 2477 } 2478 2479 static int bch2_bi_depth_renumber(struct btree_trans *trans, pathbuf *path, u32 new_bi_depth) 2480 { 2481 u32 restart_count = trans->restart_count; 2482 int ret = 0; 2483 2484 darray_for_each_reverse(*path, i) { 2485 ret = nested_lockrestart_do(trans, 2486 bch2_bi_depth_renumber_one(trans, i, new_bi_depth)); 2487 bch_err_fn(trans->c, ret); 2488 if (ret) 2489 break; 2490 2491 new_bi_depth++; 2492 } 2493 2494 return ret ?: trans_was_restarted(trans, restart_count); 2495 } 2496 2497 static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot) 2498 { 2499 darray_for_each(*p, i) 2500 if (i->inum == inum && 2501 i->snapshot == snapshot) 2502 return true; 2503 return false; 2504 } 2505 2506 static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) 2507 { 2508 struct bch_fs *c = trans->c; 2509 struct btree_iter inode_iter = {}; 2510 pathbuf path = {}; 2511 struct printbuf buf = PRINTBUF; 2512 u32 snapshot = inode_k.k->p.snapshot; 2513 bool redo_bi_depth = false; 2514 u32 min_bi_depth = U32_MAX; 2515 int ret = 0; 2516 2517 struct bch_inode_unpacked inode; 2518 ret = bch2_inode_unpack(inode_k, &inode); 2519 if (ret) 2520 return ret; 2521 2522 while (!inode.bi_subvol) { 2523 struct btree_iter dirent_iter; 2524 struct bkey_s_c_dirent d; 2525 u32 parent_snapshot = snapshot; 2526 2527 d = inode_get_dirent(trans, &dirent_iter, &inode, 
&parent_snapshot); 2528 ret = bkey_err(d.s_c); 2529 if (ret && !bch2_err_matches(ret, ENOENT)) 2530 goto out; 2531 2532 if (!ret && (ret = dirent_points_to_inode(c, d, &inode))) 2533 bch2_trans_iter_exit(trans, &dirent_iter); 2534 2535 if (bch2_err_matches(ret, ENOENT)) { 2536 printbuf_reset(&buf); 2537 bch2_bkey_val_to_text(&buf, c, inode_k); 2538 bch_err(c, "unreachable inode in check_directory_structure: %s\n%s", 2539 bch2_err_str(ret), buf.buf); 2540 goto out; 2541 } 2542 2543 bch2_trans_iter_exit(trans, &dirent_iter); 2544 2545 ret = darray_push(&path, ((struct pathbuf_entry) { 2546 .inum = inode.bi_inum, 2547 .snapshot = snapshot, 2548 })); 2549 if (ret) 2550 return ret; 2551 2552 snapshot = parent_snapshot; 2553 2554 bch2_trans_iter_exit(trans, &inode_iter); 2555 inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, 2556 SPOS(0, inode.bi_dir, snapshot), 0); 2557 2558 struct bch_inode_unpacked parent_inode; 2559 ret = bkey_err(inode_k) ?: 2560 !bkey_is_inode(inode_k.k) ? 
-BCH_ERR_ENOENT_inode 2561 : bch2_inode_unpack(inode_k, &parent_inode); 2562 if (ret) { 2563 /* Should have been caught in dirents pass */ 2564 bch_err_msg(c, ret, "error looking up parent directory"); 2565 goto out; 2566 } 2567 2568 min_bi_depth = parent_inode.bi_depth; 2569 2570 if (parent_inode.bi_depth < inode.bi_depth && 2571 min_bi_depth < U16_MAX) 2572 break; 2573 2574 inode = parent_inode; 2575 snapshot = inode_k.k->p.snapshot; 2576 redo_bi_depth = true; 2577 2578 if (path_is_dup(&path, inode.bi_inum, snapshot)) { 2579 /* XXX print path */ 2580 bch_err(c, "directory structure loop"); 2581 2582 darray_for_each(path, i) 2583 pr_err("%llu:%u", i->inum, i->snapshot); 2584 pr_err("%llu:%u", inode.bi_inum, snapshot); 2585 2586 if (fsck_err(trans, dir_loop, "directory structure loop")) { 2587 ret = remove_backpointer(trans, &inode); 2588 bch_err_msg(c, ret, "removing dirent"); 2589 if (ret) 2590 break; 2591 2592 ret = reattach_inode(trans, &inode); 2593 bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum); 2594 } 2595 2596 goto out; 2597 } 2598 } 2599 2600 if (inode.bi_subvol) 2601 min_bi_depth = 0; 2602 2603 if (redo_bi_depth) 2604 ret = bch2_bi_depth_renumber(trans, &path, min_bi_depth); 2605 out: 2606 fsck_err: 2607 bch2_trans_iter_exit(trans, &inode_iter); 2608 darray_exit(&path); 2609 printbuf_exit(&buf); 2610 bch_err_fn(c, ret); 2611 return ret; 2612 } 2613 2614 /* 2615 * Check for loops in the directory structure: all other connectivity issues 2616 * have been fixed by prior passes 2617 */ 2618 int bch2_check_directory_structure(struct bch_fs *c) 2619 { 2620 int ret = bch2_trans_run(c, 2621 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN, 2622 BTREE_ITER_intent| 2623 BTREE_ITER_prefetch| 2624 BTREE_ITER_all_snapshots, k, 2625 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ 2626 if (!S_ISDIR(bkey_inode_mode(k))) 2627 continue; 2628 2629 if (bch2_inode_flags(k) & BCH_INODE_unlinked) 2630 continue; 2631 2632 check_path_loop(trans, k); 2633 
}))); 2634 2635 bch_err_fn(c, ret); 2636 return ret; 2637 } 2638 2639 struct nlink_table { 2640 size_t nr; 2641 size_t size; 2642 2643 struct nlink { 2644 u64 inum; 2645 u32 snapshot; 2646 u32 count; 2647 } *d; 2648 }; 2649 2650 static int add_nlink(struct bch_fs *c, struct nlink_table *t, 2651 u64 inum, u32 snapshot) 2652 { 2653 if (t->nr == t->size) { 2654 size_t new_size = max_t(size_t, 128UL, t->size * 2); 2655 void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL); 2656 2657 if (!d) { 2658 bch_err(c, "fsck: error allocating memory for nlink_table, size %zu", 2659 new_size); 2660 return -BCH_ERR_ENOMEM_fsck_add_nlink; 2661 } 2662 2663 if (t->d) 2664 memcpy(d, t->d, t->size * sizeof(t->d[0])); 2665 kvfree(t->d); 2666 2667 t->d = d; 2668 t->size = new_size; 2669 } 2670 2671 2672 t->d[t->nr++] = (struct nlink) { 2673 .inum = inum, 2674 .snapshot = snapshot, 2675 }; 2676 2677 return 0; 2678 } 2679 2680 static int nlink_cmp(const void *_l, const void *_r) 2681 { 2682 const struct nlink *l = _l; 2683 const struct nlink *r = _r; 2684 2685 return cmp_int(l->inum, r->inum); 2686 } 2687 2688 static void inc_link(struct bch_fs *c, struct snapshots_seen *s, 2689 struct nlink_table *links, 2690 u64 range_start, u64 range_end, u64 inum, u32 snapshot) 2691 { 2692 struct nlink *link, key = { 2693 .inum = inum, .snapshot = U32_MAX, 2694 }; 2695 2696 if (inum < range_start || inum >= range_end) 2697 return; 2698 2699 link = __inline_bsearch(&key, links->d, links->nr, 2700 sizeof(links->d[0]), nlink_cmp); 2701 if (!link) 2702 return; 2703 2704 while (link > links->d && link[0].inum == link[-1].inum) 2705 --link; 2706 2707 for (; link < links->d + links->nr && link->inum == inum; link++) 2708 if (ref_visible(c, s, snapshot, link->snapshot)) { 2709 link->count++; 2710 if (link->snapshot >= snapshot) 2711 break; 2712 } 2713 } 2714 2715 noinline_for_stack 2716 static int check_nlinks_find_hardlinks(struct bch_fs *c, 2717 struct nlink_table *t, 2718 u64 start, u64 *end) 2719 
{ 2720 int ret = bch2_trans_run(c, 2721 for_each_btree_key(trans, iter, BTREE_ID_inodes, 2722 POS(0, start), 2723 BTREE_ITER_intent| 2724 BTREE_ITER_prefetch| 2725 BTREE_ITER_all_snapshots, k, ({ 2726 if (!bkey_is_inode(k.k)) 2727 continue; 2728 2729 /* Should never fail, checked by bch2_inode_invalid: */ 2730 struct bch_inode_unpacked u; 2731 _ret3 = bch2_inode_unpack(k, &u); 2732 if (_ret3) 2733 break; 2734 2735 /* 2736 * Backpointer and directory structure checks are sufficient for 2737 * directories, since they can't have hardlinks: 2738 */ 2739 if (S_ISDIR(u.bi_mode)) 2740 continue; 2741 2742 /* 2743 * Previous passes ensured that bi_nlink is nonzero if 2744 * it had multiple hardlinks: 2745 */ 2746 if (!u.bi_nlink) 2747 continue; 2748 2749 ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot); 2750 if (ret) { 2751 *end = k.k->p.offset; 2752 ret = 0; 2753 break; 2754 } 2755 0; 2756 }))); 2757 2758 bch_err_fn(c, ret); 2759 return ret; 2760 } 2761 2762 noinline_for_stack 2763 static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links, 2764 u64 range_start, u64 range_end) 2765 { 2766 struct snapshots_seen s; 2767 2768 snapshots_seen_init(&s); 2769 2770 int ret = bch2_trans_run(c, 2771 for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN, 2772 BTREE_ITER_intent| 2773 BTREE_ITER_prefetch| 2774 BTREE_ITER_all_snapshots, k, ({ 2775 ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p); 2776 if (ret) 2777 break; 2778 2779 if (k.k->type == KEY_TYPE_dirent) { 2780 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); 2781 2782 if (d.v->d_type != DT_DIR && 2783 d.v->d_type != DT_SUBVOL) 2784 inc_link(c, &s, links, range_start, range_end, 2785 le64_to_cpu(d.v->d_inum), d.k->p.snapshot); 2786 } 2787 0; 2788 }))); 2789 2790 snapshots_seen_exit(&s); 2791 2792 bch_err_fn(c, ret); 2793 return ret; 2794 } 2795 2796 static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter, 2797 struct bkey_s_c k, 2798 struct nlink_table 
*links, 2799 size_t *idx, u64 range_end) 2800 { 2801 struct bch_inode_unpacked u; 2802 struct nlink *link = &links->d[*idx]; 2803 int ret = 0; 2804 2805 if (k.k->p.offset >= range_end) 2806 return 1; 2807 2808 if (!bkey_is_inode(k.k)) 2809 return 0; 2810 2811 ret = bch2_inode_unpack(k, &u); 2812 if (ret) 2813 return ret; 2814 2815 if (S_ISDIR(u.bi_mode)) 2816 return 0; 2817 2818 if (!u.bi_nlink) 2819 return 0; 2820 2821 while ((cmp_int(link->inum, k.k->p.offset) ?: 2822 cmp_int(link->snapshot, k.k->p.snapshot)) < 0) { 2823 BUG_ON(*idx == links->nr); 2824 link = &links->d[++*idx]; 2825 } 2826 2827 if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, 2828 trans, inode_wrong_nlink, 2829 "inode %llu type %s has wrong i_nlink (%u, should be %u)", 2830 u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)], 2831 bch2_inode_nlink_get(&u), link->count)) { 2832 bch2_inode_nlink_set(&u, link->count); 2833 ret = __bch2_fsck_write_inode(trans, &u); 2834 } 2835 fsck_err: 2836 return ret; 2837 } 2838 2839 noinline_for_stack 2840 static int check_nlinks_update_hardlinks(struct bch_fs *c, 2841 struct nlink_table *links, 2842 u64 range_start, u64 range_end) 2843 { 2844 size_t idx = 0; 2845 2846 int ret = bch2_trans_run(c, 2847 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, 2848 POS(0, range_start), 2849 BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, 2850 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 2851 check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end))); 2852 if (ret < 0) { 2853 bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret)); 2854 return ret; 2855 } 2856 2857 return 0; 2858 } 2859 2860 int bch2_check_nlinks(struct bch_fs *c) 2861 { 2862 struct nlink_table links = { 0 }; 2863 u64 this_iter_range_start, next_iter_range_start = 0; 2864 int ret = 0; 2865 2866 do { 2867 this_iter_range_start = next_iter_range_start; 2868 next_iter_range_start = U64_MAX; 2869 2870 ret = check_nlinks_find_hardlinks(c, &links, 2871 
this_iter_range_start, 2872 &next_iter_range_start); 2873 2874 ret = check_nlinks_walk_dirents(c, &links, 2875 this_iter_range_start, 2876 next_iter_range_start); 2877 if (ret) 2878 break; 2879 2880 ret = check_nlinks_update_hardlinks(c, &links, 2881 this_iter_range_start, 2882 next_iter_range_start); 2883 if (ret) 2884 break; 2885 2886 links.nr = 0; 2887 } while (next_iter_range_start != U64_MAX); 2888 2889 kvfree(links.d); 2890 bch_err_fn(c, ret); 2891 return ret; 2892 } 2893 2894 static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter, 2895 struct bkey_s_c k) 2896 { 2897 struct bkey_s_c_reflink_p p; 2898 struct bkey_i_reflink_p *u; 2899 2900 if (k.k->type != KEY_TYPE_reflink_p) 2901 return 0; 2902 2903 p = bkey_s_c_to_reflink_p(k); 2904 2905 if (!p.v->front_pad && !p.v->back_pad) 2906 return 0; 2907 2908 u = bch2_trans_kmalloc(trans, sizeof(*u)); 2909 int ret = PTR_ERR_OR_ZERO(u); 2910 if (ret) 2911 return ret; 2912 2913 bkey_reassemble(&u->k_i, k); 2914 u->v.front_pad = 0; 2915 u->v.back_pad = 0; 2916 2917 return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun); 2918 } 2919 2920 int bch2_fix_reflink_p(struct bch_fs *c) 2921 { 2922 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) 2923 return 0; 2924 2925 int ret = bch2_trans_run(c, 2926 for_each_btree_key_commit(trans, iter, 2927 BTREE_ID_extents, POS_MIN, 2928 BTREE_ITER_intent|BTREE_ITER_prefetch| 2929 BTREE_ITER_all_snapshots, k, 2930 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 2931 fix_reflink_p_key(trans, &iter, k))); 2932 bch_err_fn(c, ret); 2933 return ret; 2934 } 2935 2936 #ifndef NO_BCACHEFS_CHARDEV 2937 2938 struct fsck_thread { 2939 struct thread_with_stdio thr; 2940 struct bch_fs *c; 2941 struct bch_opts opts; 2942 }; 2943 2944 static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr) 2945 { 2946 struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr); 2947 kfree(thr); 2948 } 2949 2950 static int bch2_fsck_offline_thread_fn(struct 
thread_with_stdio *stdio) 2951 { 2952 struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); 2953 struct bch_fs *c = thr->c; 2954 2955 int ret = PTR_ERR_OR_ZERO(c); 2956 if (ret) 2957 return ret; 2958 2959 ret = bch2_fs_start(thr->c); 2960 if (ret) 2961 goto err; 2962 2963 if (test_bit(BCH_FS_errors_fixed, &c->flags)) { 2964 bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name); 2965 ret |= 1; 2966 } 2967 if (test_bit(BCH_FS_error, &c->flags)) { 2968 bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name); 2969 ret |= 4; 2970 } 2971 err: 2972 bch2_fs_stop(c); 2973 return ret; 2974 } 2975 2976 static const struct thread_with_stdio_ops bch2_offline_fsck_ops = { 2977 .exit = bch2_fsck_thread_exit, 2978 .fn = bch2_fsck_offline_thread_fn, 2979 }; 2980 2981 long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg) 2982 { 2983 struct bch_ioctl_fsck_offline arg; 2984 struct fsck_thread *thr = NULL; 2985 darray_str(devs) = {}; 2986 long ret = 0; 2987 2988 if (copy_from_user(&arg, user_arg, sizeof(arg))) 2989 return -EFAULT; 2990 2991 if (arg.flags) 2992 return -EINVAL; 2993 2994 if (!capable(CAP_SYS_ADMIN)) 2995 return -EPERM; 2996 2997 for (size_t i = 0; i < arg.nr_devs; i++) { 2998 u64 dev_u64; 2999 ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64)); 3000 if (ret) 3001 goto err; 3002 3003 char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX); 3004 ret = PTR_ERR_OR_ZERO(dev_str); 3005 if (ret) 3006 goto err; 3007 3008 ret = darray_push(&devs, dev_str); 3009 if (ret) { 3010 kfree(dev_str); 3011 goto err; 3012 } 3013 } 3014 3015 thr = kzalloc(sizeof(*thr), GFP_KERNEL); 3016 if (!thr) { 3017 ret = -ENOMEM; 3018 goto err; 3019 } 3020 3021 thr->opts = bch2_opts_empty(); 3022 3023 if (arg.opts) { 3024 char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); 3025 ret = PTR_ERR_OR_ZERO(optstr) ?: 3026 
bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr, false); 3027 if (!IS_ERR(optstr)) 3028 kfree(optstr); 3029 3030 if (ret) 3031 goto err; 3032 } 3033 3034 opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio); 3035 opt_set(thr->opts, read_only, 1); 3036 opt_set(thr->opts, ratelimit_errors, 0); 3037 3038 /* We need request_key() to be called before we punt to kthread: */ 3039 opt_set(thr->opts, nostart, true); 3040 3041 bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); 3042 3043 thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts); 3044 3045 if (!IS_ERR(thr->c) && 3046 thr->c->opts.errors == BCH_ON_ERROR_panic) 3047 thr->c->opts.errors = BCH_ON_ERROR_ro; 3048 3049 ret = __bch2_run_thread_with_stdio(&thr->thr); 3050 out: 3051 darray_for_each(devs, i) 3052 kfree(*i); 3053 darray_exit(&devs); 3054 return ret; 3055 err: 3056 if (thr) 3057 bch2_fsck_thread_exit(&thr->thr); 3058 pr_err("ret %s", bch2_err_str(ret)); 3059 goto out; 3060 } 3061 3062 static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio) 3063 { 3064 struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); 3065 struct bch_fs *c = thr->c; 3066 3067 c->stdio_filter = current; 3068 c->stdio = &thr->thr.stdio; 3069 3070 /* 3071 * XXX: can we figure out a way to do this without mucking with c->opts? 
3072 */ 3073 unsigned old_fix_errors = c->opts.fix_errors; 3074 if (opt_defined(thr->opts, fix_errors)) 3075 c->opts.fix_errors = thr->opts.fix_errors; 3076 else 3077 c->opts.fix_errors = FSCK_FIX_ask; 3078 3079 c->opts.fsck = true; 3080 set_bit(BCH_FS_fsck_running, &c->flags); 3081 3082 c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; 3083 int ret = bch2_run_online_recovery_passes(c); 3084 3085 clear_bit(BCH_FS_fsck_running, &c->flags); 3086 bch_err_fn(c, ret); 3087 3088 c->stdio = NULL; 3089 c->stdio_filter = NULL; 3090 c->opts.fix_errors = old_fix_errors; 3091 3092 up(&c->online_fsck_mutex); 3093 bch2_ro_ref_put(c); 3094 return ret; 3095 } 3096 3097 static const struct thread_with_stdio_ops bch2_online_fsck_ops = { 3098 .exit = bch2_fsck_thread_exit, 3099 .fn = bch2_fsck_online_thread_fn, 3100 }; 3101 3102 long bch2_ioctl_fsck_online(struct bch_fs *c, struct bch_ioctl_fsck_online arg) 3103 { 3104 struct fsck_thread *thr = NULL; 3105 long ret = 0; 3106 3107 if (arg.flags) 3108 return -EINVAL; 3109 3110 if (!capable(CAP_SYS_ADMIN)) 3111 return -EPERM; 3112 3113 if (!bch2_ro_ref_tryget(c)) 3114 return -EROFS; 3115 3116 if (down_trylock(&c->online_fsck_mutex)) { 3117 bch2_ro_ref_put(c); 3118 return -EAGAIN; 3119 } 3120 3121 thr = kzalloc(sizeof(*thr), GFP_KERNEL); 3122 if (!thr) { 3123 ret = -ENOMEM; 3124 goto err; 3125 } 3126 3127 thr->c = c; 3128 thr->opts = bch2_opts_empty(); 3129 3130 if (arg.opts) { 3131 char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); 3132 3133 ret = PTR_ERR_OR_ZERO(optstr) ?: 3134 bch2_parse_mount_opts(c, &thr->opts, NULL, optstr, false); 3135 if (!IS_ERR(optstr)) 3136 kfree(optstr); 3137 3138 if (ret) 3139 goto err; 3140 } 3141 3142 ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops); 3143 err: 3144 if (ret < 0) { 3145 bch_err_fn(c, ret); 3146 if (thr) 3147 bch2_fsck_thread_exit(&thr->thr); 3148 up(&c->online_fsck_mutex); 3149 bch2_ro_ref_put(c); 3150 } 3151 return ret; 3152 } 3153 3154 
#endif /* NO_BCACHEFS_CHARDEV */ 3155