1 // SPDX-License-Identifier: GPL-2.0 2 3 #include "bcachefs.h" 4 #include "backpointers.h" 5 #include "bkey_buf.h" 6 #include "alloc_background.h" 7 #include "btree_gc.h" 8 #include "btree_journal_iter.h" 9 #include "btree_update.h" 10 #include "btree_update_interior.h" 11 #include "btree_io.h" 12 #include "buckets.h" 13 #include "dirent.h" 14 #include "ec.h" 15 #include "errcode.h" 16 #include "error.h" 17 #include "fs-common.h" 18 #include "fsck.h" 19 #include "journal_io.h" 20 #include "journal_reclaim.h" 21 #include "journal_seq_blacklist.h" 22 #include "lru.h" 23 #include "logged_ops.h" 24 #include "move.h" 25 #include "quota.h" 26 #include "rebalance.h" 27 #include "recovery.h" 28 #include "replicas.h" 29 #include "sb-clean.h" 30 #include "snapshot.h" 31 #include "subvolume.h" 32 #include "super-io.h" 33 34 #include <linux/sort.h> 35 #include <linux/stat.h> 36 37 #define QSTR(n) { { { .len = strlen(n) } }, .name = n } 38 39 static bool btree_id_is_alloc(enum btree_id id) 40 { 41 switch (id) { 42 case BTREE_ID_alloc: 43 case BTREE_ID_backpointers: 44 case BTREE_ID_need_discard: 45 case BTREE_ID_freespace: 46 case BTREE_ID_bucket_gens: 47 return true; 48 default: 49 return false; 50 } 51 } 52 53 /* for -o reconstruct_alloc: */ 54 static void drop_alloc_keys(struct journal_keys *keys) 55 { 56 size_t src, dst; 57 58 for (src = 0, dst = 0; src < keys->nr; src++) 59 if (!btree_id_is_alloc(keys->d[src].btree_id)) 60 keys->d[dst++] = keys->d[src]; 61 62 keys->nr = dst; 63 } 64 65 /* 66 * Btree node pointers have a field to stack a pointer to the in memory btree 67 * node; we need to zero out this field when reading in btree nodes, or when 68 * reading in keys from the journal: 69 */ 70 static void zero_out_btree_mem_ptr(struct journal_keys *keys) 71 { 72 struct journal_key *i; 73 74 for (i = keys->d; i < keys->d + keys->nr; i++) 75 if (i->k->k.type == KEY_TYPE_btree_ptr_v2) 76 bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0; 77 } 78 79 /* journal replay: */ 80 81 static void replay_now_at(struct journal *j, u64 seq) 82 { 83 BUG_ON(seq < j->replay_journal_seq); 84 85 seq = min(seq, j->replay_journal_seq_end); 86 87 while (j->replay_journal_seq < seq) 88 bch2_journal_pin_put(j, j->replay_journal_seq++); 89 } 90 91 static int bch2_journal_replay_key(struct btree_trans *trans, 92 struct journal_key *k) 93 { 94 struct btree_iter iter; 95 unsigned iter_flags = 96 BTREE_ITER_INTENT| 97 BTREE_ITER_NOT_EXTENTS; 98 unsigned update_flags = BTREE_TRIGGER_NORUN; 99 int ret; 100 101 /* 102 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to 103 * keep the key cache coherent with the underlying btree. Nothing 104 * besides the allocator is doing updates yet so we don't need key cache 105 * coherency for non-alloc btrees, and key cache fills for snapshots 106 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until 107 * the snapshots recovery pass runs. 108 */ 109 if (!k->level && k->btree_id == BTREE_ID_alloc) 110 iter_flags |= BTREE_ITER_CACHED; 111 else 112 update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM; 113 114 bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, 115 BTREE_MAX_DEPTH, k->level, 116 iter_flags); 117 ret = bch2_btree_iter_traverse(&iter); 118 if (ret) 119 goto out; 120 121 /* Must be checked with btree locked: */ 122 if (k->overwritten) 123 goto out; 124 125 ret = bch2_trans_update(trans, &iter, k->k, update_flags); 126 out: 127 bch2_trans_iter_exit(trans, &iter); 128 return ret; 129 } 130 131 static int journal_sort_seq_cmp(const void *_l, const void *_r) 132 { 133 const struct journal_key *l = *((const struct journal_key **)_l); 134 const struct journal_key *r = *((const struct journal_key **)_r); 135 136 return cmp_int(l->journal_seq, r->journal_seq); 137 } 138 139 static int bch2_journal_replay(struct bch_fs *c) 140 { 141 struct journal_keys *keys = &c->journal_keys; 142 struct journal_key **keys_sorted, *k; 143 struct journal *j = &c->journal; 144 u64 start_seq = c->journal_replay_seq_start; 145 u64 end_seq = c->journal_replay_seq_start; 146 size_t i; 147 int ret; 148 149 move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr); 150 keys->gap = keys->nr; 151 152 keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL); 153 if (!keys_sorted) 154 return -BCH_ERR_ENOMEM_journal_replay; 155 156 for (i = 0; i < keys->nr; i++) 157 keys_sorted[i] = &keys->d[i]; 158 159 sort(keys_sorted, keys->nr, 160 sizeof(keys_sorted[0]), 161 journal_sort_seq_cmp, NULL); 162 163 if (keys->nr) { 164 ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)", 165 keys->nr, start_seq, end_seq); 166 if (ret) 167 goto err; 168 } 169 170 BUG_ON(!atomic_read(&keys->ref)); 171 172 for (i = 0; i < keys->nr; i++) { 173 k = keys_sorted[i]; 174 175 cond_resched(); 176 177 replay_now_at(j, k->journal_seq); 178 179 ret = bch2_trans_do(c, NULL, NULL, 180 BTREE_INSERT_LAZY_RW| 181 BTREE_INSERT_NOFAIL| 182 (!k->allocated 183 ? BTREE_INSERT_JOURNAL_REPLAY|BCH_WATERMARK_reclaim 184 : 0), 185 bch2_journal_replay_key(trans, k)); 186 if (ret) { 187 bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s", 188 bch2_btree_id_str(k->btree_id), k->level, bch2_err_str(ret)); 189 goto err; 190 } 191 } 192 193 if (!c->opts.keep_journal) 194 bch2_journal_keys_put_initial(c); 195 196 replay_now_at(j, j->replay_journal_seq_end); 197 j->replay_journal_seq = 0; 198 199 bch2_journal_set_replay_done(j); 200 bch2_journal_flush_all_pins(j); 201 ret = bch2_journal_error(j); 202 203 if (keys->nr && !ret) 204 bch2_journal_log_msg(c, "journal replay finished"); 205 err: 206 kvfree(keys_sorted); 207 208 if (ret) 209 bch_err_fn(c, ret); 210 return ret; 211 } 212 213 /* journal replay early: */ 214 215 static int journal_replay_entry_early(struct bch_fs *c, 216 struct jset_entry *entry) 217 { 218 int ret = 0; 219 220 switch (entry->type) { 221 case BCH_JSET_ENTRY_btree_root: { 222 struct btree_root *r; 223 224 while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) { 225 ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL }); 226 if (ret) 227 return ret; 228 } 229 230 r = bch2_btree_id_root(c, entry->btree_id); 231 232 if (entry->u64s) { 233 r->level = entry->level; 234 bkey_copy(&r->key, (struct bkey_i *) entry->start); 235 r->error = 0; 236 } else { 237 r->error = -EIO; 238 } 239 r->alive = true; 240 break; 241 } 242 case BCH_JSET_ENTRY_usage: { 243 struct jset_entry_usage *u = 244 container_of(entry, struct jset_entry_usage, entry); 245 246 switch (entry->btree_id) { 247 case BCH_FS_USAGE_reserved: 248 if (entry->level < BCH_REPLICAS_MAX) 249 c->usage_base->persistent_reserved[entry->level] = 250 le64_to_cpu(u->v); 251 break; 252 case BCH_FS_USAGE_inodes: 253 c->usage_base->nr_inodes = le64_to_cpu(u->v); 254 break; 255 case BCH_FS_USAGE_key_version: 256 atomic64_set(&c->key_version, 257 le64_to_cpu(u->v)); 258 break; 259 } 260 261 break; 262 } 263 case BCH_JSET_ENTRY_data_usage: { 264 struct jset_entry_data_usage *u = 265 container_of(entry, struct jset_entry_data_usage, entry); 266 267 ret = bch2_replicas_set_usage(c, &u->r, 268 le64_to_cpu(u->v)); 269 break; 270 } 271 case BCH_JSET_ENTRY_dev_usage: { 272 struct jset_entry_dev_usage *u = 273 container_of(entry, struct jset_entry_dev_usage, entry); 274 struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev)); 275 unsigned i, nr_types = jset_entry_dev_usage_nr_types(u); 276 277 ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec); 278 279 for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) { 280 ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets); 281 ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors); 282 ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented); 283 } 284 285 break; 286 } 287 case BCH_JSET_ENTRY_blacklist: { 288 struct jset_entry_blacklist *bl_entry = 289 container_of(entry, struct jset_entry_blacklist, entry); 290 291 ret = bch2_journal_seq_blacklist_add(c, 292 le64_to_cpu(bl_entry->seq), 293 le64_to_cpu(bl_entry->seq) + 1); 294 break; 295 } 296 case BCH_JSET_ENTRY_blacklist_v2: { 297 struct jset_entry_blacklist_v2 *bl_entry = 298 container_of(entry, struct jset_entry_blacklist_v2, entry); 299 300 ret = bch2_journal_seq_blacklist_add(c, 301 le64_to_cpu(bl_entry->start), 302 le64_to_cpu(bl_entry->end) + 1); 303 break; 304 } 305 case BCH_JSET_ENTRY_clock: { 306 struct jset_entry_clock *clock = 307 container_of(entry, struct jset_entry_clock, entry); 308 309 atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time)); 310 } 311 } 312 313 return ret; 314 } 315 316 static int journal_replay_early(struct bch_fs *c, 317 struct bch_sb_field_clean *clean) 318 { 319 struct jset_entry *entry; 320 int ret; 321 322 if (clean) { 323 for (entry = clean->start; 324 entry != vstruct_end(&clean->field); 325 entry = vstruct_next(entry)) { 326 ret = journal_replay_entry_early(c, entry); 327 if (ret) 328 return ret; 329 } 330 } else { 331 struct genradix_iter iter; 332 struct journal_replay *i, **_i; 333 334 genradix_for_each(&c->journal_entries, iter, _i) { 335 i = *_i; 336 337 if (!i || i->ignore) 338 continue; 339 340 vstruct_for_each(&i->j, entry) { 341 ret = journal_replay_entry_early(c, entry); 342 if (ret) 343 return ret; 344 } 345 } 346 } 347 348 bch2_fs_usage_initialize(c); 349 350 return 0; 351 } 352 353 /* sb clean section: */ 354 355 static int read_btree_roots(struct bch_fs *c) 356 { 357 unsigned i; 358 int ret = 0; 359 360 for (i = 0; i < btree_id_nr_alive(c); i++) { 361 struct btree_root *r = bch2_btree_id_root(c, i); 362 363 if (!r->alive) 364 continue; 365 366 if (btree_id_is_alloc(i) && 367 c->opts.reconstruct_alloc) { 368 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 369 continue; 370 } 371 372 if (r->error) { 373 __fsck_err(c, 374 btree_id_is_alloc(i) 375 ? FSCK_CAN_IGNORE : 0, 376 btree_root_bkey_invalid, 377 "invalid btree root %s", 378 bch2_btree_id_str(i)); 379 if (i == BTREE_ID_alloc) 380 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 381 } 382 383 ret = bch2_btree_root_read(c, i, &r->key, r->level); 384 if (ret) { 385 fsck_err(c, 386 btree_root_read_error, 387 "error reading btree root %s", 388 bch2_btree_id_str(i)); 389 if (btree_id_is_alloc(i)) 390 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 391 ret = 0; 392 } 393 } 394 395 for (i = 0; i < BTREE_ID_NR; i++) { 396 struct btree_root *r = bch2_btree_id_root(c, i); 397 398 if (!r->b) { 399 r->alive = false; 400 r->level = 0; 401 bch2_btree_root_alloc(c, i); 402 } 403 } 404 fsck_err: 405 return ret; 406 } 407 408 static int bch2_initialize_subvolumes(struct bch_fs *c) 409 { 410 struct bkey_i_snapshot_tree root_tree; 411 struct bkey_i_snapshot root_snapshot; 412 struct bkey_i_subvolume root_volume; 413 int ret; 414 415 bkey_snapshot_tree_init(&root_tree.k_i); 416 root_tree.k.p.offset = 1; 417 root_tree.v.master_subvol = cpu_to_le32(1); 418 root_tree.v.root_snapshot = cpu_to_le32(U32_MAX); 419 420 bkey_snapshot_init(&root_snapshot.k_i); 421 root_snapshot.k.p.offset = U32_MAX; 422 root_snapshot.v.flags = 0; 423 root_snapshot.v.parent = 0; 424 root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL); 425 root_snapshot.v.tree = cpu_to_le32(1); 426 SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true); 427 428 bkey_subvolume_init(&root_volume.k_i); 429 root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL; 430 root_volume.v.flags = 0; 431 root_volume.v.snapshot = cpu_to_le32(U32_MAX); 432 root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO); 433 434 ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?: 435 bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?: 436 bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0); 437 if (ret) 438 bch_err_fn(c, ret); 439 return ret; 440 } 441 442 static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans) 443 { 444 struct btree_iter iter; 445 struct bkey_s_c k; 446 struct bch_inode_unpacked inode; 447 int ret; 448 449 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, 450 SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0); 451 ret = bkey_err(k); 452 if (ret) 453 return ret; 454 455 if (!bkey_is_inode(k.k)) { 456 bch_err(trans->c, "root inode not found"); 457 ret = -BCH_ERR_ENOENT_inode; 458 goto err; 459 } 460 461 ret = bch2_inode_unpack(k, &inode); 462 BUG_ON(ret); 463 464 inode.bi_subvol = BCACHEFS_ROOT_SUBVOL; 465 466 ret = bch2_inode_write(trans, &iter, &inode); 467 err: 468 bch2_trans_iter_exit(trans, &iter); 469 return ret; 470 } 471 472 /* set bi_subvol on root inode */ 473 noinline_for_stack 474 static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c) 475 { 476 int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW, 477 __bch2_fs_upgrade_for_subvolumes(trans)); 478 if (ret) 479 bch_err_fn(c, ret); 480 return ret; 481 } 482 483 const char * const bch2_recovery_passes[] = { 484 #define x(_fn, _when) #_fn, 485 BCH_RECOVERY_PASSES() 486 #undef x 487 NULL 488 }; 489 490 static int bch2_check_allocations(struct bch_fs *c) 491 { 492 return bch2_gc(c, true, c->opts.norecovery); 493 } 494 495 static int bch2_set_may_go_rw(struct bch_fs *c) 496 { 497 set_bit(BCH_FS_MAY_GO_RW, &c->flags); 498 return 0; 499 } 500 501 struct recovery_pass_fn { 502 int (*fn)(struct bch_fs *); 503 unsigned when; 504 }; 505 506 static struct recovery_pass_fn recovery_pass_fns[] = { 507 #define x(_fn, _when) { .fn = bch2_##_fn, .when = _when }, 508 BCH_RECOVERY_PASSES() 509 #undef x 510 }; 511 512 static void check_version_upgrade(struct bch_fs *c) 513 { 514 unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version); 515 unsigned latest_version = bcachefs_metadata_version_current; 516 unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version; 517 unsigned new_version = 0; 518 u64 recovery_passes; 519 520 if (old_version < bcachefs_metadata_required_upgrade_below) { 521 if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible || 522 latest_compatible < bcachefs_metadata_required_upgrade_below) 523 new_version = latest_version; 524 else 525 new_version = latest_compatible; 526 } else { 527 switch (c->opts.version_upgrade) { 528 case BCH_VERSION_UPGRADE_compatible: 529 new_version = latest_compatible; 530 break; 531 case BCH_VERSION_UPGRADE_incompatible: 532 new_version = latest_version; 533 break; 534 case BCH_VERSION_UPGRADE_none: 535 new_version = old_version; 536 break; 537 } 538 } 539 540 if (new_version > old_version) { 541 struct printbuf buf = PRINTBUF; 542 543 if (old_version < bcachefs_metadata_required_upgrade_below) 544 prt_str(&buf, "Version upgrade required:\n"); 545 546 if (old_version != c->sb.version) { 547 prt_str(&buf, "Version upgrade from "); 548 bch2_version_to_text(&buf, c->sb.version_upgrade_complete); 549 prt_str(&buf, " to "); 550 bch2_version_to_text(&buf, c->sb.version); 551 prt_str(&buf, " incomplete\n"); 552 } 553 554 prt_printf(&buf, "Doing %s version upgrade from ", 555 BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version) 556 ? "incompatible" : "compatible"); 557 bch2_version_to_text(&buf, old_version); 558 prt_str(&buf, " to "); 559 bch2_version_to_text(&buf, new_version); 560 prt_newline(&buf); 561 562 recovery_passes = bch2_upgrade_recovery_passes(c, old_version, new_version); 563 if (recovery_passes) { 564 if ((recovery_passes & RECOVERY_PASS_ALL_FSCK) == RECOVERY_PASS_ALL_FSCK) 565 prt_str(&buf, "fsck required"); 566 else { 567 prt_str(&buf, "running recovery passes: "); 568 prt_bitflags(&buf, bch2_recovery_passes, recovery_passes); 569 } 570 571 c->recovery_passes_explicit |= recovery_passes; 572 c->opts.fix_errors = FSCK_FIX_yes; 573 } 574 575 bch_info(c, "%s", buf.buf); 576 577 mutex_lock(&c->sb_lock); 578 bch2_sb_upgrade(c, new_version); 579 mutex_unlock(&c->sb_lock); 580 581 printbuf_exit(&buf); 582 } 583 } 584 585 u64 bch2_fsck_recovery_passes(void) 586 { 587 u64 ret = 0; 588 589 for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) 590 if (recovery_pass_fns[i].when & PASS_FSCK) 591 ret |= BIT_ULL(i); 592 return ret; 593 } 594 595 static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) 596 { 597 struct recovery_pass_fn *p = recovery_pass_fns + c->curr_recovery_pass; 598 599 if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read) 600 return false; 601 if (c->recovery_passes_explicit & BIT_ULL(pass)) 602 return true; 603 if ((p->when & PASS_FSCK) && c->opts.fsck) 604 return true; 605 if ((p->when & PASS_UNCLEAN) && !c->sb.clean) 606 return true; 607 if (p->when & PASS_ALWAYS) 608 return true; 609 return false; 610 } 611 612 static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) 613 { 614 int ret; 615 616 c->curr_recovery_pass = pass; 617 618 if (should_run_recovery_pass(c, pass)) { 619 struct recovery_pass_fn *p = recovery_pass_fns + pass; 620 621 if (!(p->when & PASS_SILENT)) 622 printk(KERN_INFO bch2_log_msg(c, "%s..."), 623 bch2_recovery_passes[pass]); 624 ret = p->fn(c); 625 if (ret) 626 return ret; 627 if (!(p->when & PASS_SILENT)) 628 printk(KERN_CONT " done\n"); 629 630 c->recovery_passes_complete |= BIT_ULL(pass); 631 } 632 633 return 0; 634 } 635 636 static int bch2_run_recovery_passes(struct bch_fs *c) 637 { 638 int ret = 0; 639 640 while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) { 641 ret = bch2_run_recovery_pass(c, c->curr_recovery_pass); 642 if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) 643 continue; 644 if (ret) 645 break; 646 c->curr_recovery_pass++; 647 } 648 649 return ret; 650 } 651 652 int bch2_fs_recovery(struct bch_fs *c) 653 { 654 struct bch_sb_field_clean *clean = NULL; 655 struct jset *last_journal_entry = NULL; 656 u64 last_seq = 0, blacklist_seq, journal_seq; 657 bool write_sb = false; 658 int ret = 0; 659 660 if (c->sb.clean) { 661 clean = bch2_read_superblock_clean(c); 662 ret = PTR_ERR_OR_ZERO(clean); 663 if (ret) 664 goto err; 665 666 bch_info(c, "recovering from clean shutdown, journal seq %llu", 667 le64_to_cpu(clean->journal_seq)); 668 } else { 669 bch_info(c, "recovering from unclean shutdown"); 670 } 671 672 if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) { 673 bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported"); 674 ret = -EINVAL; 675 goto err; 676 } 677 678 if (!c->sb.clean && 679 !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) { 680 bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix"); 681 ret = -EINVAL; 682 goto err; 683 } 684 685 if (c->opts.fsck || !(c->opts.nochanges && c->opts.norecovery)) 686 check_version_upgrade(c); 687 688 if (c->opts.fsck && c->opts.norecovery) { 689 bch_err(c, "cannot select both norecovery and fsck"); 690 ret = -EINVAL; 691 goto err; 692 } 693 694 ret = bch2_blacklist_table_initialize(c); 695 if (ret) { 696 bch_err(c, "error initializing blacklist table"); 697 goto err; 698 } 699 700 if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) { 701 struct genradix_iter iter; 702 struct journal_replay **i; 703 704 bch_verbose(c, "starting journal read"); 705 ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq); 706 if (ret) 707 goto err; 708 709 /* 710 * note: cmd_list_journal needs the blacklist table fully up to date so 711 * it can asterisk ignored journal entries: 712 */ 713 if (c->opts.read_journal_only) 714 goto out; 715 716 genradix_for_each_reverse(&c->journal_entries, iter, i) 717 if (*i && !(*i)->ignore) { 718 last_journal_entry = &(*i)->j; 719 break; 720 } 721 722 if (mustfix_fsck_err_on(c->sb.clean && 723 last_journal_entry && 724 !journal_entry_empty(last_journal_entry), c, 725 clean_but_journal_not_empty, 726 "filesystem marked clean but journal not empty")) { 727 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 728 SET_BCH_SB_CLEAN(c->disk_sb.sb, false); 729 c->sb.clean = false; 730 } 731 732 if (!last_journal_entry) { 733 fsck_err_on(!c->sb.clean, c, 734 dirty_but_no_journal_entries, 735 "no journal entries found"); 736 if (clean) 737 goto use_clean; 738 739 genradix_for_each_reverse(&c->journal_entries, iter, i) 740 if (*i) { 741 last_journal_entry = &(*i)->j; 742 (*i)->ignore = false; 743 /* 744 * This was probably a NO_FLUSH entry, 745 * so last_seq was garbage - but we know 746 * we're only using a single journal 747 * entry, set it here: 748 */ 749 (*i)->j.last_seq = (*i)->j.seq; 750 break; 751 } 752 } 753 754 ret = bch2_journal_keys_sort(c); 755 if (ret) 756 goto err; 757 758 if (c->sb.clean && last_journal_entry) { 759 ret = bch2_verify_superblock_clean(c, &clean, 760 last_journal_entry); 761 if (ret) 762 goto err; 763 } 764 } else { 765 use_clean: 766 if (!clean) { 767 bch_err(c, "no superblock clean section found"); 768 ret = -BCH_ERR_fsck_repair_impossible; 769 goto err; 770 771 } 772 blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1; 773 } 774 775 c->journal_replay_seq_start = last_seq; 776 c->journal_replay_seq_end = blacklist_seq - 1; 777 778 if (c->opts.reconstruct_alloc) { 779 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 780 drop_alloc_keys(&c->journal_keys); 781 } 782 783 zero_out_btree_mem_ptr(&c->journal_keys); 784 785 ret = journal_replay_early(c, clean); 786 if (ret) 787 goto err; 788 789 /* 790 * After an unclean shutdown, skip then next few journal sequence 791 * numbers as they may have been referenced by btree writes that 792 * happened before their corresponding journal writes - those btree 793 * writes need to be ignored, by skipping and blacklisting the next few 794 * journal sequence numbers: 795 */ 796 if (!c->sb.clean) 797 journal_seq += 8; 798 799 if (blacklist_seq != journal_seq) { 800 ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu", 801 blacklist_seq, journal_seq) ?: 802 bch2_journal_seq_blacklist_add(c, 803 blacklist_seq, journal_seq); 804 if (ret) { 805 bch_err(c, "error creating new journal seq blacklist entry"); 806 goto err; 807 } 808 } 809 810 ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu", 811 journal_seq, last_seq, blacklist_seq - 1) ?: 812 bch2_fs_journal_start(&c->journal, journal_seq); 813 if (ret) 814 goto err; 815 816 if (c->opts.reconstruct_alloc) 817 bch2_journal_log_msg(c, "dropping alloc info"); 818 819 /* 820 * Skip past versions that might have possibly been used (as nonces), 821 * but hadn't had their pointers written: 822 */ 823 if (c->sb.encryption_type && !c->sb.clean) 824 atomic64_add(1 << 16, &c->key_version); 825 826 ret = read_btree_roots(c); 827 if (ret) 828 goto err; 829 830 if (c->opts.fsck && 831 (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) || 832 BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb))) 833 c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology); 834 835 ret = bch2_run_recovery_passes(c); 836 if (ret) 837 goto err; 838 839 /* If we fixed errors, verify that fs is actually clean now: */ 840 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && 841 test_bit(BCH_FS_ERRORS_FIXED, &c->flags) && 842 !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags) && 843 !test_bit(BCH_FS_ERROR, &c->flags)) { 844 bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean"); 845 clear_bit(BCH_FS_ERRORS_FIXED, &c->flags); 846 847 c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; 848 849 ret = bch2_run_recovery_passes(c); 850 if (ret) 851 goto err; 852 853 if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags) || 854 test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) { 855 bch_err(c, "Second fsck run was not clean"); 856 set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags); 857 } 858 859 set_bit(BCH_FS_ERRORS_FIXED, &c->flags); 860 } 861 862 if (enabled_qtypes(c)) { 863 bch_verbose(c, "reading quotas"); 864 ret = bch2_fs_quota_read(c); 865 if (ret) 866 goto err; 867 bch_verbose(c, "quotas done"); 868 } 869 870 mutex_lock(&c->sb_lock); 871 if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != c->sb.version) { 872 SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, c->sb.version); 873 write_sb = true; 874 } 875 876 if (!test_bit(BCH_FS_ERROR, &c->flags)) { 877 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info); 878 write_sb = true; 879 } 880 881 if (c->opts.fsck && 882 !test_bit(BCH_FS_ERROR, &c->flags) && 883 !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) { 884 SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0); 885 SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0); 886 write_sb = true; 887 } 888 889 if (write_sb) 890 bch2_write_super(c); 891 mutex_unlock(&c->sb_lock); 892 893 if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) || 894 c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) { 895 struct bch_move_stats stats; 896 897 bch2_move_stats_init(&stats, "recovery"); 898 899 bch_info(c, "scanning for old btree nodes"); 900 ret = bch2_fs_read_write(c) ?: 901 bch2_scan_old_btree_nodes(c, &stats); 902 if (ret) 903 goto err; 904 bch_info(c, "scanning for old btree nodes done"); 905 } 906 907 if (c->journal_seq_blacklist_table && 908 c->journal_seq_blacklist_table->nr > 128) 909 queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work); 910 911 ret = 0; 912 out: 913 set_bit(BCH_FS_FSCK_DONE, &c->flags); 914 bch2_flush_fsck_errs(c); 915 916 if (!c->opts.keep_journal && 917 test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) 918 bch2_journal_keys_put_initial(c); 919 kfree(clean); 920 921 if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) { 922 bch2_fs_read_write_early(c); 923 bch2_delete_dead_snapshots_async(c); 924 } 925 926 if (ret) 927 bch_err_fn(c, ret); 928 return ret; 929 err: 930 fsck_err: 931 bch2_fs_emergency_read_only(c); 932 goto out; 933 } 934 935 int bch2_fs_initialize(struct bch_fs *c) 936 { 937 struct bch_inode_unpacked root_inode, lostfound_inode; 938 struct bkey_inode_buf packed_inode; 939 struct qstr lostfound = QSTR("lost+found"); 940 struct bch_dev *ca; 941 unsigned i; 942 int ret; 943 944 bch_notice(c, "initializing new filesystem"); 945 946 mutex_lock(&c->sb_lock); 947 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done); 948 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done); 949 950 bch2_sb_maybe_downgrade(c); 951 952 if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) { 953 bch2_sb_upgrade(c, bcachefs_metadata_version_current); 954 SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current); 955 bch2_write_super(c); 956 } 957 mutex_unlock(&c->sb_lock); 958 959 c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns); 960 set_bit(BCH_FS_MAY_GO_RW, &c->flags); 961 set_bit(BCH_FS_FSCK_DONE, &c->flags); 962 963 for (i = 0; i < BTREE_ID_NR; i++) 964 bch2_btree_root_alloc(c, i); 965 966 for_each_member_device(ca, c, i) 967 bch2_dev_usage_init(ca); 968 969 ret = bch2_fs_journal_alloc(c); 970 if (ret) 971 goto err; 972 973 /* 974 * journal_res_get() will crash if called before this has 975 * set up the journal.pin FIFO and journal.cur pointer: 976 */ 977 bch2_fs_journal_start(&c->journal, 1); 978 bch2_journal_set_replay_done(&c->journal); 979 980 ret = bch2_fs_read_write_early(c); 981 if (ret) 982 goto err; 983 984 /* 985 * Write out the superblock and journal buckets, now that we can do 986 * btree updates 987 */ 988 bch_verbose(c, "marking superblocks"); 989 ret = bch2_trans_mark_dev_sbs(c); 990 bch_err_msg(c, ret, "marking superblocks"); 991 if (ret) 992 goto err; 993 994 for_each_online_member(ca, c, i) 995 ca->new_fs_bucket_idx = 0; 996 997 ret = bch2_fs_freespace_init(c); 998 if (ret) 999 goto err; 1000 1001 ret = bch2_initialize_subvolumes(c); 1002 if (ret) 1003 goto err; 1004 1005 bch_verbose(c, "reading snapshots table"); 1006 ret = bch2_snapshots_read(c); 1007 if (ret) 1008 goto err; 1009 bch_verbose(c, "reading snapshots done"); 1010 1011 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL); 1012 root_inode.bi_inum = BCACHEFS_ROOT_INO; 1013 root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL; 1014 bch2_inode_pack(&packed_inode, &root_inode); 1015 packed_inode.inode.k.p.snapshot = U32_MAX; 1016 1017 ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0); 1018 if (ret) { 1019 bch_err_msg(c, ret, "creating root directory"); 1020 goto err; 1021 } 1022 1023 bch2_inode_init_early(c, &lostfound_inode); 1024 1025 ret = bch2_trans_do(c, NULL, NULL, 0, 1026 bch2_create_trans(trans, 1027 BCACHEFS_ROOT_SUBVOL_INUM, 1028 &root_inode, &lostfound_inode, 1029 &lostfound, 1030 0, 0, S_IFDIR|0700, 0, 1031 NULL, NULL, (subvol_inum) { 0 }, 0)); 1032 if (ret) { 1033 bch_err_msg(c, ret, "creating lost+found"); 1034 goto err; 1035 } 1036 1037 if (enabled_qtypes(c)) { 1038 ret = bch2_fs_quota_read(c); 1039 if (ret) 1040 goto err; 1041 } 1042 1043 ret = bch2_journal_flush(&c->journal); 1044 if (ret) { 1045 bch_err_msg(c, ret, "writing first journal entry"); 1046 goto err; 1047 } 1048 1049 mutex_lock(&c->sb_lock); 1050 SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true); 1051 SET_BCH_SB_CLEAN(c->disk_sb.sb, false); 1052 1053 bch2_write_super(c); 1054 mutex_unlock(&c->sb_lock); 1055 1056 return 0; 1057 err: 1058 bch_err_fn(ca, ret); 1059 return ret; 1060 } 1061