// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "lru.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

static bool btree_id_is_alloc(enum btree_id id)
{
	switch (id) {
	case BTREE_ID_alloc:
	case BTREE_ID_backpointers:
	case BTREE_ID_need_discard:
	case BTREE_ID_freespace:
	case BTREE_ID_bucket_gens:
		return true;
	default:
		return false;
	}
}

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (!btree_id_is_alloc(keys->d[src].btree_id))
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/*
 * Btree node pointers have a field to stash a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	unsigned update_flags = BTREE_TRIGGER_NORUN;
	int ret;

	/*
	 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
	 * keep the key cache coherent with the underlying btree. Nothing
	 * besides the allocator is doing updates yet so we don't need key cache
	 * coherency for non-alloc btrees, and key cache fills for snapshots
	 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
	 * the snapshots recovery pass runs.
	 */
	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;
	else
		update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}

static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key **keys_sorted, *k;
	struct journal *j = &c->journal;
	u64 start_seq = c->journal_replay_seq_start;
	u64 end_seq = c->journal_replay_seq_end;
	size_t i;
	int ret;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
	if (!keys_sorted)
		return -BCH_ERR_ENOMEM_journal_replay;

	for (i = 0; i < keys->nr; i++)
		keys_sorted[i] = &keys->d[i];

	sort(keys_sorted, keys->nr,
	     sizeof(keys_sorted[0]),
	     journal_sort_seq_cmp, NULL);

	if (keys->nr) {
		ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
					   keys->nr, start_seq, end_seq);
		if (ret)
			goto err;
	}

	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		replay_now_at(j, k->journal_seq);

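		/*
		 * Keys that recovery didn't allocate itself came from the
		 * journal: replay them without taking a new journal
		 * reservation (BTREE_INSERT_JOURNAL_REPLAY), and with
		 * BCH_WATERMARK_reclaim so they can use allocation reserves
		 * and always make forward progress:
		 */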
		ret = bch2_trans_do(c, NULL, NULL,
				    BTREE_INSERT_LAZY_RW|
				    BTREE_INSERT_NOFAIL|
				    (!k->allocated
				     ? BTREE_INSERT_JOURNAL_REPLAY|BCH_WATERMARK_reclaim
				     : 0),
				    bch2_journal_replay_key(trans, k));
		if (ret) {
			bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s",
				bch2_btree_id_str(k->btree_id), k->level, bch2_err_str(ret));
			goto err;
		}
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	ret = bch2_journal_error(j);

	if (keys->nr && !ret)
		bch2_journal_log_msg(c, "journal replay finished");
err:
	kvfree(keys_sorted);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
			if (ret)
				return ret;
		}

		r = bch2_btree_id_root(c, entry->btree_id);

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, (struct bkey_i *) entry->start);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}
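
/*
 * Process the non-key journal entries - btree roots, usage counters, blacklist
 * entries and IO clocks - from either the superblock's clean section or from
 * the journal itself:
 */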
static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c,
				   btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   btree_root_bkey_invalid,
				   "invalid btree root %s",
				   bch2_btree_id_str(i));
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			fsck_err(c,
				 btree_root_read_error,
				 "error reading btree root %s",
				 bch2_btree_id_str(i));
			if (btree_id_is_alloc(i))
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			ret = 0;
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->b) {
			r->alive = false;
			r->level = 0;
			bch2_btree_root_alloc(c, i);
		}
	}
fsck_err:
	return ret;
}

static int bch2_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot_tree root_tree;
	struct bkey_i_snapshot root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_tree_init(&root_tree.k_i);
	root_tree.k.p.offset = 1;
	root_tree.v.master_subvol = cpu_to_le32(1);
	root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags = 0;
	root_snapshot.v.parent = 0;
	root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.tree = cpu_to_le32(1);
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags = 0;
	root_volume.v.snapshot = cpu_to_le32(U32_MAX);
	root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
	      bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
	      bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* set bi_subvol on root inode */
noinline_for_stack
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
				__bch2_fs_upgrade_for_subvolumes(trans));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

const char * const bch2_recovery_passes[] = {
#define x(_fn, _when)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, c->opts.norecovery);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	return 0;
}

struct recovery_pass_fn {
	int		(*fn)(struct bch_fs *);
	unsigned	when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _when)	{ .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

static void check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version);
	unsigned latest_version = bcachefs_metadata_version_current;
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = 0;
	u64 recovery_passes;

	if (old_version < bcachefs_metadata_required_upgrade_below) {
		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
		    latest_compatible < bcachefs_metadata_required_upgrade_below)
			new_version = latest_version;
		else
			new_version = latest_compatible;
	} else {
		switch (c->opts.version_upgrade) {
		case BCH_VERSION_UPGRADE_compatible:
			new_version = latest_compatible;
			break;
		case BCH_VERSION_UPGRADE_incompatible:
			new_version = latest_version;
			break;
		case BCH_VERSION_UPGRADE_none:
			new_version = old_version;
			break;
		}
	}

	if (new_version > old_version) {
		struct printbuf buf = PRINTBUF;

		if (old_version < bcachefs_metadata_required_upgrade_below)
			prt_str(&buf, "Version upgrade required:\n");

		if (old_version != c->sb.version) {
			prt_str(&buf, "Version upgrade from ");
			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
			prt_str(&buf, " to ");
			bch2_version_to_text(&buf, c->sb.version);
			prt_str(&buf, " incomplete\n");
		}

"incompatible" : "compatible"); 552 bch2_version_to_text(&buf, old_version); 553 prt_str(&buf, " to "); 554 bch2_version_to_text(&buf, new_version); 555 prt_newline(&buf); 556 557 recovery_passes = bch2_upgrade_recovery_passes(c, old_version, new_version); 558 if (recovery_passes) { 559 if ((recovery_passes & RECOVERY_PASS_ALL_FSCK) == RECOVERY_PASS_ALL_FSCK) 560 prt_str(&buf, "fsck required"); 561 else { 562 prt_str(&buf, "running recovery passes: "); 563 prt_bitflags(&buf, bch2_recovery_passes, recovery_passes); 564 } 565 566 c->recovery_passes_explicit |= recovery_passes; 567 c->opts.fix_errors = FSCK_FIX_yes; 568 } 569 570 bch_info(c, "%s", buf.buf); 571 572 mutex_lock(&c->sb_lock); 573 bch2_sb_upgrade(c, new_version); 574 mutex_unlock(&c->sb_lock); 575 576 printbuf_exit(&buf); 577 } 578 } 579 580 u64 bch2_fsck_recovery_passes(void) 581 { 582 u64 ret = 0; 583 584 for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) 585 if (recovery_pass_fns[i].when & PASS_FSCK) 586 ret |= BIT_ULL(i); 587 return ret; 588 } 589 590 static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) 591 { 592 struct recovery_pass_fn *p = recovery_pass_fns + c->curr_recovery_pass; 593 594 if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read) 595 return false; 596 if (c->recovery_passes_explicit & BIT_ULL(pass)) 597 return true; 598 if ((p->when & PASS_FSCK) && c->opts.fsck) 599 return true; 600 if ((p->when & PASS_UNCLEAN) && !c->sb.clean) 601 return true; 602 if (p->when & PASS_ALWAYS) 603 return true; 604 return false; 605 } 606 607 static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) 608 { 609 int ret; 610 611 c->curr_recovery_pass = pass; 612 613 if (should_run_recovery_pass(c, pass)) { 614 struct recovery_pass_fn *p = recovery_pass_fns + pass; 615 616 if (!(p->when & PASS_SILENT)) 617 printk(KERN_INFO bch2_log_msg(c, "%s..."), 618 bch2_recovery_passes[pass]); 619 ret = p->fn(c); 620 if (ret) 621 return ret; 622 if (!(p->when & PASS_SILENT)) 623 printk(KERN_CONT " done\n"); 624 625 c->recovery_passes_complete |= BIT_ULL(pass); 626 } 627 628 return 0; 629 } 630 631 static int bch2_run_recovery_passes(struct bch_fs *c) 632 { 633 int ret = 0; 634 635 while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) { 636 ret = bch2_run_recovery_pass(c, c->curr_recovery_pass); 637 if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) 638 continue; 639 if (ret) 640 break; 641 c->curr_recovery_pass++; 642 } 643 644 return ret; 645 } 646 647 int bch2_fs_recovery(struct bch_fs *c) 648 { 649 struct bch_sb_field_clean *clean = NULL; 650 struct jset *last_journal_entry = NULL; 651 u64 last_seq = 0, blacklist_seq, journal_seq; 652 bool write_sb = false; 653 int ret = 0; 654 655 if (c->sb.clean) { 656 clean = bch2_read_superblock_clean(c); 657 ret = PTR_ERR_OR_ZERO(clean); 658 if (ret) 659 goto err; 660 661 bch_info(c, "recovering from clean shutdown, journal seq %llu", 662 le64_to_cpu(clean->journal_seq)); 663 } else { 664 bch_info(c, "recovering from unclean shutdown"); 665 } 666 667 if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) { 668 bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported"); 669 ret = -EINVAL; 670 goto err; 671 } 672 673 if (!c->sb.clean && 674 !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) { 675 bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix"); 676 ret = -EINVAL; 677 goto err; 678 } 679 680 if (c->opts.fsck || 
	if (c->opts.fsck || !(c->opts.nochanges && c->opts.norecovery))
		check_version_upgrade(c);

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");
		ret = -EINVAL;
		goto err;
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		/*
		 * note: cmd_list_journal needs the blacklist table fully up to date so
		 * it can asterisk ignored journal entries:
		 */
		if (c->opts.read_journal_only)
			goto out;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (*i && !(*i)->ignore) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				clean_but_journal_not_empty,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c,
				    dirty_but_no_journal_entries,
				    "no journal entries found");
			if (clean)
				goto use_clean;

			genradix_for_each_reverse(&c->journal_entries, iter, i)
				if (*i) {
					last_journal_entry = &(*i)->j;
					(*i)->ignore = false;
					/*
					 * This was probably a NO_FLUSH entry,
					 * so last_seq was garbage - but we know
					 * we're only using a single journal
					 * entry, set it here:
					 */
					(*i)->j.last_seq = (*i)->j.seq;
					break;
				}
		}

		ret = bch2_journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = bch2_verify_superblock_clean(c, &clean,
							   last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = -BCH_ERR_fsck_repair_impossible;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	c->journal_replay_seq_start = last_seq;
	c->journal_replay_seq_end = blacklist_seq - 1;

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
					   blacklist_seq, journal_seq) ?:
			bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
				   journal_seq, last_seq, blacklist_seq - 1) ?:
		bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	if (c->opts.reconstruct_alloc)
		bch2_journal_log_msg(c, "dropping alloc info");

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	if (c->opts.fsck &&
	    (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
	     BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)))
		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

	ret = bch2_run_recovery_passes(c);
	if (ret)
		goto err;

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_ERRORS_FIXED, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags) &&
	    !test_bit(BCH_FS_ERROR, &c->flags)) {
		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
		clear_bit(BCH_FS_ERRORS_FIXED, &c->flags);

		c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

		ret = bch2_run_recovery_passes(c);
		if (ret)
			goto err;

		if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags) ||
		    test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
			bch_err(c, "Second fsck run was not clean");
			set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags);
		}

		set_bit(BCH_FS_ERRORS_FIXED, &c->flags);
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != c->sb.version) {
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, c->sb.version);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		bch_info(c, "scanning for old btree nodes");
		ret = bch2_fs_read_write(c) ?:
		      bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal &&
	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(c);
	}
	kfree(clean);

	if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);
	}

	if (ret)
		bch_err_fn(c, ret);
	return ret;
err:
fsck_err:
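	/* Recovery failed: force the filesystem read-only before cleaning up: */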
	bch2_fs_emergency_read_only(c);
	goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	struct bch_dev *ca;
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	bch2_sb_maybe_downgrade(c);

	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
		bch2_sb_upgrade(c, bcachefs_metadata_version_current);
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	set_bit(BCH_FS_FSCK_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	for_each_member_device(ca, c, i)
		bch2_dev_usage_init(ca);

	ret = bch2_fs_journal_alloc(c);
	if (ret)
		goto err;

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	ret = bch2_trans_mark_dev_sbs(c);
	bch_err_msg(c, ret, "marking superblocks");
	if (ret)
		goto err;

	for_each_online_member(ca, c, i)
		ca->new_fs_bucket_idx = 0;

	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	ret = bch2_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	ret = bch2_snapshots_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
	root_inode.bi_inum = BCACHEFS_ROOT_INO;
	root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
	if (ret) {
		bch_err_msg(c, ret, "creating root directory");
		goto err;
	}

	bch2_inode_init_early(c, &lostfound_inode);

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	if (ret) {
		bch_err_msg(c, ret, "creating lost+found");
		goto err;
	}

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	if (ret) {
		bch_err_msg(c, ret, "writing first journal entry");
		goto err;
	}

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	bch_err_fn(c, ret);
	return ret;
}