// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "lru.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-downgrade.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

static bool btree_id_is_alloc(enum btree_id id)
{
	switch (id) {
	case BTREE_ID_alloc:
	case BTREE_ID_backpointers:
	case BTREE_ID_need_discard:
	case BTREE_ID_freespace:
	case BTREE_ID_bucket_gens:
		return true;
	default:
		return false;
	}
}

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (!btree_id_is_alloc(keys->d[src].btree_id))
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/*
 * Btree node pointers have a field to stack a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	unsigned update_flags = BTREE_TRIGGER_NORUN;
	int ret;

	if (k->overwritten)
		return 0;

	trans->journal_res.seq = k->journal_seq;

	/*
	 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
	 * keep the key cache coherent with the underlying btree. Nothing
	 * besides the allocator is doing updates yet so we don't need key cache
	 * coherency for non-alloc btrees, and key cache fills for snapshots
	 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
	 * the snapshots recovery pass runs.
	 */
	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;
	else
		update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}

static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	DARRAY(struct journal_key *) keys_sorted = { 0 };
	struct journal *j = &c->journal;
	u64 start_seq = c->journal_replay_seq_start;
	u64 end_seq = c->journal_replay_seq_end;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	if (keys->nr) {
		ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
					   keys->nr, start_seq, end_seq);
		if (ret)
			goto err;
	}

	BUG_ON(!atomic_read(&keys->ref));

	/*
	 * First, attempt to replay keys in sorted order. This is more
	 * efficient - better locality of btree access - but some might fail if
	 * that would cause a journal deadlock.
	 */
	for (size_t i = 0; i < keys->nr; i++) {
		cond_resched();

		struct journal_key *k = keys->d + i;

		/* Skip fastpath if we're low on space in the journal */
		ret = c->journal.watermark ? -1 :
			commit_do(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_journal_reclaim|
				  (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0),
				  bch2_journal_replay_key(trans, k));
		BUG_ON(!ret && !k->overwritten);
		if (ret) {
			ret = darray_push(&keys_sorted, k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now, replay any remaining keys in the order in which they appear in
	 * the journal, unpinning those journal entries as we go:
	 */
	sort(keys_sorted.data, keys_sorted.nr,
	     sizeof(keys_sorted.data[0]),
	     journal_sort_seq_cmp, NULL);

	darray_for_each(keys_sorted, kp) {
		cond_resched();

		struct journal_key *k = *kp;

		replay_now_at(j, k->journal_seq);

		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc|
				(!k->allocated
				 ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim
				 : 0),
				bch2_journal_replay_key(trans, k));
		bch_err_msg(c, ret, "while replaying key at btree %s level %u:",
			    bch2_btree_id_str(k->btree_id), k->level);
		if (ret)
			goto err;

		BUG_ON(!k->overwritten);
	}

	/*
	 * We need to put our btree_trans before calling flush_all_pins(), since
	 * that will use a btree_trans internally
	 */
	bch2_trans_put(trans);
	trans = NULL;

	if (!c->opts.keep_journal)
		bch2_journal_keys_put_initial(c);

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);

	if (keys->nr)
		bch2_journal_log_msg(c, "journal replay finished");
err:
	if (trans)
		bch2_trans_put(trans);
	darray_exit(&keys_sorted);
	bch_err_fn(c, ret);
	return ret;
}

/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
			if (ret)
				return ret;
		}

		r = bch2_btree_id_root(c, entry->btree_id);

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, (struct bkey_i *) entry->start);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	if (clean) {
		for (struct jset_entry *entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			int ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				int ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c,
				   btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   btree_root_bkey_invalid,
				   "invalid btree root %s",
				   bch2_btree_id_str(i));
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			fsck_err(c,
				 btree_root_read_error,
				 "error reading btree root %s",
				 bch2_btree_id_str(i));
			if (btree_id_is_alloc(i))
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			ret = 0;
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->b) {
			r->alive = false;
			r->level = 0;
			bch2_btree_root_alloc(c, i);
		}
	}
fsck_err:
	return ret;
}

static int bch2_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot_tree root_tree;
	struct bkey_i_snapshot root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_tree_init(&root_tree.k_i);
	root_tree.k.p.offset = 1;
	root_tree.v.master_subvol = cpu_to_le32(1);
	root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags = 0;
	root_snapshot.v.parent = 0;
	root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.tree = cpu_to_le32(1);
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags = 0;
	root_volume.v.snapshot = cpu_to_le32(U32_MAX);
	root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
	      bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
	      bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
	bch_err_fn(c, ret);
	return ret;
}

static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* set bi_subvol on root inode */
noinline_for_stack
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
				__bch2_fs_upgrade_for_subvolumes(trans));
	bch_err_fn(c, ret);
	return ret;
}

const char * const bch2_recovery_passes[] = {
#define x(_fn, ...)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, c->opts.norecovery);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except for
	 * setting journal_key->overwritten): it will be accessed by multiple
	 * threads
	 */
	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean)
		return bch2_fs_read_write_early(c);
	return 0;
}

struct recovery_pass_fn {
	int (*fn)(struct bch_fs *);
	unsigned when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when)	{ .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

u64 bch2_recovery_passes_to_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

u64 bch2_recovery_passes_from_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

static bool check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version);
	unsigned latest_version = bcachefs_metadata_version_current;
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = 0;

	if (old_version < bcachefs_metadata_required_upgrade_below) {
		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
		    latest_compatible < bcachefs_metadata_required_upgrade_below)
			new_version = latest_version;
		else
			new_version = latest_compatible;
	} else {
		switch (c->opts.version_upgrade) {
		case BCH_VERSION_UPGRADE_compatible:
			new_version = latest_compatible;
			break;
		case BCH_VERSION_UPGRADE_incompatible:
			new_version = latest_version;
			break;
		case BCH_VERSION_UPGRADE_none:
			new_version = old_version;
			break;
		}
	}

	if (new_version > old_version) {
		struct printbuf buf = PRINTBUF;

		if (old_version < bcachefs_metadata_required_upgrade_below)
			prt_str(&buf, "Version upgrade required:\n");

		if (old_version != c->sb.version) {
			prt_str(&buf, "Version upgrade from ");
			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
			prt_str(&buf, " to ");
			bch2_version_to_text(&buf, c->sb.version);
			prt_str(&buf, " incomplete\n");
		}

		prt_printf(&buf, "Doing %s version upgrade from ",
			   BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
			   ? "incompatible" : "compatible");
		bch2_version_to_text(&buf, old_version);
		prt_str(&buf, " to ");
		bch2_version_to_text(&buf, new_version);
		prt_newline(&buf);

		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		__le64 passes = ext->recovery_passes_required[0];
		bch2_sb_set_upgrade(c, old_version, new_version);
		passes = ext->recovery_passes_required[0] & ~passes;

		if (passes) {
			prt_str(&buf, " running recovery passes: ");
			prt_bitflags(&buf, bch2_recovery_passes,
				     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
		}

		bch_info(c, "%s", buf.buf);

		bch2_sb_upgrade(c, new_version);

		printbuf_exit(&buf);
		return true;
	}

	return false;
}

u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
		if (recovery_pass_fns[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}

static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;

	if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
		return false;
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}

static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;
	int ret;

	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
			   bch2_recovery_passes[pass]);
	ret = p->fn(c);
	if (ret)
		return ret;
	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_CONT " done\n");

	return 0;
}

static int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
			unsigned pass = c->curr_recovery_pass;

			ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
			if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
			    (ret && c->curr_recovery_pass < pass))
				continue;
			if (ret)
				break;

			c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
		}
		c->curr_recovery_pass++;
		c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);
	}

	return ret;
}

int bch2_run_online_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
		struct recovery_pass_fn *p = recovery_pass_fns + i;

		if (!(p->when & PASS_ONLINE))
			continue;

		ret = bch2_run_recovery_pass(c, i);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
			i = c->curr_recovery_pass;
			continue;
		}
		if (ret)
			break;
	}

	return ret;
}

int bch2_fs_recovery(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 last_seq = 0, blacklist_seq, journal_seq;
	int ret = 0;

	if (c->sb.clean) {
		clean = bch2_read_superblock_clean(c);
		ret = PTR_ERR_OR_ZERO(clean);
		if (ret)
			goto err;

		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	} else {
		bch_info(c, "recovering from unclean shutdown");
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->opts.nochanges && c->opts.norecovery)) {
		mutex_lock(&c->sb_lock);
		bool write_sb = false;

		struct bch_sb_field_ext *ext =
			bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
		if (!ext) {
			ret = -BCH_ERR_ENOSPC_sb;
			mutex_unlock(&c->sb_lock);
			goto err;
		}

		if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
			ext->recovery_passes_required[0] |=
				cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
			write_sb = true;
		}

		u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
		if (sb_passes) {
			struct printbuf buf = PRINTBUF;
			prt_str(&buf, "superblock requires following recovery passes to be run:\n ");
			prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
			bch_info(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		if (bch2_check_version_downgrade(c)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "Version downgrade required:\n");

			__le64 passes = ext->recovery_passes_required[0];
			bch2_sb_set_downgrade(c,
					      BCH_VERSION_MINOR(bcachefs_metadata_version_current),
					      BCH_VERSION_MINOR(c->sb.version));
			passes = ext->recovery_passes_required[0] & ~passes;
			if (passes) {
				prt_str(&buf, " running recovery passes: ");
				prt_bitflags(&buf, bch2_recovery_passes,
					     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
			}

			bch_info(c, "%s", buf.buf);
			printbuf_exit(&buf);
			write_sb = true;
		}

		if (check_version_upgrade(c))
			write_sb = true;

		if (write_sb)
			bch2_write_super(c);

		c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
		mutex_unlock(&c->sb_lock);
	}

	if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

	if (c->opts.fsck)
		set_bit(BCH_FS_fsck_running, &c->flags);

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		/*
		 * note: cmd_list_journal needs the blacklist table fully up to date so
		 * it can asterisk ignored journal entries:
		 */
		if (c->opts.read_journal_only)
			goto out;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (*i && !(*i)->ignore) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				clean_but_journal_not_empty,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c,
				    dirty_but_no_journal_entries,
				    "no journal entries found");
			if (clean)
				goto use_clean;

			genradix_for_each_reverse(&c->journal_entries, iter, i)
				if (*i) {
					last_journal_entry = &(*i)->j;
					(*i)->ignore = false;
					/*
					 * This was probably a NO_FLUSH entry,
					 * so last_seq was garbage - but we know
					 * we're only using a single journal
					 * entry, set it here:
					 */
					(*i)->j.last_seq = (*i)->j.seq;
					break;
				}
		}

		ret = bch2_journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = bch2_verify_superblock_clean(c, &clean,
							   last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = -BCH_ERR_fsck_repair_impossible;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	c->journal_replay_seq_start = last_seq;
	c->journal_replay_seq_end = blacklist_seq - 1;

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
					   blacklist_seq, journal_seq) ?:
			bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
				   journal_seq, last_seq, blacklist_seq - 1) ?:
		bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	if (c->opts.reconstruct_alloc)
		bch2_journal_log_msg(c, "dropping alloc info");

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	ret = bch2_run_recovery_passes(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_fsck_running, &c->flags);

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_errors_fixed, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags) &&
	    !test_bit(BCH_FS_error, &c->flags)) {
		bch2_flush_fsck_errs(c);

		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
		clear_bit(BCH_FS_errors_fixed, &c->flags);

		c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

		ret = bch2_run_recovery_passes(c);
		if (ret)
			goto err;

		if (test_bit(BCH_FS_errors_fixed, &c->flags) ||
		    test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
			bch_err(c, "Second fsck run was not clean");
			set_bit(BCH_FS_errors_not_fixed, &c->flags);
		}

		set_bit(BCH_FS_errors_fixed, &c->flags);
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	bool write_sb = false;

	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags) &&
	    !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags)) {
		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		if (ext &&
		    (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
		     !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) {
			memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required));
			memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
			write_sb = true;
		}
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		struct printbuf buf = PRINTBUF;
		bch2_version_to_text(&buf, c->sb.version_min);
		bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf);
		printbuf_exit(&buf);

		ret = bch2_fs_read_write_early(c) ?:
		      bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal &&
	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
		bch2_journal_keys_put_initial(c);
	kfree(clean);

	if (!ret &&
	    test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
	    !c->opts.nochanges) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);
	}

	bch_err_fn(c, ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	bch2_check_version_downgrade(c);

	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
		bch2_sb_upgrade(c, bcachefs_metadata_version_current);
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
	set_bit(BCH_FS_may_go_rw, &c->flags);

	for (unsigned i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	for_each_member_device(c, ca)
		bch2_dev_usage_init(ca);

	ret = bch2_fs_journal_alloc(c);
	if (ret)
		goto err;

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	ret = bch2_trans_mark_dev_sbs(c);
	bch_err_msg(c, ret, "marking superblocks");
	if (ret)
		goto err;

	for_each_online_member(c, ca)
		ca->new_fs_bucket_idx = 0;

	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	ret = bch2_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	ret = bch2_snapshots_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");
"reading snapshots done"); 1171 1172 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL); 1173 root_inode.bi_inum = BCACHEFS_ROOT_INO; 1174 root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL; 1175 bch2_inode_pack(&packed_inode, &root_inode); 1176 packed_inode.inode.k.p.snapshot = U32_MAX; 1177 1178 ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0); 1179 bch_err_msg(c, ret, "creating root directory"); 1180 if (ret) 1181 goto err; 1182 1183 bch2_inode_init_early(c, &lostfound_inode); 1184 1185 ret = bch2_trans_do(c, NULL, NULL, 0, 1186 bch2_create_trans(trans, 1187 BCACHEFS_ROOT_SUBVOL_INUM, 1188 &root_inode, &lostfound_inode, 1189 &lostfound, 1190 0, 0, S_IFDIR|0700, 0, 1191 NULL, NULL, (subvol_inum) { 0 }, 0)); 1192 bch_err_msg(c, ret, "creating lost+found"); 1193 if (ret) 1194 goto err; 1195 1196 c->recovery_pass_done = ARRAY_SIZE(recovery_pass_fns) - 1; 1197 1198 if (enabled_qtypes(c)) { 1199 ret = bch2_fs_quota_read(c); 1200 if (ret) 1201 goto err; 1202 } 1203 1204 ret = bch2_journal_flush(&c->journal); 1205 bch_err_msg(c, ret, "writing first journal entry"); 1206 if (ret) 1207 goto err; 1208 1209 mutex_lock(&c->sb_lock); 1210 SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true); 1211 SET_BCH_SB_CLEAN(c->disk_sb.sb, false); 1212 1213 bch2_write_super(c); 1214 mutex_unlock(&c->sb_lock); 1215 1216 return 0; 1217 err: 1218 bch_err_fn(c, ret); 1219 return ret; 1220 } 1221