// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "lru.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-downgrade.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

static bool btree_id_is_alloc(enum btree_id id)
{
	switch (id) {
	case BTREE_ID_alloc:
	case BTREE_ID_backpointers:
	case BTREE_ID_need_discard:
	case BTREE_ID_freespace:
	case BTREE_ID_bucket_gens:
		return true;
	default:
		return false;
	}
}

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (!btree_id_is_alloc(keys->d[src].btree_id))
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/*
 * Btree node pointers have a field to stack a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	unsigned update_flags = BTREE_TRIGGER_NORUN;
	int ret;

	if (k->overwritten)
		return 0;

	trans->journal_res.seq = k->journal_seq;

	/*
	 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
	 * keep the key cache coherent with the underlying btree. Nothing
	 * besides the allocator is doing updates yet so we don't need key cache
	 * coherency for non-alloc btrees, and key cache fills for snapshots
	 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
	 * the snapshots recovery pass runs.
	 */
	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;
	else
		update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}

static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	DARRAY(struct journal_key *) keys_sorted = { 0 };
	struct journal *j = &c->journal;
	u64 start_seq	= c->journal_replay_seq_start;
	u64 end_seq	= c->journal_replay_seq_end;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	if (keys->nr) {
		ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
					   keys->nr, start_seq, end_seq);
		if (ret)
			goto err;
	}

	BUG_ON(!atomic_read(&keys->ref));

	/*
	 * First, attempt to replay keys in sorted order. This is more
	 * efficient - better locality of btree access - but some might fail if
	 * that would cause a journal deadlock.
	 */
	for (size_t i = 0; i < keys->nr; i++) {
		cond_resched();

		struct journal_key *k = keys->d + i;

		/* Skip fastpath if we're low on space in the journal */
		ret = c->journal.watermark ? -1 :
			commit_do(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_journal_reclaim|
				  (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0),
				  bch2_journal_replay_key(trans, k));
		BUG_ON(!ret && !k->overwritten);
		if (ret) {
			ret = darray_push(&keys_sorted, k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now, replay any remaining keys in the order in which they appear in
	 * the journal, unpinning those journal entries as we go:
	 */
	sort(keys_sorted.data, keys_sorted.nr,
	     sizeof(keys_sorted.data[0]),
	     journal_sort_seq_cmp, NULL);

	darray_for_each(keys_sorted, kp) {
		cond_resched();

		struct journal_key *k = *kp;

		replay_now_at(j, k->journal_seq);

		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc|
				(!k->allocated
				 ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim
				 : 0),
				bch2_journal_replay_key(trans, k));
		bch_err_msg(c, ret, "while replaying key at btree %s level %u:",
			    bch2_btree_id_str(k->btree_id), k->level);
		if (ret)
			goto err;

		BUG_ON(!k->overwritten);
	}

	/*
	 * We need to put our btree_trans before calling flush_all_pins(), since
	 * that will use a btree_trans internally
	 */
	bch2_trans_put(trans);
	trans = NULL;

	if (!c->opts.keep_journal)
		bch2_journal_keys_put_initial(c);

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);

	if (keys->nr)
		bch2_journal_log_msg(c, "journal replay finished");
err:
	if (trans)
		bch2_trans_put(trans);
	darray_exit(&keys_sorted);
	bch_err_fn(c, ret);
	return ret;
}

/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
			if (ret)
				return ret;
		}

		r = bch2_btree_id_root(c, entry->btree_id);

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, (struct bkey_i *) entry->start);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	if (clean) {
		for (struct jset_entry *entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			int ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				int ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c,
				   btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   btree_root_bkey_invalid,
				   "invalid btree root %s",
				   bch2_btree_id_str(i));
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			fsck_err(c,
				 btree_root_read_error,
				 "error reading btree root %s",
				 bch2_btree_id_str(i));
			if (btree_id_is_alloc(i))
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			ret = 0;
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->b) {
			r->alive = false;
			r->level = 0;
			bch2_btree_root_alloc(c, i);
		}
	}
fsck_err:
	return ret;
}

static int bch2_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot_tree	root_tree;
	struct bkey_i_snapshot		root_snapshot;
	struct bkey_i_subvolume		root_volume;
	int ret;

	bkey_snapshot_tree_init(&root_tree.k_i);
	root_tree.k.p.offset		= 1;
	root_tree.v.master_subvol	= cpu_to_le32(1);
	root_tree.v.root_snapshot	= cpu_to_le32(U32_MAX);

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags	= 0;
	root_snapshot.v.parent	= 0;
	root_snapshot.v.subvol	= cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.tree	= cpu_to_le32(1);
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags	= 0;
	root_volume.v.snapshot	= cpu_to_le32(U32_MAX);
	root_volume.v.inode	= cpu_to_le64(BCACHEFS_ROOT_INO);

	ret =   bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
	bch_err_fn(c, ret);
	return ret;
}

static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* set bi_subvol on root inode */
noinline_for_stack
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
				__bch2_fs_upgrade_for_subvolumes(trans));
	bch_err_fn(c, ret);
	return ret;
}

const char * const bch2_recovery_passes[] = {
#define x(_fn, ...)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, c->opts.norecovery);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except for
	 * setting journal_key->overwritten): it will be accessed by multiple
	 * threads
	 */
	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean)
		return bch2_fs_read_write_early(c);
	return 0;
}

struct recovery_pass_fn {
	int		(*fn)(struct bch_fs *);
	unsigned	when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when)	{ .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

u64 bch2_recovery_passes_to_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

u64 bch2_recovery_passes_from_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...)	[BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

static bool check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_version = bcachefs_metadata_version_current;
	unsigned latest_compatible = min(latest_version,
					 bch2_latest_compatible_version(c->sb.version));
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = 0;

	if (old_version < bcachefs_metadata_required_upgrade_below) {
		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
		    latest_compatible < bcachefs_metadata_required_upgrade_below)
			new_version = latest_version;
		else
			new_version = latest_compatible;
	} else {
		switch (c->opts.version_upgrade) {
		case BCH_VERSION_UPGRADE_compatible:
			new_version = latest_compatible;
			break;
		case BCH_VERSION_UPGRADE_incompatible:
			new_version = latest_version;
			break;
		case BCH_VERSION_UPGRADE_none:
			new_version = min(old_version, latest_version);
			break;
		}
	}

	if (new_version > old_version) {
		struct printbuf buf = PRINTBUF;

		if (old_version < bcachefs_metadata_required_upgrade_below)
			prt_str(&buf, "Version upgrade required:\n");

		if (old_version != c->sb.version) {
			prt_str(&buf, "Version upgrade from ");
			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
			prt_str(&buf, " to ");
			bch2_version_to_text(&buf, c->sb.version);
			prt_str(&buf, " incomplete\n");
		}

		prt_printf(&buf, "Doing %s version upgrade from ",
			   BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
"incompatible" : "compatible"); 623 bch2_version_to_text(&buf, old_version); 624 prt_str(&buf, " to "); 625 bch2_version_to_text(&buf, new_version); 626 prt_newline(&buf); 627 628 struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); 629 __le64 passes = ext->recovery_passes_required[0]; 630 bch2_sb_set_upgrade(c, old_version, new_version); 631 passes = ext->recovery_passes_required[0] & ~passes; 632 633 if (passes) { 634 prt_str(&buf, " running recovery passes: "); 635 prt_bitflags(&buf, bch2_recovery_passes, 636 bch2_recovery_passes_from_stable(le64_to_cpu(passes))); 637 } 638 639 bch_info(c, "%s", buf.buf); 640 641 bch2_sb_upgrade(c, new_version); 642 643 printbuf_exit(&buf); 644 return true; 645 } 646 647 return false; 648 } 649 650 u64 bch2_fsck_recovery_passes(void) 651 { 652 u64 ret = 0; 653 654 for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) 655 if (recovery_pass_fns[i].when & PASS_FSCK) 656 ret |= BIT_ULL(i); 657 return ret; 658 } 659 660 static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) 661 { 662 struct recovery_pass_fn *p = recovery_pass_fns + pass; 663 664 if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read) 665 return false; 666 if (c->recovery_passes_explicit & BIT_ULL(pass)) 667 return true; 668 if ((p->when & PASS_FSCK) && c->opts.fsck) 669 return true; 670 if ((p->when & PASS_UNCLEAN) && !c->sb.clean) 671 return true; 672 if (p->when & PASS_ALWAYS) 673 return true; 674 return false; 675 } 676 677 static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) 678 { 679 struct recovery_pass_fn *p = recovery_pass_fns + pass; 680 int ret; 681 682 if (!(p->when & PASS_SILENT)) 683 bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."), 684 bch2_recovery_passes[pass]); 685 ret = p->fn(c); 686 if (ret) 687 return ret; 688 if (!(p->when & PASS_SILENT)) 689 bch2_print(c, KERN_CONT " done\n"); 690 691 return 0; 692 } 693 694 static int bch2_run_recovery_passes(struct bch_fs *c) 695 { 696 int ret = 0; 697 698 while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) { 699 if (should_run_recovery_pass(c, c->curr_recovery_pass)) { 700 unsigned pass = c->curr_recovery_pass; 701 702 ret = bch2_run_recovery_pass(c, c->curr_recovery_pass); 703 if (bch2_err_matches(ret, BCH_ERR_restart_recovery) || 704 (ret && c->curr_recovery_pass < pass)) 705 continue; 706 if (ret) 707 break; 708 709 c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass); 710 } 711 c->curr_recovery_pass++; 712 c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass); 713 } 714 715 return ret; 716 } 717 718 int bch2_run_online_recovery_passes(struct bch_fs *c) 719 { 720 int ret = 0; 721 722 for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) { 723 struct recovery_pass_fn *p = recovery_pass_fns + i; 724 725 if (!(p->when & PASS_ONLINE)) 726 continue; 727 728 ret = bch2_run_recovery_pass(c, i); 729 if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) { 730 i = c->curr_recovery_pass; 731 continue; 732 } 733 if (ret) 734 break; 735 } 736 737 return ret; 738 } 739 740 int bch2_fs_recovery(struct bch_fs *c) 741 { 742 struct bch_sb_field_clean *clean = NULL; 743 struct jset *last_journal_entry = NULL; 744 u64 last_seq = 0, blacklist_seq, journal_seq; 745 int ret = 0; 746 747 if (c->sb.clean) { 748 clean = bch2_read_superblock_clean(c); 749 ret = PTR_ERR_OR_ZERO(clean); 750 if (ret) 751 goto err; 752 753 bch_info(c, "recovering from clean shutdown, journal seq %llu", 754 le64_to_cpu(clean->journal_seq)); 755 
	} else {
		bch_info(c, "recovering from unclean shutdown");
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");
		ret = -EINVAL;
		goto err;
	}

	if (!c->opts.nochanges) {
		mutex_lock(&c->sb_lock);
		bool write_sb = false;

		struct bch_sb_field_ext *ext =
			bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
		if (!ext) {
			ret = -BCH_ERR_ENOSPC_sb;
			mutex_unlock(&c->sb_lock);
			goto err;
		}

		if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
			ext->recovery_passes_required[0] |=
				cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
			write_sb = true;
		}

		u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
		if (sb_passes) {
			struct printbuf buf = PRINTBUF;
			prt_str(&buf, "superblock requires following recovery passes to be run:\n ");
			prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
			bch_info(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		if (bch2_check_version_downgrade(c)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "Version downgrade required:");

			__le64 passes = ext->recovery_passes_required[0];
			bch2_sb_set_downgrade(c,
					BCH_VERSION_MINOR(bcachefs_metadata_version_current),
					BCH_VERSION_MINOR(c->sb.version));
			passes = ext->recovery_passes_required[0] & ~passes;
			if (passes) {
				prt_str(&buf, "\n running recovery passes: ");
				prt_bitflags(&buf, bch2_recovery_passes,
					     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
			}

			bch_info(c, "%s", buf.buf);
			printbuf_exit(&buf);
			write_sb = true;
		}

		if (check_version_upgrade(c))
			write_sb = true;

		if (write_sb)
			bch2_write_super(c);

		c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
		mutex_unlock(&c->sb_lock);
	}

	if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

	if (c->opts.fsck)
		set_bit(BCH_FS_fsck_running, &c->flags);

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		/*
		 * note: cmd_list_journal needs the blacklist table fully up to date so
		 * it can asterisk ignored journal entries:
		 */
		if (c->opts.read_journal_only)
			goto out;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (*i && !(*i)->ignore) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
					clean_but_journal_not_empty,
					"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c,
				    dirty_but_no_journal_entries,
				    "no journal entries found");
			if (clean)
				goto use_clean;

			genradix_for_each_reverse(&c->journal_entries, iter, i)
				if (*i) {
					last_journal_entry = &(*i)->j;
					(*i)->ignore = false;
					/*
					 * This was probably a NO_FLUSH entry,
					 * so last_seq was garbage - but we know
					 * we're only using a single journal
					 * entry, set it here:
					 */
					(*i)->j.last_seq = (*i)->j.seq;
					break;
				}
		}

		ret = bch2_journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = bch2_verify_superblock_clean(c, &clean,
							   last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = -BCH_ERR_fsck_repair_impossible;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	c->journal_replay_seq_start	= last_seq;
	c->journal_replay_seq_end	= blacklist_seq - 1;

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret =   bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
					     blacklist_seq, journal_seq) ?:
			bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret =   bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
				     journal_seq, last_seq, blacklist_seq - 1) ?:
		bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	if (c->opts.reconstruct_alloc)
		bch2_journal_log_msg(c, "dropping alloc info");

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	ret = bch2_run_recovery_passes(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_fsck_running, &c->flags);

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_errors_fixed, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags) &&
	    !test_bit(BCH_FS_error, &c->flags)) {
		bch2_flush_fsck_errs(c);

		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
		clear_bit(BCH_FS_errors_fixed, &c->flags);

		c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

		ret = bch2_run_recovery_passes(c);
		if (ret)
			goto err;

		if (test_bit(BCH_FS_errors_fixed, &c->flags) ||
		    test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
			bch_err(c, "Second fsck run was not clean");
			set_bit(BCH_FS_errors_not_fixed, &c->flags);
		}

		set_bit(BCH_FS_errors_fixed, &c->flags);
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	bool write_sb = false;

	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags) &&
	    !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags)) {
		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		if (ext &&
		    (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
		     !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) {
			memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required));
			memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
			write_sb = true;
		}
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		struct printbuf buf = PRINTBUF;
		bch2_version_to_text(&buf, c->sb.version_min);
		bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf);
		printbuf_exit(&buf);

		ret =   bch2_fs_read_write_early(c) ?:
			bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal &&
	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
		bch2_journal_keys_put_initial(c);
	kfree(clean);

	if (!ret &&
	    test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
	    !c->opts.nochanges) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);
	}

	bch_err_fn(c, ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	bch2_check_version_downgrade(c);

	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
		bch2_sb_upgrade(c, bcachefs_metadata_version_current);
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
	set_bit(BCH_FS_may_go_rw, &c->flags);

	for (unsigned i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	for_each_member_device(c, ca)
		bch2_dev_usage_init(ca);

	ret = bch2_fs_journal_alloc(c);
	if (ret)
		goto err;

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	ret = bch2_trans_mark_dev_sbs(c);
	bch_err_msg(c, ret, "marking superblocks");
	if (ret)
		goto err;

	for_each_online_member(c, ca)
		ca->new_fs_bucket_idx = 0;

	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	ret = bch2_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	ret = bch2_snapshots_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
	bch_err_msg(c, ret, "creating root directory");
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	bch_err_msg(c, ret, "creating lost+found");
	if (ret)
		goto err;

	c->recovery_pass_done = ARRAY_SIZE(recovery_pass_fns) - 1;

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	bch_err_msg(c, ret, "writing first journal entry");
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	bch_err_fn(c, ret);
	return ret;
}