1 // SPDX-License-Identifier: GPL-2.0 2 3 #include "bcachefs.h" 4 #include "backpointers.h" 5 #include "bkey_buf.h" 6 #include "alloc_background.h" 7 #include "btree_gc.h" 8 #include "btree_journal_iter.h" 9 #include "btree_update.h" 10 #include "btree_update_interior.h" 11 #include "btree_io.h" 12 #include "buckets.h" 13 #include "dirent.h" 14 #include "ec.h" 15 #include "errcode.h" 16 #include "error.h" 17 #include "fs-common.h" 18 #include "fsck.h" 19 #include "journal_io.h" 20 #include "journal_reclaim.h" 21 #include "journal_seq_blacklist.h" 22 #include "lru.h" 23 #include "logged_ops.h" 24 #include "move.h" 25 #include "quota.h" 26 #include "rebalance.h" 27 #include "recovery.h" 28 #include "replicas.h" 29 #include "sb-clean.h" 30 #include "sb-downgrade.h" 31 #include "snapshot.h" 32 #include "subvolume.h" 33 #include "super-io.h" 34 35 #include <linux/sort.h> 36 #include <linux/stat.h> 37 38 #define QSTR(n) { { { .len = strlen(n) } }, .name = n } 39 40 static bool btree_id_is_alloc(enum btree_id id) 41 { 42 switch (id) { 43 case BTREE_ID_alloc: 44 case BTREE_ID_backpointers: 45 case BTREE_ID_need_discard: 46 case BTREE_ID_freespace: 47 case BTREE_ID_bucket_gens: 48 return true; 49 default: 50 return false; 51 } 52 } 53 54 /* for -o reconstruct_alloc: */ 55 static void do_reconstruct_alloc(struct bch_fs *c) 56 { 57 bch2_journal_log_msg(c, "dropping alloc info"); 58 bch_info(c, "dropping and reconstructing all alloc info"); 59 60 mutex_lock(&c->sb_lock); 61 struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); 62 63 __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required); 64 __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_info, ext->recovery_passes_required); 65 __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_lrus, ext->recovery_passes_required); 66 __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_extents_to_backpointers, ext->recovery_passes_required); 67 __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_to_lru_refs, ext->recovery_passes_required); 68 69 __set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_alloc_key, ext->errors_silent); 70 __set_bit_le64(BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, ext->errors_silent); 71 __set_bit_le64(BCH_FSCK_ERR_stale_dirty_ptr, ext->errors_silent); 72 __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); 73 __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); 74 __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); 75 __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent); 76 __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent); 77 __set_bit_le64(BCH_FSCK_ERR_need_discard_key_wrong, ext->errors_silent); 78 __set_bit_le64(BCH_FSCK_ERR_freespace_key_wrong, ext->errors_silent); 79 __set_bit_le64(BCH_FSCK_ERR_bucket_gens_key_wrong, ext->errors_silent); 80 __set_bit_le64(BCH_FSCK_ERR_freespace_hole_missing, ext->errors_silent); 81 __set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_backpointer, ext->errors_silent); 82 __set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent); 83 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 84 85 bch2_write_super(c); 86 mutex_unlock(&c->sb_lock); 87 88 c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); 89 90 struct journal_keys *keys = &c->journal_keys; 91 size_t src, dst; 92 93 for (src = 0, dst = 0; src < keys->nr; src++) 94 if (!btree_id_is_alloc(keys->data[src].btree_id)) 95 keys->data[dst++] = keys->data[src]; 96 keys->nr = dst; 97 } 98 99 /* 100 * Btree node pointers have a field to stack a pointer to the in memory btree 101 * node; we need to zero out this field when reading in btree nodes, or when 102 * reading in keys from the journal: 103 */ 104 static void zero_out_btree_mem_ptr(struct journal_keys *keys) 105 { 106 darray_for_each(*keys, i) 107 if (i->k->k.type == KEY_TYPE_btree_ptr_v2) 108 bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0; 109 } 110 111 /* journal replay: */ 112 113 static void replay_now_at(struct journal *j, u64 seq) 114 { 115 BUG_ON(seq < j->replay_journal_seq); 116 117 seq = min(seq, j->replay_journal_seq_end); 118 119 while (j->replay_journal_seq < seq) 120 bch2_journal_pin_put(j, j->replay_journal_seq++); 121 } 122 123 static int bch2_journal_replay_key(struct btree_trans *trans, 124 struct journal_key *k) 125 { 126 struct btree_iter iter; 127 unsigned iter_flags = 128 BTREE_ITER_INTENT| 129 BTREE_ITER_NOT_EXTENTS; 130 unsigned update_flags = BTREE_TRIGGER_NORUN; 131 int ret; 132 133 if (k->overwritten) 134 return 0; 135 136 trans->journal_res.seq = k->journal_seq; 137 138 /* 139 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to 140 * keep the key cache coherent with the underlying btree. Nothing 141 * besides the allocator is doing updates yet so we don't need key cache 142 * coherency for non-alloc btrees, and key cache fills for snapshots 143 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until 144 * the snapshots recovery pass runs. 145 */ 146 if (!k->level && k->btree_id == BTREE_ID_alloc) 147 iter_flags |= BTREE_ITER_CACHED; 148 else 149 update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM; 150 151 bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, 152 BTREE_MAX_DEPTH, k->level, 153 iter_flags); 154 ret = bch2_btree_iter_traverse(&iter); 155 if (ret) 156 goto out; 157 158 struct btree_path *path = btree_iter_path(trans, &iter); 159 if (unlikely(!btree_path_node(path, k->level))) { 160 bch2_trans_iter_exit(trans, &iter); 161 bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, 162 BTREE_MAX_DEPTH, 0, iter_flags); 163 ret = bch2_btree_iter_traverse(&iter) ?: 164 bch2_btree_increase_depth(trans, iter.path, 0) ?: 165 -BCH_ERR_transaction_restart_nested; 166 goto out; 167 } 168 169 /* Must be checked with btree locked: */ 170 if (k->overwritten) 171 goto out; 172 173 ret = bch2_trans_update(trans, &iter, k->k, update_flags); 174 out: 175 bch2_trans_iter_exit(trans, &iter); 176 return ret; 177 } 178 179 static int journal_sort_seq_cmp(const void *_l, const void *_r) 180 { 181 const struct journal_key *l = *((const struct journal_key **)_l); 182 const struct journal_key *r = *((const struct journal_key **)_r); 183 184 return cmp_int(l->journal_seq, r->journal_seq); 185 } 186 187 static int bch2_journal_replay(struct bch_fs *c) 188 { 189 struct journal_keys *keys = &c->journal_keys; 190 DARRAY(struct journal_key *) keys_sorted = { 0 }; 191 struct journal *j = &c->journal; 192 u64 start_seq = c->journal_replay_seq_start; 193 u64 end_seq = c->journal_replay_seq_start; 194 struct btree_trans *trans = bch2_trans_get(c); 195 int ret = 0; 196 197 if (keys->nr) { 198 ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)", 199 keys->nr, start_seq, end_seq); 200 if (ret) 201 goto err; 202 } 203 204 BUG_ON(!atomic_read(&keys->ref)); 205 206 /* 207 * First, attempt to replay keys in sorted order. This is more 208 * efficient - better locality of btree access - but some might fail if 209 * that would cause a journal deadlock. 210 */ 211 darray_for_each(*keys, k) { 212 cond_resched(); 213 214 /* Skip fastpath if we're low on space in the journal */ 215 ret = c->journal.watermark ? -1 : 216 commit_do(trans, NULL, NULL, 217 BCH_TRANS_COMMIT_no_enospc| 218 BCH_TRANS_COMMIT_journal_reclaim| 219 (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0), 220 bch2_journal_replay_key(trans, k)); 221 BUG_ON(!ret && !k->overwritten); 222 if (ret) { 223 ret = darray_push(&keys_sorted, k); 224 if (ret) 225 goto err; 226 } 227 } 228 229 /* 230 * Now, replay any remaining keys in the order in which they appear in 231 * the journal, unpinning those journal entries as we go: 232 */ 233 sort(keys_sorted.data, keys_sorted.nr, 234 sizeof(keys_sorted.data[0]), 235 journal_sort_seq_cmp, NULL); 236 237 darray_for_each(keys_sorted, kp) { 238 cond_resched(); 239 240 struct journal_key *k = *kp; 241 242 replay_now_at(j, k->journal_seq); 243 244 ret = commit_do(trans, NULL, NULL, 245 BCH_TRANS_COMMIT_no_enospc| 246 (!k->allocated 247 ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim 248 : 0), 249 bch2_journal_replay_key(trans, k)); 250 bch_err_msg(c, ret, "while replaying key at btree %s level %u:", 251 bch2_btree_id_str(k->btree_id), k->level); 252 if (ret) 253 goto err; 254 255 BUG_ON(!k->overwritten); 256 } 257 258 /* 259 * We need to put our btree_trans before calling flush_all_pins(), since 260 * that will use a btree_trans internally 261 */ 262 bch2_trans_put(trans); 263 trans = NULL; 264 265 if (!c->opts.keep_journal) 266 bch2_journal_keys_put_initial(c); 267 268 replay_now_at(j, j->replay_journal_seq_end); 269 j->replay_journal_seq = 0; 270 271 bch2_journal_set_replay_done(j); 272 273 if (keys->nr) 274 bch2_journal_log_msg(c, "journal replay finished"); 275 err: 276 if (trans) 277 bch2_trans_put(trans); 278 darray_exit(&keys_sorted); 279 bch_err_fn(c, ret); 280 return ret; 281 } 282 283 /* journal replay early: */ 284 285 static int journal_replay_entry_early(struct bch_fs *c, 286 struct jset_entry *entry) 287 { 288 int ret = 0; 289 290 switch (entry->type) { 291 case BCH_JSET_ENTRY_btree_root: { 292 struct btree_root *r; 293 294 while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) { 295 ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL }); 296 if (ret) 297 return ret; 298 } 299 300 r = bch2_btree_id_root(c, entry->btree_id); 301 302 if (entry->u64s) { 303 r->level = entry->level; 304 bkey_copy(&r->key, (struct bkey_i *) entry->start); 305 r->error = 0; 306 } else { 307 r->error = -BCH_ERR_btree_node_read_error; 308 } 309 r->alive = true; 310 break; 311 } 312 case BCH_JSET_ENTRY_usage: { 313 struct jset_entry_usage *u = 314 container_of(entry, struct jset_entry_usage, entry); 315 316 switch (entry->btree_id) { 317 case BCH_FS_USAGE_reserved: 318 if (entry->level < BCH_REPLICAS_MAX) 319 c->usage_base->persistent_reserved[entry->level] = 320 le64_to_cpu(u->v); 321 break; 322 case BCH_FS_USAGE_inodes: 323 c->usage_base->b.nr_inodes = le64_to_cpu(u->v); 324 break; 325 case BCH_FS_USAGE_key_version: 326 atomic64_set(&c->key_version, 327 le64_to_cpu(u->v)); 328 break; 329 } 330 331 break; 332 } 333 case BCH_JSET_ENTRY_data_usage: { 334 struct jset_entry_data_usage *u = 335 container_of(entry, struct jset_entry_data_usage, entry); 336 337 ret = bch2_replicas_set_usage(c, &u->r, 338 le64_to_cpu(u->v)); 339 break; 340 } 341 case BCH_JSET_ENTRY_dev_usage: { 342 struct jset_entry_dev_usage *u = 343 container_of(entry, struct jset_entry_dev_usage, entry); 344 struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev)); 345 unsigned i, nr_types = jset_entry_dev_usage_nr_types(u); 346 347 for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) { 348 ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets); 349 ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors); 350 ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented); 351 } 352 353 break; 354 } 355 case BCH_JSET_ENTRY_blacklist: { 356 struct jset_entry_blacklist *bl_entry = 357 container_of(entry, struct jset_entry_blacklist, entry); 358 359 ret = bch2_journal_seq_blacklist_add(c, 360 le64_to_cpu(bl_entry->seq), 361 le64_to_cpu(bl_entry->seq) + 1); 362 break; 363 } 364 case BCH_JSET_ENTRY_blacklist_v2: { 365 struct jset_entry_blacklist_v2 *bl_entry = 366 container_of(entry, struct jset_entry_blacklist_v2, entry); 367 368 ret = bch2_journal_seq_blacklist_add(c, 369 le64_to_cpu(bl_entry->start), 370 le64_to_cpu(bl_entry->end) + 1); 371 break; 372 } 373 case BCH_JSET_ENTRY_clock: { 374 struct jset_entry_clock *clock = 375 container_of(entry, struct jset_entry_clock, entry); 376 377 atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time)); 378 } 379 } 380 381 return ret; 382 } 383 384 static int journal_replay_early(struct bch_fs *c, 385 struct bch_sb_field_clean *clean) 386 { 387 if (clean) { 388 for (struct jset_entry *entry = clean->start; 389 entry != vstruct_end(&clean->field); 390 entry = vstruct_next(entry)) { 391 int ret = journal_replay_entry_early(c, entry); 392 if (ret) 393 return ret; 394 } 395 } else { 396 struct genradix_iter iter; 397 struct journal_replay *i, **_i; 398 399 genradix_for_each(&c->journal_entries, iter, _i) { 400 i = *_i; 401 402 if (journal_replay_ignore(i)) 403 continue; 404 405 vstruct_for_each(&i->j, entry) { 406 int ret = journal_replay_entry_early(c, entry); 407 if (ret) 408 return ret; 409 } 410 } 411 } 412 413 bch2_fs_usage_initialize(c); 414 415 return 0; 416 } 417 418 /* sb clean section: */ 419 420 static int read_btree_roots(struct bch_fs *c) 421 { 422 unsigned i; 423 int ret = 0; 424 425 for (i = 0; i < btree_id_nr_alive(c); i++) { 426 struct btree_root *r = bch2_btree_id_root(c, i); 427 428 if (!r->alive) 429 continue; 430 431 if (btree_id_is_alloc(i) && c->opts.reconstruct_alloc) 432 continue; 433 434 if (r->error) { 435 __fsck_err(c, 436 btree_id_is_alloc(i) 437 ? FSCK_CAN_IGNORE : 0, 438 btree_root_bkey_invalid, 439 "invalid btree root %s", 440 bch2_btree_id_str(i)); 441 if (i == BTREE_ID_alloc) 442 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 443 } 444 445 ret = bch2_btree_root_read(c, i, &r->key, r->level); 446 if (ret) { 447 fsck_err(c, 448 btree_root_read_error, 449 "error reading btree root %s", 450 bch2_btree_id_str(i)); 451 if (btree_id_is_alloc(i)) 452 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 453 ret = 0; 454 } 455 } 456 457 for (i = 0; i < BTREE_ID_NR; i++) { 458 struct btree_root *r = bch2_btree_id_root(c, i); 459 460 if (!r->b) { 461 r->alive = false; 462 r->level = 0; 463 bch2_btree_root_alloc(c, i); 464 } 465 } 466 fsck_err: 467 return ret; 468 } 469 470 static int bch2_initialize_subvolumes(struct bch_fs *c) 471 { 472 struct bkey_i_snapshot_tree root_tree; 473 struct bkey_i_snapshot root_snapshot; 474 struct bkey_i_subvolume root_volume; 475 int ret; 476 477 bkey_snapshot_tree_init(&root_tree.k_i); 478 root_tree.k.p.offset = 1; 479 root_tree.v.master_subvol = cpu_to_le32(1); 480 root_tree.v.root_snapshot = cpu_to_le32(U32_MAX); 481 482 bkey_snapshot_init(&root_snapshot.k_i); 483 root_snapshot.k.p.offset = U32_MAX; 484 root_snapshot.v.flags = 0; 485 root_snapshot.v.parent = 0; 486 root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL); 487 root_snapshot.v.tree = cpu_to_le32(1); 488 SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true); 489 490 bkey_subvolume_init(&root_volume.k_i); 491 root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL; 492 root_volume.v.flags = 0; 493 root_volume.v.snapshot = cpu_to_le32(U32_MAX); 494 root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO); 495 496 ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?: 497 bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?: 498 bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0); 499 bch_err_fn(c, ret); 500 return ret; 501 } 502 503 static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans) 504 { 505 struct btree_iter iter; 506 struct bkey_s_c k; 507 struct bch_inode_unpacked inode; 508 int ret; 509 510 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, 511 SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0); 512 ret = bkey_err(k); 513 if (ret) 514 return ret; 515 516 if (!bkey_is_inode(k.k)) { 517 bch_err(trans->c, "root inode not found"); 518 ret = -BCH_ERR_ENOENT_inode; 519 goto err; 520 } 521 522 ret = bch2_inode_unpack(k, &inode); 523 BUG_ON(ret); 524 525 inode.bi_subvol = BCACHEFS_ROOT_SUBVOL; 526 527 ret = bch2_inode_write(trans, &iter, &inode); 528 err: 529 bch2_trans_iter_exit(trans, &iter); 530 return ret; 531 } 532 533 /* set bi_subvol on root inode */ 534 noinline_for_stack 535 static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c) 536 { 537 int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw, 538 __bch2_fs_upgrade_for_subvolumes(trans)); 539 bch_err_fn(c, ret); 540 return ret; 541 } 542 543 const char * const bch2_recovery_passes[] = { 544 #define x(_fn, ...) #_fn, 545 BCH_RECOVERY_PASSES() 546 #undef x 547 NULL 548 }; 549 550 static int bch2_check_allocations(struct bch_fs *c) 551 { 552 return bch2_gc(c, true, c->opts.norecovery); 553 } 554 555 static int bch2_set_may_go_rw(struct bch_fs *c) 556 { 557 struct journal_keys *keys = &c->journal_keys; 558 559 /* 560 * After we go RW, the journal keys buffer can't be modified (except for 561 * setting journal_key->overwritten: it will be accessed by multiple 562 * threads 563 */ 564 move_gap(keys, keys->nr); 565 566 set_bit(BCH_FS_may_go_rw, &c->flags); 567 568 if (keys->nr || c->opts.fsck || !c->sb.clean) 569 return bch2_fs_read_write_early(c); 570 return 0; 571 } 572 573 struct recovery_pass_fn { 574 int (*fn)(struct bch_fs *); 575 unsigned when; 576 }; 577 578 static struct recovery_pass_fn recovery_pass_fns[] = { 579 #define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when }, 580 BCH_RECOVERY_PASSES() 581 #undef x 582 }; 583 584 u64 bch2_recovery_passes_to_stable(u64 v) 585 { 586 static const u8 map[] = { 587 #define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n, 588 BCH_RECOVERY_PASSES() 589 #undef x 590 }; 591 592 u64 ret = 0; 593 for (unsigned i = 0; i < ARRAY_SIZE(map); i++) 594 if (v & BIT_ULL(i)) 595 ret |= BIT_ULL(map[i]); 596 return ret; 597 } 598 599 u64 bch2_recovery_passes_from_stable(u64 v) 600 { 601 static const u8 map[] = { 602 #define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n, 603 BCH_RECOVERY_PASSES() 604 #undef x 605 }; 606 607 u64 ret = 0; 608 for (unsigned i = 0; i < ARRAY_SIZE(map); i++) 609 if (v & BIT_ULL(i)) 610 ret |= BIT_ULL(map[i]); 611 return ret; 612 } 613 614 static bool check_version_upgrade(struct bch_fs *c) 615 { 616 unsigned latest_version = bcachefs_metadata_version_current; 617 unsigned latest_compatible = min(latest_version, 618 bch2_latest_compatible_version(c->sb.version)); 619 unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version; 620 unsigned new_version = 0; 621 622 if (old_version < bcachefs_metadata_required_upgrade_below) { 623 if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible || 624 latest_compatible < bcachefs_metadata_required_upgrade_below) 625 new_version = latest_version; 626 else 627 new_version = latest_compatible; 628 } else { 629 switch (c->opts.version_upgrade) { 630 case BCH_VERSION_UPGRADE_compatible: 631 new_version = latest_compatible; 632 break; 633 case BCH_VERSION_UPGRADE_incompatible: 634 new_version = latest_version; 635 break; 636 case BCH_VERSION_UPGRADE_none: 637 new_version = min(old_version, latest_version); 638 break; 639 } 640 } 641 642 if (new_version > old_version) { 643 struct printbuf buf = PRINTBUF; 644 645 if (old_version < bcachefs_metadata_required_upgrade_below) 646 prt_str(&buf, "Version upgrade required:\n"); 647 648 if (old_version != c->sb.version) { 649 prt_str(&buf, "Version upgrade from "); 650 bch2_version_to_text(&buf, c->sb.version_upgrade_complete); 651 prt_str(&buf, " to "); 652 bch2_version_to_text(&buf, c->sb.version); 653 prt_str(&buf, " incomplete\n"); 654 } 655 656 prt_printf(&buf, "Doing %s version upgrade from ", 657 BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version) 658 ? "incompatible" : "compatible"); 659 bch2_version_to_text(&buf, old_version); 660 prt_str(&buf, " to "); 661 bch2_version_to_text(&buf, new_version); 662 prt_newline(&buf); 663 664 struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); 665 __le64 passes = ext->recovery_passes_required[0]; 666 bch2_sb_set_upgrade(c, old_version, new_version); 667 passes = ext->recovery_passes_required[0] & ~passes; 668 669 if (passes) { 670 prt_str(&buf, " running recovery passes: "); 671 prt_bitflags(&buf, bch2_recovery_passes, 672 bch2_recovery_passes_from_stable(le64_to_cpu(passes))); 673 } 674 675 bch_info(c, "%s", buf.buf); 676 677 bch2_sb_upgrade(c, new_version); 678 679 printbuf_exit(&buf); 680 return true; 681 } 682 683 return false; 684 } 685 686 u64 bch2_fsck_recovery_passes(void) 687 { 688 u64 ret = 0; 689 690 for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) 691 if (recovery_pass_fns[i].when & PASS_FSCK) 692 ret |= BIT_ULL(i); 693 return ret; 694 } 695 696 static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) 697 { 698 struct recovery_pass_fn *p = recovery_pass_fns + pass; 699 700 if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read) 701 return false; 702 if (c->recovery_passes_explicit & BIT_ULL(pass)) 703 return true; 704 if ((p->when & PASS_FSCK) && c->opts.fsck) 705 return true; 706 if ((p->when & PASS_UNCLEAN) && !c->sb.clean) 707 return true; 708 if (p->when & PASS_ALWAYS) 709 return true; 710 return false; 711 } 712 713 static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) 714 { 715 struct recovery_pass_fn *p = recovery_pass_fns + pass; 716 int ret; 717 718 if (!(p->when & PASS_SILENT)) 719 bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."), 720 bch2_recovery_passes[pass]); 721 ret = p->fn(c); 722 if (ret) 723 return ret; 724 if (!(p->when & PASS_SILENT)) 725 bch2_print(c, KERN_CONT " done\n"); 726 727 return 0; 728 } 729 730 static int bch2_run_recovery_passes(struct bch_fs *c) 731 { 732 int ret = 0; 733 734 while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) { 735 if (should_run_recovery_pass(c, c->curr_recovery_pass)) { 736 unsigned pass = c->curr_recovery_pass; 737 738 ret = bch2_run_recovery_pass(c, c->curr_recovery_pass); 739 if (bch2_err_matches(ret, BCH_ERR_restart_recovery) || 740 (ret && c->curr_recovery_pass < pass)) 741 continue; 742 if (ret) 743 break; 744 745 c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass); 746 } 747 c->curr_recovery_pass++; 748 c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass); 749 } 750 751 return ret; 752 } 753 754 int bch2_run_online_recovery_passes(struct bch_fs *c) 755 { 756 int ret = 0; 757 758 for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) { 759 struct recovery_pass_fn *p = recovery_pass_fns + i; 760 761 if (!(p->when & PASS_ONLINE)) 762 continue; 763 764 ret = bch2_run_recovery_pass(c, i); 765 if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) { 766 i = c->curr_recovery_pass; 767 continue; 768 } 769 if (ret) 770 break; 771 } 772 773 return ret; 774 } 775 776 int bch2_fs_recovery(struct bch_fs *c) 777 { 778 struct bch_sb_field_clean *clean = NULL; 779 struct jset *last_journal_entry = NULL; 780 u64 last_seq = 0, blacklist_seq, journal_seq; 781 int ret = 0; 782 783 if (c->sb.clean) { 784 clean = bch2_read_superblock_clean(c); 785 ret = PTR_ERR_OR_ZERO(clean); 786 if (ret) 787 goto err; 788 789 bch_info(c, "recovering from clean shutdown, journal seq %llu", 790 le64_to_cpu(clean->journal_seq)); 791 } else { 792 bch_info(c, "recovering from unclean shutdown"); 793 } 794 795 if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) { 796 bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported"); 797 ret = -EINVAL; 798 goto err; 799 } 800 801 if (!c->sb.clean && 802 !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) { 803 bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix"); 804 ret = -EINVAL; 805 goto err; 806 } 807 808 if (c->opts.fsck && c->opts.norecovery) { 809 bch_err(c, "cannot select both norecovery and fsck"); 810 ret = -EINVAL; 811 goto err; 812 } 813 814 if (!c->opts.nochanges) { 815 mutex_lock(&c->sb_lock); 816 bool write_sb = false; 817 818 struct bch_sb_field_ext *ext = 819 bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64)); 820 if (!ext) { 821 ret = -BCH_ERR_ENOSPC_sb; 822 mutex_unlock(&c->sb_lock); 823 goto err; 824 } 825 826 if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) { 827 ext->recovery_passes_required[0] |= 828 cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology))); 829 write_sb = true; 830 } 831 832 u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); 833 if (sb_passes) { 834 struct printbuf buf = PRINTBUF; 835 prt_str(&buf, "superblock requires following recovery passes to be run:\n "); 836 prt_bitflags(&buf, bch2_recovery_passes, sb_passes); 837 bch_info(c, "%s", buf.buf); 838 printbuf_exit(&buf); 839 } 840 841 if (bch2_check_version_downgrade(c)) { 842 struct printbuf buf = PRINTBUF; 843 844 prt_str(&buf, "Version downgrade required:"); 845 846 __le64 passes = ext->recovery_passes_required[0]; 847 bch2_sb_set_downgrade(c, 848 BCH_VERSION_MINOR(bcachefs_metadata_version_current), 849 BCH_VERSION_MINOR(c->sb.version)); 850 passes = ext->recovery_passes_required[0] & ~passes; 851 if (passes) { 852 prt_str(&buf, "\n running recovery passes: "); 853 prt_bitflags(&buf, bch2_recovery_passes, 854 bch2_recovery_passes_from_stable(le64_to_cpu(passes))); 855 } 856 857 bch_info(c, "%s", buf.buf); 858 printbuf_exit(&buf); 859 write_sb = true; 860 } 861 862 if (check_version_upgrade(c)) 863 write_sb = true; 864 865 if (write_sb) 866 bch2_write_super(c); 867 868 c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); 869 mutex_unlock(&c->sb_lock); 870 } 871 872 if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) 873 c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology); 874 875 if (c->opts.fsck) 876 set_bit(BCH_FS_fsck_running, &c->flags); 877 878 ret = bch2_blacklist_table_initialize(c); 879 if (ret) { 880 bch_err(c, "error initializing blacklist table"); 881 goto err; 882 } 883 884 if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) { 885 struct genradix_iter iter; 886 struct journal_replay **i; 887 888 bch_verbose(c, "starting journal read"); 889 ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq); 890 if (ret) 891 goto err; 892 893 /* 894 * note: cmd_list_journal needs the blacklist table fully up to date so 895 * it can asterisk ignored journal entries: 896 */ 897 if (c->opts.read_journal_only) 898 goto out; 899 900 genradix_for_each_reverse(&c->journal_entries, iter, i) 901 if (!journal_replay_ignore(*i)) { 902 last_journal_entry = &(*i)->j; 903 break; 904 } 905 906 if (mustfix_fsck_err_on(c->sb.clean && 907 last_journal_entry && 908 !journal_entry_empty(last_journal_entry), c, 909 clean_but_journal_not_empty, 910 "filesystem marked clean but journal not empty")) { 911 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 912 SET_BCH_SB_CLEAN(c->disk_sb.sb, false); 913 c->sb.clean = false; 914 } 915 916 if (!last_journal_entry) { 917 fsck_err_on(!c->sb.clean, c, 918 dirty_but_no_journal_entries, 919 "no journal entries found"); 920 if (clean) 921 goto use_clean; 922 923 genradix_for_each_reverse(&c->journal_entries, iter, i) 924 if (*i) { 925 last_journal_entry = &(*i)->j; 926 (*i)->ignore_blacklisted = false; 927 (*i)->ignore_not_dirty= false; 928 /* 929 * This was probably a NO_FLUSH entry, 930 * so last_seq was garbage - but we know 931 * we're only using a single journal 932 * entry, set it here: 933 */ 934 (*i)->j.last_seq = (*i)->j.seq; 935 break; 936 } 937 } 938 939 ret = bch2_journal_keys_sort(c); 940 if (ret) 941 goto err; 942 943 if (c->sb.clean && last_journal_entry) { 944 ret = bch2_verify_superblock_clean(c, &clean, 945 last_journal_entry); 946 if (ret) 947 goto err; 948 } 949 } else { 950 use_clean: 951 if (!clean) { 952 bch_err(c, "no superblock clean section found"); 953 ret = -BCH_ERR_fsck_repair_impossible; 954 goto err; 955 956 } 957 blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1; 958 } 959 960 c->journal_replay_seq_start = last_seq; 961 c->journal_replay_seq_end = blacklist_seq - 1; 962 963 if (c->opts.reconstruct_alloc) 964 do_reconstruct_alloc(c); 965 966 zero_out_btree_mem_ptr(&c->journal_keys); 967 968 ret = journal_replay_early(c, clean); 969 if (ret) 970 goto err; 971 972 /* 973 * After an unclean shutdown, skip then next few journal sequence 974 * numbers as they may have been referenced by btree writes that 975 * happened before their corresponding journal writes - those btree 976 * writes need to be ignored, by skipping and blacklisting the next few 977 * journal sequence numbers: 978 */ 979 if (!c->sb.clean) 980 journal_seq += 8; 981 982 if (blacklist_seq != journal_seq) { 983 ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu", 984 blacklist_seq, journal_seq) ?: 985 bch2_journal_seq_blacklist_add(c, 986 blacklist_seq, journal_seq); 987 if (ret) { 988 bch_err_msg(c, ret, "error creating new journal seq blacklist entry"); 989 goto err; 990 } 991 } 992 993 ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu", 994 journal_seq, last_seq, blacklist_seq - 1) ?: 995 bch2_fs_journal_start(&c->journal, journal_seq); 996 if (ret) 997 goto err; 998 999 /* 1000 * Skip past versions that might have possibly been used (as nonces), 1001 * but hadn't had their pointers written: 1002 */ 1003 if (c->sb.encryption_type && !c->sb.clean) 1004 atomic64_add(1 << 16, &c->key_version); 1005 1006 ret = read_btree_roots(c); 1007 if (ret) 1008 goto err; 1009 1010 ret = bch2_run_recovery_passes(c); 1011 if (ret) 1012 goto err; 1013 1014 clear_bit(BCH_FS_fsck_running, &c->flags); 1015 1016 /* If we fixed errors, verify that fs is actually clean now: */ 1017 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && 1018 test_bit(BCH_FS_errors_fixed, &c->flags) && 1019 !test_bit(BCH_FS_errors_not_fixed, &c->flags) && 1020 !test_bit(BCH_FS_error, &c->flags)) { 1021 bch2_flush_fsck_errs(c); 1022 1023 bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean"); 1024 clear_bit(BCH_FS_errors_fixed, &c->flags); 1025 1026 c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; 1027 1028 ret = bch2_run_recovery_passes(c); 1029 if (ret) 1030 goto err; 1031 1032 if (test_bit(BCH_FS_errors_fixed, &c->flags) || 1033 test_bit(BCH_FS_errors_not_fixed, &c->flags)) { 1034 bch_err(c, "Second fsck run was not clean"); 1035 set_bit(BCH_FS_errors_not_fixed, &c->flags); 1036 } 1037 1038 set_bit(BCH_FS_errors_fixed, &c->flags); 1039 } 1040 1041 if (enabled_qtypes(c)) { 1042 bch_verbose(c, "reading quotas"); 1043 ret = bch2_fs_quota_read(c); 1044 if (ret) 1045 goto err; 1046 bch_verbose(c, "quotas done"); 1047 } 1048 1049 mutex_lock(&c->sb_lock); 1050 bool write_sb = false; 1051 1052 if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) { 1053 SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version)); 1054 write_sb = true; 1055 } 1056 1057 if (!test_bit(BCH_FS_error, &c->flags) && 1058 !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) { 1059 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info); 1060 write_sb = true; 1061 } 1062 1063 if (!test_bit(BCH_FS_error, &c->flags)) { 1064 struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); 1065 if (ext && 1066 (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) || 1067 !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) { 1068 memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required)); 1069 memset(ext->errors_silent, 0, sizeof(ext->errors_silent)); 1070 write_sb = true; 1071 } 1072 } 1073 1074 if (c->opts.fsck && 1075 !test_bit(BCH_FS_error, &c->flags) && 1076 !test_bit(BCH_FS_errors_not_fixed, &c->flags)) { 1077 SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0); 1078 SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0); 1079 write_sb = true; 1080 } 1081 1082 if (write_sb) 1083 bch2_write_super(c); 1084 mutex_unlock(&c->sb_lock); 1085 1086 if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) || 1087 c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) { 1088 struct bch_move_stats stats; 1089 1090 bch2_move_stats_init(&stats, "recovery"); 1091 1092 struct printbuf buf = PRINTBUF; 1093 bch2_version_to_text(&buf, c->sb.version_min); 1094 bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf); 1095 printbuf_exit(&buf); 1096 1097 ret = bch2_fs_read_write_early(c) ?: 1098 bch2_scan_old_btree_nodes(c, &stats); 1099 if (ret) 1100 goto err; 1101 bch_info(c, "scanning for old btree nodes done"); 1102 } 1103 1104 if (c->journal_seq_blacklist_table && 1105 c->journal_seq_blacklist_table->nr > 128) 1106 queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work); 1107 1108 ret = 0; 1109 out: 1110 bch2_flush_fsck_errs(c); 1111 1112 if (!c->opts.keep_journal && 1113 test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) 1114 bch2_journal_keys_put_initial(c); 1115 kfree(clean); 1116 1117 if (!ret && 1118 test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) && 1119 !c->opts.nochanges) { 1120 bch2_fs_read_write_early(c); 1121 bch2_delete_dead_snapshots_async(c); 1122 } 1123 1124 bch_err_fn(c, ret); 1125 return ret; 1126 err: 1127 fsck_err: 1128 bch2_fs_emergency_read_only(c); 1129 goto out; 1130 } 1131 1132 int bch2_fs_initialize(struct bch_fs *c) 1133 { 1134 struct bch_inode_unpacked root_inode, lostfound_inode; 1135 struct bkey_inode_buf packed_inode; 1136 struct qstr lostfound = QSTR("lost+found"); 1137 int ret; 1138 1139 bch_notice(c, "initializing new filesystem"); 1140 1141 mutex_lock(&c->sb_lock); 1142 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done); 1143 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done); 1144 1145 bch2_check_version_downgrade(c); 1146 1147 if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) { 1148 bch2_sb_upgrade(c, bcachefs_metadata_version_current); 1149 SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current); 1150 bch2_write_super(c); 1151 } 1152 mutex_unlock(&c->sb_lock); 1153 1154 c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns); 1155 set_bit(BCH_FS_may_go_rw, &c->flags); 1156 1157 for (unsigned i = 0; i < BTREE_ID_NR; i++) 1158 bch2_btree_root_alloc(c, i); 1159 1160 for_each_member_device(c, ca) 1161 bch2_dev_usage_init(ca); 1162 1163 ret = bch2_fs_journal_alloc(c); 1164 if (ret) 1165 goto err; 1166 1167 /* 1168 * journal_res_get() will crash if called before this has 1169 * set up the journal.pin FIFO and journal.cur pointer: 1170 */ 1171 bch2_fs_journal_start(&c->journal, 1); 1172 bch2_journal_set_replay_done(&c->journal); 1173 1174 ret = bch2_fs_read_write_early(c); 1175 if (ret) 1176 goto err; 1177 1178 /* 1179 * Write out the superblock and journal buckets, now that we can do 1180 * btree updates 1181 */ 1182 bch_verbose(c, "marking superblocks"); 1183 ret = bch2_trans_mark_dev_sbs(c); 1184 bch_err_msg(c, ret, "marking superblocks"); 1185 if (ret) 1186 goto err; 1187 1188 for_each_online_member(c, ca) 1189 ca->new_fs_bucket_idx = 0; 1190 1191 ret = bch2_fs_freespace_init(c); 1192 if (ret) 1193 goto err; 1194 1195 ret = bch2_initialize_subvolumes(c); 1196 if (ret) 1197 goto err; 1198 1199 bch_verbose(c, "reading snapshots table"); 1200 ret = bch2_snapshots_read(c); 1201 if (ret) 1202 goto err; 1203 bch_verbose(c, "reading snapshots done"); 1204 1205 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL); 1206 root_inode.bi_inum = BCACHEFS_ROOT_INO; 1207 root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL; 1208 bch2_inode_pack(&packed_inode, &root_inode); 1209 packed_inode.inode.k.p.snapshot = U32_MAX; 1210 1211 ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0); 1212 bch_err_msg(c, ret, "creating root directory"); 1213 if (ret) 1214 goto err; 1215 1216 bch2_inode_init_early(c, &lostfound_inode); 1217 1218 ret = bch2_trans_do(c, NULL, NULL, 0, 1219 bch2_create_trans(trans, 1220 BCACHEFS_ROOT_SUBVOL_INUM, 1221 &root_inode, &lostfound_inode, 1222 &lostfound, 1223 0, 0, S_IFDIR|0700, 0, 1224 NULL, NULL, (subvol_inum) { 0 }, 0)); 1225 bch_err_msg(c, ret, "creating lost+found"); 1226 if (ret) 1227 goto err; 1228 1229 c->recovery_pass_done = ARRAY_SIZE(recovery_pass_fns) - 1; 1230 1231 if (enabled_qtypes(c)) { 1232 ret = bch2_fs_quota_read(c); 1233 if (ret) 1234 goto err; 1235 } 1236 1237 ret = bch2_journal_flush(&c->journal); 1238 bch_err_msg(c, ret, "writing first journal entry"); 1239 if (ret) 1240 goto err; 1241 1242 mutex_lock(&c->sb_lock); 1243 SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true); 1244 SET_BCH_SB_CLEAN(c->disk_sb.sb, false); 1245 1246 bch2_write_super(c); 1247 mutex_unlock(&c->sb_lock); 1248 1249 return 0; 1250 err: 1251 bch_err_fn(c, ret); 1252 return ret; 1253 } 1254