// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_write_buffer.h"
#include "buckets_waiting_for_journal.h"
#include "chardev.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "counters.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fsck.h"
#include "inode.h"
#include "io_read.h"
#include "io_write.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "migrate.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-errors.h"
#include "sb-members.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "sysfs.h"
#include "trace.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");

#define KTYPE(type)						\
static const struct attribute_group type ## _group = {		\
	.attrs = type ## _files					\
};								\
								\
static const struct attribute_group *type ## _groups[] = {	\
	&type ## _group,					\
	NULL							\
};								\
								\
static const struct kobj_type type ## _ktype = {		\
	.release	= type ## _release,			\
	.sysfs_ops	= &type ## _sysfs_ops,			\
	.default_groups = type ## _groups			\
}

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);
static void bch2_fs_counters_release(struct kobject *k)
{
}

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

KTYPE(bch2_fs);
KTYPE(bch2_fs_counters);
KTYPE(bch2_fs_internal);
KTYPE(bch2_fs_opts_dir);
KTYPE(bch2_fs_time_stats);
KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

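/*
 * Look up the filesystem that owns a given block device; returns with a
 * reference on c->cl held, or NULL if no open filesystem uses that device:
 */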
struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
	struct bch_fs *c;
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	rcu_read_lock();

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(ca, c, i, NULL)
			if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
				closure_get(&c->cl);
				goto found;
			}
	c = NULL;
found:
	rcu_read_unlock();
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	lockdep_assert_held(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
			return c;

	return NULL;
}

struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(uuid);
	if (c)
		closure_get(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, u64s =
		((sizeof(struct jset_entry_dev_usage) +
		  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
		sizeof(u64);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		nr++;
	rcu_read_unlock();

	bch2_journal_entry_res_resize(&c->journal,
			&c->dev_usage_journal_res, u64s * nr);
}

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and rebalance (to free up space)
 *
 * - copygc and rebalance depend on mark and sweep gc (they actually probably
 *   don't because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depends on the allocator threads
 *
 * - allocator depends on the journal (when it rewrites prios and gens)
 */

static void __bch2_fs_read_only(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, clean_passes = 0;
	u64 seq = 0;

	bch2_fs_ec_stop(c);
	bch2_open_buckets_stop(c, NULL, true);
	bch2_rebalance_stop(c);
	bch2_copygc_stop(c);
	bch2_gc_thread_stop(c);
	bch2_fs_ec_flush(c);

	bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
		    journal_cur_seq(&c->journal));

	do {
		clean_passes++;

		if (bch2_btree_interior_updates_flush(c) ||
		    bch2_journal_flush_all_pins(&c->journal) ||
		    bch2_btree_flush_all_writes(c) ||
		    seq != atomic64_read(&c->journal.seq)) {
			seq = atomic64_read(&c->journal.seq);
			clean_passes = 0;
		}
	} while (clean_passes < 2);

	bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
		    journal_cur_seq(&c->journal));

	if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
	    !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
		set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
	bch2_fs_journal_stop(&c->journal);

	/*
	 * After stopping journal:
	 */
	for_each_member_device(ca, c, i)
		bch2_dev_allocator_remove(c, ca);
}

#ifndef BCH_WRITE_REF_DEBUG
static void bch2_writes_disabled(struct percpu_ref *writes)
{
	struct bch_fs *c = container_of(writes, struct bch_fs, writes);

	set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
	wake_up(&bch2_read_only_wait);
}
#endif

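/*
 * Transition from read-write to read-only: block new writes, drain outstanding
 * ones, then flush and shut down the journal and allocators:
 */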
void bch2_fs_read_only(struct bch_fs *c)
{
	if (!test_bit(BCH_FS_RW, &c->flags)) {
		bch2_journal_reclaim_stop(&c->journal);
		return;
	}

	BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

	/*
	 * Block new foreground-end write operations from starting - any new
	 * writes will return -EROFS:
	 */
	set_bit(BCH_FS_GOING_RO, &c->flags);
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_kill(&c->writes);
#else
	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
		bch2_write_ref_put(c, i);
#endif

	/*
	 * If we're not doing an emergency shutdown, we want to wait on
	 * outstanding writes to complete so they don't see spurious errors due
	 * to shutting down the allocator:
	 *
	 * If we are doing an emergency shutdown outstanding writes may
	 * hang until we shutdown the allocator so we don't want to wait
	 * on outstanding writes before shutting everything down - but
	 * we do need to wait on them before returning and signalling
	 * that going RO is complete:
	 */
	wait_event(bch2_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
		   test_bit(BCH_FS_EMERGENCY_RO, &c->flags));

	__bch2_fs_read_only(c);

	wait_event(bch2_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

	clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
	clear_bit(BCH_FS_GOING_RO, &c->flags);

	if (!bch2_journal_error(&c->journal) &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
	    test_bit(BCH_FS_STARTED, &c->flags) &&
	    test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
	    !c->opts.norecovery) {
		BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
		BUG_ON(atomic_read(&c->btree_cache.dirty));
		BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
		BUG_ON(c->btree_write_buffer.state.nr);

		bch_verbose(c, "marking filesystem clean");
		bch2_fs_mark_clean(c);
	}

	clear_bit(BCH_FS_RW, &c->flags);
}

static void bch2_fs_read_only_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, read_only_work);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);
}

static void bch2_fs_read_only_async(struct bch_fs *c)
{
	queue_work(system_long_wq, &c->read_only_work);
}

bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
	bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);

	bch2_journal_halt(&c->journal);
	bch2_fs_read_only_async(c);

	wake_up(&bch2_read_only_wait);
	return ret;
}

static int bch2_fs_read_write_late(struct bch_fs *c)
{
	int ret;

	/*
	 * Data move operations can't run until after check_snapshots has
	 * completed, and bch2_snapshot_is_ancestor() is available.
	 *
	 * Ideally we'd start copygc/rebalance earlier instead of waiting for
	 * all of recovery/fsck to complete:
	 */
	ret = bch2_copygc_start(c);
	if (ret) {
		bch_err(c, "error starting copygc thread");
		return ret;
	}

	ret = bch2_rebalance_start(c);
	if (ret) {
		bch_err(c, "error starting rebalance thread");
		return ret;
	}

	return 0;
}

static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
	struct bch_dev *ca;
	unsigned i;
	int ret;

	if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
		bch_err(c, "cannot go rw, unfixed btree errors");
		return -BCH_ERR_erofs_unfixed_errors;
	}

	if (test_bit(BCH_FS_RW, &c->flags))
		return 0;

	if (c->opts.norecovery)
		return -BCH_ERR_erofs_norecovery;

	/*
	 * nochanges is used for fsck -n mode - we have to allow going rw
	 * during recovery for that to work:
	 */
	if (c->opts.nochanges && (!early || c->opts.read_only))
		return -BCH_ERR_erofs_nochanges;

	bch_info(c, "going read-write");

	ret = bch2_sb_members_v2_init(c);
	if (ret)
		goto err;

	ret = bch2_fs_mark_dirty(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);

	/*
	 * First journal write must be a flush write: after a clean shutdown we
	 * don't read the journal, so the first journal write may end up
	 * overwriting whatever was there previously, and there must always be
	 * at least one non-flush write in the journal or recovery will fail:
	 */
	set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	set_bit(BCH_FS_RW, &c->flags);
	set_bit(BCH_FS_WAS_RW, &c->flags);

#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_reinit(&c->writes);
#else
	for (i = 0; i < BCH_WRITE_REF_NR; i++) {
		BUG_ON(atomic_long_read(&c->writes[i]));
		atomic_long_inc(&c->writes[i]);
	}
#endif

	ret = bch2_gc_thread_start(c);
	if (ret) {
		bch_err(c, "error starting gc thread");
		return ret;
	}

	ret = bch2_journal_reclaim_start(&c->journal);
	if (ret)
		goto err;

	if (!early) {
		ret = bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	bch2_do_discards(c);
	bch2_do_invalidates(c);
	bch2_do_stripe_deletes(c);
	bch2_do_pending_node_rewrites(c);
	return 0;
err:
	if (test_bit(BCH_FS_RW, &c->flags))
		bch2_fs_read_only(c);
	else
		__bch2_fs_read_only(c);
	return ret;
}

int bch2_fs_read_write(struct bch_fs *c)
{
	return __bch2_fs_read_write(c, false);
}

int bch2_fs_read_write_early(struct bch_fs *c)
{
	lockdep_assert_held(&c->state_lock);

	return __bch2_fs_read_write(c, true);
}

/* Filesystem startup/shutdown: */

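/*
 * Final teardown, run from the kobject release path once the last reference to
 * the filesystem is dropped:
 */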
static void __bch2_fs_free(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < BCH_TIME_STAT_NR; i++)
		bch2_time_stats_exit(&c->times[i]);

	bch2_free_pending_node_rewrites(c);
	bch2_fs_sb_errors_exit(c);
	bch2_fs_counters_exit(c);
	bch2_fs_snapshots_exit(c);
	bch2_fs_quota_exit(c);
	bch2_fs_fs_io_direct_exit(c);
	bch2_fs_fs_io_buffered_exit(c);
	bch2_fs_fsio_exit(c);
	bch2_fs_ec_exit(c);
	bch2_fs_encryption_exit(c);
	bch2_fs_nocow_locking_exit(c);
	bch2_fs_io_write_exit(c);
	bch2_fs_io_read_exit(c);
	bch2_fs_buckets_waiting_for_journal_exit(c);
	bch2_fs_btree_interior_update_exit(c);
	bch2_fs_btree_iter_exit(c);
	bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
	bch2_fs_btree_cache_exit(c);
	bch2_fs_replicas_exit(c);
	bch2_fs_journal_exit(&c->journal);
	bch2_io_clock_exit(&c->io_clock[WRITE]);
	bch2_io_clock_exit(&c->io_clock[READ]);
	bch2_fs_compress_exit(c);
	bch2_journal_keys_put_initial(c);
	BUG_ON(atomic_read(&c->journal_keys.ref));
	bch2_fs_btree_write_buffer_exit(c);
	percpu_free_rwsem(&c->mark_lock);
	free_percpu(c->online_reserved);

	darray_exit(&c->btree_roots_extra);
	free_percpu(c->pcpu);
	mempool_exit(&c->large_bkey_pool);
	mempool_exit(&c->btree_bounce_pool);
	bioset_exit(&c->btree_bio);
	mempool_exit(&c->fill_iter);
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_exit(&c->writes);
#endif
	kfree(rcu_dereference_protected(c->disk_groups, 1));
	kfree(c->journal_seq_blacklist_table);
	kfree(c->unused_inode_hints);

	if (c->write_ref_wq)
		destroy_workqueue(c->write_ref_wq);
	if (c->io_complete_wq)
		destroy_workqueue(c->io_complete_wq);
	if (c->copygc_wq)
		destroy_workqueue(c->copygc_wq);
	if (c->btree_io_complete_wq)
		destroy_workqueue(c->btree_io_complete_wq);
	if (c->btree_update_wq)
		destroy_workqueue(c->btree_update_wq);

	bch2_free_super(&c->disk_sb);
	kvpfree(c, sizeof(*c));
	module_put(THIS_MODULE);
}

static void bch2_fs_release(struct kobject *kobj)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	__bch2_fs_free(c);
}

void __bch2_fs_stop(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	bch_verbose(c, "shutting down");

	set_bit(BCH_FS_STOPPING, &c->flags);

	cancel_work_sync(&c->journal_seq_blacklist_gc_work);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);

	for_each_member_device(ca, c, i)
		if (ca->kobj.state_in_sysfs &&
		    ca->disk_sb.bdev)
			sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

	if (c->kobj.state_in_sysfs)
		kobject_del(&c->kobj);

	bch2_fs_debug_exit(c);
	bch2_fs_chardev_exit(c);

	kobject_put(&c->counters_kobj);
	kobject_put(&c->time_stats);
	kobject_put(&c->opts_dir);
	kobject_put(&c->internal);

	/* btree prefetch might have kicked off reads in the background: */
	bch2_btree_flush_all_reads(c);

	for_each_member_device(ca, c, i)
		cancel_work_sync(&ca->io_error_work);

	cancel_work_sync(&c->read_only_work);
}

void bch2_fs_free(struct bch_fs *c)
{
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	list_del(&c->list);
	mutex_unlock(&bch_fs_list_lock);

	closure_sync(&c->cl);
	closure_debug_destroy(&c->cl);

	for (i = 0; i < c->sb.nr_devices; i++) {
		struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);

		if (ca) {
			bch2_free_super(&ca->disk_sb);
			bch2_dev_free(ca);
		}
	}

	bch_verbose(c, "shutdown complete");

	kobject_put(&c->kobj);
}

void bch2_fs_stop(struct bch_fs *c)
{
	__bch2_fs_stop(c);
	bch2_fs_free(c);
}

static int bch2_fs_online(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	lockdep_assert_held(&bch_fs_list_lock);

	if (__bch2_uuid_to_fs(c->sb.uuid)) {
		bch_err(c, "filesystem UUID already open");
		return -EINVAL;
	}

	ret = bch2_fs_chardev_init(c);
	if (ret) {
		bch_err(c, "error creating character device");
		return ret;
	}

	bch2_fs_debug_init(c);

	ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
	    kobject_add(&c->internal, &c->kobj, "internal") ?:
	    kobject_add(&c->opts_dir, &c->kobj, "options") ?:
	    kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
	    kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
	    bch2_opts_create_sysfs_files(&c->opts_dir);
	if (ret) {
		bch_err(c, "error creating sysfs objects");
		return ret;
	}

	down_write(&c->state_lock);

	for_each_member_device(ca, c, i) {
		ret = bch2_dev_sysfs_online(c, ca);
		if (ret) {
			bch_err(c, "error creating sysfs objects");
			percpu_ref_put(&ca->ref);
			goto err;
		}
	}

	BUG_ON(!list_empty(&c->list));
	list_add(&c->list, &bch_fs_list);
err:
	up_write(&c->state_lock);
	return ret;
}

static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
	struct bch_fs *c;
	struct printbuf name = PRINTBUF;
	unsigned i, iter_size;
	int ret = 0;

	c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
	if (!c) {
		c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
		goto out;
	}

	__module_get(THIS_MODULE);

	closure_init(&c->cl, NULL);

	c->kobj.kset = bcachefs_kset;
	kobject_init(&c->kobj, &bch2_fs_ktype);
	kobject_init(&c->internal, &bch2_fs_internal_ktype);
	kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
	kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
	kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);

	c->minor = -1;
	c->disk_sb.fs_sb = true;

	init_rwsem(&c->state_lock);
	mutex_init(&c->sb_lock);
	mutex_init(&c->replicas_gc_lock);
	mutex_init(&c->btree_root_lock);
	INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

	init_rwsem(&c->gc_lock);
	mutex_init(&c->gc_gens_lock);
	atomic_set(&c->journal_keys.ref, 1);
	c->journal_keys.initial_ref_held = true;

	for (i = 0; i < BCH_TIME_STAT_NR; i++)
		bch2_time_stats_init(&c->times[i]);

	bch2_fs_copygc_init(c);
	bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
	bch2_fs_btree_interior_update_init_early(c);
	bch2_fs_allocator_background_init(c);
	bch2_fs_allocator_foreground_init(c);
	bch2_fs_rebalance_init(c);
	bch2_fs_quota_init(c);
	bch2_fs_ec_init_early(c);
	bch2_fs_move_init(c);
	bch2_fs_sb_errors_init_early(c);

	INIT_LIST_HEAD(&c->list);

	mutex_init(&c->usage_scratch_lock);

	mutex_init(&c->bio_bounce_pages_lock);
	mutex_init(&c->snapshot_table_lock);
	init_rwsem(&c->snapshot_create_lock);

	spin_lock_init(&c->btree_write_error_lock);

	INIT_WORK(&c->journal_seq_blacklist_gc_work,
		  bch2_blacklist_entries_gc);

	INIT_LIST_HEAD(&c->journal_iters);

	INIT_LIST_HEAD(&c->fsck_error_msgs);
	mutex_init(&c->fsck_error_msgs_lock);

	seqcount_init(&c->gc_pos_lock);

	seqcount_init(&c->usage_lock);

	sema_init(&c->io_in_flight, 128);

	INIT_LIST_HEAD(&c->vfs_inodes_list);
	mutex_init(&c->vfs_inodes_lock);

	c->copy_gc_enabled = 1;
	c->rebalance.enabled = 1;
	c->promote_whole_extents = true;

	c->journal.flush_write_time	= &c->times[BCH_TIME_journal_flush_write];
	c->journal.noflush_write_time	= &c->times[BCH_TIME_journal_noflush_write];
	c->journal.blocked_time		= &c->times[BCH_TIME_blocked_journal];
	c->journal.flush_seq_time	= &c->times[BCH_TIME_journal_flush_seq];

	bch2_fs_btree_cache_init_early(&c->btree_cache);

	mutex_init(&c->sectors_available_lock);

	ret = percpu_init_rwsem(&c->mark_lock);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	ret = bch2_sb_to_fs(c, sb);
	mutex_unlock(&c->sb_lock);

	if (ret)
		goto err;

	pr_uuid(&name, c->sb.user_uuid.b);
	strscpy(c->name, name.buf, sizeof(c->name));
	printbuf_exit(&name);

	ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
	if (ret)
		goto err;

	/* Compat: */
	if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
	    !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
		SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);

	if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
	    !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
		SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);

	c->opts = bch2_opts_default;
	ret = bch2_opts_from_sb(&c->opts, sb);
	if (ret)
		goto err;

	bch2_opts_apply(&c->opts, opts);

	c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
	if (c->opts.inodes_use_key_cache)
		c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
	c->btree_key_cache_btrees |= 1U << BTREE_ID_logged_ops;

	c->block_bits = ilog2(block_sectors(c));
	c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);

	if (bch2_fs_init_fault("fs_alloc")) {
		bch_err(c, "fs_alloc fault injected");
		ret = -EFAULT;
		goto err;
	}

	iter_size = sizeof(struct sort_iter) +
		(btree_blocks(c) + 1) * 2 *
		sizeof(struct sort_iter_set);

	c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));

	if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
				WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512)) ||
	    !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
				WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
	    !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
	    !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
				WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
	    !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
				WQ_FREEZABLE, 0)) ||
#ifndef BCH_WRITE_REF_DEBUG
	    percpu_ref_init(&c->writes, bch2_writes_disabled,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
#endif
	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
	    bioset_init(&c->btree_bio, 1,
			max(offsetof(struct btree_read_bio, bio),
			    offsetof(struct btree_write_bio, wbio.bio)),
			BIOSET_NEED_BVECS) ||
	    !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
	    !(c->online_reserved = alloc_percpu(u64)) ||
	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
					btree_bytes(c)) ||
	    mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
	    !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
					      sizeof(u64), GFP_KERNEL))) {
		ret = -BCH_ERR_ENOMEM_fs_other_alloc;
		goto err;
	}

	ret = bch2_fs_counters_init(c) ?:
	    bch2_fs_sb_errors_init(c) ?:
	    bch2_io_clock_init(&c->io_clock[READ]) ?:
	    bch2_io_clock_init(&c->io_clock[WRITE]) ?:
	    bch2_fs_journal_init(&c->journal) ?:
	    bch2_fs_replicas_init(c) ?:
	    bch2_fs_btree_cache_init(c) ?:
	    bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
	    bch2_fs_btree_iter_init(c) ?:
	    bch2_fs_btree_interior_update_init(c) ?:
	    bch2_fs_buckets_waiting_for_journal_init(c) ?:
	    bch2_fs_btree_write_buffer_init(c) ?:
	    bch2_fs_subvolumes_init(c) ?:
	    bch2_fs_io_read_init(c) ?:
	    bch2_fs_io_write_init(c) ?:
	    bch2_fs_nocow_locking_init(c) ?:
	    bch2_fs_encryption_init(c) ?:
	    bch2_fs_compress_init(c) ?:
	    bch2_fs_ec_init(c) ?:
	    bch2_fs_fsio_init(c) ?:
	    bch2_fs_fs_io_buffered_init(c) ?:
	    bch2_fs_fs_io_direct_init(c);
	if (ret)
		goto err;

	for (i = 0; i < c->sb.nr_devices; i++)
		if (bch2_dev_exists(c->disk_sb.sb, i) &&
		    bch2_dev_alloc(c, i)) {
			ret = -EEXIST;
			goto err;
		}

	bch2_journal_entry_res_resize(&c->journal,
			&c->btree_root_journal_res,
			BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
	bch2_dev_usage_journal_reserve(c);
	bch2_journal_entry_res_resize(&c->journal,
			&c->clock_journal_res,
			(sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);

	mutex_lock(&bch_fs_list_lock);
	ret = bch2_fs_online(c);
	mutex_unlock(&bch_fs_list_lock);

	if (ret)
		goto err;
out:
	return c;
err:
	bch2_fs_free(c);
	c = ERR_PTR(ret);
	goto out;
}

noinline_for_stack
static void print_mount_opts(struct bch_fs *c)
{
	enum bch_opt_id i;
	struct printbuf p = PRINTBUF;
	bool first = true;

	prt_str(&p, "mounting version ");
	bch2_version_to_text(&p, c->sb.version);

	if (c->opts.read_only) {
		prt_str(&p, " opts=");
		first = false;
		prt_printf(&p, "ro");
	}

	for (i = 0; i < bch2_opts_nr; i++) {
		const struct bch_option *opt = &bch2_opt_table[i];
		u64 v = bch2_opt_get_by_id(&c->opts, i);

		if (!(opt->flags & OPT_MOUNT))
			continue;

		if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
			continue;

		prt_str(&p, first ? " opts=" : ",");
		first = false;
		bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
	}

	bch_info(c, "%s", p.buf);
	printbuf_exit(&p);
}

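/*
 * Start a filesystem that bch2_fs_alloc() set up: run recovery (or initialize
 * a brand new filesystem), then go read-write if the mount options allow it:
 */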
int bch2_fs_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	time64_t now = ktime_get_real_seconds();
	unsigned i;
	int ret;

	print_mount_opts(c);

	down_write(&c->state_lock);

	BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));

	mutex_lock(&c->sb_lock);

	ret = bch2_sb_members_v2_init(c);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}

	for_each_online_member(ca, c, i)
		bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);

	mutex_unlock(&c->sb_lock);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
		? bch2_fs_recovery(c)
		: bch2_fs_initialize(c);
	if (ret)
		goto err;

	ret = bch2_opts_check_may_set(c);
	if (ret)
		goto err;

	if (bch2_fs_init_fault("fs_start")) {
		bch_err(c, "fs_start fault injected");
		ret = -EINVAL;
		goto err;
	}

	set_bit(BCH_FS_STARTED, &c->flags);

	if (c->opts.read_only || c->opts.nochanges) {
		bch2_fs_read_only(c);
	} else {
		ret = !test_bit(BCH_FS_RW, &c->flags)
			? bch2_fs_read_write(c)
			: bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	ret = 0;
out:
	up_write(&c->state_lock);
	return ret;
err:
	bch_err_msg(c, ret, "starting filesystem");
	goto out;
}

static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
	struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);

	if (le16_to_cpu(sb->block_size) != block_sectors(c))
		return -BCH_ERR_mismatched_block_size;

	if (le16_to_cpu(m.bucket_size) <
	    BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
		return -BCH_ERR_bucket_size_too_small;

	return 0;
}

static int bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
{
	struct bch_sb *newest =
		le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;

	if (!uuid_equal(&fs->uuid, &sb->uuid))
		return -BCH_ERR_device_not_a_member_of_filesystem;

	if (!bch2_dev_exists(newest, sb->dev_idx))
		return -BCH_ERR_device_has_been_removed;

	if (fs->block_size != sb->block_size)
		return -BCH_ERR_mismatched_block_size;

	return 0;
}

/* Device startup/shutdown: */

static void bch2_dev_release(struct kobject *kobj)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

	kfree(ca);
}

static void bch2_dev_free(struct bch_dev *ca)
{
	cancel_work_sync(&ca->io_error_work);

	if (ca->kobj.state_in_sysfs &&
	    ca->disk_sb.bdev)
		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

	if (ca->kobj.state_in_sysfs)
		kobject_del(&ca->kobj);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);

	free_percpu(ca->io_done);
	bioset_exit(&ca->replica_set);
	bch2_dev_buckets_free(ca);
	free_page((unsigned long) ca->sb_read_scratch);

	bch2_time_stats_exit(&ca->io_latency[WRITE]);
	bch2_time_stats_exit(&ca->io_latency[READ]);

	percpu_ref_exit(&ca->io_ref);
	percpu_ref_exit(&ca->ref);
	kobject_put(&ca->kobj);
}

static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
{

	lockdep_assert_held(&c->state_lock);

	if (percpu_ref_is_zero(&ca->io_ref))
		return;

	__bch2_dev_read_only(c, ca);

	reinit_completion(&ca->io_ref_completion);
	percpu_ref_kill(&ca->io_ref);
	wait_for_completion(&ca->io_ref_completion);

	if (ca->kobj.state_in_sysfs) {
		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
		sysfs_remove_link(&ca->kobj, "block");
	}

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);
}

static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

	complete(&ca->ref_completion);
}

static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

	complete(&ca->io_ref_completion);
}

static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
{
	int ret;

	if (!c->kobj.state_in_sysfs)
		return 0;

	if (!ca->kobj.state_in_sysfs) {
		ret = kobject_add(&ca->kobj, &c->kobj,
				  "dev-%u", ca->dev_idx);
		if (ret)
			return ret;
	}

	if (ca->disk_sb.bdev) {
		struct kobject *block = bdev_kobj(ca->disk_sb.bdev);

		ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
		if (ret)
			return ret;

		ret = sysfs_create_link(&ca->kobj, block, "block");
		if (ret)
			return ret;
	}

	return 0;
}

static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
					struct bch_member *member)
{
	struct bch_dev *ca;
	unsigned i;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		return NULL;

	kobject_init(&ca->kobj, &bch2_dev_ktype);
	init_completion(&ca->ref_completion);
	init_completion(&ca->io_ref_completion);

	init_rwsem(&ca->bucket_lock);

	INIT_WORK(&ca->io_error_work, bch2_io_error_work);

	bch2_time_stats_init(&ca->io_latency[READ]);
	bch2_time_stats_init(&ca->io_latency[WRITE]);

	ca->mi = bch2_mi_to_cpu(member);

	for (i = 0; i < ARRAY_SIZE(member->errors); i++)
		atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));

	ca->uuid = member->uuid;

	ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / btree_sectors(c));

	if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
			    0, GFP_KERNEL) ||
	    percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
	    bch2_dev_buckets_alloc(c, ca) ||
	    bioset_init(&ca->replica_set, 4,
			offsetof(struct bch_write_bio, bio), 0) ||
	    !(ca->io_done = alloc_percpu(*ca->io_done)))
		goto err;

	return ca;
err:
	bch2_dev_free(ca);
	return NULL;
}

static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
			    unsigned dev_idx)
{
	ca->dev_idx = dev_idx;
	__set_bit(ca->dev_idx, ca->self.d);
	scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

	ca->fs = c;
	rcu_assign_pointer(c->devs[ca->dev_idx], ca);

	if (bch2_dev_sysfs_online(c, ca))
		pr_warn("error creating sysfs objects");
}

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
	struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
	struct bch_dev *ca = NULL;
	int ret = 0;

	if (bch2_fs_init_fault("dev_alloc"))
		goto err;

	ca = __bch2_dev_alloc(c, &member);
	if (!ca)
		goto err;

	ca->fs = c;

	bch2_dev_attach(c, ca, dev_idx);
	return ret;
err:
	if (ca)
		bch2_dev_free(ca);
	return -BCH_ERR_ENOMEM_dev_alloc;
}

static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
{
	unsigned ret;

	if (bch2_dev_is_online(ca)) {
		bch_err(ca, "already have device online in slot %u",
			sb->sb->dev_idx);
		return -BCH_ERR_device_already_online;
	}

	if (get_capacity(sb->bdev->bd_disk) <
	    ca->mi.bucket_size * ca->mi.nbuckets) {
		bch_err(ca, "cannot online: device too small");
		return -BCH_ERR_device_size_too_small;
	}

	BUG_ON(!percpu_ref_is_zero(&ca->io_ref));

	ret = bch2_dev_journal_init(ca, sb->sb);
	if (ret)
		return ret;

	/* Commit: */
	ca->disk_sb = *sb;
	memset(sb, 0, sizeof(*sb));

	ca->dev = ca->disk_sb.bdev->bd_dev;

	percpu_ref_reinit(&ca->io_ref);

	return 0;
}

static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
{
	struct bch_dev *ca;
	int ret;

	lockdep_assert_held(&c->state_lock);

	if (le64_to_cpu(sb->sb->seq) >
	    le64_to_cpu(c->disk_sb.sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
	       !c->devs[sb->sb->dev_idx]);

	ca = bch_dev_locked(c, sb->sb->dev_idx);

	ret = __bch2_dev_attach_bdev(ca, sb);
	if (ret)
		return ret;

	bch2_dev_sysfs_online(c, ca);

	if (c->sb.nr_devices == 1)
		snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
	snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);

	rebalance_wakeup(c);
	return 0;
}

/* Device management: */

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)
{
	struct bch_devs_mask new_online_devs;
	struct bch_dev *ca2;
	int i, nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

	switch (new_state) {
	case BCH_MEMBER_STATE_rw:
		return true;
	case BCH_MEMBER_STATE_ro:
		if (ca->mi.state != BCH_MEMBER_STATE_rw)
			return true;

		/* do we have enough devices to write to? */
		for_each_member_device(ca2, c, i)
			if (ca2 != ca)
				nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;

		required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
			       ? c->opts.metadata_replicas
			       : c->opts.metadata_replicas_required,
			       !(flags & BCH_FORCE_IF_DATA_DEGRADED)
			       ? c->opts.data_replicas
			       : c->opts.data_replicas_required);

		return nr_rw >= required;
	case BCH_MEMBER_STATE_failed:
	case BCH_MEMBER_STATE_spare:
		if (ca->mi.state != BCH_MEMBER_STATE_rw &&
		    ca->mi.state != BCH_MEMBER_STATE_ro)
			return true;

		/* do we have enough devices to read from? */
		new_online_devs = bch2_online_devs(c);
		__clear_bit(ca->dev_idx, new_online_devs.d);

		return bch2_have_enough_devs(c, new_online_devs, flags, false);
	default:
		BUG();
	}
}

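/*
 * Check whether we have enough devices, in the right states, to be able to
 * mount - taking the degraded/very_degraded mount options into account:
 */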
static bool bch2_fs_may_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, flags = 0;

	if (c->opts.very_degraded)
		flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;

	if (c->opts.degraded)
		flags |= BCH_FORCE_IF_DEGRADED;

	if (!c->opts.degraded &&
	    !c->opts.very_degraded) {
		mutex_lock(&c->sb_lock);

		for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
			if (!bch2_dev_exists(c->disk_sb.sb, i))
				continue;

			ca = bch_dev_locked(c, i);

			if (!bch2_dev_is_online(ca) &&
			    (ca->mi.state == BCH_MEMBER_STATE_rw ||
			     ca->mi.state == BCH_MEMBER_STATE_ro)) {
				mutex_unlock(&c->sb_lock);
				return false;
			}
		}
		mutex_unlock(&c->sb_lock);
	}

	return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
}

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
	/*
	 * The allocator thread itself allocates btree nodes, so stop it first:
	 */
	bch2_dev_allocator_remove(c, ca);
	bch2_dev_journal_stop(&c->journal, ca);
}

static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);

	bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);
}

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
			 enum bch_member_state new_state, int flags)
{
	struct bch_member *m;
	int ret = 0;

	if (ca->mi.state == new_state)
		return 0;

	if (!bch2_dev_state_allowed(c, ca, new_state, flags))
		return -BCH_ERR_device_state_not_allowed;

	if (new_state != BCH_MEMBER_STATE_rw)
		__bch2_dev_read_only(c, ca);

	bch_notice(ca, "%s", bch2_member_states[new_state]);

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_STATE(m, new_state);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (new_state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	rebalance_wakeup(c);

	return ret;
}

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
		       enum bch_member_state new_state, int flags)
{
	int ret;

	down_write(&c->state_lock);
	ret = __bch2_dev_set_state(c, ca, new_state, flags);
	up_write(&c->state_lock);

	return ret;
}

/* Device add/removal: */

static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	struct bpos start = POS(ca->dev_idx, 0);
	struct bpos end = POS(ca->dev_idx, U64_MAX);
	int ret;

	/*
	 * We clear the LRU and need_discard btrees first so that we don't race
	 * with bch2_do_invalidates() and bch2_do_discards()
	 */
	ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
				      BTREE_TRIGGER_NORUN, NULL);
	if (ret)
		bch_err_msg(c, ret, "removing dev alloc info");

	return ret;
}

int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	struct bch_member *m;
	unsigned dev_idx = ca->dev_idx, data;
	int ret;

	down_write(&c->state_lock);

	/*
	 * We consume a reference to ca->ref, regardless of whether we succeed
	 * or fail:
	 */
	percpu_ref_put(&ca->ref);

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
		bch_err(ca, "Cannot remove without losing data");
		ret = -BCH_ERR_device_state_not_allowed;
		goto err;
	}

	__bch2_dev_read_only(c, ca);

	ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
	if (ret) {
		bch_err_msg(ca, ret, "dropping data");
		goto err;
	}

	ret = bch2_dev_remove_alloc(c, ca);
	if (ret) {
		bch_err_msg(ca, ret, "deleting alloc info");
		goto err;
	}

	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
	if (ret) {
		bch_err_msg(ca, ret, "flushing journal");
		goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	if (ret) {
		bch_err(ca, "journal error");
		goto err;
	}

	ret = bch2_replicas_gc2(c);
	if (ret) {
		bch_err_msg(ca, ret, "in replicas_gc2()");
		goto err;
	}

	data = bch2_dev_has_data(c, ca);
	if (data) {
		struct printbuf data_has = PRINTBUF;

		prt_bitflags(&data_has, bch2_data_types, data);
		bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
		printbuf_exit(&data_has);
		ret = -EBUSY;
		goto err;
	}

	__bch2_dev_offline(c, ca);

	mutex_lock(&c->sb_lock);
	rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
	mutex_unlock(&c->sb_lock);

	percpu_ref_kill(&ca->ref);
	wait_for_completion(&ca->ref_completion);

	bch2_dev_free(ca);

	/*
	 * At this point the device object has been removed in-core, but the
	 * on-disk journal might still refer to the device index via sb device
	 * usage entries. Recovery fails if it sees usage information for an
	 * invalid device. Flush journal pins to push the back of the journal
	 * past now invalid device index references before we update the
	 * superblock, but after the device object has been removed so any
	 * further journal writes elide usage info for the device.
	 */
	bch2_journal_flush_all_pins(&c->journal);

	/*
	 * Free this device's slot in the bch_member array - all pointers to
	 * this device must be gone:
	 */
	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
	memset(&m->uuid, 0, sizeof(m->uuid));

	bch2_write_super(c);

	mutex_unlock(&c->sb_lock);
	up_write(&c->state_lock);

	bch2_dev_usage_journal_reserve(c);
	return 0;
err:
	if (ca->mi.state == BCH_MEMBER_STATE_rw &&
	    !percpu_ref_is_zero(&ca->io_ref))
		__bch2_dev_read_write(c, ca);
	up_write(&c->state_lock);
	return ret;
}

/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb;
	struct bch_dev *ca = NULL;
	struct bch_sb_field_members_v2 *mi;
	struct bch_member dev_mi;
	unsigned dev_idx, nr_devices, u64s;
	struct printbuf errbuf = PRINTBUF;
	struct printbuf label = PRINTBUF;
	int ret;

	ret = bch2_read_super(path, &opts, &sb);
	if (ret) {
		bch_err_msg(c, ret, "reading super");
		goto err;
	}

	dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);

	if (BCH_MEMBER_GROUP(&dev_mi)) {
		bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
		if (label.allocation_failure) {
			ret = -ENOMEM;
			goto err;
		}
	}

	ret = bch2_dev_may_add(sb.sb, c);
	if (ret) {
		bch_err_fn(c, ret);
		goto err;
	}

	ca = __bch2_dev_alloc(c, &dev_mi);
	if (!ca) {
		ret = -ENOMEM;
		goto err;
	}

	bch2_dev_usage_init(ca);

	ret = __bch2_dev_attach_bdev(ca, &sb);
	if (ret)
		goto err;

	ret = bch2_dev_journal_alloc(ca);
	if (ret) {
		bch_err_msg(c, ret, "allocating journal");
		goto err;
	}

	down_write(&c->state_lock);
	mutex_lock(&c->sb_lock);

	ret = bch2_sb_from_fs(c, ca);
	if (ret) {
		bch_err_msg(c, ret, "setting up new superblock");
		goto err_unlock;
	}

	if (dynamic_fault("bcachefs:add:no_slot"))
		goto no_slot;

	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
		if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
			goto have_slot;
no_slot:
	ret = -BCH_ERR_ENOSPC_sb_members;
	bch_err_msg(c, ret, "setting up new superblock");
	goto err_unlock;

have_slot:
	nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);

	mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
	u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
			    le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));

	mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
	if (!mi) {
		ret = -BCH_ERR_ENOSPC_sb_members;
		bch_err_msg(c, ret, "setting up new superblock");
		goto err_unlock;
	}
	struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);

	/* success: */

	*m = dev_mi;
	m->last_mount = cpu_to_le64(ktime_get_real_seconds());
	c->disk_sb.sb->nr_devices = nr_devices;

	ca->disk_sb.sb->dev_idx = dev_idx;
	bch2_dev_attach(c, ca, dev_idx);

	if (BCH_MEMBER_GROUP(&dev_mi)) {
		ret = __bch2_dev_group_set(c, ca, label.buf);
		if (ret) {
			bch_err_msg(c, ret, "creating new label");
			goto err_unlock;
		}
	}

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	bch2_dev_usage_journal_reserve(c);

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret) {
		bch_err_msg(ca, ret, "marking new superblock");
		goto err_late;
	}

	ret = bch2_fs_freespace_init(c);
	if (ret) {
		bch_err_msg(ca, ret, "initializing free space");
		goto err_late;
	}

	ca->new_fs_bucket_idx = 0;

	if (ca->mi.state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	up_write(&c->state_lock);
	return 0;

err_unlock:
	mutex_unlock(&c->sb_lock);
	up_write(&c->state_lock);
err:
	if (ca)
		bch2_dev_free(ca);
	bch2_free_super(&sb);
	printbuf_exit(&label);
	printbuf_exit(&errbuf);
	return ret;
err_late:
	up_write(&c->state_lock);
	ca = NULL;
	goto err;
}

/* Hot add existing device to running filesystem: */
int bch2_dev_online(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb = { NULL };
	struct bch_dev *ca;
	unsigned dev_idx;
	int ret;

	down_write(&c->state_lock);

	ret = bch2_read_super(path, &opts, &sb);
	if (ret) {
		up_write(&c->state_lock);
		return ret;
	}

	dev_idx = sb.sb->dev_idx;

	ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
	if (ret) {
		bch_err_msg(c, ret, "bringing %s online", path);
		goto err;
	}

	ret = bch2_dev_attach_bdev(c, &sb);
	if (ret)
		goto err;

	ca = bch_dev_locked(c, dev_idx);

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret) {
		bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
		goto err;
	}

	if (ca->mi.state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	if (!ca->mi.freespace_initialized) {
		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		bch_err_msg(ca, ret, "initializing free space");
		if (ret)
			goto err;
	}

	if (!ca->journal.nr) {
		ret = bch2_dev_journal_alloc(ca);
		bch_err_msg(ca, ret, "allocating journal");
		if (ret)
			goto err;
	}

	mutex_lock(&c->sb_lock);
	bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
		cpu_to_le64(ktime_get_real_seconds());
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	up_write(&c->state_lock);
	return 0;
err:
	up_write(&c->state_lock);
	bch2_free_super(&sb);
	return ret;
}

int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	down_write(&c->state_lock);

	if (!bch2_dev_is_online(ca)) {
		bch_err(ca, "Already offline");
		up_write(&c->state_lock);
		return 0;
	}

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
		bch_err(ca, "Cannot offline required disk");
		up_write(&c->state_lock);
		return -BCH_ERR_device_state_not_allowed;
	}

	__bch2_dev_offline(c, ca);

	up_write(&c->state_lock);
	return 0;
}

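/*
 * Resize a member device to @nbuckets buckets; currently this can only grow a
 * device - shrinking isn't supported yet:
 */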
int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bch_member *m;
	u64 old_nbuckets;
	int ret = 0;

	down_write(&c->state_lock);
	old_nbuckets = ca->mi.nbuckets;

	if (nbuckets < ca->mi.nbuckets) {
		bch_err(ca, "Cannot shrink yet");
		ret = -EINVAL;
		goto err;
	}

	if (bch2_dev_is_online(ca) &&
	    get_capacity(ca->disk_sb.bdev->bd_disk) <
	    ca->mi.bucket_size * nbuckets) {
		bch_err(ca, "New size larger than device");
		ret = -BCH_ERR_device_size_too_small;
		goto err;
	}

	ret = bch2_dev_buckets_resize(c, ca, nbuckets);
	if (ret) {
		bch_err_msg(ca, ret, "resizing buckets");
		goto err;
	}

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	m->nbuckets = cpu_to_le64(nbuckets);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (ca->mi.freespace_initialized) {
		ret = bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
		if (ret)
			goto err;

		/*
		 * XXX: this is all wrong transactionally - we'll be able to do
		 * this correctly after the disk space accounting rewrite
		 */
		ca->usage_base->d[BCH_DATA_free].buckets += nbuckets - old_nbuckets;
	}

	bch2_recalc_capacity(c);
err:
	up_write(&c->state_lock);
	return ret;
}

/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
{
	struct bch_dev *ca;
	unsigned i;

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		if (!strcmp(name, ca->name))
			goto found;
	ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
found:
	rcu_read_unlock();

	return ca;
}

/* Filesystem open: */

struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
			    struct bch_opts opts)
{
	DARRAY(struct bch_sb_handle) sbs = { 0 };
	struct bch_fs *c = NULL;
	struct bch_sb_handle *sb, *best = NULL;
	struct printbuf errbuf = PRINTBUF;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-ENODEV);

	if (!nr_devices) {
		ret = -EINVAL;
		goto err;
	}

	ret = darray_make_room(&sbs, nr_devices);
	if (ret)
		goto err;

	for (unsigned i = 0; i < nr_devices; i++) {
		struct bch_sb_handle sb = { NULL };

		ret = bch2_read_super(devices[i], &opts, &sb);
		if (ret)
			goto err;

		BUG_ON(darray_push(&sbs, sb));
	}

	darray_for_each(sbs, sb)
		if (!best || le64_to_cpu(sb->sb->seq) > le64_to_cpu(best->sb->seq))
			best = sb;

	darray_for_each_reverse(sbs, sb) {
		if (sb != best && !bch2_dev_exists(best->sb, sb->sb->dev_idx)) {
			pr_info("%pg has been removed, skipping", sb->bdev);
			bch2_free_super(sb);
			darray_remove_item(&sbs, sb);
			best -= best > sb;
			continue;
		}

		ret = bch2_dev_in_fs(best->sb, sb->sb);
		if (ret)
			goto err_print;
	}

	c = bch2_fs_alloc(best->sb, opts);
	ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		goto err;

	down_write(&c->state_lock);
	darray_for_each(sbs, sb) {
		ret = bch2_dev_attach_bdev(c, sb);
		if (ret) {
			up_write(&c->state_lock);
			goto err;
		}
	}
	up_write(&c->state_lock);

	if (!bch2_fs_may_start(c)) {
		ret = -BCH_ERR_insufficient_devices_to_start;
		goto err_print;
	}

	if (!c->opts.nostart) {
		ret = bch2_fs_start(c);
		if (ret)
			goto err;
	}
out:
	darray_for_each(sbs, sb)
		bch2_free_super(sb);
	darray_exit(&sbs);
	printbuf_exit(&errbuf);
	module_put(THIS_MODULE);
	return c;
err_print:
	pr_err("bch_fs_open err opening %s: %s",
	       devices[0], bch2_err_str(ret));
err:
	if (!IS_ERR_OR_NULL(c))
		bch2_fs_stop(c);
	c = ERR_PTR(ret);
	goto out;
}

/* Global interfaces/init */

static void bcachefs_exit(void)
{
	bch2_debug_exit();
	bch2_vfs_exit();
	bch2_chardev_exit();
	bch2_btree_key_cache_exit();
	if (bcachefs_kset)
		kset_unregister(bcachefs_kset);
}

static int __init bcachefs_init(void)
{
	bch2_bkey_pack_test();

	if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
	    bch2_btree_key_cache_init() ||
	    bch2_chardev_init() ||
	    bch2_vfs_init() ||
	    bch2_debug_init())
		goto err;

	return 0;
err:
	bcachefs_exit();
	return -ENOMEM;
}

#define BCH_DEBUG_PARAM(name, description)			\
	bool bch2_##name;					\
	module_param_named(name, bch2_##name, bool, 0644);	\
	MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

__maybe_unused
static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);

module_exit(bcachefs_exit);
module_init(bcachefs_init);