1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/blkdev.h> 7 #include <linux/module.h> 8 #include <linux/fs.h> 9 #include <linux/pagemap.h> 10 #include <linux/highmem.h> 11 #include <linux/time.h> 12 #include <linux/init.h> 13 #include <linux/seq_file.h> 14 #include <linux/string.h> 15 #include <linux/backing-dev.h> 16 #include <linux/mount.h> 17 #include <linux/writeback.h> 18 #include <linux/statfs.h> 19 #include <linux/compat.h> 20 #include <linux/parser.h> 21 #include <linux/ctype.h> 22 #include <linux/namei.h> 23 #include <linux/miscdevice.h> 24 #include <linux/magic.h> 25 #include <linux/slab.h> 26 #include <linux/ratelimit.h> 27 #include <linux/crc32c.h> 28 #include <linux/btrfs.h> 29 #include <linux/security.h> 30 #include <linux/fs_parser.h> 31 #include "messages.h" 32 #include "delayed-inode.h" 33 #include "ctree.h" 34 #include "disk-io.h" 35 #include "transaction.h" 36 #include "btrfs_inode.h" 37 #include "direct-io.h" 38 #include "props.h" 39 #include "xattr.h" 40 #include "bio.h" 41 #include "export.h" 42 #include "compression.h" 43 #include "dev-replace.h" 44 #include "free-space-cache.h" 45 #include "backref.h" 46 #include "space-info.h" 47 #include "sysfs.h" 48 #include "zoned.h" 49 #include "tests/btrfs-tests.h" 50 #include "block-group.h" 51 #include "discard.h" 52 #include "qgroup.h" 53 #include "raid56.h" 54 #include "fs.h" 55 #include "accessors.h" 56 #include "defrag.h" 57 #include "dir-item.h" 58 #include "ioctl.h" 59 #include "scrub.h" 60 #include "verity.h" 61 #include "super.h" 62 #include "extent-tree.h" 63 #define CREATE_TRACE_POINTS 64 #include <trace/events/btrfs.h> 65 66 static const struct super_operations btrfs_super_ops; 67 static struct file_system_type btrfs_fs_type; 68 69 static void btrfs_put_super(struct super_block *sb) 70 { 71 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 72 73 btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid); 74 close_ctree(fs_info); 75 } 76 77 /* Store the mount options related information. 
*/ 78 struct btrfs_fs_context { 79 char *subvol_name; 80 u64 subvol_objectid; 81 u64 max_inline; 82 u32 commit_interval; 83 u32 metadata_ratio; 84 u32 thread_pool_size; 85 unsigned long long mount_opt; 86 unsigned long compress_type:4; 87 int compress_level; 88 refcount_t refs; 89 }; 90 91 static void btrfs_emit_options(struct btrfs_fs_info *info, 92 struct btrfs_fs_context *old); 93 94 enum { 95 Opt_acl, 96 Opt_clear_cache, 97 Opt_commit_interval, 98 Opt_compress, 99 Opt_compress_force, 100 Opt_compress_force_type, 101 Opt_compress_type, 102 Opt_degraded, 103 Opt_device, 104 Opt_fatal_errors, 105 Opt_flushoncommit, 106 Opt_max_inline, 107 Opt_barrier, 108 Opt_datacow, 109 Opt_datasum, 110 Opt_defrag, 111 Opt_discard, 112 Opt_discard_mode, 113 Opt_ratio, 114 Opt_rescan_uuid_tree, 115 Opt_skip_balance, 116 Opt_space_cache, 117 Opt_space_cache_version, 118 Opt_ssd, 119 Opt_ssd_spread, 120 Opt_subvol, 121 Opt_subvol_empty, 122 Opt_subvolid, 123 Opt_thread_pool, 124 Opt_treelog, 125 Opt_user_subvol_rm_allowed, 126 Opt_norecovery, 127 128 /* Rescue options */ 129 Opt_rescue, 130 Opt_usebackuproot, 131 132 /* Debugging options */ 133 Opt_enospc_debug, 134 #ifdef CONFIG_BTRFS_DEBUG 135 Opt_fragment, Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all, 136 Opt_ref_verify, 137 Opt_ref_tracker, 138 #endif 139 Opt_err, 140 }; 141 142 enum { 143 Opt_fatal_errors_panic, 144 Opt_fatal_errors_bug, 145 }; 146 147 static const struct constant_table btrfs_parameter_fatal_errors[] = { 148 { "panic", Opt_fatal_errors_panic }, 149 { "bug", Opt_fatal_errors_bug }, 150 {} 151 }; 152 153 enum { 154 Opt_discard_sync, 155 Opt_discard_async, 156 }; 157 158 static const struct constant_table btrfs_parameter_discard[] = { 159 { "sync", Opt_discard_sync }, 160 { "async", Opt_discard_async }, 161 {} 162 }; 163 164 enum { 165 Opt_space_cache_v1, 166 Opt_space_cache_v2, 167 }; 168 169 static const struct constant_table btrfs_parameter_space_cache[] = { 170 { "v1", Opt_space_cache_v1 }, 171 { "v2", Opt_space_cache_v2 }, 172 {} 173 }; 174 175 enum { 176 Opt_rescue_usebackuproot, 177 Opt_rescue_nologreplay, 178 Opt_rescue_ignorebadroots, 179 Opt_rescue_ignoredatacsums, 180 Opt_rescue_ignoremetacsums, 181 Opt_rescue_ignoresuperflags, 182 Opt_rescue_parameter_all, 183 }; 184 185 static const struct constant_table btrfs_parameter_rescue[] = { 186 { "usebackuproot", Opt_rescue_usebackuproot }, 187 { "nologreplay", Opt_rescue_nologreplay }, 188 { "ignorebadroots", Opt_rescue_ignorebadroots }, 189 { "ibadroots", Opt_rescue_ignorebadroots }, 190 { "ignoredatacsums", Opt_rescue_ignoredatacsums }, 191 { "ignoremetacsums", Opt_rescue_ignoremetacsums}, 192 { "ignoresuperflags", Opt_rescue_ignoresuperflags}, 193 { "idatacsums", Opt_rescue_ignoredatacsums }, 194 { "imetacsums", Opt_rescue_ignoremetacsums}, 195 { "isuperflags", Opt_rescue_ignoresuperflags}, 196 { "all", Opt_rescue_parameter_all }, 197 {} 198 }; 199 200 #ifdef CONFIG_BTRFS_DEBUG 201 enum { 202 Opt_fragment_parameter_data, 203 Opt_fragment_parameter_metadata, 204 Opt_fragment_parameter_all, 205 }; 206 207 static const struct constant_table btrfs_parameter_fragment[] = { 208 { "data", Opt_fragment_parameter_data }, 209 { "metadata", Opt_fragment_parameter_metadata }, 210 { "all", Opt_fragment_parameter_all }, 211 {} 212 }; 213 #endif 214 215 static const struct fs_parameter_spec btrfs_fs_parameters[] = { 216 fsparam_flag_no("acl", Opt_acl), 217 fsparam_flag_no("autodefrag", Opt_defrag), 218 fsparam_flag_no("barrier", Opt_barrier), 219 fsparam_flag("clear_cache", 
Opt_clear_cache), 220 fsparam_u32("commit", Opt_commit_interval), 221 fsparam_flag("compress", Opt_compress), 222 fsparam_string("compress", Opt_compress_type), 223 fsparam_flag("compress-force", Opt_compress_force), 224 fsparam_string("compress-force", Opt_compress_force_type), 225 fsparam_flag_no("datacow", Opt_datacow), 226 fsparam_flag_no("datasum", Opt_datasum), 227 fsparam_flag("degraded", Opt_degraded), 228 fsparam_string("device", Opt_device), 229 fsparam_flag_no("discard", Opt_discard), 230 fsparam_enum("discard", Opt_discard_mode, btrfs_parameter_discard), 231 fsparam_enum("fatal_errors", Opt_fatal_errors, btrfs_parameter_fatal_errors), 232 fsparam_flag_no("flushoncommit", Opt_flushoncommit), 233 fsparam_string("max_inline", Opt_max_inline), 234 fsparam_u32("metadata_ratio", Opt_ratio), 235 fsparam_flag("rescan_uuid_tree", Opt_rescan_uuid_tree), 236 fsparam_flag("skip_balance", Opt_skip_balance), 237 fsparam_flag_no("space_cache", Opt_space_cache), 238 fsparam_enum("space_cache", Opt_space_cache_version, btrfs_parameter_space_cache), 239 fsparam_flag_no("ssd", Opt_ssd), 240 fsparam_flag_no("ssd_spread", Opt_ssd_spread), 241 fsparam_string("subvol", Opt_subvol), 242 fsparam_flag("subvol=", Opt_subvol_empty), 243 fsparam_u64("subvolid", Opt_subvolid), 244 fsparam_u32("thread_pool", Opt_thread_pool), 245 fsparam_flag_no("treelog", Opt_treelog), 246 fsparam_flag("user_subvol_rm_allowed", Opt_user_subvol_rm_allowed), 247 248 /* Rescue options. */ 249 fsparam_enum("rescue", Opt_rescue, btrfs_parameter_rescue), 250 /* Deprecated, with alias rescue=usebackuproot */ 251 __fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL), 252 /* For compatibility only, alias for "rescue=nologreplay". */ 253 fsparam_flag("norecovery", Opt_norecovery), 254 255 /* Debugging options. */ 256 fsparam_flag_no("enospc_debug", Opt_enospc_debug), 257 #ifdef CONFIG_BTRFS_DEBUG 258 fsparam_enum("fragment", Opt_fragment, btrfs_parameter_fragment), 259 fsparam_flag("ref_tracker", Opt_ref_tracker), 260 fsparam_flag("ref_verify", Opt_ref_verify), 261 #endif 262 {} 263 }; 264 265 static bool btrfs_match_compress_type(const char *string, const char *type, bool may_have_level) 266 { 267 const int len = strlen(type); 268 269 return (strncmp(string, type, len) == 0) && 270 ((may_have_level && string[len] == ':') || string[len] == '\0'); 271 } 272 273 static int btrfs_parse_compress(struct btrfs_fs_context *ctx, 274 const struct fs_parameter *param, int opt) 275 { 276 const char *string = param->string; 277 int ret; 278 279 /* 280 * Provide the same semantics as older kernels that don't use fs 281 * context, specifying the "compress" option clears "force-compress" 282 * without the need to pass "compress-force=[no|none]" before 283 * specifying "compress". 
284 */ 285 if (opt != Opt_compress_force && opt != Opt_compress_force_type) 286 btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); 287 288 if (opt == Opt_compress || opt == Opt_compress_force) { 289 ctx->compress_type = BTRFS_COMPRESS_ZLIB; 290 ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL; 291 btrfs_set_opt(ctx->mount_opt, COMPRESS); 292 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 293 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 294 } else if (btrfs_match_compress_type(string, "zlib", true)) { 295 ctx->compress_type = BTRFS_COMPRESS_ZLIB; 296 ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, string + 4, 297 &ctx->compress_level); 298 if (ret < 0) 299 goto error; 300 btrfs_set_opt(ctx->mount_opt, COMPRESS); 301 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 302 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 303 } else if (btrfs_match_compress_type(string, "lzo", true)) { 304 ctx->compress_type = BTRFS_COMPRESS_LZO; 305 ret = btrfs_compress_str2level(BTRFS_COMPRESS_LZO, string + 3, 306 &ctx->compress_level); 307 if (ret < 0) 308 goto error; 309 if (string[3] == ':' && string[4]) 310 btrfs_warn(NULL, "Compression level ignored for LZO"); 311 btrfs_set_opt(ctx->mount_opt, COMPRESS); 312 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 313 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 314 } else if (btrfs_match_compress_type(string, "zstd", true)) { 315 ctx->compress_type = BTRFS_COMPRESS_ZSTD; 316 ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, string + 4, 317 &ctx->compress_level); 318 if (ret < 0) 319 goto error; 320 btrfs_set_opt(ctx->mount_opt, COMPRESS); 321 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 322 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 323 } else if (btrfs_match_compress_type(string, "no", false) || 324 btrfs_match_compress_type(string, "none", false)) { 325 ctx->compress_level = 0; 326 ctx->compress_type = 0; 327 btrfs_clear_opt(ctx->mount_opt, COMPRESS); 328 btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); 329 } else { 330 ret = -EINVAL; 331 goto error; 332 } 333 return 0; 334 error: 335 btrfs_err(NULL, "failed to parse compression option '%s'", string); 336 return ret; 337 338 } 339 340 static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param) 341 { 342 struct btrfs_fs_context *ctx = fc->fs_private; 343 struct fs_parse_result result; 344 int opt; 345 346 opt = fs_parse(fc, btrfs_fs_parameters, param, &result); 347 if (opt < 0) 348 return opt; 349 350 switch (opt) { 351 case Opt_degraded: 352 btrfs_set_opt(ctx->mount_opt, DEGRADED); 353 break; 354 case Opt_subvol_empty: 355 /* 356 * This exists because we used to allow it on accident, so we're 357 * keeping it to maintain ABI. See 37becec95ac3 ("Btrfs: allow 358 * empty subvol= again"). 359 */ 360 break; 361 case Opt_subvol: 362 kfree(ctx->subvol_name); 363 ctx->subvol_name = kstrdup(param->string, GFP_KERNEL); 364 if (!ctx->subvol_name) 365 return -ENOMEM; 366 break; 367 case Opt_subvolid: 368 ctx->subvol_objectid = result.uint_64; 369 370 /* subvolid=0 means give me the original fs_tree. 
*/ 371 if (!ctx->subvol_objectid) 372 ctx->subvol_objectid = BTRFS_FS_TREE_OBJECTID; 373 break; 374 case Opt_device: { 375 struct btrfs_device *device; 376 377 mutex_lock(&uuid_mutex); 378 device = btrfs_scan_one_device(param->string, false); 379 mutex_unlock(&uuid_mutex); 380 if (IS_ERR(device)) 381 return PTR_ERR(device); 382 break; 383 } 384 case Opt_datasum: 385 if (result.negated) { 386 btrfs_set_opt(ctx->mount_opt, NODATASUM); 387 } else { 388 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 389 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 390 } 391 break; 392 case Opt_datacow: 393 if (result.negated) { 394 btrfs_clear_opt(ctx->mount_opt, COMPRESS); 395 btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); 396 btrfs_set_opt(ctx->mount_opt, NODATACOW); 397 btrfs_set_opt(ctx->mount_opt, NODATASUM); 398 } else { 399 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 400 } 401 break; 402 case Opt_compress_force: 403 case Opt_compress_force_type: 404 btrfs_set_opt(ctx->mount_opt, FORCE_COMPRESS); 405 fallthrough; 406 case Opt_compress: 407 case Opt_compress_type: 408 if (btrfs_parse_compress(ctx, param, opt)) 409 return -EINVAL; 410 break; 411 case Opt_ssd: 412 if (result.negated) { 413 btrfs_set_opt(ctx->mount_opt, NOSSD); 414 btrfs_clear_opt(ctx->mount_opt, SSD); 415 btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD); 416 } else { 417 btrfs_set_opt(ctx->mount_opt, SSD); 418 btrfs_clear_opt(ctx->mount_opt, NOSSD); 419 } 420 break; 421 case Opt_ssd_spread: 422 if (result.negated) { 423 btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD); 424 } else { 425 btrfs_set_opt(ctx->mount_opt, SSD); 426 btrfs_set_opt(ctx->mount_opt, SSD_SPREAD); 427 btrfs_clear_opt(ctx->mount_opt, NOSSD); 428 } 429 break; 430 case Opt_barrier: 431 if (result.negated) 432 btrfs_set_opt(ctx->mount_opt, NOBARRIER); 433 else 434 btrfs_clear_opt(ctx->mount_opt, NOBARRIER); 435 break; 436 case Opt_thread_pool: 437 if (result.uint_32 == 0) { 438 btrfs_err(NULL, "invalid value 0 for thread_pool"); 439 return -EINVAL; 440 } 441 ctx->thread_pool_size = result.uint_32; 442 break; 443 case Opt_max_inline: 444 ctx->max_inline = memparse(param->string, NULL); 445 break; 446 case Opt_acl: 447 if (result.negated) { 448 fc->sb_flags &= ~SB_POSIXACL; 449 } else { 450 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 451 fc->sb_flags |= SB_POSIXACL; 452 #else 453 btrfs_err(NULL, "support for ACL not compiled in"); 454 return -EINVAL; 455 #endif 456 } 457 /* 458 * VFS limits the ability to toggle ACL on and off via remount, 459 * despite every file system allowing this. This seems to be 460 * an oversight since we all do, but it'll fail if we're 461 * remounting. So don't set the mask here, we'll check it in 462 * btrfs_reconfigure and do the toggling ourselves. 
463 */ 464 if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE) 465 fc->sb_flags_mask |= SB_POSIXACL; 466 break; 467 case Opt_treelog: 468 if (result.negated) 469 btrfs_set_opt(ctx->mount_opt, NOTREELOG); 470 else 471 btrfs_clear_opt(ctx->mount_opt, NOTREELOG); 472 break; 473 case Opt_norecovery: 474 btrfs_info(NULL, 475 "'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'"); 476 btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); 477 break; 478 case Opt_flushoncommit: 479 if (result.negated) 480 btrfs_clear_opt(ctx->mount_opt, FLUSHONCOMMIT); 481 else 482 btrfs_set_opt(ctx->mount_opt, FLUSHONCOMMIT); 483 break; 484 case Opt_ratio: 485 ctx->metadata_ratio = result.uint_32; 486 break; 487 case Opt_discard: 488 if (result.negated) { 489 btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC); 490 btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); 491 btrfs_set_opt(ctx->mount_opt, NODISCARD); 492 } else { 493 btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC); 494 btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); 495 } 496 break; 497 case Opt_discard_mode: 498 switch (result.uint_32) { 499 case Opt_discard_sync: 500 btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); 501 btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC); 502 break; 503 case Opt_discard_async: 504 btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC); 505 btrfs_set_opt(ctx->mount_opt, DISCARD_ASYNC); 506 break; 507 default: 508 btrfs_err(NULL, "unrecognized discard mode value %s", 509 param->key); 510 return -EINVAL; 511 } 512 btrfs_clear_opt(ctx->mount_opt, NODISCARD); 513 break; 514 case Opt_space_cache: 515 if (result.negated) { 516 btrfs_set_opt(ctx->mount_opt, NOSPACECACHE); 517 btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE); 518 btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); 519 } else { 520 btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); 521 btrfs_set_opt(ctx->mount_opt, SPACE_CACHE); 522 } 523 break; 524 case Opt_space_cache_version: 525 switch (result.uint_32) { 526 case Opt_space_cache_v1: 527 btrfs_set_opt(ctx->mount_opt, SPACE_CACHE); 528 btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); 529 break; 530 case Opt_space_cache_v2: 531 btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE); 532 btrfs_set_opt(ctx->mount_opt, FREE_SPACE_TREE); 533 break; 534 default: 535 btrfs_err(NULL, "unrecognized space_cache value %s", 536 param->key); 537 return -EINVAL; 538 } 539 break; 540 case Opt_rescan_uuid_tree: 541 btrfs_set_opt(ctx->mount_opt, RESCAN_UUID_TREE); 542 break; 543 case Opt_clear_cache: 544 btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE); 545 break; 546 case Opt_user_subvol_rm_allowed: 547 btrfs_set_opt(ctx->mount_opt, USER_SUBVOL_RM_ALLOWED); 548 break; 549 case Opt_enospc_debug: 550 if (result.negated) 551 btrfs_clear_opt(ctx->mount_opt, ENOSPC_DEBUG); 552 else 553 btrfs_set_opt(ctx->mount_opt, ENOSPC_DEBUG); 554 break; 555 case Opt_defrag: 556 if (result.negated) 557 btrfs_clear_opt(ctx->mount_opt, AUTO_DEFRAG); 558 else 559 btrfs_set_opt(ctx->mount_opt, AUTO_DEFRAG); 560 break; 561 case Opt_usebackuproot: 562 btrfs_warn(NULL, 563 "'usebackuproot' is deprecated, use 'rescue=usebackuproot' instead"); 564 btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT); 565 566 /* If we're loading the backup roots we can't trust the space cache. 
*/ 567 btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE); 568 break; 569 case Opt_skip_balance: 570 btrfs_set_opt(ctx->mount_opt, SKIP_BALANCE); 571 break; 572 case Opt_fatal_errors: 573 switch (result.uint_32) { 574 case Opt_fatal_errors_panic: 575 btrfs_set_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR); 576 break; 577 case Opt_fatal_errors_bug: 578 btrfs_clear_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR); 579 break; 580 default: 581 btrfs_err(NULL, "unrecognized fatal_errors value %s", 582 param->key); 583 return -EINVAL; 584 } 585 break; 586 case Opt_commit_interval: 587 ctx->commit_interval = result.uint_32; 588 if (ctx->commit_interval > BTRFS_WARNING_COMMIT_INTERVAL) { 589 btrfs_warn(NULL, "excessive commit interval %u, use with care", 590 ctx->commit_interval); 591 } 592 if (ctx->commit_interval == 0) 593 ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 594 break; 595 case Opt_rescue: 596 switch (result.uint_32) { 597 case Opt_rescue_usebackuproot: 598 btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT); 599 break; 600 case Opt_rescue_nologreplay: 601 btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); 602 break; 603 case Opt_rescue_ignorebadroots: 604 btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS); 605 break; 606 case Opt_rescue_ignoredatacsums: 607 btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS); 608 break; 609 case Opt_rescue_ignoremetacsums: 610 btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS); 611 break; 612 case Opt_rescue_ignoresuperflags: 613 btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS); 614 break; 615 case Opt_rescue_parameter_all: 616 btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS); 617 btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS); 618 btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS); 619 btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS); 620 btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); 621 break; 622 default: 623 btrfs_info(NULL, "unrecognized rescue option '%s'", 624 param->key); 625 return -EINVAL; 626 } 627 break; 628 #ifdef CONFIG_BTRFS_DEBUG 629 case Opt_fragment: 630 switch (result.uint_32) { 631 case Opt_fragment_parameter_all: 632 btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA); 633 btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA); 634 break; 635 case Opt_fragment_parameter_metadata: 636 btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA); 637 break; 638 case Opt_fragment_parameter_data: 639 btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA); 640 break; 641 default: 642 btrfs_info(NULL, "unrecognized fragment option '%s'", 643 param->key); 644 return -EINVAL; 645 } 646 break; 647 case Opt_ref_verify: 648 btrfs_set_opt(ctx->mount_opt, REF_VERIFY); 649 break; 650 case Opt_ref_tracker: 651 btrfs_set_opt(ctx->mount_opt, REF_TRACKER); 652 break; 653 #endif 654 default: 655 btrfs_err(NULL, "unrecognized mount option '%s'", param->key); 656 return -EINVAL; 657 } 658 659 return 0; 660 } 661 662 /* 663 * Some options only have meaning at mount time and shouldn't persist across 664 * remounts, or be displayed. Clear these at the end of mount and remount code 665 * paths. 
666 */ 667 static void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info) 668 { 669 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); 670 btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE); 671 btrfs_clear_opt(fs_info->mount_opt, NOSPACECACHE); 672 } 673 674 static bool check_ro_option(const struct btrfs_fs_info *fs_info, 675 unsigned long long mount_opt, unsigned long long opt, 676 const char *opt_name) 677 { 678 if (mount_opt & opt) { 679 btrfs_err(fs_info, "%s must be used with ro mount option", 680 opt_name); 681 return true; 682 } 683 return false; 684 } 685 686 bool btrfs_check_options(const struct btrfs_fs_info *info, 687 unsigned long long *mount_opt, 688 unsigned long flags) 689 { 690 bool ret = true; 691 692 if (!(flags & SB_RDONLY) && 693 (check_ro_option(info, *mount_opt, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") || 694 check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") || 695 check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums") || 696 check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREMETACSUMS, "ignoremetacsums") || 697 check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNORESUPERFLAGS, "ignoresuperflags"))) 698 ret = false; 699 700 if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) && 701 !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE) && 702 !btrfs_raw_test_opt(*mount_opt, CLEAR_CACHE)) { 703 btrfs_err(info, "cannot disable free-space-tree"); 704 ret = false; 705 } 706 if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) && 707 !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) { 708 btrfs_err(info, "cannot disable free-space-tree with block-group-tree feature"); 709 ret = false; 710 } 711 712 if (btrfs_check_mountopts_zoned(info, mount_opt)) 713 ret = false; 714 715 if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) { 716 if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) { 717 btrfs_warn(info, 718 "space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2"); 719 } 720 } 721 722 return ret; 723 } 724 725 /* 726 * This is subtle, we only call this during open_ctree(). We need to pre-load 727 * the mount options with the on-disk settings. Before the new mount API took 728 * effect we would do this on mount and remount. With the new mount API we'll 729 * only do this on the initial mount. 730 * 731 * This isn't a change in behavior, because we're using the current state of the 732 * file system to set the current mount options. If you mounted with special 733 * options to disable these features and then remounted we wouldn't revert the 734 * settings, because mounting without these features cleared the on-disk 735 * settings, so this being called on re-mount is not needed. 736 */ 737 void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info) 738 { 739 if (fs_info->sectorsize < PAGE_SIZE) { 740 btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE); 741 if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) { 742 btrfs_info(fs_info, 743 "forcing free space tree for sector size %u with page size %lu", 744 fs_info->sectorsize, PAGE_SIZE); 745 btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); 746 } 747 } 748 749 /* 750 * At this point our mount options are populated, so we only mess with 751 * these settings if we don't have any settings already. 
752 */ 753 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) 754 return; 755 756 if (btrfs_is_zoned(fs_info) && 757 btrfs_free_space_cache_v1_active(fs_info)) { 758 btrfs_info(fs_info, "zoned: clearing existing space cache"); 759 btrfs_set_super_cache_generation(fs_info->super_copy, 0); 760 return; 761 } 762 763 if (btrfs_test_opt(fs_info, SPACE_CACHE)) 764 return; 765 766 if (btrfs_test_opt(fs_info, NOSPACECACHE)) 767 return; 768 769 /* 770 * At this point we don't have explicit options set by the user, set 771 * them ourselves based on the state of the file system. 772 */ 773 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 774 btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); 775 else if (btrfs_free_space_cache_v1_active(fs_info)) 776 btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE); 777 } 778 779 static void set_device_specific_options(struct btrfs_fs_info *fs_info) 780 { 781 if (!btrfs_test_opt(fs_info, NOSSD) && 782 !fs_info->fs_devices->rotating) 783 btrfs_set_opt(fs_info->mount_opt, SSD); 784 785 /* 786 * For devices supporting discard turn on discard=async automatically, 787 * unless it's already set or disabled. This could be turned off by 788 * nodiscard for the same mount. 789 * 790 * The zoned mode piggy backs on the discard functionality for 791 * resetting a zone. There is no reason to delay the zone reset as it is 792 * fast enough. So, do not enable async discard for zoned mode. 793 */ 794 if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) || 795 btrfs_test_opt(fs_info, DISCARD_ASYNC) || 796 btrfs_test_opt(fs_info, NODISCARD)) && 797 fs_info->fs_devices->discardable && 798 !btrfs_is_zoned(fs_info)) 799 btrfs_set_opt(fs_info->mount_opt, DISCARD_ASYNC); 800 } 801 802 char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, 803 u64 subvol_objectid) 804 { 805 struct btrfs_root *root = fs_info->tree_root; 806 struct btrfs_root *fs_root = NULL; 807 struct btrfs_root_ref *root_ref; 808 struct btrfs_inode_ref *inode_ref; 809 struct btrfs_key key; 810 struct btrfs_path *path = NULL; 811 char *name = NULL, *ptr; 812 u64 dirid; 813 int len; 814 int ret; 815 816 path = btrfs_alloc_path(); 817 if (!path) { 818 ret = -ENOMEM; 819 goto err; 820 } 821 822 name = kmalloc(PATH_MAX, GFP_KERNEL); 823 if (!name) { 824 ret = -ENOMEM; 825 goto err; 826 } 827 ptr = name + PATH_MAX - 1; 828 ptr[0] = '\0'; 829 830 /* 831 * Walk up the subvolume trees in the tree of tree roots by root 832 * backrefs until we hit the top-level subvolume. 833 */ 834 while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) { 835 key.objectid = subvol_objectid; 836 key.type = BTRFS_ROOT_BACKREF_KEY; 837 key.offset = (u64)-1; 838 839 ret = btrfs_search_backwards(root, &key, path); 840 if (ret < 0) { 841 goto err; 842 } else if (ret > 0) { 843 ret = -ENOENT; 844 goto err; 845 } 846 847 subvol_objectid = key.offset; 848 849 root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], 850 struct btrfs_root_ref); 851 len = btrfs_root_ref_name_len(path->nodes[0], root_ref); 852 ptr -= len + 1; 853 if (ptr < name) { 854 ret = -ENAMETOOLONG; 855 goto err; 856 } 857 read_extent_buffer(path->nodes[0], ptr + 1, 858 (unsigned long)(root_ref + 1), len); 859 ptr[0] = '/'; 860 dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref); 861 btrfs_release_path(path); 862 863 fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true); 864 if (IS_ERR(fs_root)) { 865 ret = PTR_ERR(fs_root); 866 fs_root = NULL; 867 goto err; 868 } 869 870 /* 871 * Walk up the filesystem tree by inode refs until we hit the 872 * root directory. 
873 */ 874 while (dirid != BTRFS_FIRST_FREE_OBJECTID) { 875 key.objectid = dirid; 876 key.type = BTRFS_INODE_REF_KEY; 877 key.offset = (u64)-1; 878 879 ret = btrfs_search_backwards(fs_root, &key, path); 880 if (ret < 0) { 881 goto err; 882 } else if (ret > 0) { 883 ret = -ENOENT; 884 goto err; 885 } 886 887 dirid = key.offset; 888 889 inode_ref = btrfs_item_ptr(path->nodes[0], 890 path->slots[0], 891 struct btrfs_inode_ref); 892 len = btrfs_inode_ref_name_len(path->nodes[0], 893 inode_ref); 894 ptr -= len + 1; 895 if (ptr < name) { 896 ret = -ENAMETOOLONG; 897 goto err; 898 } 899 read_extent_buffer(path->nodes[0], ptr + 1, 900 (unsigned long)(inode_ref + 1), len); 901 ptr[0] = '/'; 902 btrfs_release_path(path); 903 } 904 btrfs_put_root(fs_root); 905 fs_root = NULL; 906 } 907 908 btrfs_free_path(path); 909 if (ptr == name + PATH_MAX - 1) { 910 name[0] = '/'; 911 name[1] = '\0'; 912 } else { 913 memmove(name, ptr, name + PATH_MAX - ptr); 914 } 915 return name; 916 917 err: 918 btrfs_put_root(fs_root); 919 btrfs_free_path(path); 920 kfree(name); 921 return ERR_PTR(ret); 922 } 923 924 static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid) 925 { 926 struct btrfs_root *root = fs_info->tree_root; 927 struct btrfs_dir_item *di; 928 BTRFS_PATH_AUTO_FREE(path); 929 struct btrfs_key location; 930 struct fscrypt_str name = FSTR_INIT("default", 7); 931 u64 dir_id; 932 933 path = btrfs_alloc_path(); 934 if (!path) 935 return -ENOMEM; 936 937 /* 938 * Find the "default" dir item which points to the root item that we 939 * will mount by default if we haven't been given a specific subvolume 940 * to mount. 941 */ 942 dir_id = btrfs_super_root_dir(fs_info->super_copy); 943 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0); 944 if (IS_ERR(di)) { 945 return PTR_ERR(di); 946 } 947 if (!di) { 948 /* 949 * Ok the default dir item isn't there. This is weird since 950 * it's always been there, but don't freak out, just try and 951 * mount the top-level subvolume. 
952 */ 953 *objectid = BTRFS_FS_TREE_OBJECTID; 954 return 0; 955 } 956 957 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 958 *objectid = location.objectid; 959 return 0; 960 } 961 962 static int btrfs_fill_super(struct super_block *sb, 963 struct btrfs_fs_devices *fs_devices) 964 { 965 struct btrfs_inode *inode; 966 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 967 int ret; 968 969 sb->s_maxbytes = MAX_LFS_FILESIZE; 970 sb->s_magic = BTRFS_SUPER_MAGIC; 971 sb->s_op = &btrfs_super_ops; 972 set_default_d_op(sb, &btrfs_dentry_operations); 973 sb->s_export_op = &btrfs_export_ops; 974 #ifdef CONFIG_FS_VERITY 975 sb->s_vop = &btrfs_verityops; 976 #endif 977 sb->s_xattr = btrfs_xattr_handlers; 978 sb->s_time_gran = 1; 979 sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM; 980 981 ret = super_setup_bdi(sb); 982 if (ret) { 983 btrfs_err(fs_info, "super_setup_bdi failed"); 984 return ret; 985 } 986 987 ret = open_ctree(sb, fs_devices); 988 if (ret) { 989 btrfs_err(fs_info, "open_ctree failed: %d", ret); 990 return ret; 991 } 992 993 btrfs_emit_options(fs_info, NULL); 994 995 inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root); 996 if (IS_ERR(inode)) { 997 ret = PTR_ERR(inode); 998 btrfs_handle_fs_error(fs_info, ret, NULL); 999 goto fail_close; 1000 } 1001 1002 sb->s_root = d_make_root(&inode->vfs_inode); 1003 if (!sb->s_root) { 1004 ret = -ENOMEM; 1005 goto fail_close; 1006 } 1007 1008 sb->s_flags |= SB_ACTIVE; 1009 return 0; 1010 1011 fail_close: 1012 close_ctree(fs_info); 1013 return ret; 1014 } 1015 1016 int btrfs_sync_fs(struct super_block *sb, int wait) 1017 { 1018 struct btrfs_trans_handle *trans; 1019 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1020 struct btrfs_root *root = fs_info->tree_root; 1021 1022 trace_btrfs_sync_fs(fs_info, wait); 1023 1024 if (!wait) { 1025 filemap_flush(fs_info->btree_inode->i_mapping); 1026 return 0; 1027 } 1028 1029 btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL); 1030 1031 trans = btrfs_attach_transaction_barrier(root); 1032 if (IS_ERR(trans)) { 1033 /* no transaction, don't bother */ 1034 if (PTR_ERR(trans) == -ENOENT) { 1035 /* 1036 * Exit unless we have some pending changes 1037 * that need to go through commit 1038 */ 1039 if (!test_bit(BTRFS_FS_NEED_TRANS_COMMIT, 1040 &fs_info->flags)) 1041 return 0; 1042 /* 1043 * A non-blocking test if the fs is frozen. We must not 1044 * start a new transaction here otherwise a deadlock 1045 * happens. The pending operations are delayed to the 1046 * next commit after thawing. 1047 */ 1048 if (sb_start_write_trylock(sb)) 1049 sb_end_write(sb); 1050 else 1051 return 0; 1052 trans = btrfs_start_transaction(root, 0); 1053 } 1054 if (IS_ERR(trans)) 1055 return PTR_ERR(trans); 1056 } 1057 return btrfs_commit_transaction(trans); 1058 } 1059 1060 static void print_rescue_option(struct seq_file *seq, const char *s, bool *printed) 1061 { 1062 seq_printf(seq, "%s%s", (*printed) ? 
":" : ",rescue=", s); 1063 *printed = true; 1064 } 1065 1066 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) 1067 { 1068 struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); 1069 const char *compress_type; 1070 const char *subvol_name; 1071 bool printed = false; 1072 1073 if (btrfs_test_opt(info, DEGRADED)) 1074 seq_puts(seq, ",degraded"); 1075 if (btrfs_test_opt(info, NODATASUM)) 1076 seq_puts(seq, ",nodatasum"); 1077 if (btrfs_test_opt(info, NODATACOW)) 1078 seq_puts(seq, ",nodatacow"); 1079 if (btrfs_test_opt(info, NOBARRIER)) 1080 seq_puts(seq, ",nobarrier"); 1081 if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE) 1082 seq_printf(seq, ",max_inline=%llu", info->max_inline); 1083 if (info->thread_pool_size != min_t(unsigned long, 1084 num_online_cpus() + 2, 8)) 1085 seq_printf(seq, ",thread_pool=%u", info->thread_pool_size); 1086 if (btrfs_test_opt(info, COMPRESS)) { 1087 compress_type = btrfs_compress_type2str(info->compress_type); 1088 if (btrfs_test_opt(info, FORCE_COMPRESS)) 1089 seq_printf(seq, ",compress-force=%s", compress_type); 1090 else 1091 seq_printf(seq, ",compress=%s", compress_type); 1092 if (info->compress_level && info->compress_type != BTRFS_COMPRESS_LZO) 1093 seq_printf(seq, ":%d", info->compress_level); 1094 } 1095 if (btrfs_test_opt(info, NOSSD)) 1096 seq_puts(seq, ",nossd"); 1097 if (btrfs_test_opt(info, SSD_SPREAD)) 1098 seq_puts(seq, ",ssd_spread"); 1099 else if (btrfs_test_opt(info, SSD)) 1100 seq_puts(seq, ",ssd"); 1101 if (btrfs_test_opt(info, NOTREELOG)) 1102 seq_puts(seq, ",notreelog"); 1103 if (btrfs_test_opt(info, NOLOGREPLAY)) 1104 print_rescue_option(seq, "nologreplay", &printed); 1105 if (btrfs_test_opt(info, USEBACKUPROOT)) 1106 print_rescue_option(seq, "usebackuproot", &printed); 1107 if (btrfs_test_opt(info, IGNOREBADROOTS)) 1108 print_rescue_option(seq, "ignorebadroots", &printed); 1109 if (btrfs_test_opt(info, IGNOREDATACSUMS)) 1110 print_rescue_option(seq, "ignoredatacsums", &printed); 1111 if (btrfs_test_opt(info, IGNOREMETACSUMS)) 1112 print_rescue_option(seq, "ignoremetacsums", &printed); 1113 if (btrfs_test_opt(info, IGNORESUPERFLAGS)) 1114 print_rescue_option(seq, "ignoresuperflags", &printed); 1115 if (btrfs_test_opt(info, FLUSHONCOMMIT)) 1116 seq_puts(seq, ",flushoncommit"); 1117 if (btrfs_test_opt(info, DISCARD_SYNC)) 1118 seq_puts(seq, ",discard"); 1119 if (btrfs_test_opt(info, DISCARD_ASYNC)) 1120 seq_puts(seq, ",discard=async"); 1121 if (!(info->sb->s_flags & SB_POSIXACL)) 1122 seq_puts(seq, ",noacl"); 1123 if (btrfs_free_space_cache_v1_active(info)) 1124 seq_puts(seq, ",space_cache"); 1125 else if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE)) 1126 seq_puts(seq, ",space_cache=v2"); 1127 else 1128 seq_puts(seq, ",nospace_cache"); 1129 if (btrfs_test_opt(info, RESCAN_UUID_TREE)) 1130 seq_puts(seq, ",rescan_uuid_tree"); 1131 if (btrfs_test_opt(info, CLEAR_CACHE)) 1132 seq_puts(seq, ",clear_cache"); 1133 if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED)) 1134 seq_puts(seq, ",user_subvol_rm_allowed"); 1135 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 1136 seq_puts(seq, ",enospc_debug"); 1137 if (btrfs_test_opt(info, AUTO_DEFRAG)) 1138 seq_puts(seq, ",autodefrag"); 1139 if (btrfs_test_opt(info, SKIP_BALANCE)) 1140 seq_puts(seq, ",skip_balance"); 1141 if (info->metadata_ratio) 1142 seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio); 1143 if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR)) 1144 seq_puts(seq, ",fatal_errors=panic"); 1145 if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL) 1146 seq_printf(seq, 
",commit=%u", info->commit_interval); 1147 #ifdef CONFIG_BTRFS_DEBUG 1148 if (btrfs_test_opt(info, FRAGMENT_DATA)) 1149 seq_puts(seq, ",fragment=data"); 1150 if (btrfs_test_opt(info, FRAGMENT_METADATA)) 1151 seq_puts(seq, ",fragment=metadata"); 1152 #endif 1153 if (btrfs_test_opt(info, REF_VERIFY)) 1154 seq_puts(seq, ",ref_verify"); 1155 if (btrfs_test_opt(info, REF_TRACKER)) 1156 seq_puts(seq, ",ref_tracker"); 1157 seq_printf(seq, ",subvolid=%llu", btrfs_root_id(BTRFS_I(d_inode(dentry))->root)); 1158 subvol_name = btrfs_get_subvol_name_from_objectid(info, 1159 btrfs_root_id(BTRFS_I(d_inode(dentry))->root)); 1160 if (!IS_ERR(subvol_name)) { 1161 seq_show_option(seq, "subvol", subvol_name); 1162 kfree(subvol_name); 1163 } 1164 return 0; 1165 } 1166 1167 /* 1168 * subvolumes are identified by ino 256 1169 */ 1170 static inline bool is_subvolume_inode(struct inode *inode) 1171 { 1172 if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 1173 return true; 1174 return false; 1175 } 1176 1177 static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, 1178 struct vfsmount *mnt) 1179 { 1180 struct dentry *root; 1181 int ret; 1182 1183 if (!subvol_name) { 1184 if (!subvol_objectid) { 1185 ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb), 1186 &subvol_objectid); 1187 if (ret) { 1188 root = ERR_PTR(ret); 1189 goto out; 1190 } 1191 } 1192 subvol_name = btrfs_get_subvol_name_from_objectid( 1193 btrfs_sb(mnt->mnt_sb), subvol_objectid); 1194 if (IS_ERR(subvol_name)) { 1195 root = ERR_CAST(subvol_name); 1196 subvol_name = NULL; 1197 goto out; 1198 } 1199 1200 } 1201 1202 root = mount_subtree(mnt, subvol_name); 1203 /* mount_subtree() drops our reference on the vfsmount. */ 1204 mnt = NULL; 1205 1206 if (!IS_ERR(root)) { 1207 struct super_block *s = root->d_sb; 1208 struct btrfs_fs_info *fs_info = btrfs_sb(s); 1209 struct inode *root_inode = d_inode(root); 1210 u64 root_objectid = btrfs_root_id(BTRFS_I(root_inode)->root); 1211 1212 ret = 0; 1213 if (!is_subvolume_inode(root_inode)) { 1214 btrfs_err(fs_info, "'%s' is not a valid subvolume", 1215 subvol_name); 1216 ret = -EINVAL; 1217 } 1218 if (subvol_objectid && root_objectid != subvol_objectid) { 1219 /* 1220 * This will also catch a race condition where a 1221 * subvolume which was passed by ID is renamed and 1222 * another subvolume is renamed over the old location. 
1223 */ 1224 btrfs_err(fs_info, 1225 "subvol '%s' does not match subvolid %llu", 1226 subvol_name, subvol_objectid); 1227 ret = -EINVAL; 1228 } 1229 if (ret) { 1230 dput(root); 1231 root = ERR_PTR(ret); 1232 deactivate_locked_super(s); 1233 } 1234 } 1235 1236 out: 1237 mntput(mnt); 1238 kfree(subvol_name); 1239 return root; 1240 } 1241 1242 static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, 1243 u32 new_pool_size, u32 old_pool_size) 1244 { 1245 if (new_pool_size == old_pool_size) 1246 return; 1247 1248 fs_info->thread_pool_size = new_pool_size; 1249 1250 btrfs_info(fs_info, "resize thread pool %d -> %d", 1251 old_pool_size, new_pool_size); 1252 1253 btrfs_workqueue_set_max(fs_info->workers, new_pool_size); 1254 btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size); 1255 btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size); 1256 workqueue_set_max_active(fs_info->endio_workers, new_pool_size); 1257 workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size); 1258 btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size); 1259 btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size); 1260 btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size); 1261 } 1262 1263 static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info, 1264 unsigned long long old_opts, int flags) 1265 { 1266 if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) && 1267 (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || 1268 (flags & SB_RDONLY))) { 1269 /* wait for any defraggers to finish */ 1270 wait_event(fs_info->transaction_wait, 1271 (atomic_read(&fs_info->defrag_running) == 0)); 1272 if (flags & SB_RDONLY) 1273 sync_filesystem(fs_info->sb); 1274 } 1275 } 1276 1277 static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info, 1278 unsigned long long old_opts) 1279 { 1280 const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE); 1281 1282 /* 1283 * We need to cleanup all defraggable inodes if the autodefragment is 1284 * close or the filesystem is read only. 
1285 */ 1286 if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) && 1287 (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) { 1288 btrfs_cleanup_defrag_inodes(fs_info); 1289 } 1290 1291 /* If we toggled discard async */ 1292 if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && 1293 btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1294 btrfs_discard_resume(fs_info); 1295 else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && 1296 !btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1297 btrfs_discard_cleanup(fs_info); 1298 1299 /* If we toggled space cache */ 1300 if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) 1301 btrfs_set_free_space_cache_v1_active(fs_info, cache_opt); 1302 } 1303 1304 static int btrfs_remount_rw(struct btrfs_fs_info *fs_info) 1305 { 1306 int ret; 1307 1308 if (BTRFS_FS_ERROR(fs_info)) { 1309 btrfs_err(fs_info, 1310 "remounting read-write after error is not allowed"); 1311 return -EINVAL; 1312 } 1313 1314 if (fs_info->fs_devices->rw_devices == 0) 1315 return -EACCES; 1316 1317 if (!btrfs_check_rw_degradable(fs_info, NULL)) { 1318 btrfs_warn(fs_info, 1319 "too many missing devices, writable remount is not allowed"); 1320 return -EACCES; 1321 } 1322 1323 if (btrfs_super_log_root(fs_info->super_copy) != 0) { 1324 btrfs_warn(fs_info, 1325 "mount required to replay tree-log, cannot remount read-write"); 1326 return -EINVAL; 1327 } 1328 1329 /* 1330 * NOTE: when remounting with a change that does writes, don't put it 1331 * anywhere above this point, as we are not sure to be safe to write 1332 * until we pass the above checks. 1333 */ 1334 ret = btrfs_start_pre_rw_mount(fs_info); 1335 if (ret) 1336 return ret; 1337 1338 btrfs_clear_sb_rdonly(fs_info->sb); 1339 1340 set_bit(BTRFS_FS_OPEN, &fs_info->flags); 1341 1342 /* 1343 * If we've gone from readonly -> read-write, we need to get our 1344 * sync/async discard lists in the right state. 1345 */ 1346 btrfs_discard_resume(fs_info); 1347 1348 return 0; 1349 } 1350 1351 static int btrfs_remount_ro(struct btrfs_fs_info *fs_info) 1352 { 1353 /* 1354 * This also happens on 'umount -rf' or on shutdown, when the 1355 * filesystem is busy. 1356 */ 1357 cancel_work_sync(&fs_info->async_reclaim_work); 1358 cancel_work_sync(&fs_info->async_data_reclaim_work); 1359 1360 btrfs_discard_cleanup(fs_info); 1361 1362 /* Wait for the uuid_scan task to finish */ 1363 down(&fs_info->uuid_tree_rescan_sem); 1364 /* Avoid complains from lockdep et al. */ 1365 up(&fs_info->uuid_tree_rescan_sem); 1366 1367 btrfs_set_sb_rdonly(fs_info->sb); 1368 1369 /* 1370 * Setting SB_RDONLY will put the cleaner thread to sleep at the next 1371 * loop if it's already active. If it's already asleep, we'll leave 1372 * unused block groups on disk until we're mounted read-write again 1373 * unless we clean them up here. 1374 */ 1375 btrfs_delete_unused_bgs(fs_info); 1376 1377 /* 1378 * The cleaner task could be already running before we set the flag 1379 * BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock). We must make 1380 * sure that after we finish the remount, i.e. after we call 1381 * btrfs_commit_super(), the cleaner can no longer start a transaction 1382 * - either because it was dropping a dead root, running delayed iputs 1383 * or deleting an unused block group (the cleaner picked a block 1384 * group from the list of unused block groups before we were able to 1385 * in the previous call to btrfs_delete_unused_bgs()). 
1386 */ 1387 wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING, TASK_UNINTERRUPTIBLE); 1388 1389 /* 1390 * We've set the superblock to RO mode, so we might have made the 1391 * cleaner task sleep without running all pending delayed iputs. Go 1392 * through all the delayed iputs here, so that if an unmount happens 1393 * without remounting RW we don't end up at finishing close_ctree() 1394 * with a non-empty list of delayed iputs. 1395 */ 1396 btrfs_run_delayed_iputs(fs_info); 1397 1398 btrfs_dev_replace_suspend_for_unmount(fs_info); 1399 btrfs_scrub_cancel(fs_info); 1400 btrfs_pause_balance(fs_info); 1401 1402 /* 1403 * Pause the qgroup rescan worker if it is running. We don't want it to 1404 * be still running after we are in RO mode, as after that, by the time 1405 * we unmount, it might have left a transaction open, so we would leak 1406 * the transaction and/or crash. 1407 */ 1408 btrfs_qgroup_wait_for_completion(fs_info, false); 1409 1410 return btrfs_commit_super(fs_info); 1411 } 1412 1413 static void btrfs_ctx_to_info(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx) 1414 { 1415 fs_info->max_inline = ctx->max_inline; 1416 fs_info->commit_interval = ctx->commit_interval; 1417 fs_info->metadata_ratio = ctx->metadata_ratio; 1418 fs_info->thread_pool_size = ctx->thread_pool_size; 1419 fs_info->mount_opt = ctx->mount_opt; 1420 fs_info->compress_type = ctx->compress_type; 1421 fs_info->compress_level = ctx->compress_level; 1422 } 1423 1424 static void btrfs_info_to_ctx(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx) 1425 { 1426 ctx->max_inline = fs_info->max_inline; 1427 ctx->commit_interval = fs_info->commit_interval; 1428 ctx->metadata_ratio = fs_info->metadata_ratio; 1429 ctx->thread_pool_size = fs_info->thread_pool_size; 1430 ctx->mount_opt = fs_info->mount_opt; 1431 ctx->compress_type = fs_info->compress_type; 1432 ctx->compress_level = fs_info->compress_level; 1433 } 1434 1435 #define btrfs_info_if_set(fs_info, old_ctx, opt, fmt, args...) \ 1436 do { \ 1437 if ((!old_ctx || !btrfs_raw_test_opt(old_ctx->mount_opt, opt)) && \ 1438 btrfs_raw_test_opt(fs_info->mount_opt, opt)) \ 1439 btrfs_info(fs_info, fmt, ##args); \ 1440 } while (0) 1441 1442 #define btrfs_info_if_unset(fs_info, old_ctx, opt, fmt, args...) 
\ 1443 do { \ 1444 if ((old_ctx && btrfs_raw_test_opt(old_ctx->mount_opt, opt)) && \ 1445 !btrfs_raw_test_opt(fs_info->mount_opt, opt)) \ 1446 btrfs_info(fs_info, fmt, ##args); \ 1447 } while (0) 1448 1449 static void btrfs_emit_options(struct btrfs_fs_info *info, 1450 struct btrfs_fs_context *old) 1451 { 1452 btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum"); 1453 btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts"); 1454 btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow"); 1455 btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations"); 1456 btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme"); 1457 btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers"); 1458 btrfs_info_if_set(info, old, NOTREELOG, "disabling tree log"); 1459 btrfs_info_if_set(info, old, NOLOGREPLAY, "disabling log replay at mount time"); 1460 btrfs_info_if_set(info, old, FLUSHONCOMMIT, "turning on flush-on-commit"); 1461 btrfs_info_if_set(info, old, DISCARD_SYNC, "turning on sync discard"); 1462 btrfs_info_if_set(info, old, DISCARD_ASYNC, "turning on async discard"); 1463 btrfs_info_if_set(info, old, FREE_SPACE_TREE, "enabling free space tree"); 1464 btrfs_info_if_set(info, old, SPACE_CACHE, "enabling disk space caching"); 1465 btrfs_info_if_set(info, old, CLEAR_CACHE, "force clearing of disk cache"); 1466 btrfs_info_if_set(info, old, AUTO_DEFRAG, "enabling auto defrag"); 1467 btrfs_info_if_set(info, old, FRAGMENT_DATA, "fragmenting data"); 1468 btrfs_info_if_set(info, old, FRAGMENT_METADATA, "fragmenting metadata"); 1469 btrfs_info_if_set(info, old, REF_VERIFY, "doing ref verification"); 1470 btrfs_info_if_set(info, old, USEBACKUPROOT, "trying to use backup root at mount time"); 1471 btrfs_info_if_set(info, old, IGNOREBADROOTS, "ignoring bad roots"); 1472 btrfs_info_if_set(info, old, IGNOREDATACSUMS, "ignoring data csums"); 1473 btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums"); 1474 btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags"); 1475 1476 btrfs_info_if_unset(info, old, NODATASUM, "setting datasum"); 1477 btrfs_info_if_unset(info, old, NODATACOW, "setting datacow"); 1478 btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations"); 1479 btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme"); 1480 btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers"); 1481 btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log"); 1482 btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching"); 1483 btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree"); 1484 btrfs_info_if_unset(info, old, AUTO_DEFRAG, "disabling auto defrag"); 1485 btrfs_info_if_unset(info, old, COMPRESS, "use no compression"); 1486 1487 /* Did the compression settings change? */ 1488 if (btrfs_test_opt(info, COMPRESS) && 1489 (!old || 1490 old->compress_type != info->compress_type || 1491 old->compress_level != info->compress_level || 1492 (!btrfs_raw_test_opt(old->mount_opt, FORCE_COMPRESS) && 1493 btrfs_raw_test_opt(info->mount_opt, FORCE_COMPRESS)))) { 1494 const char *compress_type = btrfs_compress_type2str(info->compress_type); 1495 1496 btrfs_info(info, "%s %s compression, level %d", 1497 btrfs_test_opt(info, FORCE_COMPRESS) ? 
"force" : "use", 1498 compress_type, info->compress_level); 1499 } 1500 1501 if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE) 1502 btrfs_info(info, "max_inline set to %llu", info->max_inline); 1503 } 1504 1505 static int btrfs_reconfigure(struct fs_context *fc) 1506 { 1507 struct super_block *sb = fc->root->d_sb; 1508 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1509 struct btrfs_fs_context *ctx = fc->fs_private; 1510 struct btrfs_fs_context old_ctx; 1511 int ret = 0; 1512 bool mount_reconfigure = (fc->s_fs_info != NULL); 1513 1514 btrfs_info_to_ctx(fs_info, &old_ctx); 1515 1516 /* 1517 * This is our "bind mount" trick, we don't want to allow the user to do 1518 * anything other than mount a different ro/rw and a different subvol, 1519 * all of the mount options should be maintained. 1520 */ 1521 if (mount_reconfigure) 1522 ctx->mount_opt = old_ctx.mount_opt; 1523 1524 sync_filesystem(sb); 1525 set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); 1526 1527 if (!btrfs_check_options(fs_info, &ctx->mount_opt, fc->sb_flags)) 1528 return -EINVAL; 1529 1530 ret = btrfs_check_features(fs_info, !(fc->sb_flags & SB_RDONLY)); 1531 if (ret < 0) 1532 return ret; 1533 1534 btrfs_ctx_to_info(fs_info, ctx); 1535 btrfs_remount_begin(fs_info, old_ctx.mount_opt, fc->sb_flags); 1536 btrfs_resize_thread_pool(fs_info, fs_info->thread_pool_size, 1537 old_ctx.thread_pool_size); 1538 1539 if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) != 1540 (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && 1541 (!sb_rdonly(sb) || (fc->sb_flags & SB_RDONLY))) { 1542 btrfs_warn(fs_info, 1543 "remount supports changing free space tree only from RO to RW"); 1544 /* Make sure free space cache options match the state on disk. */ 1545 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 1546 btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); 1547 btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE); 1548 } 1549 if (btrfs_free_space_cache_v1_active(fs_info)) { 1550 btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE); 1551 btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE); 1552 } 1553 } 1554 1555 ret = 0; 1556 if (!sb_rdonly(sb) && (fc->sb_flags & SB_RDONLY)) 1557 ret = btrfs_remount_ro(fs_info); 1558 else if (sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY)) 1559 ret = btrfs_remount_rw(fs_info); 1560 if (ret) 1561 goto restore; 1562 1563 /* 1564 * If we set the mask during the parameter parsing VFS would reject the 1565 * remount. Here we can set the mask and the value will be updated 1566 * appropriately. 
1567 */ 1568 if ((fc->sb_flags & SB_POSIXACL) != (sb->s_flags & SB_POSIXACL)) 1569 fc->sb_flags_mask |= SB_POSIXACL; 1570 1571 btrfs_emit_options(fs_info, &old_ctx); 1572 wake_up_process(fs_info->transaction_kthread); 1573 btrfs_remount_cleanup(fs_info, old_ctx.mount_opt); 1574 btrfs_clear_oneshot_options(fs_info); 1575 clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); 1576 1577 return 0; 1578 restore: 1579 btrfs_ctx_to_info(fs_info, &old_ctx); 1580 btrfs_remount_cleanup(fs_info, old_ctx.mount_opt); 1581 clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); 1582 return ret; 1583 } 1584 1585 /* Used to sort the devices by max_avail(descending sort) */ 1586 static int btrfs_cmp_device_free_bytes(const void *a, const void *b) 1587 { 1588 const struct btrfs_device_info *dev_info1 = a; 1589 const struct btrfs_device_info *dev_info2 = b; 1590 1591 if (dev_info1->max_avail > dev_info2->max_avail) 1592 return -1; 1593 else if (dev_info1->max_avail < dev_info2->max_avail) 1594 return 1; 1595 return 0; 1596 } 1597 1598 /* 1599 * sort the devices by max_avail, in which max free extent size of each device 1600 * is stored.(Descending Sort) 1601 */ 1602 static inline void btrfs_descending_sort_devices( 1603 struct btrfs_device_info *devices, 1604 size_t nr_devices) 1605 { 1606 sort(devices, nr_devices, sizeof(struct btrfs_device_info), 1607 btrfs_cmp_device_free_bytes, NULL); 1608 } 1609 1610 /* 1611 * The helper to calc the free space on the devices that can be used to store 1612 * file data. 1613 */ 1614 static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, 1615 u64 *free_bytes) 1616 { 1617 struct btrfs_device_info *devices_info; 1618 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 1619 struct btrfs_device *device; 1620 u64 type; 1621 u64 avail_space; 1622 u64 min_stripe_size; 1623 int num_stripes = 1; 1624 int i = 0, nr_devices; 1625 const struct btrfs_raid_attr *rattr; 1626 1627 /* 1628 * We aren't under the device list lock, so this is racy-ish, but good 1629 * enough for our purposes. 
1630 */ 1631 nr_devices = fs_info->fs_devices->open_devices; 1632 if (!nr_devices) { 1633 smp_mb(); 1634 nr_devices = fs_info->fs_devices->open_devices; 1635 ASSERT(nr_devices); 1636 if (!nr_devices) { 1637 *free_bytes = 0; 1638 return 0; 1639 } 1640 } 1641 1642 devices_info = kmalloc_array(nr_devices, sizeof(*devices_info), 1643 GFP_KERNEL); 1644 if (!devices_info) 1645 return -ENOMEM; 1646 1647 /* calc min stripe number for data space allocation */ 1648 type = btrfs_data_alloc_profile(fs_info); 1649 rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)]; 1650 1651 if (type & BTRFS_BLOCK_GROUP_RAID0) 1652 num_stripes = nr_devices; 1653 else if (type & BTRFS_BLOCK_GROUP_RAID1_MASK) 1654 num_stripes = rattr->ncopies; 1655 else if (type & BTRFS_BLOCK_GROUP_RAID10) 1656 num_stripes = 4; 1657 1658 /* Adjust for more than 1 stripe per device */ 1659 min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN; 1660 1661 rcu_read_lock(); 1662 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 1663 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 1664 &device->dev_state) || 1665 !device->bdev || 1666 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 1667 continue; 1668 1669 if (i >= nr_devices) 1670 break; 1671 1672 avail_space = device->total_bytes - device->bytes_used; 1673 1674 /* align with stripe_len */ 1675 avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN); 1676 1677 /* 1678 * Ensure we have at least min_stripe_size on top of the 1679 * reserved space on the device. 1680 */ 1681 if (avail_space <= BTRFS_DEVICE_RANGE_RESERVED + min_stripe_size) 1682 continue; 1683 1684 avail_space -= BTRFS_DEVICE_RANGE_RESERVED; 1685 1686 devices_info[i].dev = device; 1687 devices_info[i].max_avail = avail_space; 1688 1689 i++; 1690 } 1691 rcu_read_unlock(); 1692 1693 nr_devices = i; 1694 1695 btrfs_descending_sort_devices(devices_info, nr_devices); 1696 1697 i = nr_devices - 1; 1698 avail_space = 0; 1699 while (nr_devices >= rattr->devs_min) { 1700 num_stripes = min(num_stripes, nr_devices); 1701 1702 if (devices_info[i].max_avail >= min_stripe_size) { 1703 int j; 1704 u64 alloc_size; 1705 1706 avail_space += devices_info[i].max_avail * num_stripes; 1707 alloc_size = devices_info[i].max_avail; 1708 for (j = i + 1 - num_stripes; j <= i; j++) 1709 devices_info[j].max_avail -= alloc_size; 1710 } 1711 i--; 1712 nr_devices--; 1713 } 1714 1715 kfree(devices_info); 1716 *free_bytes = avail_space; 1717 return 0; 1718 } 1719 1720 /* 1721 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles. 1722 * 1723 * If there's a redundant raid level at DATA block groups, use the respective 1724 * multiplier to scale the sizes. 1725 * 1726 * Unused device space usage is based on simulating the chunk allocator 1727 * algorithm that respects the device sizes and order of allocations. This is 1728 * a close approximation of the actual use but there are other factors that may 1729 * change the result (like a new metadata chunk). 1730 * 1731 * If metadata is exhausted, f_bavail will be 0. 
1732 */ 1733 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) 1734 { 1735 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 1736 struct btrfs_super_block *disk_super = fs_info->super_copy; 1737 struct btrfs_space_info *found; 1738 u64 total_used = 0; 1739 u64 total_free_data = 0; 1740 u64 total_free_meta = 0; 1741 u32 bits = fs_info->sectorsize_bits; 1742 __be32 *fsid = (__be32 *)fs_info->fs_devices->fsid; 1743 unsigned factor = 1; 1744 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; 1745 int ret; 1746 u64 thresh = 0; 1747 int mixed = 0; 1748 1749 list_for_each_entry(found, &fs_info->space_info, list) { 1750 if (found->flags & BTRFS_BLOCK_GROUP_DATA) { 1751 int i; 1752 1753 total_free_data += found->disk_total - found->disk_used; 1754 total_free_data -= 1755 btrfs_account_ro_block_groups_free_space(found); 1756 1757 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1758 if (!list_empty(&found->block_groups[i])) 1759 factor = btrfs_bg_type_to_factor( 1760 btrfs_raid_array[i].bg_flag); 1761 } 1762 } 1763 1764 /* 1765 * Metadata in mixed block group profiles are accounted in data 1766 */ 1767 if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) { 1768 if (found->flags & BTRFS_BLOCK_GROUP_DATA) 1769 mixed = 1; 1770 else 1771 total_free_meta += found->disk_total - 1772 found->disk_used; 1773 } 1774 1775 total_used += found->disk_used; 1776 } 1777 1778 buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor); 1779 buf->f_blocks >>= bits; 1780 buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits); 1781 1782 /* Account global block reserve as used, it's in logical size already */ 1783 spin_lock(&block_rsv->lock); 1784 /* Mixed block groups accounting is not byte-accurate, avoid overflow */ 1785 if (buf->f_bfree >= block_rsv->size >> bits) 1786 buf->f_bfree -= block_rsv->size >> bits; 1787 else 1788 buf->f_bfree = 0; 1789 spin_unlock(&block_rsv->lock); 1790 1791 buf->f_bavail = div_u64(total_free_data, factor); 1792 ret = btrfs_calc_avail_data_space(fs_info, &total_free_data); 1793 if (ret) 1794 return ret; 1795 buf->f_bavail += div_u64(total_free_data, factor); 1796 buf->f_bavail = buf->f_bavail >> bits; 1797 1798 /* 1799 * We calculate the remaining metadata space minus global reserve. If 1800 * this is (supposedly) smaller than zero, there's no space. But this 1801 * does not hold in practice, the exhausted state happens where's still 1802 * some positive delta. So we apply some guesswork and compare the 1803 * delta to a 4M threshold. (Practically observed delta was ~2M.) 1804 * 1805 * We probably cannot calculate the exact threshold value because this 1806 * depends on the internal reservations requested by various 1807 * operations, so some operations that consume a few metadata will 1808 * succeed even if the Avail is zero. But this is better than the other 1809 * way around. 1810 */ 1811 thresh = SZ_4M; 1812 1813 /* 1814 * We only want to claim there's no available space if we can no longer 1815 * allocate chunks for our metadata profile and our global reserve will 1816 * not fit in the free metadata space. If we aren't ->full then we 1817 * still can allocate chunks and thus are fine using the currently 1818 * calculated f_bavail. 
1819 */ 1820 if (!mixed && block_rsv->space_info->full && 1821 (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size)) 1822 buf->f_bavail = 0; 1823 1824 buf->f_type = BTRFS_SUPER_MAGIC; 1825 buf->f_bsize = fs_info->sectorsize; 1826 buf->f_namelen = BTRFS_NAME_LEN; 1827 1828 /* We treat it as constant endianness (it doesn't matter _which_) 1829 because we want the fsid to come out the same whether mounted 1830 on a big-endian or little-endian host */ 1831 buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]); 1832 buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]); 1833 /* Mask in the root object ID too, to disambiguate subvols */ 1834 buf->f_fsid.val[0] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root) >> 32; 1835 buf->f_fsid.val[1] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root); 1836 1837 return 0; 1838 } 1839 1840 static int btrfs_fc_test_super(struct super_block *sb, struct fs_context *fc) 1841 { 1842 struct btrfs_fs_info *p = fc->s_fs_info; 1843 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1844 1845 return fs_info->fs_devices == p->fs_devices; 1846 } 1847 1848 static int btrfs_get_tree_super(struct fs_context *fc) 1849 { 1850 struct btrfs_fs_info *fs_info = fc->s_fs_info; 1851 struct btrfs_fs_context *ctx = fc->fs_private; 1852 struct btrfs_fs_devices *fs_devices = NULL; 1853 struct btrfs_device *device; 1854 struct super_block *sb; 1855 blk_mode_t mode = sb_open_mode(fc->sb_flags); 1856 int ret; 1857 1858 btrfs_ctx_to_info(fs_info, ctx); 1859 mutex_lock(&uuid_mutex); 1860 1861 /* 1862 * With 'true' passed to btrfs_scan_one_device() (mount time) we expect 1863 * either a valid device or an error. 1864 */ 1865 device = btrfs_scan_one_device(fc->source, true); 1866 ASSERT(device != NULL); 1867 if (IS_ERR(device)) { 1868 mutex_unlock(&uuid_mutex); 1869 return PTR_ERR(device); 1870 } 1871 fs_devices = device->fs_devices; 1872 /* 1873 * We cannot hold uuid_mutex while calling sget_fc(), as that would lead 1874 * to a locking order reversal with s_umount. 1875 * 1876 * So here we increase the holding count of fs_devices, which ensures 1877 * the fs_devices itself won't be freed. 1878 */ 1879 btrfs_fs_devices_inc_holding(fs_devices); 1880 fs_info->fs_devices = fs_devices; 1881 mutex_unlock(&uuid_mutex); 1882 1883 1884 sb = sget_fc(fc, btrfs_fc_test_super, set_anon_super_fc); 1885 if (IS_ERR(sb)) { 1886 mutex_lock(&uuid_mutex); 1887 btrfs_fs_devices_dec_holding(fs_devices); 1888 /* 1889 * Since the fs_devices is not opened, it can be freed at any 1890 * time after unlocking uuid_mutex. We need to avoid a double 1891 * free through put_fs_context()->btrfs_free_fs_info(). 1892 * So here we reset fs_info->fs_devices to NULL, and let the 1893 * regular fs_devices reclaim path handle it. 1894 * 1895 * This applies to all later branches where no fs_devices is 1896 * opened. 1897 */ 1898 fs_info->fs_devices = NULL; 1899 mutex_unlock(&uuid_mutex); 1900 return PTR_ERR(sb); 1901 } 1902 1903 set_device_specific_options(fs_info); 1904 1905 if (sb->s_root) { 1906 /* 1907 * This is not the first mount of the fs, thus we got an existing 1908 * super block. We will reuse the returned super block, fs_info and fs_devices. 1909 * 1910 * fc->s_fs_info is not touched and will be later freed by 1911 * put_fs_context() through btrfs_free_fs_context().
1912 */ 1913 ASSERT(fc->s_fs_info == fs_info); 1914 1915 mutex_lock(&uuid_mutex); 1916 btrfs_fs_devices_dec_holding(fs_devices); 1917 fs_info->fs_devices = NULL; 1918 mutex_unlock(&uuid_mutex); 1919 /* 1920 * At this stage we may have an RO flag mismatch between 1921 * fc->sb_flags and sb->s_flags. The caller should detect such a 1922 * mismatch and reconfigure with the sb->s_umount rwsem held if 1923 * needed. 1924 */ 1925 } else { 1926 struct block_device *bdev; 1927 1928 /* 1929 * This is the first mount of the fs, thus a new superblock; fc->s_fs_info 1930 * must be NULL, and the ownership of our fs_info and fs_devices is 1931 * transferred to the super block. 1932 */ 1933 ASSERT(fc->s_fs_info == NULL); 1934 1935 mutex_lock(&uuid_mutex); 1936 btrfs_fs_devices_dec_holding(fs_devices); 1937 ret = btrfs_open_devices(fs_devices, mode, sb); 1938 if (ret < 0) 1939 fs_info->fs_devices = NULL; 1940 mutex_unlock(&uuid_mutex); 1941 if (ret < 0) { 1942 deactivate_locked_super(sb); 1943 return ret; 1944 } 1945 if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) { 1946 deactivate_locked_super(sb); 1947 return -EACCES; 1948 } 1949 bdev = fs_devices->latest_dev->bdev; 1950 snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev); 1951 shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id); 1952 ret = btrfs_fill_super(sb, fs_devices); 1953 if (ret) { 1954 deactivate_locked_super(sb); 1955 return ret; 1956 } 1957 } 1958 1959 btrfs_clear_oneshot_options(fs_info); 1960 1961 fc->root = dget(sb->s_root); 1962 return 0; 1963 } 1964 1965 /* 1966 * Ever since commit 0723a0473fb4 ("btrfs: allow mounting btrfs subvolumes 1967 * with different ro/rw options") the following works: 1968 * 1969 * (i) mount /dev/sda3 -o subvol=foo,ro /mnt/foo 1970 * (ii) mount /dev/sda3 -o subvol=bar,rw /mnt/bar 1971 * 1972 * which looks nice and innocent but is actually pretty intricate and deserves 1973 * a long comment. 1974 * 1975 * On another filesystem a subvolume mount is close to something like: 1976 * 1977 * (iii) # create rw superblock + initial mount 1978 * mount -t xfs /dev/sdb /opt/ 1979 * 1980 * # create ro bind mount 1981 * mount --bind -o ro /opt/foo /mnt/foo 1982 * 1983 * # unmount initial mount 1984 * umount /opt 1985 * 1986 * Of course, there's some special subvolume sauce and there's the fact that the 1987 * sb->s_root dentry is really swapped after mount_subtree(). But conceptually 1988 * it's very close and will help us understand the issue. 1989 * 1990 * The old mount API didn't cleanly distinguish between a mount being made ro 1991 * and a superblock being made ro. The only way to change the ro state of 1992 * either object was by passing MS_RDONLY. If a new mount was created via 1993 * mount(2) such as: 1994 * 1995 * mount("/dev/sdb", "/mnt", "xfs", MS_RDONLY, NULL); 1996 * 1997 * the MS_RDONLY flag being specified had two effects: 1998 * 1999 * (1) MNT_READONLY was raised -> the resulting mount got 2000 * @mnt->mnt_flags |= MNT_READONLY raised. 2001 * 2002 * (2) MS_RDONLY was passed to the filesystem's mount method and the filesystem 2003 * made the superblock ro. Note how SB_RDONLY has the same value as 2004 * MS_RDONLY and is raised whenever MS_RDONLY is passed through mount(2). 2005 * 2006 * Creating a subtree mount via (iii) ends up leaving a rw superblock with a 2007 * subtree mounted ro. 2008 * 2009 * But consider the effect of the old mount API on btrfs subvolume mounting, 2010 * which combines the distinct steps in (iii) into a single step.
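 *
 * Roughly (illustrative only, mount(8) does more than this), (i) and (ii)
 * then boil down to two plain mount(2) calls:
 *
 *	mount("/dev/sda3", "/mnt/foo", "btrfs", MS_RDONLY, "subvol=foo");
 *	mount("/dev/sda3", "/mnt/bar", "btrfs", 0, "subvol=bar");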
2011 * 2012 * By issuing (i) both the mount and the superblock are turned ro. Now when (ii) 2013 * is issued the superblock is ro and thus even if the mount created for (ii) is 2014 * rw it wouldn't help. Hence, btrfs needed to transition the superblock from ro 2015 * to rw for (ii), which it did using an internal remount call. 2016 * 2017 * IOW, subvolume mounting was inherently complicated due to the ambiguity of 2018 * MS_RDONLY in mount(2). Note that because of this ambiguity mount(8) always 2019 * translates "ro" to MS_RDONLY. IOW, in both (i) and (ii) "ro" becomes MS_RDONLY 2020 * when passed by mount(8) to mount(2). 2021 * 2022 * Enter the new mount API. The new mount API disambiguates making a mount ro 2023 * and making a superblock ro. 2024 * 2025 * (3) To turn a mount ro the MOUNT_ATTR_RDONLY flag can be used with either 2026 * fsmount() or mount_setattr(); this is a pure VFS level change for a 2027 * specific mount or mount tree that is never seen by the filesystem itself. 2028 * 2029 * (4) To turn a superblock ro the "ro" flag must be used with 2030 * fsconfig(FSCONFIG_SET_FLAG, "ro"). This option is seen by the filesystem 2031 * in fc->sb_flags. 2032 * 2033 * But currently the util-linux mount command already utilizes the new mount 2034 * API and still sets fsconfig(FSCONFIG_SET_FLAG, "ro") no matter whether it's 2035 * btrfs or not, setting the whole super block RO. To make per-subvolume mounting 2036 * with different options work, we need to keep backward compatibility. 2037 */ 2038 static int btrfs_reconfigure_for_mount(struct fs_context *fc) 2039 { 2040 int ret = 0; 2041 2042 if (!(fc->sb_flags & SB_RDONLY) && (fc->root->d_sb->s_flags & SB_RDONLY)) 2043 ret = btrfs_reconfigure(fc); 2044 2045 return ret; 2046 } 2047 2048 static int btrfs_get_tree_subvol(struct fs_context *fc) 2049 { 2050 struct btrfs_fs_info *fs_info = NULL; 2051 struct btrfs_fs_context *ctx = fc->fs_private; 2052 struct fs_context *dup_fc; 2053 struct dentry *dentry; 2054 struct vfsmount *mnt; 2055 int ret = 0; 2056 2057 /* 2058 * Set up a dummy root and fs_info for test/set super. This is because 2059 * we don't actually fill this stuff out until open_ctree, but we need 2060 * it so that open_ctree will properly initialize the file system specific 2061 * settings later. btrfs_init_fs_info initializes the static elements 2062 * of the fs_info (locks and such) to make cleanup easier if we find a 2063 * superblock with our given fs_devices later on at sget() time. 2064 */ 2065 fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); 2066 if (!fs_info) 2067 return -ENOMEM; 2068 2069 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); 2070 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); 2071 if (!fs_info->super_copy || !fs_info->super_for_commit) { 2072 btrfs_free_fs_info(fs_info); 2073 return -ENOMEM; 2074 } 2075 btrfs_init_fs_info(fs_info); 2076 2077 dup_fc = vfs_dup_fs_context(fc); 2078 if (IS_ERR(dup_fc)) { 2079 btrfs_free_fs_info(fs_info); 2080 return PTR_ERR(dup_fc); 2081 } 2082 2083 /* 2084 * When we do the sget_fc() this gets transferred to the sb, so we only 2085 * need to set it on the dup_fc as that is what creates the super block.
2086 */ 2087 dup_fc->s_fs_info = fs_info; 2088 2089 ret = btrfs_get_tree_super(dup_fc); 2090 if (ret) 2091 goto error; 2092 2093 ret = btrfs_reconfigure_for_mount(dup_fc); 2094 up_write(&dup_fc->root->d_sb->s_umount); 2095 if (ret) 2096 goto error; 2097 mnt = vfs_create_mount(dup_fc); 2098 put_fs_context(dup_fc); 2099 if (IS_ERR(mnt)) 2100 return PTR_ERR(mnt); 2101 2102 /* 2103 * This frees ->subvol_name, because if it isn't set we have to 2104 * allocate a buffer to hold the subvol_name, so we just drop our 2105 * reference to it here. 2106 */ 2107 dentry = mount_subvol(ctx->subvol_name, ctx->subvol_objectid, mnt); 2108 ctx->subvol_name = NULL; 2109 if (IS_ERR(dentry)) 2110 return PTR_ERR(dentry); 2111 2112 fc->root = dentry; 2113 return 0; 2114 error: 2115 put_fs_context(dup_fc); 2116 return ret; 2117 } 2118 2119 static int btrfs_get_tree(struct fs_context *fc) 2120 { 2121 ASSERT(fc->s_fs_info == NULL); 2122 2123 return btrfs_get_tree_subvol(fc); 2124 } 2125 2126 static void btrfs_kill_super(struct super_block *sb) 2127 { 2128 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2129 kill_anon_super(sb); 2130 btrfs_free_fs_info(fs_info); 2131 } 2132 2133 static void btrfs_free_fs_context(struct fs_context *fc) 2134 { 2135 struct btrfs_fs_context *ctx = fc->fs_private; 2136 struct btrfs_fs_info *fs_info = fc->s_fs_info; 2137 2138 if (fs_info) 2139 btrfs_free_fs_info(fs_info); 2140 2141 if (ctx && refcount_dec_and_test(&ctx->refs)) { 2142 kfree(ctx->subvol_name); 2143 kfree(ctx); 2144 } 2145 } 2146 2147 static int btrfs_dup_fs_context(struct fs_context *fc, struct fs_context *src_fc) 2148 { 2149 struct btrfs_fs_context *ctx = src_fc->fs_private; 2150 2151 /* 2152 * Give this dup a ref to our ctx, as we want to keep the ctx around for 2153 * our original fc so we can have the subvolume name or objectid. 2154 * 2155 * We unset ->source in the original fc because the dup needs it for 2156 * mounting, and then once we free the dup it'll free ->source, so we 2157 * need to make sure we're only pointing to it in one fc.
2158 */ 2159 refcount_inc(&ctx->refs); 2160 fc->fs_private = ctx; 2161 fc->source = src_fc->source; 2162 src_fc->source = NULL; 2163 return 0; 2164 } 2165 2166 static const struct fs_context_operations btrfs_fs_context_ops = { 2167 .parse_param = btrfs_parse_param, 2168 .reconfigure = btrfs_reconfigure, 2169 .get_tree = btrfs_get_tree, 2170 .dup = btrfs_dup_fs_context, 2171 .free = btrfs_free_fs_context, 2172 }; 2173 2174 static int btrfs_init_fs_context(struct fs_context *fc) 2175 { 2176 struct btrfs_fs_context *ctx; 2177 2178 ctx = kzalloc(sizeof(struct btrfs_fs_context), GFP_KERNEL); 2179 if (!ctx) 2180 return -ENOMEM; 2181 2182 refcount_set(&ctx->refs, 1); 2183 fc->fs_private = ctx; 2184 fc->ops = &btrfs_fs_context_ops; 2185 2186 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 2187 btrfs_info_to_ctx(btrfs_sb(fc->root->d_sb), ctx); 2188 } else { 2189 ctx->thread_pool_size = 2190 min_t(unsigned long, num_online_cpus() + 2, 8); 2191 ctx->max_inline = BTRFS_DEFAULT_MAX_INLINE; 2192 ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 2193 } 2194 2195 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 2196 fc->sb_flags |= SB_POSIXACL; 2197 #endif 2198 fc->sb_flags |= SB_I_VERSION; 2199 2200 return 0; 2201 } 2202 2203 static struct file_system_type btrfs_fs_type = { 2204 .owner = THIS_MODULE, 2205 .name = "btrfs", 2206 .init_fs_context = btrfs_init_fs_context, 2207 .parameters = btrfs_fs_parameters, 2208 .kill_sb = btrfs_kill_super, 2209 .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | 2210 FS_ALLOW_IDMAP | FS_MGTIME, 2211 }; 2212 2213 MODULE_ALIAS_FS("btrfs"); 2214 2215 static int btrfs_control_open(struct inode *inode, struct file *file) 2216 { 2217 /* 2218 * The control file's private_data is used to hold the 2219 * transaction when it is started and is used to keep 2220 * track of whether a transaction is already in progress. 2221 */ 2222 file->private_data = NULL; 2223 return 0; 2224 } 2225 2226 /* 2227 * Used by /dev/btrfs-control for devices ioctls. 2228 */ 2229 static long btrfs_control_ioctl(struct file *file, unsigned int cmd, 2230 unsigned long arg) 2231 { 2232 struct btrfs_ioctl_vol_args *vol; 2233 struct btrfs_device *device = NULL; 2234 dev_t devt = 0; 2235 int ret = -ENOTTY; 2236 2237 if (!capable(CAP_SYS_ADMIN)) 2238 return -EPERM; 2239 2240 vol = memdup_user((void __user *)arg, sizeof(*vol)); 2241 if (IS_ERR(vol)) 2242 return PTR_ERR(vol); 2243 ret = btrfs_check_ioctl_vol_args_path(vol); 2244 if (ret < 0) 2245 goto out; 2246 2247 switch (cmd) { 2248 case BTRFS_IOC_SCAN_DEV: 2249 mutex_lock(&uuid_mutex); 2250 /* 2251 * Scanning outside of mount can return NULL which would turn 2252 * into 0 error code. 2253 */ 2254 device = btrfs_scan_one_device(vol->name, false); 2255 ret = PTR_ERR_OR_ZERO(device); 2256 mutex_unlock(&uuid_mutex); 2257 break; 2258 case BTRFS_IOC_FORGET_DEV: 2259 if (vol->name[0] != 0) { 2260 ret = lookup_bdev(vol->name, &devt); 2261 if (ret) 2262 break; 2263 } 2264 ret = btrfs_forget_devices(devt); 2265 break; 2266 case BTRFS_IOC_DEVICES_READY: 2267 mutex_lock(&uuid_mutex); 2268 /* 2269 * Scanning outside of mount can return NULL which would turn 2270 * into 0 error code. 
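 *
 * Purely as a hypothetical example: for a 3-device filesystem where only
 * two devices have been scanned so far, num_devices (2) does not match
 * total_devices (3) recorded in the super block, so the ioctl returns 1,
 * i.e. "not ready".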
2271 */ 2272 device = btrfs_scan_one_device(vol->name, false); 2273 if (IS_ERR_OR_NULL(device)) { 2274 mutex_unlock(&uuid_mutex); 2275 ret = PTR_ERR_OR_ZERO(device); 2276 break; 2277 } 2278 ret = !(device->fs_devices->num_devices == 2279 device->fs_devices->total_devices); 2280 mutex_unlock(&uuid_mutex); 2281 break; 2282 case BTRFS_IOC_GET_SUPPORTED_FEATURES: 2283 ret = btrfs_ioctl_get_supported_features((void __user*)arg); 2284 break; 2285 } 2286 2287 out: 2288 kfree(vol); 2289 return ret; 2290 } 2291 2292 static int btrfs_freeze(struct super_block *sb) 2293 { 2294 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2295 2296 set_bit(BTRFS_FS_FROZEN, &fs_info->flags); 2297 /* 2298 * We don't need a barrier here; we'll wait for any transaction that 2299 * could be in progress on other threads (and do delayed iputs that 2300 * we want to avoid on a frozen filesystem), or do the commit 2301 * ourselves. 2302 */ 2303 return btrfs_commit_current_transaction(fs_info->tree_root); 2304 } 2305 2306 static int check_dev_super(struct btrfs_device *dev) 2307 { 2308 struct btrfs_fs_info *fs_info = dev->fs_info; 2309 struct btrfs_super_block *sb; 2310 u64 last_trans; 2311 u16 csum_type; 2312 int ret = 0; 2313 2314 /* This should be called with the fs still frozen. */ 2315 ASSERT(test_bit(BTRFS_FS_FROZEN, &fs_info->flags)); 2316 2317 /* Missing dev, no need to check. */ 2318 if (!dev->bdev) 2319 return 0; 2320 2321 /* Only need to check the primary super block. */ 2322 sb = btrfs_read_disk_super(dev->bdev, 0, true); 2323 if (IS_ERR(sb)) 2324 return PTR_ERR(sb); 2325 2326 /* Verify the checksum. */ 2327 csum_type = btrfs_super_csum_type(sb); 2328 if (unlikely(csum_type != btrfs_super_csum_type(fs_info->super_copy))) { 2329 btrfs_err(fs_info, "csum type changed, has %u expect %u", 2330 csum_type, btrfs_super_csum_type(fs_info->super_copy)); 2331 ret = -EUCLEAN; 2332 goto out; 2333 } 2334 2335 if (unlikely(btrfs_check_super_csum(fs_info, sb))) { 2336 btrfs_err(fs_info, "csum for on-disk super block no longer matches"); 2337 ret = -EUCLEAN; 2338 goto out; 2339 } 2340 2341 /* btrfs_validate_super() includes the fsid check against super->fsid. */ 2342 ret = btrfs_validate_super(fs_info, sb, 0); 2343 if (ret < 0) 2344 goto out; 2345 2346 last_trans = btrfs_get_last_trans_committed(fs_info); 2347 if (unlikely(btrfs_super_generation(sb) != last_trans)) { 2348 btrfs_err(fs_info, "transid mismatch, has %llu expect %llu", 2349 btrfs_super_generation(sb), last_trans); 2350 ret = -EUCLEAN; 2351 goto out; 2352 } 2353 out: 2354 btrfs_release_disk_super(sb); 2355 return ret; 2356 } 2357 2358 static int btrfs_unfreeze(struct super_block *sb) 2359 { 2360 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2361 struct btrfs_device *device; 2362 int ret = 0; 2363 2364 /* 2365 * Make sure the fs was not changed by accident (e.g. hibernated and 2366 * then modified by another OS). 2367 * If we find anything wrong, we mark the fs as being in error immediately. 2368 * 2369 * And since the fs is frozen, no one can modify the fs yet, thus 2370 * we don't need to hold device_list_mutex. 2371 */ 2372 list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { 2373 ret = check_dev_super(device); 2374 if (ret < 0) { 2375 btrfs_handle_fs_error(fs_info, ret, 2376 "super block on devid %llu got modified unexpectedly", 2377 device->devid); 2378 break; 2379 } 2380 } 2381 clear_bit(BTRFS_FS_FROZEN, &fs_info->flags); 2382 2383 /* 2384 * We still return 0, to allow the VFS layer to unfreeze the fs even if 2385 * the above checks failed.
Since the fs is either fine or read-only, we're 2386 * safe to continue, without causing further damage. 2387 */ 2388 return 0; 2389 } 2390 2391 static int btrfs_show_devname(struct seq_file *m, struct dentry *root) 2392 { 2393 struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); 2394 2395 /* 2396 * There should be always a valid pointer in latest_dev, it may be stale 2397 * for a short moment in case it's being deleted but still valid until 2398 * the end of RCU grace period. 2399 */ 2400 rcu_read_lock(); 2401 seq_escape(m, btrfs_dev_name(fs_info->fs_devices->latest_dev), " \t\n\\"); 2402 rcu_read_unlock(); 2403 2404 return 0; 2405 } 2406 2407 static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_control *sc) 2408 { 2409 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2410 const s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps); 2411 2412 trace_btrfs_extent_map_shrinker_count(fs_info, nr); 2413 2414 return nr; 2415 } 2416 2417 static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc) 2418 { 2419 const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan); 2420 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2421 2422 btrfs_free_extent_maps(fs_info, nr_to_scan); 2423 2424 /* The extent map shrinker runs asynchronously, so always return 0. */ 2425 return 0; 2426 } 2427 2428 static const struct super_operations btrfs_super_ops = { 2429 .drop_inode = btrfs_drop_inode, 2430 .evict_inode = btrfs_evict_inode, 2431 .put_super = btrfs_put_super, 2432 .sync_fs = btrfs_sync_fs, 2433 .show_options = btrfs_show_options, 2434 .show_devname = btrfs_show_devname, 2435 .alloc_inode = btrfs_alloc_inode, 2436 .destroy_inode = btrfs_destroy_inode, 2437 .free_inode = btrfs_free_inode, 2438 .statfs = btrfs_statfs, 2439 .freeze_fs = btrfs_freeze, 2440 .unfreeze_fs = btrfs_unfreeze, 2441 .nr_cached_objects = btrfs_nr_cached_objects, 2442 .free_cached_objects = btrfs_free_cached_objects, 2443 }; 2444 2445 static const struct file_operations btrfs_ctl_fops = { 2446 .open = btrfs_control_open, 2447 .unlocked_ioctl = btrfs_control_ioctl, 2448 .compat_ioctl = compat_ptr_ioctl, 2449 .owner = THIS_MODULE, 2450 .llseek = noop_llseek, 2451 }; 2452 2453 static struct miscdevice btrfs_misc = { 2454 .minor = BTRFS_MINOR, 2455 .name = "btrfs-control", 2456 .fops = &btrfs_ctl_fops 2457 }; 2458 2459 MODULE_ALIAS_MISCDEV(BTRFS_MINOR); 2460 MODULE_ALIAS("devname:btrfs-control"); 2461 2462 static int __init btrfs_interface_init(void) 2463 { 2464 return misc_register(&btrfs_misc); 2465 } 2466 2467 static __cold void btrfs_interface_exit(void) 2468 { 2469 misc_deregister(&btrfs_misc); 2470 } 2471 2472 static int __init btrfs_print_mod_info(void) 2473 { 2474 static const char options[] = "" 2475 #ifdef CONFIG_BTRFS_EXPERIMENTAL 2476 ", experimental=on" 2477 #endif 2478 #ifdef CONFIG_BTRFS_DEBUG 2479 ", debug=on" 2480 #endif 2481 #ifdef CONFIG_BTRFS_ASSERT 2482 ", assert=on" 2483 #endif 2484 #ifdef CONFIG_BLK_DEV_ZONED 2485 ", zoned=yes" 2486 #else 2487 ", zoned=no" 2488 #endif 2489 #ifdef CONFIG_FS_VERITY 2490 ", fsverity=yes" 2491 #else 2492 ", fsverity=no" 2493 #endif 2494 ; 2495 2496 #ifdef CONFIG_BTRFS_EXPERIMENTAL 2497 if (btrfs_get_mod_read_policy() == NULL) 2498 pr_info("Btrfs loaded%s\n", options); 2499 else 2500 pr_info("Btrfs loaded%s, read_policy=%s\n", 2501 options, btrfs_get_mod_read_policy()); 2502 #else 2503 pr_info("Btrfs loaded%s\n", options); 2504 #endif 2505 2506 return 0; 2507 } 2508 2509 static int register_btrfs(void) 2510 { 2511 
return register_filesystem(&btrfs_fs_type); 2512 } 2513 2514 static void unregister_btrfs(void) 2515 { 2516 unregister_filesystem(&btrfs_fs_type); 2517 } 2518 2519 /* Helper structure for long init/exit functions. */ 2520 struct init_sequence { 2521 int (*init_func)(void); 2522 /* Can be NULL if the init_func doesn't need cleanup. */ 2523 void (*exit_func)(void); 2524 }; 2525 2526 static const struct init_sequence mod_init_seq[] = { 2527 { 2528 .init_func = btrfs_props_init, 2529 .exit_func = NULL, 2530 }, { 2531 .init_func = btrfs_init_sysfs, 2532 .exit_func = btrfs_exit_sysfs, 2533 }, { 2534 .init_func = btrfs_init_compress, 2535 .exit_func = btrfs_exit_compress, 2536 }, { 2537 .init_func = btrfs_init_cachep, 2538 .exit_func = btrfs_destroy_cachep, 2539 }, { 2540 .init_func = btrfs_init_dio, 2541 .exit_func = btrfs_destroy_dio, 2542 }, { 2543 .init_func = btrfs_transaction_init, 2544 .exit_func = btrfs_transaction_exit, 2545 }, { 2546 .init_func = btrfs_ctree_init, 2547 .exit_func = btrfs_ctree_exit, 2548 }, { 2549 .init_func = btrfs_free_space_init, 2550 .exit_func = btrfs_free_space_exit, 2551 }, { 2552 .init_func = btrfs_extent_state_init_cachep, 2553 .exit_func = btrfs_extent_state_free_cachep, 2554 }, { 2555 .init_func = extent_buffer_init_cachep, 2556 .exit_func = extent_buffer_free_cachep, 2557 }, { 2558 .init_func = btrfs_bioset_init, 2559 .exit_func = btrfs_bioset_exit, 2560 }, { 2561 .init_func = btrfs_extent_map_init, 2562 .exit_func = btrfs_extent_map_exit, 2563 #ifdef CONFIG_BTRFS_EXPERIMENTAL 2564 }, { 2565 .init_func = btrfs_read_policy_init, 2566 .exit_func = NULL, 2567 #endif 2568 }, { 2569 .init_func = ordered_data_init, 2570 .exit_func = ordered_data_exit, 2571 }, { 2572 .init_func = btrfs_delayed_inode_init, 2573 .exit_func = btrfs_delayed_inode_exit, 2574 }, { 2575 .init_func = btrfs_auto_defrag_init, 2576 .exit_func = btrfs_auto_defrag_exit, 2577 }, { 2578 .init_func = btrfs_delayed_ref_init, 2579 .exit_func = btrfs_delayed_ref_exit, 2580 }, { 2581 .init_func = btrfs_prelim_ref_init, 2582 .exit_func = btrfs_prelim_ref_exit, 2583 }, { 2584 .init_func = btrfs_interface_init, 2585 .exit_func = btrfs_interface_exit, 2586 }, { 2587 .init_func = btrfs_print_mod_info, 2588 .exit_func = NULL, 2589 }, { 2590 .init_func = btrfs_run_sanity_tests, 2591 .exit_func = NULL, 2592 }, { 2593 .init_func = register_btrfs, 2594 .exit_func = unregister_btrfs, 2595 } 2596 }; 2597 2598 static bool mod_init_result[ARRAY_SIZE(mod_init_seq)]; 2599 2600 static __always_inline void btrfs_exit_btrfs_fs(void) 2601 { 2602 int i; 2603 2604 for (i = ARRAY_SIZE(mod_init_seq) - 1; i >= 0; i--) { 2605 if (!mod_init_result[i]) 2606 continue; 2607 if (mod_init_seq[i].exit_func) 2608 mod_init_seq[i].exit_func(); 2609 mod_init_result[i] = false; 2610 } 2611 } 2612 2613 static void __exit exit_btrfs_fs(void) 2614 { 2615 btrfs_exit_btrfs_fs(); 2616 btrfs_cleanup_fs_uuids(); 2617 } 2618 2619 static int __init init_btrfs_fs(void) 2620 { 2621 int ret; 2622 int i; 2623 2624 for (i = 0; i < ARRAY_SIZE(mod_init_seq); i++) { 2625 ASSERT(!mod_init_result[i]); 2626 ret = mod_init_seq[i].init_func(); 2627 if (ret < 0) { 2628 btrfs_exit_btrfs_fs(); 2629 return ret; 2630 } 2631 mod_init_result[i] = true; 2632 } 2633 return 0; 2634 } 2635 2636 late_initcall(init_btrfs_fs); 2637 module_exit(exit_btrfs_fs) 2638 2639 MODULE_DESCRIPTION("B-Tree File System (BTRFS)"); 2640 MODULE_LICENSE("GPL"); 2641 MODULE_SOFTDEP("pre: crc32c"); 2642 MODULE_SOFTDEP("pre: xxhash64"); 2643 MODULE_SOFTDEP("pre: sha256"); 2644 
MODULE_SOFTDEP("pre: blake2b-256"); 2645