1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/blkdev.h> 7 #include <linux/module.h> 8 #include <linux/fs.h> 9 #include <linux/pagemap.h> 10 #include <linux/highmem.h> 11 #include <linux/time.h> 12 #include <linux/init.h> 13 #include <linux/seq_file.h> 14 #include <linux/string.h> 15 #include <linux/backing-dev.h> 16 #include <linux/mount.h> 17 #include <linux/writeback.h> 18 #include <linux/statfs.h> 19 #include <linux/compat.h> 20 #include <linux/parser.h> 21 #include <linux/ctype.h> 22 #include <linux/namei.h> 23 #include <linux/miscdevice.h> 24 #include <linux/magic.h> 25 #include <linux/slab.h> 26 #include <linux/ratelimit.h> 27 #include <linux/crc32c.h> 28 #include <linux/btrfs.h> 29 #include <linux/security.h> 30 #include <linux/fs_parser.h> 31 #include <linux/swap.h> 32 #include "messages.h" 33 #include "delayed-inode.h" 34 #include "ctree.h" 35 #include "disk-io.h" 36 #include "transaction.h" 37 #include "btrfs_inode.h" 38 #include "direct-io.h" 39 #include "props.h" 40 #include "xattr.h" 41 #include "bio.h" 42 #include "export.h" 43 #include "compression.h" 44 #include "dev-replace.h" 45 #include "free-space-cache.h" 46 #include "backref.h" 47 #include "space-info.h" 48 #include "sysfs.h" 49 #include "zoned.h" 50 #include "tests/btrfs-tests.h" 51 #include "block-group.h" 52 #include "discard.h" 53 #include "qgroup.h" 54 #include "raid56.h" 55 #include "fs.h" 56 #include "accessors.h" 57 #include "defrag.h" 58 #include "dir-item.h" 59 #include "ioctl.h" 60 #include "scrub.h" 61 #include "verity.h" 62 #include "super.h" 63 #include "extent-tree.h" 64 #define CREATE_TRACE_POINTS 65 #include <trace/events/btrfs.h> 66 67 static const struct super_operations btrfs_super_ops; 68 static struct file_system_type btrfs_fs_type; 69 70 static void btrfs_put_super(struct super_block *sb) 71 { 72 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 73 74 btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid); 75 close_ctree(fs_info); 76 } 77 78 /* Store the mount options related information. 
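 * Filled in by btrfs_parse_param() while the mount options are parsed and
 * later applied to the btrfs_fs_info by btrfs_ctx_to_info().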
*/ 79 struct btrfs_fs_context { 80 char *subvol_name; 81 u64 subvol_objectid; 82 u64 max_inline; 83 u32 commit_interval; 84 u32 metadata_ratio; 85 u32 thread_pool_size; 86 unsigned long long mount_opt; 87 unsigned long compress_type:4; 88 unsigned int compress_level; 89 refcount_t refs; 90 }; 91 92 enum { 93 Opt_acl, 94 Opt_clear_cache, 95 Opt_commit_interval, 96 Opt_compress, 97 Opt_compress_force, 98 Opt_compress_force_type, 99 Opt_compress_type, 100 Opt_degraded, 101 Opt_device, 102 Opt_fatal_errors, 103 Opt_flushoncommit, 104 Opt_max_inline, 105 Opt_barrier, 106 Opt_datacow, 107 Opt_datasum, 108 Opt_defrag, 109 Opt_discard, 110 Opt_discard_mode, 111 Opt_ratio, 112 Opt_rescan_uuid_tree, 113 Opt_skip_balance, 114 Opt_space_cache, 115 Opt_space_cache_version, 116 Opt_ssd, 117 Opt_ssd_spread, 118 Opt_subvol, 119 Opt_subvol_empty, 120 Opt_subvolid, 121 Opt_thread_pool, 122 Opt_treelog, 123 Opt_user_subvol_rm_allowed, 124 Opt_norecovery, 125 126 /* Rescue options */ 127 Opt_rescue, 128 Opt_usebackuproot, 129 Opt_nologreplay, 130 131 /* Debugging options */ 132 Opt_enospc_debug, 133 #ifdef CONFIG_BTRFS_DEBUG 134 Opt_fragment, Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all, 135 #endif 136 #ifdef CONFIG_BTRFS_FS_REF_VERIFY 137 Opt_ref_verify, 138 #endif 139 Opt_err, 140 }; 141 142 enum { 143 Opt_fatal_errors_panic, 144 Opt_fatal_errors_bug, 145 }; 146 147 static const struct constant_table btrfs_parameter_fatal_errors[] = { 148 { "panic", Opt_fatal_errors_panic }, 149 { "bug", Opt_fatal_errors_bug }, 150 {} 151 }; 152 153 enum { 154 Opt_discard_sync, 155 Opt_discard_async, 156 }; 157 158 static const struct constant_table btrfs_parameter_discard[] = { 159 { "sync", Opt_discard_sync }, 160 { "async", Opt_discard_async }, 161 {} 162 }; 163 164 enum { 165 Opt_space_cache_v1, 166 Opt_space_cache_v2, 167 }; 168 169 static const struct constant_table btrfs_parameter_space_cache[] = { 170 { "v1", Opt_space_cache_v1 }, 171 { "v2", Opt_space_cache_v2 }, 172 {} 173 }; 174 175 enum { 176 Opt_rescue_usebackuproot, 177 Opt_rescue_nologreplay, 178 Opt_rescue_ignorebadroots, 179 Opt_rescue_ignoredatacsums, 180 Opt_rescue_ignoremetacsums, 181 Opt_rescue_ignoresuperflags, 182 Opt_rescue_parameter_all, 183 }; 184 185 static const struct constant_table btrfs_parameter_rescue[] = { 186 { "usebackuproot", Opt_rescue_usebackuproot }, 187 { "nologreplay", Opt_rescue_nologreplay }, 188 { "ignorebadroots", Opt_rescue_ignorebadroots }, 189 { "ibadroots", Opt_rescue_ignorebadroots }, 190 { "ignoredatacsums", Opt_rescue_ignoredatacsums }, 191 { "ignoremetacsums", Opt_rescue_ignoremetacsums}, 192 { "ignoresuperflags", Opt_rescue_ignoresuperflags}, 193 { "idatacsums", Opt_rescue_ignoredatacsums }, 194 { "imetacsums", Opt_rescue_ignoremetacsums}, 195 { "isuperflags", Opt_rescue_ignoresuperflags}, 196 { "all", Opt_rescue_parameter_all }, 197 {} 198 }; 199 200 #ifdef CONFIG_BTRFS_DEBUG 201 enum { 202 Opt_fragment_parameter_data, 203 Opt_fragment_parameter_metadata, 204 Opt_fragment_parameter_all, 205 }; 206 207 static const struct constant_table btrfs_parameter_fragment[] = { 208 { "data", Opt_fragment_parameter_data }, 209 { "metadata", Opt_fragment_parameter_metadata }, 210 { "all", Opt_fragment_parameter_all }, 211 {} 212 }; 213 #endif 214 215 static const struct fs_parameter_spec btrfs_fs_parameters[] = { 216 fsparam_flag_no("acl", Opt_acl), 217 fsparam_flag_no("autodefrag", Opt_defrag), 218 fsparam_flag_no("barrier", Opt_barrier), 219 fsparam_flag("clear_cache", Opt_clear_cache), 220 fsparam_u32("commit", 
Opt_commit_interval), 221 fsparam_flag("compress", Opt_compress), 222 fsparam_string("compress", Opt_compress_type), 223 fsparam_flag("compress-force", Opt_compress_force), 224 fsparam_string("compress-force", Opt_compress_force_type), 225 fsparam_flag_no("datacow", Opt_datacow), 226 fsparam_flag_no("datasum", Opt_datasum), 227 fsparam_flag("degraded", Opt_degraded), 228 fsparam_string("device", Opt_device), 229 fsparam_flag_no("discard", Opt_discard), 230 fsparam_enum("discard", Opt_discard_mode, btrfs_parameter_discard), 231 fsparam_enum("fatal_errors", Opt_fatal_errors, btrfs_parameter_fatal_errors), 232 fsparam_flag_no("flushoncommit", Opt_flushoncommit), 233 fsparam_string("max_inline", Opt_max_inline), 234 fsparam_u32("metadata_ratio", Opt_ratio), 235 fsparam_flag("rescan_uuid_tree", Opt_rescan_uuid_tree), 236 fsparam_flag("skip_balance", Opt_skip_balance), 237 fsparam_flag_no("space_cache", Opt_space_cache), 238 fsparam_enum("space_cache", Opt_space_cache_version, btrfs_parameter_space_cache), 239 fsparam_flag_no("ssd", Opt_ssd), 240 fsparam_flag_no("ssd_spread", Opt_ssd_spread), 241 fsparam_string("subvol", Opt_subvol), 242 fsparam_flag("subvol=", Opt_subvol_empty), 243 fsparam_u64("subvolid", Opt_subvolid), 244 fsparam_u32("thread_pool", Opt_thread_pool), 245 fsparam_flag_no("treelog", Opt_treelog), 246 fsparam_flag("user_subvol_rm_allowed", Opt_user_subvol_rm_allowed), 247 248 /* Rescue options. */ 249 fsparam_enum("rescue", Opt_rescue, btrfs_parameter_rescue), 250 /* Deprecated, with alias rescue=nologreplay */ 251 __fsparam(NULL, "nologreplay", Opt_nologreplay, fs_param_deprecated, NULL), 252 /* Deprecated, with alias rescue=usebackuproot */ 253 __fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL), 254 /* For compatibility only, alias for "rescue=nologreplay". */ 255 fsparam_flag("norecovery", Opt_norecovery), 256 257 /* Debugging options. */ 258 fsparam_flag_no("enospc_debug", Opt_enospc_debug), 259 #ifdef CONFIG_BTRFS_DEBUG 260 fsparam_enum("fragment", Opt_fragment, btrfs_parameter_fragment), 261 #endif 262 #ifdef CONFIG_BTRFS_FS_REF_VERIFY 263 fsparam_flag("ref_verify", Opt_ref_verify), 264 #endif 265 {} 266 }; 267 268 /* No support for restricting writes to btrfs devices yet... */ 269 static inline blk_mode_t btrfs_open_mode(struct fs_context *fc) 270 { 271 return sb_open_mode(fc->sb_flags) & ~BLK_OPEN_RESTRICT_WRITES; 272 } 273 274 static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param) 275 { 276 struct btrfs_fs_context *ctx = fc->fs_private; 277 struct fs_parse_result result; 278 int opt; 279 280 opt = fs_parse(fc, btrfs_fs_parameters, param, &result); 281 if (opt < 0) 282 return opt; 283 284 switch (opt) { 285 case Opt_degraded: 286 btrfs_set_opt(ctx->mount_opt, DEGRADED); 287 break; 288 case Opt_subvol_empty: 289 /* 290 * This exists because we used to allow it on accident, so we're 291 * keeping it to maintain ABI. See 37becec95ac3 ("Btrfs: allow 292 * empty subvol= again"). 293 */ 294 break; 295 case Opt_subvol: 296 kfree(ctx->subvol_name); 297 ctx->subvol_name = kstrdup(param->string, GFP_KERNEL); 298 if (!ctx->subvol_name) 299 return -ENOMEM; 300 break; 301 case Opt_subvolid: 302 ctx->subvol_objectid = result.uint_64; 303 304 /* subvolid=0 means give me the original fs_tree. 
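 * That is the top-level subvolume, BTRFS_FS_TREE_OBJECTID, which is what the
 * code below substitutes.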
*/ 305 if (!ctx->subvol_objectid) 306 ctx->subvol_objectid = BTRFS_FS_TREE_OBJECTID; 307 break; 308 case Opt_device: { 309 struct btrfs_device *device; 310 blk_mode_t mode = btrfs_open_mode(fc); 311 312 mutex_lock(&uuid_mutex); 313 device = btrfs_scan_one_device(param->string, mode, false); 314 mutex_unlock(&uuid_mutex); 315 if (IS_ERR(device)) 316 return PTR_ERR(device); 317 break; 318 } 319 case Opt_datasum: 320 if (result.negated) { 321 btrfs_set_opt(ctx->mount_opt, NODATASUM); 322 } else { 323 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 324 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 325 } 326 break; 327 case Opt_datacow: 328 if (result.negated) { 329 btrfs_clear_opt(ctx->mount_opt, COMPRESS); 330 btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); 331 btrfs_set_opt(ctx->mount_opt, NODATACOW); 332 btrfs_set_opt(ctx->mount_opt, NODATASUM); 333 } else { 334 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 335 } 336 break; 337 case Opt_compress_force: 338 case Opt_compress_force_type: 339 btrfs_set_opt(ctx->mount_opt, FORCE_COMPRESS); 340 fallthrough; 341 case Opt_compress: 342 case Opt_compress_type: 343 /* 344 * Provide the same semantics as older kernels that don't use fs 345 * context, specifying the "compress" option clears 346 * "force-compress" without the need to pass 347 * "compress-force=[no|none]" before specifying "compress". 348 */ 349 if (opt != Opt_compress_force && opt != Opt_compress_force_type) 350 btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); 351 352 if (opt == Opt_compress || opt == Opt_compress_force) { 353 ctx->compress_type = BTRFS_COMPRESS_ZLIB; 354 ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL; 355 btrfs_set_opt(ctx->mount_opt, COMPRESS); 356 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 357 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 358 } else if (strncmp(param->string, "zlib", 4) == 0) { 359 ctx->compress_type = BTRFS_COMPRESS_ZLIB; 360 ctx->compress_level = 361 btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, 362 param->string + 4); 363 btrfs_set_opt(ctx->mount_opt, COMPRESS); 364 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 365 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 366 } else if (strncmp(param->string, "lzo", 3) == 0) { 367 ctx->compress_type = BTRFS_COMPRESS_LZO; 368 ctx->compress_level = 0; 369 btrfs_set_opt(ctx->mount_opt, COMPRESS); 370 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 371 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 372 } else if (strncmp(param->string, "zstd", 4) == 0) { 373 ctx->compress_type = BTRFS_COMPRESS_ZSTD; 374 ctx->compress_level = 375 btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, 376 param->string + 4); 377 btrfs_set_opt(ctx->mount_opt, COMPRESS); 378 btrfs_clear_opt(ctx->mount_opt, NODATACOW); 379 btrfs_clear_opt(ctx->mount_opt, NODATASUM); 380 } else if (strncmp(param->string, "no", 2) == 0) { 381 ctx->compress_level = 0; 382 ctx->compress_type = 0; 383 btrfs_clear_opt(ctx->mount_opt, COMPRESS); 384 btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); 385 } else { 386 btrfs_err(NULL, "unrecognized compression value %s", 387 param->string); 388 return -EINVAL; 389 } 390 break; 391 case Opt_ssd: 392 if (result.negated) { 393 btrfs_set_opt(ctx->mount_opt, NOSSD); 394 btrfs_clear_opt(ctx->mount_opt, SSD); 395 btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD); 396 } else { 397 btrfs_set_opt(ctx->mount_opt, SSD); 398 btrfs_clear_opt(ctx->mount_opt, NOSSD); 399 } 400 break; 401 case Opt_ssd_spread: 402 if (result.negated) { 403 btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD); 404 } else { 405 btrfs_set_opt(ctx->mount_opt, SSD); 406 
btrfs_set_opt(ctx->mount_opt, SSD_SPREAD); 407 btrfs_clear_opt(ctx->mount_opt, NOSSD); 408 } 409 break; 410 case Opt_barrier: 411 if (result.negated) 412 btrfs_set_opt(ctx->mount_opt, NOBARRIER); 413 else 414 btrfs_clear_opt(ctx->mount_opt, NOBARRIER); 415 break; 416 case Opt_thread_pool: 417 if (result.uint_32 == 0) { 418 btrfs_err(NULL, "invalid value 0 for thread_pool"); 419 return -EINVAL; 420 } 421 ctx->thread_pool_size = result.uint_32; 422 break; 423 case Opt_max_inline: 424 ctx->max_inline = memparse(param->string, NULL); 425 break; 426 case Opt_acl: 427 if (result.negated) { 428 fc->sb_flags &= ~SB_POSIXACL; 429 } else { 430 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 431 fc->sb_flags |= SB_POSIXACL; 432 #else 433 btrfs_err(NULL, "support for ACL not compiled in"); 434 return -EINVAL; 435 #endif 436 } 437 /* 438 * VFS limits the ability to toggle ACL on and off via remount, 439 * despite every file system allowing this. This seems to be 440 * an oversight since we all do, but it'll fail if we're 441 * remounting. So don't set the mask here, we'll check it in 442 * btrfs_reconfigure and do the toggling ourselves. 443 */ 444 if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE) 445 fc->sb_flags_mask |= SB_POSIXACL; 446 break; 447 case Opt_treelog: 448 if (result.negated) 449 btrfs_set_opt(ctx->mount_opt, NOTREELOG); 450 else 451 btrfs_clear_opt(ctx->mount_opt, NOTREELOG); 452 break; 453 case Opt_nologreplay: 454 btrfs_warn(NULL, 455 "'nologreplay' is deprecated, use 'rescue=nologreplay' instead"); 456 btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); 457 break; 458 case Opt_norecovery: 459 btrfs_info(NULL, 460 "'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'"); 461 btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); 462 break; 463 case Opt_flushoncommit: 464 if (result.negated) 465 btrfs_clear_opt(ctx->mount_opt, FLUSHONCOMMIT); 466 else 467 btrfs_set_opt(ctx->mount_opt, FLUSHONCOMMIT); 468 break; 469 case Opt_ratio: 470 ctx->metadata_ratio = result.uint_32; 471 break; 472 case Opt_discard: 473 if (result.negated) { 474 btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC); 475 btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); 476 btrfs_set_opt(ctx->mount_opt, NODISCARD); 477 } else { 478 btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC); 479 btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); 480 } 481 break; 482 case Opt_discard_mode: 483 switch (result.uint_32) { 484 case Opt_discard_sync: 485 btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); 486 btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC); 487 break; 488 case Opt_discard_async: 489 btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC); 490 btrfs_set_opt(ctx->mount_opt, DISCARD_ASYNC); 491 break; 492 default: 493 btrfs_err(NULL, "unrecognized discard mode value %s", 494 param->key); 495 return -EINVAL; 496 } 497 btrfs_clear_opt(ctx->mount_opt, NODISCARD); 498 break; 499 case Opt_space_cache: 500 if (result.negated) { 501 btrfs_set_opt(ctx->mount_opt, NOSPACECACHE); 502 btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE); 503 btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); 504 } else { 505 btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); 506 btrfs_set_opt(ctx->mount_opt, SPACE_CACHE); 507 } 508 break; 509 case Opt_space_cache_version: 510 switch (result.uint_32) { 511 case Opt_space_cache_v1: 512 btrfs_set_opt(ctx->mount_opt, SPACE_CACHE); 513 btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); 514 break; 515 case Opt_space_cache_v2: 516 btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE); 517 btrfs_set_opt(ctx->mount_opt, FREE_SPACE_TREE); 518 break; 519 default: 520 
btrfs_err(NULL, "unrecognized space_cache value %s", 521 param->key); 522 return -EINVAL; 523 } 524 break; 525 case Opt_rescan_uuid_tree: 526 btrfs_set_opt(ctx->mount_opt, RESCAN_UUID_TREE); 527 break; 528 case Opt_clear_cache: 529 btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE); 530 break; 531 case Opt_user_subvol_rm_allowed: 532 btrfs_set_opt(ctx->mount_opt, USER_SUBVOL_RM_ALLOWED); 533 break; 534 case Opt_enospc_debug: 535 if (result.negated) 536 btrfs_clear_opt(ctx->mount_opt, ENOSPC_DEBUG); 537 else 538 btrfs_set_opt(ctx->mount_opt, ENOSPC_DEBUG); 539 break; 540 case Opt_defrag: 541 if (result.negated) 542 btrfs_clear_opt(ctx->mount_opt, AUTO_DEFRAG); 543 else 544 btrfs_set_opt(ctx->mount_opt, AUTO_DEFRAG); 545 break; 546 case Opt_usebackuproot: 547 btrfs_warn(NULL, 548 "'usebackuproot' is deprecated, use 'rescue=usebackuproot' instead"); 549 btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT); 550 551 /* If we're loading the backup roots we can't trust the space cache. */ 552 btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE); 553 break; 554 case Opt_skip_balance: 555 btrfs_set_opt(ctx->mount_opt, SKIP_BALANCE); 556 break; 557 case Opt_fatal_errors: 558 switch (result.uint_32) { 559 case Opt_fatal_errors_panic: 560 btrfs_set_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR); 561 break; 562 case Opt_fatal_errors_bug: 563 btrfs_clear_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR); 564 break; 565 default: 566 btrfs_err(NULL, "unrecognized fatal_errors value %s", 567 param->key); 568 return -EINVAL; 569 } 570 break; 571 case Opt_commit_interval: 572 ctx->commit_interval = result.uint_32; 573 if (ctx->commit_interval == 0) 574 ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 575 break; 576 case Opt_rescue: 577 switch (result.uint_32) { 578 case Opt_rescue_usebackuproot: 579 btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT); 580 break; 581 case Opt_rescue_nologreplay: 582 btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); 583 break; 584 case Opt_rescue_ignorebadroots: 585 btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS); 586 break; 587 case Opt_rescue_ignoredatacsums: 588 btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS); 589 break; 590 case Opt_rescue_ignoremetacsums: 591 btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS); 592 break; 593 case Opt_rescue_ignoresuperflags: 594 btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS); 595 break; 596 case Opt_rescue_parameter_all: 597 btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS); 598 btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS); 599 btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS); 600 btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS); 601 btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); 602 break; 603 default: 604 btrfs_info(NULL, "unrecognized rescue option '%s'", 605 param->key); 606 return -EINVAL; 607 } 608 break; 609 #ifdef CONFIG_BTRFS_DEBUG 610 case Opt_fragment: 611 switch (result.uint_32) { 612 case Opt_fragment_parameter_all: 613 btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA); 614 btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA); 615 break; 616 case Opt_fragment_parameter_metadata: 617 btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA); 618 break; 619 case Opt_fragment_parameter_data: 620 btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA); 621 break; 622 default: 623 btrfs_info(NULL, "unrecognized fragment option '%s'", 624 param->key); 625 return -EINVAL; 626 } 627 break; 628 #endif 629 #ifdef CONFIG_BTRFS_FS_REF_VERIFY 630 case Opt_ref_verify: 631 btrfs_set_opt(ctx->mount_opt, REF_VERIFY); 632 break; 633 #endif 634 default: 635 btrfs_err(NULL, "unrecognized mount option '%s'", param->key); 636 return 
-EINVAL; 637 } 638 639 return 0; 640 } 641 642 /* 643 * Some options only have meaning at mount time and shouldn't persist across 644 * remounts, or be displayed. Clear these at the end of mount and remount code 645 * paths. 646 */ 647 static void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info) 648 { 649 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); 650 btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE); 651 btrfs_clear_opt(fs_info->mount_opt, NOSPACECACHE); 652 } 653 654 static bool check_ro_option(const struct btrfs_fs_info *fs_info, 655 unsigned long long mount_opt, unsigned long long opt, 656 const char *opt_name) 657 { 658 if (mount_opt & opt) { 659 btrfs_err(fs_info, "%s must be used with ro mount option", 660 opt_name); 661 return true; 662 } 663 return false; 664 } 665 666 bool btrfs_check_options(const struct btrfs_fs_info *info, 667 unsigned long long *mount_opt, 668 unsigned long flags) 669 { 670 bool ret = true; 671 672 if (!(flags & SB_RDONLY) && 673 (check_ro_option(info, *mount_opt, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") || 674 check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") || 675 check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums") || 676 check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREMETACSUMS, "ignoremetacsums") || 677 check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNORESUPERFLAGS, "ignoresuperflags"))) 678 ret = false; 679 680 if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) && 681 !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE) && 682 !btrfs_raw_test_opt(*mount_opt, CLEAR_CACHE)) { 683 btrfs_err(info, "cannot disable free-space-tree"); 684 ret = false; 685 } 686 if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) && 687 !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) { 688 btrfs_err(info, "cannot disable free-space-tree with block-group-tree feature"); 689 ret = false; 690 } 691 692 if (btrfs_check_mountopts_zoned(info, mount_opt)) 693 ret = false; 694 695 if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) { 696 if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) { 697 btrfs_info(info, "disk space caching is enabled"); 698 btrfs_warn(info, 699 "space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2"); 700 } 701 if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) 702 btrfs_info(info, "using free-space-tree"); 703 } 704 705 return ret; 706 } 707 708 /* 709 * This is subtle, we only call this during open_ctree(). We need to pre-load 710 * the mount options with the on-disk settings. Before the new mount API took 711 * effect we would do this on mount and remount. With the new mount API we'll 712 * only do this on the initial mount. 713 * 714 * This isn't a change in behavior, because we're using the current state of the 715 * file system to set the current mount options. If you mounted with special 716 * options to disable these features and then remounted we wouldn't revert the 717 * settings, because mounting without these features cleared the on-disk 718 * settings, so this being called on re-mount is not needed. 
719 */ 720 void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info) 721 { 722 if (fs_info->sectorsize < PAGE_SIZE) { 723 btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE); 724 if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) { 725 btrfs_info(fs_info, 726 "forcing free space tree for sector size %u with page size %lu", 727 fs_info->sectorsize, PAGE_SIZE); 728 btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); 729 } 730 } 731 732 /* 733 * At this point our mount options are populated, so we only mess with 734 * these settings if we don't have any settings already. 735 */ 736 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) 737 return; 738 739 if (btrfs_is_zoned(fs_info) && 740 btrfs_free_space_cache_v1_active(fs_info)) { 741 btrfs_info(fs_info, "zoned: clearing existing space cache"); 742 btrfs_set_super_cache_generation(fs_info->super_copy, 0); 743 return; 744 } 745 746 if (btrfs_test_opt(fs_info, SPACE_CACHE)) 747 return; 748 749 if (btrfs_test_opt(fs_info, NOSPACECACHE)) 750 return; 751 752 /* 753 * At this point we don't have explicit options set by the user, set 754 * them ourselves based on the state of the file system. 755 */ 756 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 757 btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); 758 else if (btrfs_free_space_cache_v1_active(fs_info)) 759 btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE); 760 } 761 762 static void set_device_specific_options(struct btrfs_fs_info *fs_info) 763 { 764 if (!btrfs_test_opt(fs_info, NOSSD) && 765 !fs_info->fs_devices->rotating) 766 btrfs_set_opt(fs_info->mount_opt, SSD); 767 768 /* 769 * For devices supporting discard turn on discard=async automatically, 770 * unless it's already set or disabled. This could be turned off by 771 * nodiscard for the same mount. 772 * 773 * The zoned mode piggy backs on the discard functionality for 774 * resetting a zone. There is no reason to delay the zone reset as it is 775 * fast enough. So, do not enable async discard for zoned mode. 776 */ 777 if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) || 778 btrfs_test_opt(fs_info, DISCARD_ASYNC) || 779 btrfs_test_opt(fs_info, NODISCARD)) && 780 fs_info->fs_devices->discardable && 781 !btrfs_is_zoned(fs_info)) 782 btrfs_set_opt(fs_info->mount_opt, DISCARD_ASYNC); 783 } 784 785 char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, 786 u64 subvol_objectid) 787 { 788 struct btrfs_root *root = fs_info->tree_root; 789 struct btrfs_root *fs_root = NULL; 790 struct btrfs_root_ref *root_ref; 791 struct btrfs_inode_ref *inode_ref; 792 struct btrfs_key key; 793 struct btrfs_path *path = NULL; 794 char *name = NULL, *ptr; 795 u64 dirid; 796 int len; 797 int ret; 798 799 path = btrfs_alloc_path(); 800 if (!path) { 801 ret = -ENOMEM; 802 goto err; 803 } 804 805 name = kmalloc(PATH_MAX, GFP_KERNEL); 806 if (!name) { 807 ret = -ENOMEM; 808 goto err; 809 } 810 ptr = name + PATH_MAX - 1; 811 ptr[0] = '\0'; 812 813 /* 814 * Walk up the subvolume trees in the tree of tree roots by root 815 * backrefs until we hit the top-level subvolume. 
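 * The path is assembled right to left at the end of the name buffer: each
 * root backref contributes the subvolume's own name, and the inner loop below
 * prepends the names of the directories containing it inside the parent
 * subvolume.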
816 */ 817 while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) { 818 key.objectid = subvol_objectid; 819 key.type = BTRFS_ROOT_BACKREF_KEY; 820 key.offset = (u64)-1; 821 822 ret = btrfs_search_backwards(root, &key, path); 823 if (ret < 0) { 824 goto err; 825 } else if (ret > 0) { 826 ret = -ENOENT; 827 goto err; 828 } 829 830 subvol_objectid = key.offset; 831 832 root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], 833 struct btrfs_root_ref); 834 len = btrfs_root_ref_name_len(path->nodes[0], root_ref); 835 ptr -= len + 1; 836 if (ptr < name) { 837 ret = -ENAMETOOLONG; 838 goto err; 839 } 840 read_extent_buffer(path->nodes[0], ptr + 1, 841 (unsigned long)(root_ref + 1), len); 842 ptr[0] = '/'; 843 dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref); 844 btrfs_release_path(path); 845 846 fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true); 847 if (IS_ERR(fs_root)) { 848 ret = PTR_ERR(fs_root); 849 fs_root = NULL; 850 goto err; 851 } 852 853 /* 854 * Walk up the filesystem tree by inode refs until we hit the 855 * root directory. 856 */ 857 while (dirid != BTRFS_FIRST_FREE_OBJECTID) { 858 key.objectid = dirid; 859 key.type = BTRFS_INODE_REF_KEY; 860 key.offset = (u64)-1; 861 862 ret = btrfs_search_backwards(fs_root, &key, path); 863 if (ret < 0) { 864 goto err; 865 } else if (ret > 0) { 866 ret = -ENOENT; 867 goto err; 868 } 869 870 dirid = key.offset; 871 872 inode_ref = btrfs_item_ptr(path->nodes[0], 873 path->slots[0], 874 struct btrfs_inode_ref); 875 len = btrfs_inode_ref_name_len(path->nodes[0], 876 inode_ref); 877 ptr -= len + 1; 878 if (ptr < name) { 879 ret = -ENAMETOOLONG; 880 goto err; 881 } 882 read_extent_buffer(path->nodes[0], ptr + 1, 883 (unsigned long)(inode_ref + 1), len); 884 ptr[0] = '/'; 885 btrfs_release_path(path); 886 } 887 btrfs_put_root(fs_root); 888 fs_root = NULL; 889 } 890 891 btrfs_free_path(path); 892 if (ptr == name + PATH_MAX - 1) { 893 name[0] = '/'; 894 name[1] = '\0'; 895 } else { 896 memmove(name, ptr, name + PATH_MAX - ptr); 897 } 898 return name; 899 900 err: 901 btrfs_put_root(fs_root); 902 btrfs_free_path(path); 903 kfree(name); 904 return ERR_PTR(ret); 905 } 906 907 static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid) 908 { 909 struct btrfs_root *root = fs_info->tree_root; 910 struct btrfs_dir_item *di; 911 struct btrfs_path *path; 912 struct btrfs_key location; 913 struct fscrypt_str name = FSTR_INIT("default", 7); 914 u64 dir_id; 915 916 path = btrfs_alloc_path(); 917 if (!path) 918 return -ENOMEM; 919 920 /* 921 * Find the "default" dir item which points to the root item that we 922 * will mount by default if we haven't been given a specific subvolume 923 * to mount. 924 */ 925 dir_id = btrfs_super_root_dir(fs_info->super_copy); 926 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0); 927 if (IS_ERR(di)) { 928 btrfs_free_path(path); 929 return PTR_ERR(di); 930 } 931 if (!di) { 932 /* 933 * Ok the default dir item isn't there. This is weird since 934 * it's always been there, but don't freak out, just try and 935 * mount the top-level subvolume. 
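 * In that case fall back to BTRFS_FS_TREE_OBJECTID, the same default used
 * for subvolid=0.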
936 */ 937 btrfs_free_path(path); 938 *objectid = BTRFS_FS_TREE_OBJECTID; 939 return 0; 940 } 941 942 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 943 btrfs_free_path(path); 944 *objectid = location.objectid; 945 return 0; 946 } 947 948 static int btrfs_fill_super(struct super_block *sb, 949 struct btrfs_fs_devices *fs_devices, 950 void *data) 951 { 952 struct inode *inode; 953 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 954 int err; 955 956 sb->s_maxbytes = MAX_LFS_FILESIZE; 957 sb->s_magic = BTRFS_SUPER_MAGIC; 958 sb->s_op = &btrfs_super_ops; 959 sb->s_d_op = &btrfs_dentry_operations; 960 sb->s_export_op = &btrfs_export_ops; 961 #ifdef CONFIG_FS_VERITY 962 sb->s_vop = &btrfs_verityops; 963 #endif 964 sb->s_xattr = btrfs_xattr_handlers; 965 sb->s_time_gran = 1; 966 sb->s_iflags |= SB_I_CGROUPWB; 967 968 err = super_setup_bdi(sb); 969 if (err) { 970 btrfs_err(fs_info, "super_setup_bdi failed"); 971 return err; 972 } 973 974 err = open_ctree(sb, fs_devices, (char *)data); 975 if (err) { 976 btrfs_err(fs_info, "open_ctree failed"); 977 return err; 978 } 979 980 inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root); 981 if (IS_ERR(inode)) { 982 err = PTR_ERR(inode); 983 btrfs_handle_fs_error(fs_info, err, NULL); 984 goto fail_close; 985 } 986 987 sb->s_root = d_make_root(inode); 988 if (!sb->s_root) { 989 err = -ENOMEM; 990 goto fail_close; 991 } 992 993 sb->s_flags |= SB_ACTIVE; 994 return 0; 995 996 fail_close: 997 close_ctree(fs_info); 998 return err; 999 } 1000 1001 int btrfs_sync_fs(struct super_block *sb, int wait) 1002 { 1003 struct btrfs_trans_handle *trans; 1004 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1005 struct btrfs_root *root = fs_info->tree_root; 1006 1007 trace_btrfs_sync_fs(fs_info, wait); 1008 1009 if (!wait) { 1010 filemap_flush(fs_info->btree_inode->i_mapping); 1011 return 0; 1012 } 1013 1014 btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL); 1015 1016 trans = btrfs_attach_transaction_barrier(root); 1017 if (IS_ERR(trans)) { 1018 /* no transaction, don't bother */ 1019 if (PTR_ERR(trans) == -ENOENT) { 1020 /* 1021 * Exit unless we have some pending changes 1022 * that need to go through commit 1023 */ 1024 if (!test_bit(BTRFS_FS_NEED_TRANS_COMMIT, 1025 &fs_info->flags)) 1026 return 0; 1027 /* 1028 * A non-blocking test if the fs is frozen. We must not 1029 * start a new transaction here otherwise a deadlock 1030 * happens. The pending operations are delayed to the 1031 * next commit after thawing. 1032 */ 1033 if (sb_start_write_trylock(sb)) 1034 sb_end_write(sb); 1035 else 1036 return 0; 1037 trans = btrfs_start_transaction(root, 0); 1038 } 1039 if (IS_ERR(trans)) 1040 return PTR_ERR(trans); 1041 } 1042 return btrfs_commit_transaction(trans); 1043 } 1044 1045 static void print_rescue_option(struct seq_file *seq, const char *s, bool *printed) 1046 { 1047 seq_printf(seq, "%s%s", (*printed) ? 
":" : ",rescue=", s); 1048 *printed = true; 1049 } 1050 1051 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) 1052 { 1053 struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); 1054 const char *compress_type; 1055 const char *subvol_name; 1056 bool printed = false; 1057 1058 if (btrfs_test_opt(info, DEGRADED)) 1059 seq_puts(seq, ",degraded"); 1060 if (btrfs_test_opt(info, NODATASUM)) 1061 seq_puts(seq, ",nodatasum"); 1062 if (btrfs_test_opt(info, NODATACOW)) 1063 seq_puts(seq, ",nodatacow"); 1064 if (btrfs_test_opt(info, NOBARRIER)) 1065 seq_puts(seq, ",nobarrier"); 1066 if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE) 1067 seq_printf(seq, ",max_inline=%llu", info->max_inline); 1068 if (info->thread_pool_size != min_t(unsigned long, 1069 num_online_cpus() + 2, 8)) 1070 seq_printf(seq, ",thread_pool=%u", info->thread_pool_size); 1071 if (btrfs_test_opt(info, COMPRESS)) { 1072 compress_type = btrfs_compress_type2str(info->compress_type); 1073 if (btrfs_test_opt(info, FORCE_COMPRESS)) 1074 seq_printf(seq, ",compress-force=%s", compress_type); 1075 else 1076 seq_printf(seq, ",compress=%s", compress_type); 1077 if (info->compress_level) 1078 seq_printf(seq, ":%d", info->compress_level); 1079 } 1080 if (btrfs_test_opt(info, NOSSD)) 1081 seq_puts(seq, ",nossd"); 1082 if (btrfs_test_opt(info, SSD_SPREAD)) 1083 seq_puts(seq, ",ssd_spread"); 1084 else if (btrfs_test_opt(info, SSD)) 1085 seq_puts(seq, ",ssd"); 1086 if (btrfs_test_opt(info, NOTREELOG)) 1087 seq_puts(seq, ",notreelog"); 1088 if (btrfs_test_opt(info, NOLOGREPLAY)) 1089 print_rescue_option(seq, "nologreplay", &printed); 1090 if (btrfs_test_opt(info, USEBACKUPROOT)) 1091 print_rescue_option(seq, "usebackuproot", &printed); 1092 if (btrfs_test_opt(info, IGNOREBADROOTS)) 1093 print_rescue_option(seq, "ignorebadroots", &printed); 1094 if (btrfs_test_opt(info, IGNOREDATACSUMS)) 1095 print_rescue_option(seq, "ignoredatacsums", &printed); 1096 if (btrfs_test_opt(info, IGNOREMETACSUMS)) 1097 print_rescue_option(seq, "ignoremetacsums", &printed); 1098 if (btrfs_test_opt(info, IGNORESUPERFLAGS)) 1099 print_rescue_option(seq, "ignoresuperflags", &printed); 1100 if (btrfs_test_opt(info, FLUSHONCOMMIT)) 1101 seq_puts(seq, ",flushoncommit"); 1102 if (btrfs_test_opt(info, DISCARD_SYNC)) 1103 seq_puts(seq, ",discard"); 1104 if (btrfs_test_opt(info, DISCARD_ASYNC)) 1105 seq_puts(seq, ",discard=async"); 1106 if (!(info->sb->s_flags & SB_POSIXACL)) 1107 seq_puts(seq, ",noacl"); 1108 if (btrfs_free_space_cache_v1_active(info)) 1109 seq_puts(seq, ",space_cache"); 1110 else if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE)) 1111 seq_puts(seq, ",space_cache=v2"); 1112 else 1113 seq_puts(seq, ",nospace_cache"); 1114 if (btrfs_test_opt(info, RESCAN_UUID_TREE)) 1115 seq_puts(seq, ",rescan_uuid_tree"); 1116 if (btrfs_test_opt(info, CLEAR_CACHE)) 1117 seq_puts(seq, ",clear_cache"); 1118 if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED)) 1119 seq_puts(seq, ",user_subvol_rm_allowed"); 1120 if (btrfs_test_opt(info, ENOSPC_DEBUG)) 1121 seq_puts(seq, ",enospc_debug"); 1122 if (btrfs_test_opt(info, AUTO_DEFRAG)) 1123 seq_puts(seq, ",autodefrag"); 1124 if (btrfs_test_opt(info, SKIP_BALANCE)) 1125 seq_puts(seq, ",skip_balance"); 1126 if (info->metadata_ratio) 1127 seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio); 1128 if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR)) 1129 seq_puts(seq, ",fatal_errors=panic"); 1130 if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL) 1131 seq_printf(seq, ",commit=%u", info->commit_interval); 1132 #ifdef 
CONFIG_BTRFS_DEBUG 1133 if (btrfs_test_opt(info, FRAGMENT_DATA)) 1134 seq_puts(seq, ",fragment=data"); 1135 if (btrfs_test_opt(info, FRAGMENT_METADATA)) 1136 seq_puts(seq, ",fragment=metadata"); 1137 #endif 1138 if (btrfs_test_opt(info, REF_VERIFY)) 1139 seq_puts(seq, ",ref_verify"); 1140 seq_printf(seq, ",subvolid=%llu", btrfs_root_id(BTRFS_I(d_inode(dentry))->root)); 1141 subvol_name = btrfs_get_subvol_name_from_objectid(info, 1142 btrfs_root_id(BTRFS_I(d_inode(dentry))->root)); 1143 if (!IS_ERR(subvol_name)) { 1144 seq_puts(seq, ",subvol="); 1145 seq_escape(seq, subvol_name, " \t\n\\"); 1146 kfree(subvol_name); 1147 } 1148 return 0; 1149 } 1150 1151 /* 1152 * subvolumes are identified by ino 256 1153 */ 1154 static inline int is_subvolume_inode(struct inode *inode) 1155 { 1156 if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 1157 return 1; 1158 return 0; 1159 } 1160 1161 static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, 1162 struct vfsmount *mnt) 1163 { 1164 struct dentry *root; 1165 int ret; 1166 1167 if (!subvol_name) { 1168 if (!subvol_objectid) { 1169 ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb), 1170 &subvol_objectid); 1171 if (ret) { 1172 root = ERR_PTR(ret); 1173 goto out; 1174 } 1175 } 1176 subvol_name = btrfs_get_subvol_name_from_objectid( 1177 btrfs_sb(mnt->mnt_sb), subvol_objectid); 1178 if (IS_ERR(subvol_name)) { 1179 root = ERR_CAST(subvol_name); 1180 subvol_name = NULL; 1181 goto out; 1182 } 1183 1184 } 1185 1186 root = mount_subtree(mnt, subvol_name); 1187 /* mount_subtree() drops our reference on the vfsmount. */ 1188 mnt = NULL; 1189 1190 if (!IS_ERR(root)) { 1191 struct super_block *s = root->d_sb; 1192 struct btrfs_fs_info *fs_info = btrfs_sb(s); 1193 struct inode *root_inode = d_inode(root); 1194 u64 root_objectid = btrfs_root_id(BTRFS_I(root_inode)->root); 1195 1196 ret = 0; 1197 if (!is_subvolume_inode(root_inode)) { 1198 btrfs_err(fs_info, "'%s' is not a valid subvolume", 1199 subvol_name); 1200 ret = -EINVAL; 1201 } 1202 if (subvol_objectid && root_objectid != subvol_objectid) { 1203 /* 1204 * This will also catch a race condition where a 1205 * subvolume which was passed by ID is renamed and 1206 * another subvolume is renamed over the old location. 
			 */
			btrfs_err(fs_info,
				  "subvol '%s' does not match subvolid %llu",
				  subvol_name, subvol_objectid);
			ret = -EINVAL;
		}
		if (ret) {
			dput(root);
			root = ERR_PTR(ret);
			deactivate_locked_super(s);
		}
	}

out:
	mntput(mnt);
	kfree(subvol_name);
	return root;
}

static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
				     u32 new_pool_size, u32 old_pool_size)
{
	if (new_pool_size == old_pool_size)
		return;

	fs_info->thread_pool_size = new_pool_size;

	btrfs_info(fs_info, "resize thread pool %d -> %d",
		   old_pool_size, new_pool_size);

	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
	workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
	workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
}

static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
				       unsigned long long old_opts, int flags)
{
	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
	     (flags & SB_RDONLY))) {
		/* wait for any defraggers to finish */
		wait_event(fs_info->transaction_wait,
			   (atomic_read(&fs_info->defrag_running) == 0));
		if (flags & SB_RDONLY)
			sync_filesystem(fs_info->sb);
	}
}

static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
					 unsigned long long old_opts)
{
	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);

	/*
	 * We need to clean up all defraggable inodes if autodefrag has been
	 * disabled or the filesystem is now read-only.
1269 */ 1270 if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) && 1271 (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) { 1272 btrfs_cleanup_defrag_inodes(fs_info); 1273 } 1274 1275 /* If we toggled discard async */ 1276 if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && 1277 btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1278 btrfs_discard_resume(fs_info); 1279 else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && 1280 !btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1281 btrfs_discard_cleanup(fs_info); 1282 1283 /* If we toggled space cache */ 1284 if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) 1285 btrfs_set_free_space_cache_v1_active(fs_info, cache_opt); 1286 } 1287 1288 static int btrfs_remount_rw(struct btrfs_fs_info *fs_info) 1289 { 1290 int ret; 1291 1292 if (BTRFS_FS_ERROR(fs_info)) { 1293 btrfs_err(fs_info, 1294 "remounting read-write after error is not allowed"); 1295 return -EINVAL; 1296 } 1297 1298 if (fs_info->fs_devices->rw_devices == 0) 1299 return -EACCES; 1300 1301 if (!btrfs_check_rw_degradable(fs_info, NULL)) { 1302 btrfs_warn(fs_info, 1303 "too many missing devices, writable remount is not allowed"); 1304 return -EACCES; 1305 } 1306 1307 if (btrfs_super_log_root(fs_info->super_copy) != 0) { 1308 btrfs_warn(fs_info, 1309 "mount required to replay tree-log, cannot remount read-write"); 1310 return -EINVAL; 1311 } 1312 1313 /* 1314 * NOTE: when remounting with a change that does writes, don't put it 1315 * anywhere above this point, as we are not sure to be safe to write 1316 * until we pass the above checks. 1317 */ 1318 ret = btrfs_start_pre_rw_mount(fs_info); 1319 if (ret) 1320 return ret; 1321 1322 btrfs_clear_sb_rdonly(fs_info->sb); 1323 1324 set_bit(BTRFS_FS_OPEN, &fs_info->flags); 1325 1326 /* 1327 * If we've gone from readonly -> read-write, we need to get our 1328 * sync/async discard lists in the right state. 1329 */ 1330 btrfs_discard_resume(fs_info); 1331 1332 return 0; 1333 } 1334 1335 static int btrfs_remount_ro(struct btrfs_fs_info *fs_info) 1336 { 1337 /* 1338 * This also happens on 'umount -rf' or on shutdown, when the 1339 * filesystem is busy. 1340 */ 1341 cancel_work_sync(&fs_info->async_reclaim_work); 1342 cancel_work_sync(&fs_info->async_data_reclaim_work); 1343 1344 btrfs_discard_cleanup(fs_info); 1345 1346 /* Wait for the uuid_scan task to finish */ 1347 down(&fs_info->uuid_tree_rescan_sem); 1348 /* Avoid complains from lockdep et al. */ 1349 up(&fs_info->uuid_tree_rescan_sem); 1350 1351 btrfs_set_sb_rdonly(fs_info->sb); 1352 1353 /* 1354 * Setting SB_RDONLY will put the cleaner thread to sleep at the next 1355 * loop if it's already active. If it's already asleep, we'll leave 1356 * unused block groups on disk until we're mounted read-write again 1357 * unless we clean them up here. 1358 */ 1359 btrfs_delete_unused_bgs(fs_info); 1360 1361 /* 1362 * The cleaner task could be already running before we set the flag 1363 * BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock). We must make 1364 * sure that after we finish the remount, i.e. after we call 1365 * btrfs_commit_super(), the cleaner can no longer start a transaction 1366 * - either because it was dropping a dead root, running delayed iputs 1367 * or deleting an unused block group (the cleaner picked a block 1368 * group from the list of unused block groups before we were able to 1369 * in the previous call to btrfs_delete_unused_bgs()). 
1370 */ 1371 wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING, TASK_UNINTERRUPTIBLE); 1372 1373 /* 1374 * We've set the superblock to RO mode, so we might have made the 1375 * cleaner task sleep without running all pending delayed iputs. Go 1376 * through all the delayed iputs here, so that if an unmount happens 1377 * without remounting RW we don't end up at finishing close_ctree() 1378 * with a non-empty list of delayed iputs. 1379 */ 1380 btrfs_run_delayed_iputs(fs_info); 1381 1382 btrfs_dev_replace_suspend_for_unmount(fs_info); 1383 btrfs_scrub_cancel(fs_info); 1384 btrfs_pause_balance(fs_info); 1385 1386 /* 1387 * Pause the qgroup rescan worker if it is running. We don't want it to 1388 * be still running after we are in RO mode, as after that, by the time 1389 * we unmount, it might have left a transaction open, so we would leak 1390 * the transaction and/or crash. 1391 */ 1392 btrfs_qgroup_wait_for_completion(fs_info, false); 1393 1394 return btrfs_commit_super(fs_info); 1395 } 1396 1397 static void btrfs_ctx_to_info(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx) 1398 { 1399 fs_info->max_inline = ctx->max_inline; 1400 fs_info->commit_interval = ctx->commit_interval; 1401 fs_info->metadata_ratio = ctx->metadata_ratio; 1402 fs_info->thread_pool_size = ctx->thread_pool_size; 1403 fs_info->mount_opt = ctx->mount_opt; 1404 fs_info->compress_type = ctx->compress_type; 1405 fs_info->compress_level = ctx->compress_level; 1406 } 1407 1408 static void btrfs_info_to_ctx(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx) 1409 { 1410 ctx->max_inline = fs_info->max_inline; 1411 ctx->commit_interval = fs_info->commit_interval; 1412 ctx->metadata_ratio = fs_info->metadata_ratio; 1413 ctx->thread_pool_size = fs_info->thread_pool_size; 1414 ctx->mount_opt = fs_info->mount_opt; 1415 ctx->compress_type = fs_info->compress_type; 1416 ctx->compress_level = fs_info->compress_level; 1417 } 1418 1419 #define btrfs_info_if_set(fs_info, old_ctx, opt, fmt, args...) \ 1420 do { \ 1421 if ((!old_ctx || !btrfs_raw_test_opt(old_ctx->mount_opt, opt)) && \ 1422 btrfs_raw_test_opt(fs_info->mount_opt, opt)) \ 1423 btrfs_info(fs_info, fmt, ##args); \ 1424 } while (0) 1425 1426 #define btrfs_info_if_unset(fs_info, old_ctx, opt, fmt, args...) 
									\
	do {								\
		if ((old_ctx && btrfs_raw_test_opt(old_ctx->mount_opt, opt)) && \
		    !btrfs_raw_test_opt(fs_info->mount_opt, opt))	\
			btrfs_info(fs_info, fmt, ##args);		\
	} while (0)

static void btrfs_emit_options(struct btrfs_fs_info *info,
			       struct btrfs_fs_context *old)
{
	btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
	btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts");
	btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow");
	btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations");
	btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme");
	btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers");
	btrfs_info_if_set(info, old, NOTREELOG, "disabling tree log");
	btrfs_info_if_set(info, old, NOLOGREPLAY, "disabling log replay at mount time");
	btrfs_info_if_set(info, old, FLUSHONCOMMIT, "turning on flush-on-commit");
	btrfs_info_if_set(info, old, DISCARD_SYNC, "turning on sync discard");
	btrfs_info_if_set(info, old, DISCARD_ASYNC, "turning on async discard");
	btrfs_info_if_set(info, old, FREE_SPACE_TREE, "enabling free space tree");
	btrfs_info_if_set(info, old, SPACE_CACHE, "enabling disk space caching");
	btrfs_info_if_set(info, old, CLEAR_CACHE, "force clearing of disk cache");
	btrfs_info_if_set(info, old, AUTO_DEFRAG, "enabling auto defrag");
	btrfs_info_if_set(info, old, FRAGMENT_DATA, "fragmenting data");
	btrfs_info_if_set(info, old, FRAGMENT_METADATA, "fragmenting metadata");
	btrfs_info_if_set(info, old, REF_VERIFY, "doing ref verification");
	btrfs_info_if_set(info, old, USEBACKUPROOT, "trying to use backup root at mount time");
	btrfs_info_if_set(info, old, IGNOREBADROOTS, "ignoring bad roots");
	btrfs_info_if_set(info, old, IGNOREDATACSUMS, "ignoring data csums");
	btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums");
	btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags");

	btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");
	btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");
	btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme");
	btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers");
	btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log");
	btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching");
	btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
	btrfs_info_if_unset(info, old, AUTO_DEFRAG, "disabling auto defrag");
	btrfs_info_if_unset(info, old, COMPRESS, "use no compression");

	/* Did the compression settings change? */
	if (btrfs_test_opt(info, COMPRESS) &&
	    (!old ||
	     old->compress_type != info->compress_type ||
	     old->compress_level != info->compress_level ||
	     (!btrfs_raw_test_opt(old->mount_opt, FORCE_COMPRESS) &&
	      btrfs_raw_test_opt(info->mount_opt, FORCE_COMPRESS)))) {
		const char *compress_type = btrfs_compress_type2str(info->compress_type);

		btrfs_info(info, "%s %s compression, level %d",
			   btrfs_test_opt(info, FORCE_COMPRESS) ?
"force" : "use", 1481 compress_type, info->compress_level); 1482 } 1483 1484 if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE) 1485 btrfs_info(info, "max_inline set to %llu", info->max_inline); 1486 } 1487 1488 static int btrfs_reconfigure(struct fs_context *fc) 1489 { 1490 struct super_block *sb = fc->root->d_sb; 1491 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1492 struct btrfs_fs_context *ctx = fc->fs_private; 1493 struct btrfs_fs_context old_ctx; 1494 int ret = 0; 1495 bool mount_reconfigure = (fc->s_fs_info != NULL); 1496 1497 btrfs_info_to_ctx(fs_info, &old_ctx); 1498 1499 /* 1500 * This is our "bind mount" trick, we don't want to allow the user to do 1501 * anything other than mount a different ro/rw and a different subvol, 1502 * all of the mount options should be maintained. 1503 */ 1504 if (mount_reconfigure) 1505 ctx->mount_opt = old_ctx.mount_opt; 1506 1507 sync_filesystem(sb); 1508 set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); 1509 1510 if (!btrfs_check_options(fs_info, &ctx->mount_opt, fc->sb_flags)) 1511 return -EINVAL; 1512 1513 ret = btrfs_check_features(fs_info, !(fc->sb_flags & SB_RDONLY)); 1514 if (ret < 0) 1515 return ret; 1516 1517 btrfs_ctx_to_info(fs_info, ctx); 1518 btrfs_remount_begin(fs_info, old_ctx.mount_opt, fc->sb_flags); 1519 btrfs_resize_thread_pool(fs_info, fs_info->thread_pool_size, 1520 old_ctx.thread_pool_size); 1521 1522 if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) != 1523 (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && 1524 (!sb_rdonly(sb) || (fc->sb_flags & SB_RDONLY))) { 1525 btrfs_warn(fs_info, 1526 "remount supports changing free space tree only from RO to RW"); 1527 /* Make sure free space cache options match the state on disk. */ 1528 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 1529 btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); 1530 btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE); 1531 } 1532 if (btrfs_free_space_cache_v1_active(fs_info)) { 1533 btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE); 1534 btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE); 1535 } 1536 } 1537 1538 ret = 0; 1539 if (!sb_rdonly(sb) && (fc->sb_flags & SB_RDONLY)) 1540 ret = btrfs_remount_ro(fs_info); 1541 else if (sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY)) 1542 ret = btrfs_remount_rw(fs_info); 1543 if (ret) 1544 goto restore; 1545 1546 /* 1547 * If we set the mask during the parameter parsing VFS would reject the 1548 * remount. Here we can set the mask and the value will be updated 1549 * appropriately. 
	 */
	if ((fc->sb_flags & SB_POSIXACL) != (sb->s_flags & SB_POSIXACL))
		fc->sb_flags_mask |= SB_POSIXACL;

	btrfs_emit_options(fs_info, &old_ctx);
	wake_up_process(fs_info->transaction_kthread);
	btrfs_remount_cleanup(fs_info, old_ctx.mount_opt);
	btrfs_clear_oneshot_options(fs_info);
	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);

	return 0;
restore:
	btrfs_ctx_to_info(fs_info, &old_ctx);
	btrfs_remount_cleanup(fs_info, old_ctx.mount_opt);
	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
	return ret;
}

/* Used to sort the devices by max_avail (descending sort). */
static int btrfs_cmp_device_free_bytes(const void *a, const void *b)
{
	const struct btrfs_device_info *dev_info1 = a;
	const struct btrfs_device_info *dev_info2 = b;

	if (dev_info1->max_avail > dev_info2->max_avail)
		return -1;
	else if (dev_info1->max_avail < dev_info2->max_avail)
		return 1;
	return 0;
}

/*
 * Sort the devices by max_avail, in which the maximum free extent size of each
 * device is stored (descending sort).
 */
static inline void btrfs_descending_sort_devices(
					struct btrfs_device_info *devices,
					size_t nr_devices)
{
	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_free_bytes, NULL);
}

/*
 * Helper to calculate the free space on the devices that can be used to store
 * file data.
 */
static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
					      u64 *free_bytes)
{
	struct btrfs_device_info *devices_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 type;
	u64 avail_space;
	u64 min_stripe_size;
	int num_stripes = 1;
	int i = 0, nr_devices;
	const struct btrfs_raid_attr *rattr;

	/*
	 * We aren't under the device list lock, so this is racy-ish, but good
	 * enough for our purposes.
1613 */ 1614 nr_devices = fs_info->fs_devices->open_devices; 1615 if (!nr_devices) { 1616 smp_mb(); 1617 nr_devices = fs_info->fs_devices->open_devices; 1618 ASSERT(nr_devices); 1619 if (!nr_devices) { 1620 *free_bytes = 0; 1621 return 0; 1622 } 1623 } 1624 1625 devices_info = kmalloc_array(nr_devices, sizeof(*devices_info), 1626 GFP_KERNEL); 1627 if (!devices_info) 1628 return -ENOMEM; 1629 1630 /* calc min stripe number for data space allocation */ 1631 type = btrfs_data_alloc_profile(fs_info); 1632 rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)]; 1633 1634 if (type & BTRFS_BLOCK_GROUP_RAID0) 1635 num_stripes = nr_devices; 1636 else if (type & BTRFS_BLOCK_GROUP_RAID1_MASK) 1637 num_stripes = rattr->ncopies; 1638 else if (type & BTRFS_BLOCK_GROUP_RAID10) 1639 num_stripes = 4; 1640 1641 /* Adjust for more than 1 stripe per device */ 1642 min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN; 1643 1644 rcu_read_lock(); 1645 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 1646 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, 1647 &device->dev_state) || 1648 !device->bdev || 1649 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) 1650 continue; 1651 1652 if (i >= nr_devices) 1653 break; 1654 1655 avail_space = device->total_bytes - device->bytes_used; 1656 1657 /* align with stripe_len */ 1658 avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN); 1659 1660 /* 1661 * Ensure we have at least min_stripe_size on top of the 1662 * reserved space on the device. 1663 */ 1664 if (avail_space <= BTRFS_DEVICE_RANGE_RESERVED + min_stripe_size) 1665 continue; 1666 1667 avail_space -= BTRFS_DEVICE_RANGE_RESERVED; 1668 1669 devices_info[i].dev = device; 1670 devices_info[i].max_avail = avail_space; 1671 1672 i++; 1673 } 1674 rcu_read_unlock(); 1675 1676 nr_devices = i; 1677 1678 btrfs_descending_sort_devices(devices_info, nr_devices); 1679 1680 i = nr_devices - 1; 1681 avail_space = 0; 1682 while (nr_devices >= rattr->devs_min) { 1683 num_stripes = min(num_stripes, nr_devices); 1684 1685 if (devices_info[i].max_avail >= min_stripe_size) { 1686 int j; 1687 u64 alloc_size; 1688 1689 avail_space += devices_info[i].max_avail * num_stripes; 1690 alloc_size = devices_info[i].max_avail; 1691 for (j = i + 1 - num_stripes; j <= i; j++) 1692 devices_info[j].max_avail -= alloc_size; 1693 } 1694 i--; 1695 nr_devices--; 1696 } 1697 1698 kfree(devices_info); 1699 *free_bytes = avail_space; 1700 return 0; 1701 } 1702 1703 /* 1704 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles. 1705 * 1706 * If there's a redundant raid level at DATA block groups, use the respective 1707 * multiplier to scale the sizes. 1708 * 1709 * Unused device space usage is based on simulating the chunk allocator 1710 * algorithm that respects the device sizes and order of allocations. This is 1711 * a close approximation of the actual use but there are other factors that may 1712 * change the result (like a new metadata chunk). 1713 * 1714 * If metadata is exhausted, f_bavail will be 0. 
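 *
 * For example, if the DATA block groups use RAID1 the factor is 2, so the
 * reported f_blocks and f_bfree are roughly half of the raw device bytes.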
 */
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_space_info *found;
	u64 total_used = 0;
	u64 total_free_data = 0;
	u64 total_free_meta = 0;
	u32 bits = fs_info->sectorsize_bits;
	__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
	unsigned factor = 1;
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	int ret;
	u64 thresh = 0;
	int mixed = 0;

	list_for_each_entry(found, &fs_info->space_info, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
			int i;

			total_free_data += found->disk_total - found->disk_used;
			total_free_data -=
				btrfs_account_ro_block_groups_free_space(found);

			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
				if (!list_empty(&found->block_groups[i]))
					factor = btrfs_bg_type_to_factor(
						btrfs_raid_array[i].bg_flag);
			}
		}

		/*
		 * Metadata in mixed block group profiles is accounted in data.
		 */
		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
				mixed = 1;
			else
				total_free_meta += found->disk_total -
					found->disk_used;
		}

		total_used += found->disk_used;
	}

	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
	buf->f_blocks >>= bits;
	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);

	/* Account global block reserve as used, it's in logical size already. */
	spin_lock(&block_rsv->lock);
	/* Mixed block groups accounting is not byte-accurate, avoid overflow. */
	if (buf->f_bfree >= block_rsv->size >> bits)
		buf->f_bfree -= block_rsv->size >> bits;
	else
		buf->f_bfree = 0;
	spin_unlock(&block_rsv->lock);

	buf->f_bavail = div_u64(total_free_data, factor);
	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
	if (ret)
		return ret;
	buf->f_bavail += div_u64(total_free_data, factor);
	buf->f_bavail = buf->f_bavail >> bits;

	/*
	 * We calculate the remaining metadata space minus the global reserve.
	 * If this is (supposedly) smaller than zero, there's no space. But
	 * this does not hold in practice: the exhausted state happens while
	 * there's still some positive delta. So we apply some guesswork and
	 * compare the delta to a 4M threshold. (Practically observed delta was
	 * ~2M.)
	 *
	 * We probably cannot calculate the exact threshold value because this
	 * depends on the internal reservations requested by various
	 * operations, so some operations that consume a small amount of
	 * metadata will succeed even if the reported Avail is zero. But this
	 * is better than the other way around.
	 */
	thresh = SZ_4M;

	/*
	 * We only want to claim there's no available space if we can no longer
	 * allocate chunks for our metadata profile and our global reserve will
	 * not fit in the free metadata space. If we aren't ->full then we
	 * still can allocate chunks and thus are fine using the currently
	 * calculated f_bavail.
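	 *
	 * For example, with the metadata space_info full and a 512M global
	 * reserve, 515M of free metadata is still reported as no available
	 * space, because 515M - 4M is smaller than the reserve.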
	 */
	if (!mixed && block_rsv->space_info->full &&
	    (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
		buf->f_bavail = 0;

	buf->f_type = BTRFS_SUPER_MAGIC;
	buf->f_bsize = fs_info->sectorsize;
	buf->f_namelen = BTRFS_NAME_LEN;

	/*
	 * We treat it as constant endianness (it doesn't matter _which_)
	 * because we want the fsid to come out the same whether mounted
	 * on a big-endian or little-endian host.
	 */
	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
	/* Mask in the root object ID too, to disambiguate subvols. */
	buf->f_fsid.val[0] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root) >> 32;
	buf->f_fsid.val[1] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root);

	return 0;
}

static int btrfs_fc_test_super(struct super_block *sb, struct fs_context *fc)
{
	struct btrfs_fs_info *p = fc->s_fs_info;
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);

	return fs_info->fs_devices == p->fs_devices;
}

static int btrfs_get_tree_super(struct fs_context *fc)
{
	struct btrfs_fs_info *fs_info = fc->s_fs_info;
	struct btrfs_fs_context *ctx = fc->fs_private;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct block_device *bdev;
	struct btrfs_device *device;
	struct super_block *sb;
	blk_mode_t mode = btrfs_open_mode(fc);
	int ret;

	btrfs_ctx_to_info(fs_info, ctx);
	mutex_lock(&uuid_mutex);

	/*
	 * With 'true' passed to btrfs_scan_one_device() (mount time) we expect
	 * either a valid device or an error.
	 */
	device = btrfs_scan_one_device(fc->source, mode, true);
	ASSERT(device != NULL);
	if (IS_ERR(device)) {
		mutex_unlock(&uuid_mutex);
		return PTR_ERR(device);
	}

	fs_devices = device->fs_devices;
	fs_info->fs_devices = fs_devices;

	ret = btrfs_open_devices(fs_devices, mode, &btrfs_fs_type);
	mutex_unlock(&uuid_mutex);
	if (ret)
		return ret;

	if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
		ret = -EACCES;
		goto error;
	}

	bdev = fs_devices->latest_dev->bdev;

	/*
	 * From now on the error handling is not straightforward.
	 *
	 * If successful, this will transfer the fs_info into the super block,
	 * and fc->s_fs_info will be NULL. However if there's an existing
	 * super, we'll still have fc->s_fs_info populated. If we error
	 * completely out it'll be cleaned up when we drop the fs_context,
	 * otherwise it's tied to the lifetime of the super_block.
	 */
	sb = sget_fc(fc, btrfs_fc_test_super, set_anon_super_fc);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto error;
	}

	set_device_specific_options(fs_info);

	if (sb->s_root) {
		btrfs_close_devices(fs_devices);
		if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY)
			ret = -EBUSY;
	} else {
		snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
		shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);
		btrfs_sb(sb)->bdev_holder = &btrfs_fs_type;
		ret = btrfs_fill_super(sb, fs_devices, NULL);
	}

	if (ret) {
		deactivate_locked_super(sb);
		return ret;
	}

	btrfs_clear_oneshot_options(fs_info);

	fc->root = dget(sb->s_root);
	return 0;

error:
	btrfs_close_devices(fs_devices);
	return ret;
}

/*
 * Ever since commit 0723a0473fb4 ("btrfs: allow mounting btrfs subvolumes
 * with different ro/rw options") the following works:
 *
 *	 (i) mount /dev/sda3 -o subvol=foo,ro /mnt/foo
 *	(ii) mount /dev/sda3 -o subvol=bar,rw /mnt/bar
 *
 * which looks nice and innocent but is actually pretty intricate and deserves
 * a long comment.
 *
 * On another filesystem a subvolume mount is close to something like:
 *
 *	(iii) # create rw superblock + initial mount
 *	      mount -t xfs /dev/sdb /opt/
 *
 *	      # create ro bind mount
 *	      mount --bind -o ro /opt/foo /mnt/foo
 *
 *	      # unmount initial mount
 *	      umount /opt
 *
 * Of course, there's some special subvolume sauce and there's the fact that
 * the sb->s_root dentry is really swapped after mount_subtree(). But
 * conceptually it's very close and will help us understand the issue.
 *
 * The old mount API didn't cleanly distinguish between a mount being made ro
 * and a superblock being made ro. The only way to change the ro state of
 * either object was by passing MS_RDONLY. If a new mount was created via
 * mount(2) such as:
 *
 *	mount("/dev/sdb", "/mnt", "xfs", MS_RDONLY, NULL);
 *
 * the MS_RDONLY flag being specified had two effects:
 *
 * (1) MNT_READONLY was raised -> the resulting mount got
 *     @mnt->mnt_flags |= MNT_READONLY raised.
 *
 * (2) MS_RDONLY was passed to the filesystem's mount method and the
 *     filesystem made the superblock ro. Note how SB_RDONLY has the same
 *     value as MS_RDONLY and is raised whenever MS_RDONLY is passed through
 *     mount(2).
 *
 * Creating a subtree mount via (iii) ends up leaving a rw superblock with a
 * subtree mounted ro.
 *
 * But consider the effect of the old mount API on btrfs subvolume mounting,
 * which combines the distinct steps in (iii) into a single step.
 *
 * By issuing (i) both the mount and the superblock are turned ro. Now when
 * (ii) is issued the superblock is ro and thus even if the mount created for
 * (ii) is rw it wouldn't help. Hence, btrfs needed to transition the
 * superblock from ro to rw for (ii), which it did using an internal remount
 * call.
 *
 * IOW, subvolume mounting was inherently complicated due to the ambiguity of
 * MS_RDONLY in mount(2). Note that this ambiguity has mount(8) always
 * translate "ro" to MS_RDONLY. IOW, in both (i) and (ii) "ro" becomes
 * MS_RDONLY when passed by mount(8) to mount(2).
 *
 * Enter the new mount API. The new mount API disambiguates making a mount ro
 * and making a superblock ro.
 *
 * (3) To turn a mount ro the MOUNT_ATTR_RDONLY flag can be used with either
 *     fsmount() or mount_setattr(); this is a pure VFS level change for a
 *     specific mount or mount tree that is never seen by the filesystem
 *     itself.
 *
 * (4) To turn a superblock ro the "ro" flag must be used with
 *     fsconfig(FSCONFIG_SET_FLAG, "ro"). This option is seen by the
 *     filesystem in fc->sb_flags.
 *
 * But currently the util-linux mount command already utilizes the new mount
 * API and still sets fsconfig(FSCONFIG_SET_FLAG, "ro") no matter if it's
 * btrfs or not, making the whole superblock RO. To make per-subvolume
 * mounting with different options work we need to keep backward
 * compatibility.
 */
static struct vfsmount *btrfs_reconfigure_for_mount(struct fs_context *fc)
{
	struct vfsmount *mnt;
	int ret;
	const bool ro2rw = !(fc->sb_flags & SB_RDONLY);

	/*
	 * We got an EBUSY because our SB_RDONLY flag didn't match the existing
	 * super block, so invert our setting here and retry the mount so we
	 * can get our vfsmount.
	 */
	if (ro2rw)
		fc->sb_flags |= SB_RDONLY;
	else
		fc->sb_flags &= ~SB_RDONLY;

	mnt = fc_mount(fc);
	if (IS_ERR(mnt))
		return mnt;

	if (!ro2rw)
		return mnt;

	/* We need to convert to rw, call reconfigure. */
	fc->sb_flags &= ~SB_RDONLY;
	down_write(&mnt->mnt_sb->s_umount);
	ret = btrfs_reconfigure(fc);
	up_write(&mnt->mnt_sb->s_umount);
	if (ret) {
		mntput(mnt);
		return ERR_PTR(ret);
	}
	return mnt;
}

static int btrfs_get_tree_subvol(struct fs_context *fc)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct btrfs_fs_context *ctx = fc->fs_private;
	struct fs_context *dup_fc;
	struct dentry *dentry;
	struct vfsmount *mnt;

	/*
	 * Set up a dummy root and fs_info for test/set super. This is because
	 * we don't actually fill this stuff out until open_ctree, which will
	 * properly initialize the file system specific settings later.
	 * btrfs_init_fs_info initializes the static elements of the fs_info
	 * (locks and such) to make cleanup easier if we find a superblock
	 * with our given fs_devices later on at sget() time.
	 */
	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
	if (!fs_info)
		return -ENOMEM;

	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
	if (!fs_info->super_copy || !fs_info->super_for_commit) {
		btrfs_free_fs_info(fs_info);
		return -ENOMEM;
	}
	btrfs_init_fs_info(fs_info);

	dup_fc = vfs_dup_fs_context(fc);
	if (IS_ERR(dup_fc)) {
		btrfs_free_fs_info(fs_info);
		return PTR_ERR(dup_fc);
	}

	/*
	 * When we do the sget_fc this gets transferred to the sb, so we only
	 * need to set it on the dup_fc as this is what creates the super
	 * block.
	 */
	dup_fc->s_fs_info = fs_info;

	/*
	 * We'll do the security settings in our btrfs_get_tree_super() mount
	 * loop, they were duplicated into dup_fc, we can drop the originals
	 * here.
	 */
	security_free_mnt_opts(&fc->security);
	fc->security = NULL;

	mnt = fc_mount(dup_fc);
	if (PTR_ERR_OR_ZERO(mnt) == -EBUSY)
		mnt = btrfs_reconfigure_for_mount(dup_fc);
	put_fs_context(dup_fc);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	/*
	 * This frees ->subvol_name (and if it isn't set, mount_subvol() has to
	 * allocate a buffer to hold the subvol name), so we just drop our
	 * reference to it here.
	 */
	dentry = mount_subvol(ctx->subvol_name, ctx->subvol_objectid, mnt);
	ctx->subvol_name = NULL;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	fc->root = dentry;
	return 0;
}

static int btrfs_get_tree(struct fs_context *fc)
{
	/*
	 * Since we use mount_subtree to mount the default/specified subvol, we
	 * have to do mounts in two steps.
	 *
	 * On the first pass through we call btrfs_get_tree_subvol(), which is
	 * just a wrapper around fc_mount() that calls back into here again,
	 * and this time we'll call btrfs_get_tree_super(). This will do the
	 * open_ctree() and everything to open the devices and file system.
	 * Then we return back with a fully constructed vfsmount in
	 * btrfs_get_tree_subvol(), and from there we can do our mount_subvol()
	 * call, which will look up whichever subvol we're mounting and set up
	 * this fc with the appropriate dentry for the subvol.
	 */
	if (fc->s_fs_info)
		return btrfs_get_tree_super(fc);
	return btrfs_get_tree_subvol(fc);
}

static void btrfs_kill_super(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);

	kill_anon_super(sb);
	btrfs_free_fs_info(fs_info);
}

static void btrfs_free_fs_context(struct fs_context *fc)
{
	struct btrfs_fs_context *ctx = fc->fs_private;
	struct btrfs_fs_info *fs_info = fc->s_fs_info;

	if (fs_info)
		btrfs_free_fs_info(fs_info);

	if (ctx && refcount_dec_and_test(&ctx->refs)) {
		kfree(ctx->subvol_name);
		kfree(ctx);
	}
}

static int btrfs_dup_fs_context(struct fs_context *fc, struct fs_context *src_fc)
{
	struct btrfs_fs_context *ctx = src_fc->fs_private;

	/*
	 * Give a ref to our ctx to this dup, as we want to keep it around for
	 * our original fc so we can have the subvolume name or objectid.
	 *
	 * We unset ->source in the original fc because the dup needs it for
	 * mounting, and then once we free the dup it'll free ->source, so we
	 * need to make sure we're only pointing to it in one fc.
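	 *
	 * In short: the dup takes over ->source for the actual mount while
	 * both contexts share the refcounted ctx, so the original fc keeps
	 * access to the subvolume name/objectid without owning the source.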
	 */
	refcount_inc(&ctx->refs);
	fc->fs_private = ctx;
	fc->source = src_fc->source;
	src_fc->source = NULL;
	return 0;
}

static const struct fs_context_operations btrfs_fs_context_ops = {
	.parse_param	= btrfs_parse_param,
	.reconfigure	= btrfs_reconfigure,
	.get_tree	= btrfs_get_tree,
	.dup		= btrfs_dup_fs_context,
	.free		= btrfs_free_fs_context,
};

static int btrfs_init_fs_context(struct fs_context *fc)
{
	struct btrfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct btrfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	refcount_set(&ctx->refs, 1);
	fc->fs_private = ctx;
	fc->ops = &btrfs_fs_context_ops;

	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		btrfs_info_to_ctx(btrfs_sb(fc->root->d_sb), ctx);
	} else {
		ctx->thread_pool_size =
			min_t(unsigned long, num_online_cpus() + 2, 8);
		ctx->max_inline = BTRFS_DEFAULT_MAX_INLINE;
		ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
	}

#ifdef CONFIG_BTRFS_FS_POSIX_ACL
	fc->sb_flags |= SB_POSIXACL;
#endif
	fc->sb_flags |= SB_I_VERSION;

	return 0;
}

static struct file_system_type btrfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "btrfs",
	.init_fs_context	= btrfs_init_fs_context,
	.parameters		= btrfs_fs_parameters,
	.kill_sb		= btrfs_kill_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA |
				  FS_ALLOW_IDMAP | FS_MGTIME,
};

MODULE_ALIAS_FS("btrfs");

static int btrfs_control_open(struct inode *inode, struct file *file)
{
	/*
	 * The control file's private_data is used to hold the transaction when
	 * it is started and is used to keep track of whether a transaction is
	 * already in progress.
	 */
	file->private_data = NULL;
	return 0;
}

/*
 * Used by /dev/btrfs-control for device ioctls.
 */
static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct btrfs_ioctl_vol_args *vol;
	struct btrfs_device *device = NULL;
	dev_t devt = 0;
	int ret = -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vol = memdup_user((void __user *)arg, sizeof(*vol));
	if (IS_ERR(vol))
		return PTR_ERR(vol);
	ret = btrfs_check_ioctl_vol_args_path(vol);
	if (ret < 0)
		goto out;

	switch (cmd) {
	case BTRFS_IOC_SCAN_DEV:
		mutex_lock(&uuid_mutex);
		/*
		 * Scanning outside of a mount can return NULL, which would
		 * turn into a 0 error code.
		 */
		device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
		ret = PTR_ERR_OR_ZERO(device);
		mutex_unlock(&uuid_mutex);
		break;
	case BTRFS_IOC_FORGET_DEV:
		if (vol->name[0] != 0) {
			ret = lookup_bdev(vol->name, &devt);
			if (ret)
				break;
		}
		ret = btrfs_forget_devices(devt);
		break;
	case BTRFS_IOC_DEVICES_READY:
		mutex_lock(&uuid_mutex);
		/*
		 * Scanning outside of a mount can return NULL, which would
		 * turn into a 0 error code.
		 */
		device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
		if (IS_ERR_OR_NULL(device)) {
			mutex_unlock(&uuid_mutex);
			ret = PTR_ERR(device);
			break;
		}
		ret = !(device->fs_devices->num_devices ==
			device->fs_devices->total_devices);
		mutex_unlock(&uuid_mutex);
		break;
	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
		ret = btrfs_ioctl_get_supported_features((void __user *)arg);
		break;
	}

out:
	kfree(vol);
	return ret;
}

static int btrfs_freeze(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);

	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
	/*
	 * We don't need a barrier here, we'll wait for any transaction that
	 * could be in progress on other threads (and do delayed iputs that
	 * we want to avoid on a frozen filesystem), or do the commit
	 * ourselves.
	 */
	return btrfs_commit_current_transaction(fs_info->tree_root);
}

static int check_dev_super(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_super_block *sb;
	u64 last_trans;
	u16 csum_type;
	int ret = 0;

	/* This should be called with fs still frozen. */
	ASSERT(test_bit(BTRFS_FS_FROZEN, &fs_info->flags));

	/* Missing dev, no need to check. */
	if (!dev->bdev)
		return 0;

	/* Only need to check the primary super block. */
	sb = btrfs_read_dev_one_super(dev->bdev, 0, true);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* Verify the checksum. */
	csum_type = btrfs_super_csum_type(sb);
	if (csum_type != btrfs_super_csum_type(fs_info->super_copy)) {
		btrfs_err(fs_info, "csum type changed, has %u expect %u",
			  csum_type, btrfs_super_csum_type(fs_info->super_copy));
		ret = -EUCLEAN;
		goto out;
	}

	if (btrfs_check_super_csum(fs_info, sb)) {
		btrfs_err(fs_info, "csum for on-disk super block no longer matches");
		ret = -EUCLEAN;
		goto out;
	}

	/* btrfs_validate_super() includes the fsid check against super->fsid. */
	ret = btrfs_validate_super(fs_info, sb, 0);
	if (ret < 0)
		goto out;

	last_trans = btrfs_get_last_trans_committed(fs_info);
	if (btrfs_super_generation(sb) != last_trans) {
		btrfs_err(fs_info, "transid mismatch, has %llu expect %llu",
			  btrfs_super_generation(sb), last_trans);
		ret = -EUCLEAN;
		goto out;
	}
out:
	btrfs_release_disk_super(sb);
	return ret;
}

static int btrfs_unfreeze(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_device *device;
	int ret = 0;

	/*
	 * Make sure the fs has not been changed by accident (like hibernation
	 * and then modification by another OS).
	 * If we find anything wrong, we mark the fs as in error immediately.
	 *
	 * And since the fs is frozen, no one can modify the fs yet, thus
	 * we don't need to hold device_list_mutex.
	 */
	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
		ret = check_dev_super(device);
		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				"super block on devid %llu got modified unexpectedly",
				device->devid);
			break;
		}
	}
	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);

	/*
	 * We still return 0, to allow the VFS layer to unfreeze the fs even if
	 * the above checks failed.
	 * Since the fs is either fine or read-only, we're safe to continue
	 * without causing further damage.
	 */
	return 0;
}

static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);

	/*
	 * There should always be a valid pointer in latest_dev; it may be
	 * stale for a short moment in case it's being deleted, but it is
	 * still valid until the end of the RCU grace period.
	 */
	rcu_read_lock();
	seq_escape(m, btrfs_dev_name(fs_info->fs_devices->latest_dev), " \t\n\\");
	rcu_read_unlock();

	return 0;
}

static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_control *sc)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	const s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);

	trace_btrfs_extent_map_shrinker_count(fs_info, nr);

	/*
	 * Only report the real number for DEBUG builds, as there are reports
	 * of serious performance degradation caused by too frequent shrinks.
	 */
	if (IS_ENABLED(CONFIG_BTRFS_DEBUG))
		return nr;
	return 0;
}

static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc)
{
	const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan);
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);

	/*
	 * We may be called from any task trying to allocate memory and we
	 * don't want to slow it down with scanning and dropping extent maps.
	 * It would also cause heavy lock contention if many tasks concurrently
	 * enter here. Therefore only allow kswapd tasks to scan and drop
	 * extent maps.
	 */
	if (!current_is_kswapd())
		return 0;

	return btrfs_free_extent_maps(fs_info, nr_to_scan);
}

static const struct super_operations btrfs_super_ops = {
	.drop_inode		= btrfs_drop_inode,
	.evict_inode		= btrfs_evict_inode,
	.put_super		= btrfs_put_super,
	.sync_fs		= btrfs_sync_fs,
	.show_options		= btrfs_show_options,
	.show_devname		= btrfs_show_devname,
	.alloc_inode		= btrfs_alloc_inode,
	.destroy_inode		= btrfs_destroy_inode,
	.free_inode		= btrfs_free_inode,
	.statfs			= btrfs_statfs,
	.freeze_fs		= btrfs_freeze,
	.unfreeze_fs		= btrfs_unfreeze,
	.nr_cached_objects	= btrfs_nr_cached_objects,
	.free_cached_objects	= btrfs_free_cached_objects,
};

static const struct file_operations btrfs_ctl_fops = {
	.open		= btrfs_control_open,
	.unlocked_ioctl	= btrfs_control_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice btrfs_misc = {
	.minor		= BTRFS_MINOR,
	.name		= "btrfs-control",
	.fops		= &btrfs_ctl_fops
};

MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
MODULE_ALIAS("devname:btrfs-control");

static int __init btrfs_interface_init(void)
{
	return misc_register(&btrfs_misc);
}

static __cold void btrfs_interface_exit(void)
{
	misc_deregister(&btrfs_misc);
}

static int __init btrfs_print_mod_info(void)
{
	static const char options[] = ""
#ifdef CONFIG_BTRFS_DEBUG
			", debug=on"
#endif
#ifdef CONFIG_BTRFS_ASSERT
			", assert=on"
#endif
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
			", ref-verify=on"
#endif
#ifdef CONFIG_BLK_DEV_ZONED
			", zoned=yes"
#else
			", zoned=no"
#endif
#ifdef CONFIG_FS_VERITY
			", fsverity=yes"
#else
			", fsverity=no"
#endif
			;
	pr_info("Btrfs loaded%s\n", options);
	return 0;
}

static int register_btrfs(void)
{
	return register_filesystem(&btrfs_fs_type);
}

static void unregister_btrfs(void)
{
	unregister_filesystem(&btrfs_fs_type);
}

/* Helper structure for long init/exit functions. */
struct init_sequence {
	int (*init_func)(void);
	/* Can be NULL if the init_func doesn't need cleanup. */
	void (*exit_func)(void);
};

static const struct init_sequence mod_init_seq[] = {
	{
		.init_func = btrfs_props_init,
		.exit_func = NULL,
	}, {
		.init_func = btrfs_init_sysfs,
		.exit_func = btrfs_exit_sysfs,
	}, {
		.init_func = btrfs_init_compress,
		.exit_func = btrfs_exit_compress,
	}, {
		.init_func = btrfs_init_cachep,
		.exit_func = btrfs_destroy_cachep,
	}, {
		.init_func = btrfs_init_dio,
		.exit_func = btrfs_destroy_dio,
	}, {
		.init_func = btrfs_transaction_init,
		.exit_func = btrfs_transaction_exit,
	}, {
		.init_func = btrfs_ctree_init,
		.exit_func = btrfs_ctree_exit,
	}, {
		.init_func = btrfs_free_space_init,
		.exit_func = btrfs_free_space_exit,
	}, {
		.init_func = extent_state_init_cachep,
		.exit_func = extent_state_free_cachep,
	}, {
		.init_func = extent_buffer_init_cachep,
		.exit_func = extent_buffer_free_cachep,
	}, {
		.init_func = btrfs_bioset_init,
		.exit_func = btrfs_bioset_exit,
	}, {
		.init_func = extent_map_init,
		.exit_func = extent_map_exit,
	}, {
		.init_func = ordered_data_init,
		.exit_func = ordered_data_exit,
	}, {
		.init_func = btrfs_delayed_inode_init,
		.exit_func = btrfs_delayed_inode_exit,
	}, {
		.init_func = btrfs_auto_defrag_init,
		.exit_func = btrfs_auto_defrag_exit,
	}, {
		.init_func = btrfs_delayed_ref_init,
		.exit_func = btrfs_delayed_ref_exit,
	}, {
		.init_func = btrfs_prelim_ref_init,
		.exit_func = btrfs_prelim_ref_exit,
	}, {
		.init_func = btrfs_interface_init,
		.exit_func = btrfs_interface_exit,
	}, {
		.init_func = btrfs_print_mod_info,
		.exit_func = NULL,
	}, {
		.init_func = btrfs_run_sanity_tests,
		.exit_func = NULL,
	}, {
		.init_func = register_btrfs,
		.exit_func = unregister_btrfs,
	}
};

static bool mod_init_result[ARRAY_SIZE(mod_init_seq)];

static __always_inline void btrfs_exit_btrfs_fs(void)
{
	int i;

	for (i = ARRAY_SIZE(mod_init_seq) - 1; i >= 0; i--) {
		if (!mod_init_result[i])
			continue;
		if (mod_init_seq[i].exit_func)
			mod_init_seq[i].exit_func();
		mod_init_result[i] = false;
	}
}

static void __exit exit_btrfs_fs(void)
{
	btrfs_exit_btrfs_fs();
	btrfs_cleanup_fs_uuids();
}

static int __init init_btrfs_fs(void)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(mod_init_seq); i++) {
		ASSERT(!mod_init_result[i]);
		ret = mod_init_seq[i].init_func();
		if (ret < 0) {
			btrfs_exit_btrfs_fs();
			return ret;
		}
		mod_init_result[i] = true;
	}
	return 0;
}

late_initcall(init_btrfs_fs);
module_exit(exit_btrfs_fs)

MODULE_DESCRIPTION("B-Tree File System (BTRFS)");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
MODULE_SOFTDEP("pre: xxhash64");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: blake2b-256");