/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/parser.h>
#include <linux/ctype.h>
#include <linux/namei.h>
#include <linux/miscdevice.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include "delayed-inode.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "hash.h"
#include "props.h"
#include "xattr.h"
#include "volumes.h"
#include "export.h"
#include "compression.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "free-space-cache.h"
#include "backref.h"
#include "tests/btrfs-tests.h"

#include "qgroup.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>

static const struct super_operations btrfs_super_ops;
static struct file_system_type btrfs_fs_type;

static int btrfs_remount(struct super_block *sb, int *flags, char *data);

static const char *btrfs_decode_error(int errno)
{
	char *errstr = "unknown";

	switch (errno) {
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		errstr = "Readonly filesystem";
		break;
	case -EEXIST:
		errstr = "Object already exists";
		break;
	case -ENOSPC:
		errstr = "No space left";
		break;
	case -ENOENT:
		errstr = "No such entry";
		break;
	}

	return errstr;
}

static void save_error_info(struct btrfs_fs_info *fs_info)
{
	/*
	 * today we only save the error info into ram. Long term we'll
	 * also send it down to the disk
	 */
	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
}

/* btrfs handles errors by forcing the filesystem readonly */
static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
{
	struct super_block *sb = fs_info->sb;

	if (sb->s_flags & MS_RDONLY)
		return;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		sb->s_flags |= MS_RDONLY;
		btrfs_info(fs_info, "forced readonly");
		/*
		 * Note that a running device replace operation is not
		 * canceled here although there is no way to update
		 * the progress. It would add the risk of a deadlock,
		 * therefore the canceling is omitted.
		 * The only penalty is that some I/O remains active until
		 * the procedure completes. The next time when the filesystem
		 * is mounted writeable again, the device replace operation
		 * continues.
		 */
	}
}

#ifdef CONFIG_PRINTK
/*
 * __btrfs_std_error decodes expected errors from the caller and
 * invokes the appropriate error response.
 */
__cold
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
		       unsigned int line, int errno, const char *fmt, ...)
{
	struct super_block *sb = fs_info->sb;
	const char *errstr;

	/*
	 * Special case: if the error is EROFS, and we're already
	 * under MS_RDONLY, then it is safe here.
	 */
	if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
		return;

	errstr = btrfs_decode_error(errno);
	if (fmt) {
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_CRIT
			"BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
			sb->s_id, function, line, errno, errstr, &vaf);
		va_end(args);
	} else {
		printk(KERN_CRIT "BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
			sb->s_id, function, line, errno, errstr);
	}

	/* Don't go through full error handling during mount */
	save_error_info(fs_info);
	if (sb->s_flags & MS_BORN)
		btrfs_handle_error(fs_info);
}

static const char * const logtypes[] = {
	"emergency",
	"alert",
	"critical",
	"error",
	"warning",
	"notice",
	"info",
	"debug",
};

void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
	struct super_block *sb = fs_info->sb;
	char lvl[4];
	struct va_format vaf;
	va_list args;
	const char *type = logtypes[4];
	int kern_level;

	va_start(args, fmt);

	kern_level = printk_get_level(fmt);
	if (kern_level) {
		size_t size = printk_skip_level(fmt) - fmt;
		memcpy(lvl, fmt, size);
		lvl[size] = '\0';
		fmt += size;
		type = logtypes[kern_level - '0'];
	} else
		*lvl = '\0';

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);

	va_end(args);
}

#else

void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
		       unsigned int line, int errno, const char *fmt, ...)
{
	struct super_block *sb = fs_info->sb;

	/*
	 * Special case: if the error is EROFS, and we're already
	 * under MS_RDONLY, then it is safe here.
	 */
	if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
		return;

	/* Don't go through full error handling during mount */
	if (sb->s_flags & MS_BORN) {
		save_error_info(fs_info);
		btrfs_handle_error(fs_info);
	}
}
#endif

/*
 * We only mark the transaction aborted and then set the file system read-only.
 * This will prevent new transactions from starting or trying to join this
 * one.
 *
 * This means that error recovery at the call site is limited to freeing
 * any local memory allocations and passing the error code up without
 * further cleanup. The transaction should complete as it normally would
 * in the call path but will return -EIO.
 *
 * We'll complete the cleanup in btrfs_end_transaction and
 * btrfs_commit_transaction.
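 *
 * A minimal caller sketch (illustrative only, not code from this file):
 *
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, root, ret);
 *		goto out;
 *	}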
250 */ 251 __cold 252 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, 253 struct btrfs_root *root, const char *function, 254 unsigned int line, int errno) 255 { 256 trans->aborted = errno; 257 /* Nothing used. The other threads that have joined this 258 * transaction may be able to continue. */ 259 if (!trans->blocks_used && list_empty(&trans->new_bgs)) { 260 const char *errstr; 261 262 errstr = btrfs_decode_error(errno); 263 btrfs_warn(root->fs_info, 264 "%s:%d: Aborting unused transaction(%s).", 265 function, line, errstr); 266 return; 267 } 268 ACCESS_ONCE(trans->transaction->aborted) = errno; 269 /* Wake up anybody who may be waiting on this transaction */ 270 wake_up(&root->fs_info->transaction_wait); 271 wake_up(&root->fs_info->transaction_blocked_wait); 272 __btrfs_std_error(root->fs_info, function, line, errno, NULL); 273 } 274 /* 275 * __btrfs_panic decodes unexpected, fatal errors from the caller, 276 * issues an alert, and either panics or BUGs, depending on mount options. 277 */ 278 __cold 279 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, 280 unsigned int line, int errno, const char *fmt, ...) 281 { 282 char *s_id = "<unknown>"; 283 const char *errstr; 284 struct va_format vaf = { .fmt = fmt }; 285 va_list args; 286 287 if (fs_info) 288 s_id = fs_info->sb->s_id; 289 290 va_start(args, fmt); 291 vaf.va = &args; 292 293 errstr = btrfs_decode_error(errno); 294 if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)) 295 panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n", 296 s_id, function, line, &vaf, errno, errstr); 297 298 btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)", 299 function, line, &vaf, errno, errstr); 300 va_end(args); 301 /* Caller calls BUG() */ 302 } 303 304 static void btrfs_put_super(struct super_block *sb) 305 { 306 close_ctree(btrfs_sb(sb)->tree_root); 307 } 308 309 enum { 310 Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, 311 Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, 312 Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, 313 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 314 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 315 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, 316 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache, 317 Opt_no_space_cache, Opt_recovery, Opt_skip_balance, 318 Opt_check_integrity, Opt_check_integrity_including_extent_data, 319 Opt_check_integrity_print_mask, Opt_fatal_errors, Opt_rescan_uuid_tree, 320 Opt_commit_interval, Opt_barrier, Opt_nodefrag, Opt_nodiscard, 321 Opt_noenospc_debug, Opt_noflushoncommit, Opt_acl, Opt_datacow, 322 Opt_datasum, Opt_treelog, Opt_noinode_cache, 323 Opt_err, 324 }; 325 326 static match_table_t tokens = { 327 {Opt_degraded, "degraded"}, 328 {Opt_subvol, "subvol=%s"}, 329 {Opt_subvolid, "subvolid=%s"}, 330 {Opt_device, "device=%s"}, 331 {Opt_nodatasum, "nodatasum"}, 332 {Opt_datasum, "datasum"}, 333 {Opt_nodatacow, "nodatacow"}, 334 {Opt_datacow, "datacow"}, 335 {Opt_nobarrier, "nobarrier"}, 336 {Opt_barrier, "barrier"}, 337 {Opt_max_inline, "max_inline=%s"}, 338 {Opt_alloc_start, "alloc_start=%s"}, 339 {Opt_thread_pool, "thread_pool=%d"}, 340 {Opt_compress, "compress"}, 341 {Opt_compress_type, "compress=%s"}, 342 {Opt_compress_force, "compress-force"}, 343 {Opt_compress_force_type, "compress-force=%s"}, 344 {Opt_ssd, "ssd"}, 345 {Opt_ssd_spread, "ssd_spread"}, 346 {Opt_nossd, "nossd"}, 347 
{Opt_acl, "acl"}, 348 {Opt_noacl, "noacl"}, 349 {Opt_notreelog, "notreelog"}, 350 {Opt_treelog, "treelog"}, 351 {Opt_flushoncommit, "flushoncommit"}, 352 {Opt_noflushoncommit, "noflushoncommit"}, 353 {Opt_ratio, "metadata_ratio=%d"}, 354 {Opt_discard, "discard"}, 355 {Opt_nodiscard, "nodiscard"}, 356 {Opt_space_cache, "space_cache"}, 357 {Opt_clear_cache, "clear_cache"}, 358 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, 359 {Opt_enospc_debug, "enospc_debug"}, 360 {Opt_noenospc_debug, "noenospc_debug"}, 361 {Opt_subvolrootid, "subvolrootid=%d"}, 362 {Opt_defrag, "autodefrag"}, 363 {Opt_nodefrag, "noautodefrag"}, 364 {Opt_inode_cache, "inode_cache"}, 365 {Opt_noinode_cache, "noinode_cache"}, 366 {Opt_no_space_cache, "nospace_cache"}, 367 {Opt_recovery, "recovery"}, 368 {Opt_skip_balance, "skip_balance"}, 369 {Opt_check_integrity, "check_int"}, 370 {Opt_check_integrity_including_extent_data, "check_int_data"}, 371 {Opt_check_integrity_print_mask, "check_int_print_mask=%d"}, 372 {Opt_rescan_uuid_tree, "rescan_uuid_tree"}, 373 {Opt_fatal_errors, "fatal_errors=%s"}, 374 {Opt_commit_interval, "commit=%d"}, 375 {Opt_err, NULL}, 376 }; 377 378 /* 379 * Regular mount options parser. Everything that is needed only when 380 * reading in a new superblock is parsed here. 381 * XXX JDM: This needs to be cleaned up for remount. 382 */ 383 int btrfs_parse_options(struct btrfs_root *root, char *options) 384 { 385 struct btrfs_fs_info *info = root->fs_info; 386 substring_t args[MAX_OPT_ARGS]; 387 char *p, *num, *orig = NULL; 388 u64 cache_gen; 389 int intarg; 390 int ret = 0; 391 char *compress_type; 392 bool compress_force = false; 393 394 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy); 395 if (cache_gen) 396 btrfs_set_opt(info->mount_opt, SPACE_CACHE); 397 398 if (!options) 399 goto out; 400 401 /* 402 * strsep changes the string, duplicate it because parse_options 403 * gets called twice 404 */ 405 options = kstrdup(options, GFP_NOFS); 406 if (!options) 407 return -ENOMEM; 408 409 orig = options; 410 411 while ((p = strsep(&options, ",")) != NULL) { 412 int token; 413 if (!*p) 414 continue; 415 416 token = match_token(p, tokens, args); 417 switch (token) { 418 case Opt_degraded: 419 btrfs_info(root->fs_info, "allowing degraded mounts"); 420 btrfs_set_opt(info->mount_opt, DEGRADED); 421 break; 422 case Opt_subvol: 423 case Opt_subvolid: 424 case Opt_subvolrootid: 425 case Opt_device: 426 /* 427 * These are parsed by btrfs_parse_early_options 428 * and can be happily ignored here. 
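			 * (As a hypothetical example, a mount string such as
			 * "subvol=data,compress=lzo" reaches this loop with
			 * subvol= already consumed; only compress=lzo is
			 * acted on below.)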
429 */ 430 break; 431 case Opt_nodatasum: 432 btrfs_set_and_info(root, NODATASUM, 433 "setting nodatasum"); 434 break; 435 case Opt_datasum: 436 if (btrfs_test_opt(root, NODATASUM)) { 437 if (btrfs_test_opt(root, NODATACOW)) 438 btrfs_info(root->fs_info, "setting datasum, datacow enabled"); 439 else 440 btrfs_info(root->fs_info, "setting datasum"); 441 } 442 btrfs_clear_opt(info->mount_opt, NODATACOW); 443 btrfs_clear_opt(info->mount_opt, NODATASUM); 444 break; 445 case Opt_nodatacow: 446 if (!btrfs_test_opt(root, NODATACOW)) { 447 if (!btrfs_test_opt(root, COMPRESS) || 448 !btrfs_test_opt(root, FORCE_COMPRESS)) { 449 btrfs_info(root->fs_info, 450 "setting nodatacow, compression disabled"); 451 } else { 452 btrfs_info(root->fs_info, "setting nodatacow"); 453 } 454 } 455 btrfs_clear_opt(info->mount_opt, COMPRESS); 456 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 457 btrfs_set_opt(info->mount_opt, NODATACOW); 458 btrfs_set_opt(info->mount_opt, NODATASUM); 459 break; 460 case Opt_datacow: 461 btrfs_clear_and_info(root, NODATACOW, 462 "setting datacow"); 463 break; 464 case Opt_compress_force: 465 case Opt_compress_force_type: 466 compress_force = true; 467 /* Fallthrough */ 468 case Opt_compress: 469 case Opt_compress_type: 470 if (token == Opt_compress || 471 token == Opt_compress_force || 472 strcmp(args[0].from, "zlib") == 0) { 473 compress_type = "zlib"; 474 info->compress_type = BTRFS_COMPRESS_ZLIB; 475 btrfs_set_opt(info->mount_opt, COMPRESS); 476 btrfs_clear_opt(info->mount_opt, NODATACOW); 477 btrfs_clear_opt(info->mount_opt, NODATASUM); 478 } else if (strcmp(args[0].from, "lzo") == 0) { 479 compress_type = "lzo"; 480 info->compress_type = BTRFS_COMPRESS_LZO; 481 btrfs_set_opt(info->mount_opt, COMPRESS); 482 btrfs_clear_opt(info->mount_opt, NODATACOW); 483 btrfs_clear_opt(info->mount_opt, NODATASUM); 484 btrfs_set_fs_incompat(info, COMPRESS_LZO); 485 } else if (strncmp(args[0].from, "no", 2) == 0) { 486 compress_type = "no"; 487 btrfs_clear_opt(info->mount_opt, COMPRESS); 488 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 489 compress_force = false; 490 } else { 491 ret = -EINVAL; 492 goto out; 493 } 494 495 if (compress_force) { 496 btrfs_set_and_info(root, FORCE_COMPRESS, 497 "force %s compression", 498 compress_type); 499 } else { 500 if (!btrfs_test_opt(root, COMPRESS)) 501 btrfs_info(root->fs_info, 502 "btrfs: use %s compression", 503 compress_type); 504 /* 505 * If we remount from compress-force=xxx to 506 * compress=xxx, we need clear FORCE_COMPRESS 507 * flag, otherwise, there is no way for users 508 * to disable forcible compression separately. 
509 */ 510 btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); 511 } 512 break; 513 case Opt_ssd: 514 btrfs_set_and_info(root, SSD, 515 "use ssd allocation scheme"); 516 break; 517 case Opt_ssd_spread: 518 btrfs_set_and_info(root, SSD_SPREAD, 519 "use spread ssd allocation scheme"); 520 btrfs_set_opt(info->mount_opt, SSD); 521 break; 522 case Opt_nossd: 523 btrfs_set_and_info(root, NOSSD, 524 "not using ssd allocation scheme"); 525 btrfs_clear_opt(info->mount_opt, SSD); 526 break; 527 case Opt_barrier: 528 btrfs_clear_and_info(root, NOBARRIER, 529 "turning on barriers"); 530 break; 531 case Opt_nobarrier: 532 btrfs_set_and_info(root, NOBARRIER, 533 "turning off barriers"); 534 break; 535 case Opt_thread_pool: 536 ret = match_int(&args[0], &intarg); 537 if (ret) { 538 goto out; 539 } else if (intarg > 0) { 540 info->thread_pool_size = intarg; 541 } else { 542 ret = -EINVAL; 543 goto out; 544 } 545 break; 546 case Opt_max_inline: 547 num = match_strdup(&args[0]); 548 if (num) { 549 info->max_inline = memparse(num, NULL); 550 kfree(num); 551 552 if (info->max_inline) { 553 info->max_inline = min_t(u64, 554 info->max_inline, 555 root->sectorsize); 556 } 557 btrfs_info(root->fs_info, "max_inline at %llu", 558 info->max_inline); 559 } else { 560 ret = -ENOMEM; 561 goto out; 562 } 563 break; 564 case Opt_alloc_start: 565 num = match_strdup(&args[0]); 566 if (num) { 567 mutex_lock(&info->chunk_mutex); 568 info->alloc_start = memparse(num, NULL); 569 mutex_unlock(&info->chunk_mutex); 570 kfree(num); 571 btrfs_info(root->fs_info, "allocations start at %llu", 572 info->alloc_start); 573 } else { 574 ret = -ENOMEM; 575 goto out; 576 } 577 break; 578 case Opt_acl: 579 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 580 root->fs_info->sb->s_flags |= MS_POSIXACL; 581 break; 582 #else 583 btrfs_err(root->fs_info, 584 "support for ACL not compiled in!"); 585 ret = -EINVAL; 586 goto out; 587 #endif 588 case Opt_noacl: 589 root->fs_info->sb->s_flags &= ~MS_POSIXACL; 590 break; 591 case Opt_notreelog: 592 btrfs_set_and_info(root, NOTREELOG, 593 "disabling tree log"); 594 break; 595 case Opt_treelog: 596 btrfs_clear_and_info(root, NOTREELOG, 597 "enabling tree log"); 598 break; 599 case Opt_flushoncommit: 600 btrfs_set_and_info(root, FLUSHONCOMMIT, 601 "turning on flush-on-commit"); 602 break; 603 case Opt_noflushoncommit: 604 btrfs_clear_and_info(root, FLUSHONCOMMIT, 605 "turning off flush-on-commit"); 606 break; 607 case Opt_ratio: 608 ret = match_int(&args[0], &intarg); 609 if (ret) { 610 goto out; 611 } else if (intarg >= 0) { 612 info->metadata_ratio = intarg; 613 btrfs_info(root->fs_info, "metadata ratio %d", 614 info->metadata_ratio); 615 } else { 616 ret = -EINVAL; 617 goto out; 618 } 619 break; 620 case Opt_discard: 621 btrfs_set_and_info(root, DISCARD, 622 "turning on discard"); 623 break; 624 case Opt_nodiscard: 625 btrfs_clear_and_info(root, DISCARD, 626 "turning off discard"); 627 break; 628 case Opt_space_cache: 629 btrfs_set_and_info(root, SPACE_CACHE, 630 "enabling disk space caching"); 631 break; 632 case Opt_rescan_uuid_tree: 633 btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE); 634 break; 635 case Opt_no_space_cache: 636 btrfs_clear_and_info(root, SPACE_CACHE, 637 "disabling disk space caching"); 638 break; 639 case Opt_inode_cache: 640 btrfs_set_pending_and_info(info, INODE_MAP_CACHE, 641 "enabling inode map caching"); 642 break; 643 case Opt_noinode_cache: 644 btrfs_clear_pending_and_info(info, INODE_MAP_CACHE, 645 "disabling inode map caching"); 646 break; 647 case Opt_clear_cache: 648 
			btrfs_set_and_info(root, CLEAR_CACHE,
					   "force clearing of disk cache");
			break;
		case Opt_user_subvol_rm_allowed:
			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
			break;
		case Opt_enospc_debug:
			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
			break;
		case Opt_noenospc_debug:
			btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
			break;
		case Opt_defrag:
			btrfs_set_and_info(root, AUTO_DEFRAG,
					   "enabling auto defrag");
			break;
		case Opt_nodefrag:
			btrfs_clear_and_info(root, AUTO_DEFRAG,
					     "disabling auto defrag");
			break;
		case Opt_recovery:
			btrfs_info(root->fs_info, "enabling auto recovery");
			btrfs_set_opt(info->mount_opt, RECOVERY);
			break;
		case Opt_skip_balance:
			btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
			break;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
		case Opt_check_integrity_including_extent_data:
			btrfs_info(root->fs_info,
				   "enabling check integrity including extent data");
			btrfs_set_opt(info->mount_opt,
				      CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
			break;
		case Opt_check_integrity:
			btrfs_info(root->fs_info, "enabling check integrity");
			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
			break;
		case Opt_check_integrity_print_mask:
			ret = match_int(&args[0], &intarg);
			if (ret) {
				goto out;
			} else if (intarg >= 0) {
				info->check_integrity_print_mask = intarg;
				btrfs_info(root->fs_info, "check_integrity_print_mask 0x%x",
					   info->check_integrity_print_mask);
			} else {
				ret = -EINVAL;
				goto out;
			}
			break;
#else
		case Opt_check_integrity_including_extent_data:
		case Opt_check_integrity:
		case Opt_check_integrity_print_mask:
			btrfs_err(root->fs_info,
				  "support for check_integrity* not compiled in!");
			ret = -EINVAL;
			goto out;
#endif
		case Opt_fatal_errors:
			if (strcmp(args[0].from, "panic") == 0)
				btrfs_set_opt(info->mount_opt,
					      PANIC_ON_FATAL_ERROR);
			else if (strcmp(args[0].from, "bug") == 0)
				btrfs_clear_opt(info->mount_opt,
						PANIC_ON_FATAL_ERROR);
			else {
				ret = -EINVAL;
				goto out;
			}
			break;
		case Opt_commit_interval:
			intarg = 0;
			ret = match_int(&args[0], &intarg);
			if (ret < 0) {
				btrfs_err(root->fs_info, "invalid commit interval");
				ret = -EINVAL;
				goto out;
			}
			if (intarg > 0) {
				if (intarg > 300) {
					btrfs_warn(root->fs_info, "excessive commit interval %d",
						   intarg);
				}
				info->commit_interval = intarg;
			} else {
				btrfs_info(root->fs_info, "using default commit interval %ds",
					   BTRFS_DEFAULT_COMMIT_INTERVAL);
				info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
			}
			break;
		case Opt_err:
			btrfs_info(root->fs_info, "unrecognized mount option '%s'", p);
			ret = -EINVAL;
			goto out;
		default:
			break;
		}
	}
out:
	if (!ret && btrfs_test_opt(root, SPACE_CACHE))
		btrfs_info(root->fs_info, "disk space caching is enabled");
	kfree(orig);
	return ret;
}

/*
 * Parse mount options that are required early in the mount process.
 *
 * All other options are parsed much later in the mount process, and
 * only when we need to allocate a new super block.
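 *
 * As a hypothetical example, mounting with the option string
 * "device=/dev/sdb,subvolid=257,compress=lzo" makes this helper scan
 * /dev/sdb and remember the requested subvolume ID, while compress=lzo
 * is left for btrfs_parse_options() once the superblock exists.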
761 */ 762 static int btrfs_parse_early_options(const char *options, fmode_t flags, 763 void *holder, char **subvol_name, u64 *subvol_objectid, 764 struct btrfs_fs_devices **fs_devices) 765 { 766 substring_t args[MAX_OPT_ARGS]; 767 char *device_name, *opts, *orig, *p; 768 char *num = NULL; 769 int error = 0; 770 771 if (!options) 772 return 0; 773 774 /* 775 * strsep changes the string, duplicate it because parse_options 776 * gets called twice 777 */ 778 opts = kstrdup(options, GFP_KERNEL); 779 if (!opts) 780 return -ENOMEM; 781 orig = opts; 782 783 while ((p = strsep(&opts, ",")) != NULL) { 784 int token; 785 if (!*p) 786 continue; 787 788 token = match_token(p, tokens, args); 789 switch (token) { 790 case Opt_subvol: 791 kfree(*subvol_name); 792 *subvol_name = match_strdup(&args[0]); 793 if (!*subvol_name) { 794 error = -ENOMEM; 795 goto out; 796 } 797 break; 798 case Opt_subvolid: 799 num = match_strdup(&args[0]); 800 if (num) { 801 *subvol_objectid = memparse(num, NULL); 802 kfree(num); 803 /* we want the original fs_tree */ 804 if (!*subvol_objectid) 805 *subvol_objectid = 806 BTRFS_FS_TREE_OBJECTID; 807 } else { 808 error = -EINVAL; 809 goto out; 810 } 811 break; 812 case Opt_subvolrootid: 813 printk(KERN_WARNING 814 "BTRFS: 'subvolrootid' mount option is deprecated and has " 815 "no effect\n"); 816 break; 817 case Opt_device: 818 device_name = match_strdup(&args[0]); 819 if (!device_name) { 820 error = -ENOMEM; 821 goto out; 822 } 823 error = btrfs_scan_one_device(device_name, 824 flags, holder, fs_devices); 825 kfree(device_name); 826 if (error) 827 goto out; 828 break; 829 default: 830 break; 831 } 832 } 833 834 out: 835 kfree(orig); 836 return error; 837 } 838 839 static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, 840 u64 subvol_objectid) 841 { 842 struct btrfs_root *root = fs_info->tree_root; 843 struct btrfs_root *fs_root; 844 struct btrfs_root_ref *root_ref; 845 struct btrfs_inode_ref *inode_ref; 846 struct btrfs_key key; 847 struct btrfs_path *path = NULL; 848 char *name = NULL, *ptr; 849 u64 dirid; 850 int len; 851 int ret; 852 853 path = btrfs_alloc_path(); 854 if (!path) { 855 ret = -ENOMEM; 856 goto err; 857 } 858 path->leave_spinning = 1; 859 860 name = kmalloc(PATH_MAX, GFP_NOFS); 861 if (!name) { 862 ret = -ENOMEM; 863 goto err; 864 } 865 ptr = name + PATH_MAX - 1; 866 ptr[0] = '\0'; 867 868 /* 869 * Walk up the subvolume trees in the tree of tree roots by root 870 * backrefs until we hit the top-level subvolume. 
871 */ 872 while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) { 873 key.objectid = subvol_objectid; 874 key.type = BTRFS_ROOT_BACKREF_KEY; 875 key.offset = (u64)-1; 876 877 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 878 if (ret < 0) { 879 goto err; 880 } else if (ret > 0) { 881 ret = btrfs_previous_item(root, path, subvol_objectid, 882 BTRFS_ROOT_BACKREF_KEY); 883 if (ret < 0) { 884 goto err; 885 } else if (ret > 0) { 886 ret = -ENOENT; 887 goto err; 888 } 889 } 890 891 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 892 subvol_objectid = key.offset; 893 894 root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], 895 struct btrfs_root_ref); 896 len = btrfs_root_ref_name_len(path->nodes[0], root_ref); 897 ptr -= len + 1; 898 if (ptr < name) { 899 ret = -ENAMETOOLONG; 900 goto err; 901 } 902 read_extent_buffer(path->nodes[0], ptr + 1, 903 (unsigned long)(root_ref + 1), len); 904 ptr[0] = '/'; 905 dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref); 906 btrfs_release_path(path); 907 908 key.objectid = subvol_objectid; 909 key.type = BTRFS_ROOT_ITEM_KEY; 910 key.offset = (u64)-1; 911 fs_root = btrfs_read_fs_root_no_name(fs_info, &key); 912 if (IS_ERR(fs_root)) { 913 ret = PTR_ERR(fs_root); 914 goto err; 915 } 916 917 /* 918 * Walk up the filesystem tree by inode refs until we hit the 919 * root directory. 920 */ 921 while (dirid != BTRFS_FIRST_FREE_OBJECTID) { 922 key.objectid = dirid; 923 key.type = BTRFS_INODE_REF_KEY; 924 key.offset = (u64)-1; 925 926 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 927 if (ret < 0) { 928 goto err; 929 } else if (ret > 0) { 930 ret = btrfs_previous_item(fs_root, path, dirid, 931 BTRFS_INODE_REF_KEY); 932 if (ret < 0) { 933 goto err; 934 } else if (ret > 0) { 935 ret = -ENOENT; 936 goto err; 937 } 938 } 939 940 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 941 dirid = key.offset; 942 943 inode_ref = btrfs_item_ptr(path->nodes[0], 944 path->slots[0], 945 struct btrfs_inode_ref); 946 len = btrfs_inode_ref_name_len(path->nodes[0], 947 inode_ref); 948 ptr -= len + 1; 949 if (ptr < name) { 950 ret = -ENAMETOOLONG; 951 goto err; 952 } 953 read_extent_buffer(path->nodes[0], ptr + 1, 954 (unsigned long)(inode_ref + 1), len); 955 ptr[0] = '/'; 956 btrfs_release_path(path); 957 } 958 } 959 960 btrfs_free_path(path); 961 if (ptr == name + PATH_MAX - 1) { 962 name[0] = '/'; 963 name[1] = '\0'; 964 } else { 965 memmove(name, ptr, name + PATH_MAX - ptr); 966 } 967 return name; 968 969 err: 970 btrfs_free_path(path); 971 kfree(name); 972 return ERR_PTR(ret); 973 } 974 975 static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid) 976 { 977 struct btrfs_root *root = fs_info->tree_root; 978 struct btrfs_dir_item *di; 979 struct btrfs_path *path; 980 struct btrfs_key location; 981 u64 dir_id; 982 983 path = btrfs_alloc_path(); 984 if (!path) 985 return -ENOMEM; 986 path->leave_spinning = 1; 987 988 /* 989 * Find the "default" dir item which points to the root item that we 990 * will mount by default if we haven't been given a specific subvolume 991 * to mount. 992 */ 993 dir_id = btrfs_super_root_dir(fs_info->super_copy); 994 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); 995 if (IS_ERR(di)) { 996 btrfs_free_path(path); 997 return PTR_ERR(di); 998 } 999 if (!di) { 1000 /* 1001 * Ok the default dir item isn't there. This is weird since 1002 * it's always been there, but don't freak out, just try and 1003 * mount the top-level subvolume. 
1004 */ 1005 btrfs_free_path(path); 1006 *objectid = BTRFS_FS_TREE_OBJECTID; 1007 return 0; 1008 } 1009 1010 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 1011 btrfs_free_path(path); 1012 *objectid = location.objectid; 1013 return 0; 1014 } 1015 1016 static int btrfs_fill_super(struct super_block *sb, 1017 struct btrfs_fs_devices *fs_devices, 1018 void *data, int silent) 1019 { 1020 struct inode *inode; 1021 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1022 struct btrfs_key key; 1023 int err; 1024 1025 sb->s_maxbytes = MAX_LFS_FILESIZE; 1026 sb->s_magic = BTRFS_SUPER_MAGIC; 1027 sb->s_op = &btrfs_super_ops; 1028 sb->s_d_op = &btrfs_dentry_operations; 1029 sb->s_export_op = &btrfs_export_ops; 1030 sb->s_xattr = btrfs_xattr_handlers; 1031 sb->s_time_gran = 1; 1032 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 1033 sb->s_flags |= MS_POSIXACL; 1034 #endif 1035 sb->s_flags |= MS_I_VERSION; 1036 err = open_ctree(sb, fs_devices, (char *)data); 1037 if (err) { 1038 printk(KERN_ERR "BTRFS: open_ctree failed\n"); 1039 return err; 1040 } 1041 1042 key.objectid = BTRFS_FIRST_FREE_OBJECTID; 1043 key.type = BTRFS_INODE_ITEM_KEY; 1044 key.offset = 0; 1045 inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL); 1046 if (IS_ERR(inode)) { 1047 err = PTR_ERR(inode); 1048 goto fail_close; 1049 } 1050 1051 sb->s_root = d_make_root(inode); 1052 if (!sb->s_root) { 1053 err = -ENOMEM; 1054 goto fail_close; 1055 } 1056 1057 save_mount_options(sb, data); 1058 cleancache_init_fs(sb); 1059 sb->s_flags |= MS_ACTIVE; 1060 return 0; 1061 1062 fail_close: 1063 close_ctree(fs_info->tree_root); 1064 return err; 1065 } 1066 1067 int btrfs_sync_fs(struct super_block *sb, int wait) 1068 { 1069 struct btrfs_trans_handle *trans; 1070 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1071 struct btrfs_root *root = fs_info->tree_root; 1072 1073 trace_btrfs_sync_fs(wait); 1074 1075 if (!wait) { 1076 filemap_flush(fs_info->btree_inode->i_mapping); 1077 return 0; 1078 } 1079 1080 btrfs_wait_ordered_roots(fs_info, -1); 1081 1082 trans = btrfs_attach_transaction_barrier(root); 1083 if (IS_ERR(trans)) { 1084 /* no transaction, don't bother */ 1085 if (PTR_ERR(trans) == -ENOENT) { 1086 /* 1087 * Exit unless we have some pending changes 1088 * that need to go through commit 1089 */ 1090 if (fs_info->pending_changes == 0) 1091 return 0; 1092 /* 1093 * A non-blocking test if the fs is frozen. We must not 1094 * start a new transaction here otherwise a deadlock 1095 * happens. The pending operations are delayed to the 1096 * next commit after thawing. 
1097 */ 1098 if (__sb_start_write(sb, SB_FREEZE_WRITE, false)) 1099 __sb_end_write(sb, SB_FREEZE_WRITE); 1100 else 1101 return 0; 1102 trans = btrfs_start_transaction(root, 0); 1103 } 1104 if (IS_ERR(trans)) 1105 return PTR_ERR(trans); 1106 } 1107 return btrfs_commit_transaction(trans, root); 1108 } 1109 1110 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) 1111 { 1112 struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); 1113 struct btrfs_root *root = info->tree_root; 1114 char *compress_type; 1115 1116 if (btrfs_test_opt(root, DEGRADED)) 1117 seq_puts(seq, ",degraded"); 1118 if (btrfs_test_opt(root, NODATASUM)) 1119 seq_puts(seq, ",nodatasum"); 1120 if (btrfs_test_opt(root, NODATACOW)) 1121 seq_puts(seq, ",nodatacow"); 1122 if (btrfs_test_opt(root, NOBARRIER)) 1123 seq_puts(seq, ",nobarrier"); 1124 if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE) 1125 seq_printf(seq, ",max_inline=%llu", info->max_inline); 1126 if (info->alloc_start != 0) 1127 seq_printf(seq, ",alloc_start=%llu", info->alloc_start); 1128 if (info->thread_pool_size != min_t(unsigned long, 1129 num_online_cpus() + 2, 8)) 1130 seq_printf(seq, ",thread_pool=%d", info->thread_pool_size); 1131 if (btrfs_test_opt(root, COMPRESS)) { 1132 if (info->compress_type == BTRFS_COMPRESS_ZLIB) 1133 compress_type = "zlib"; 1134 else 1135 compress_type = "lzo"; 1136 if (btrfs_test_opt(root, FORCE_COMPRESS)) 1137 seq_printf(seq, ",compress-force=%s", compress_type); 1138 else 1139 seq_printf(seq, ",compress=%s", compress_type); 1140 } 1141 if (btrfs_test_opt(root, NOSSD)) 1142 seq_puts(seq, ",nossd"); 1143 if (btrfs_test_opt(root, SSD_SPREAD)) 1144 seq_puts(seq, ",ssd_spread"); 1145 else if (btrfs_test_opt(root, SSD)) 1146 seq_puts(seq, ",ssd"); 1147 if (btrfs_test_opt(root, NOTREELOG)) 1148 seq_puts(seq, ",notreelog"); 1149 if (btrfs_test_opt(root, FLUSHONCOMMIT)) 1150 seq_puts(seq, ",flushoncommit"); 1151 if (btrfs_test_opt(root, DISCARD)) 1152 seq_puts(seq, ",discard"); 1153 if (!(root->fs_info->sb->s_flags & MS_POSIXACL)) 1154 seq_puts(seq, ",noacl"); 1155 if (btrfs_test_opt(root, SPACE_CACHE)) 1156 seq_puts(seq, ",space_cache"); 1157 else 1158 seq_puts(seq, ",nospace_cache"); 1159 if (btrfs_test_opt(root, RESCAN_UUID_TREE)) 1160 seq_puts(seq, ",rescan_uuid_tree"); 1161 if (btrfs_test_opt(root, CLEAR_CACHE)) 1162 seq_puts(seq, ",clear_cache"); 1163 if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) 1164 seq_puts(seq, ",user_subvol_rm_allowed"); 1165 if (btrfs_test_opt(root, ENOSPC_DEBUG)) 1166 seq_puts(seq, ",enospc_debug"); 1167 if (btrfs_test_opt(root, AUTO_DEFRAG)) 1168 seq_puts(seq, ",autodefrag"); 1169 if (btrfs_test_opt(root, INODE_MAP_CACHE)) 1170 seq_puts(seq, ",inode_cache"); 1171 if (btrfs_test_opt(root, SKIP_BALANCE)) 1172 seq_puts(seq, ",skip_balance"); 1173 if (btrfs_test_opt(root, RECOVERY)) 1174 seq_puts(seq, ",recovery"); 1175 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 1176 if (btrfs_test_opt(root, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA)) 1177 seq_puts(seq, ",check_int_data"); 1178 else if (btrfs_test_opt(root, CHECK_INTEGRITY)) 1179 seq_puts(seq, ",check_int"); 1180 if (info->check_integrity_print_mask) 1181 seq_printf(seq, ",check_int_print_mask=%d", 1182 info->check_integrity_print_mask); 1183 #endif 1184 if (info->metadata_ratio) 1185 seq_printf(seq, ",metadata_ratio=%d", 1186 info->metadata_ratio); 1187 if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR)) 1188 seq_puts(seq, ",fatal_errors=panic"); 1189 if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL) 1190 seq_printf(seq, ",commit=%d", 
info->commit_interval); 1191 seq_printf(seq, ",subvolid=%llu", 1192 BTRFS_I(d_inode(dentry))->root->root_key.objectid); 1193 seq_puts(seq, ",subvol="); 1194 seq_dentry(seq, dentry, " \t\n\\"); 1195 return 0; 1196 } 1197 1198 static int btrfs_test_super(struct super_block *s, void *data) 1199 { 1200 struct btrfs_fs_info *p = data; 1201 struct btrfs_fs_info *fs_info = btrfs_sb(s); 1202 1203 return fs_info->fs_devices == p->fs_devices; 1204 } 1205 1206 static int btrfs_set_super(struct super_block *s, void *data) 1207 { 1208 int err = set_anon_super(s, data); 1209 if (!err) 1210 s->s_fs_info = data; 1211 return err; 1212 } 1213 1214 /* 1215 * subvolumes are identified by ino 256 1216 */ 1217 static inline int is_subvolume_inode(struct inode *inode) 1218 { 1219 if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 1220 return 1; 1221 return 0; 1222 } 1223 1224 /* 1225 * This will add subvolid=0 to the argument string while removing any subvol= 1226 * and subvolid= arguments to make sure we get the top-level root for path 1227 * walking to the subvol we want. 1228 */ 1229 static char *setup_root_args(char *args) 1230 { 1231 char *buf, *dst, *sep; 1232 1233 if (!args) 1234 return kstrdup("subvolid=0", GFP_NOFS); 1235 1236 /* The worst case is that we add ",subvolid=0" to the end. */ 1237 buf = dst = kmalloc(strlen(args) + strlen(",subvolid=0") + 1, GFP_NOFS); 1238 if (!buf) 1239 return NULL; 1240 1241 while (1) { 1242 sep = strchrnul(args, ','); 1243 if (!strstarts(args, "subvol=") && 1244 !strstarts(args, "subvolid=")) { 1245 memcpy(dst, args, sep - args); 1246 dst += sep - args; 1247 *dst++ = ','; 1248 } 1249 if (*sep) 1250 args = sep + 1; 1251 else 1252 break; 1253 } 1254 strcpy(dst, "subvolid=0"); 1255 1256 return buf; 1257 } 1258 1259 static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, 1260 int flags, const char *device_name, 1261 char *data) 1262 { 1263 struct dentry *root; 1264 struct vfsmount *mnt = NULL; 1265 char *newargs; 1266 int ret; 1267 1268 newargs = setup_root_args(data); 1269 if (!newargs) { 1270 root = ERR_PTR(-ENOMEM); 1271 goto out; 1272 } 1273 1274 mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs); 1275 if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) { 1276 if (flags & MS_RDONLY) { 1277 mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY, 1278 device_name, newargs); 1279 } else { 1280 mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY, 1281 device_name, newargs); 1282 if (IS_ERR(mnt)) { 1283 root = ERR_CAST(mnt); 1284 mnt = NULL; 1285 goto out; 1286 } 1287 1288 down_write(&mnt->mnt_sb->s_umount); 1289 ret = btrfs_remount(mnt->mnt_sb, &flags, NULL); 1290 up_write(&mnt->mnt_sb->s_umount); 1291 if (ret < 0) { 1292 root = ERR_PTR(ret); 1293 goto out; 1294 } 1295 } 1296 } 1297 if (IS_ERR(mnt)) { 1298 root = ERR_CAST(mnt); 1299 mnt = NULL; 1300 goto out; 1301 } 1302 1303 if (!subvol_name) { 1304 if (!subvol_objectid) { 1305 ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb), 1306 &subvol_objectid); 1307 if (ret) { 1308 root = ERR_PTR(ret); 1309 goto out; 1310 } 1311 } 1312 subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), 1313 subvol_objectid); 1314 if (IS_ERR(subvol_name)) { 1315 root = ERR_CAST(subvol_name); 1316 subvol_name = NULL; 1317 goto out; 1318 } 1319 1320 } 1321 1322 root = mount_subtree(mnt, subvol_name); 1323 /* mount_subtree() drops our reference on the vfsmount. 
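	 * It must not be dropped again here; clearing mnt keeps the
	 * mntput() in the shared out label a no-op (mntput(NULL) is safe).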
*/ 1324 mnt = NULL; 1325 1326 if (!IS_ERR(root)) { 1327 struct super_block *s = root->d_sb; 1328 struct inode *root_inode = d_inode(root); 1329 u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid; 1330 1331 ret = 0; 1332 if (!is_subvolume_inode(root_inode)) { 1333 pr_err("BTRFS: '%s' is not a valid subvolume\n", 1334 subvol_name); 1335 ret = -EINVAL; 1336 } 1337 if (subvol_objectid && root_objectid != subvol_objectid) { 1338 /* 1339 * This will also catch a race condition where a 1340 * subvolume which was passed by ID is renamed and 1341 * another subvolume is renamed over the old location. 1342 */ 1343 pr_err("BTRFS: subvol '%s' does not match subvolid %llu\n", 1344 subvol_name, subvol_objectid); 1345 ret = -EINVAL; 1346 } 1347 if (ret) { 1348 dput(root); 1349 root = ERR_PTR(ret); 1350 deactivate_locked_super(s); 1351 } 1352 } 1353 1354 out: 1355 mntput(mnt); 1356 kfree(newargs); 1357 kfree(subvol_name); 1358 return root; 1359 } 1360 1361 static int parse_security_options(char *orig_opts, 1362 struct security_mnt_opts *sec_opts) 1363 { 1364 char *secdata = NULL; 1365 int ret = 0; 1366 1367 secdata = alloc_secdata(); 1368 if (!secdata) 1369 return -ENOMEM; 1370 ret = security_sb_copy_data(orig_opts, secdata); 1371 if (ret) { 1372 free_secdata(secdata); 1373 return ret; 1374 } 1375 ret = security_sb_parse_opts_str(secdata, sec_opts); 1376 free_secdata(secdata); 1377 return ret; 1378 } 1379 1380 static int setup_security_options(struct btrfs_fs_info *fs_info, 1381 struct super_block *sb, 1382 struct security_mnt_opts *sec_opts) 1383 { 1384 int ret = 0; 1385 1386 /* 1387 * Call security_sb_set_mnt_opts() to check whether new sec_opts 1388 * is valid. 1389 */ 1390 ret = security_sb_set_mnt_opts(sb, sec_opts, 0, NULL); 1391 if (ret) 1392 return ret; 1393 1394 #ifdef CONFIG_SECURITY 1395 if (!fs_info->security_opts.num_mnt_opts) { 1396 /* first time security setup, copy sec_opts to fs_info */ 1397 memcpy(&fs_info->security_opts, sec_opts, sizeof(*sec_opts)); 1398 } else { 1399 /* 1400 * Since SELinux(the only one supports security_mnt_opts) does 1401 * NOT support changing context during remount/mount same sb, 1402 * This must be the same or part of the same security options, 1403 * just free it. 1404 */ 1405 security_free_mnt_opts(sec_opts); 1406 } 1407 #endif 1408 return ret; 1409 } 1410 1411 /* 1412 * Find a superblock for the given device / mount point. 1413 * 1414 * Note: This is based on get_sb_bdev from fs/super.c with a few additions 1415 * for multiple device setup. Make sure to keep it in sync. 1416 */ 1417 static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, 1418 const char *device_name, void *data) 1419 { 1420 struct block_device *bdev = NULL; 1421 struct super_block *s; 1422 struct btrfs_fs_devices *fs_devices = NULL; 1423 struct btrfs_fs_info *fs_info = NULL; 1424 struct security_mnt_opts new_sec_opts; 1425 fmode_t mode = FMODE_READ; 1426 char *subvol_name = NULL; 1427 u64 subvol_objectid = 0; 1428 int error = 0; 1429 1430 if (!(flags & MS_RDONLY)) 1431 mode |= FMODE_WRITE; 1432 1433 error = btrfs_parse_early_options(data, mode, fs_type, 1434 &subvol_name, &subvol_objectid, 1435 &fs_devices); 1436 if (error) { 1437 kfree(subvol_name); 1438 return ERR_PTR(error); 1439 } 1440 1441 if (subvol_name || subvol_objectid != BTRFS_FS_TREE_OBJECTID) { 1442 /* mount_subvol() will free subvol_name. 
*/ 1443 return mount_subvol(subvol_name, subvol_objectid, flags, 1444 device_name, data); 1445 } 1446 1447 security_init_mnt_opts(&new_sec_opts); 1448 if (data) { 1449 error = parse_security_options(data, &new_sec_opts); 1450 if (error) 1451 return ERR_PTR(error); 1452 } 1453 1454 error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices); 1455 if (error) 1456 goto error_sec_opts; 1457 1458 /* 1459 * Setup a dummy root and fs_info for test/set super. This is because 1460 * we don't actually fill this stuff out until open_ctree, but we need 1461 * it for searching for existing supers, so this lets us do that and 1462 * then open_ctree will properly initialize everything later. 1463 */ 1464 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS); 1465 if (!fs_info) { 1466 error = -ENOMEM; 1467 goto error_sec_opts; 1468 } 1469 1470 fs_info->fs_devices = fs_devices; 1471 1472 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 1473 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS); 1474 security_init_mnt_opts(&fs_info->security_opts); 1475 if (!fs_info->super_copy || !fs_info->super_for_commit) { 1476 error = -ENOMEM; 1477 goto error_fs_info; 1478 } 1479 1480 error = btrfs_open_devices(fs_devices, mode, fs_type); 1481 if (error) 1482 goto error_fs_info; 1483 1484 if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) { 1485 error = -EACCES; 1486 goto error_close_devices; 1487 } 1488 1489 bdev = fs_devices->latest_bdev; 1490 s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC, 1491 fs_info); 1492 if (IS_ERR(s)) { 1493 error = PTR_ERR(s); 1494 goto error_close_devices; 1495 } 1496 1497 if (s->s_root) { 1498 btrfs_close_devices(fs_devices); 1499 free_fs_info(fs_info); 1500 if ((flags ^ s->s_flags) & MS_RDONLY) 1501 error = -EBUSY; 1502 } else { 1503 char b[BDEVNAME_SIZE]; 1504 1505 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); 1506 btrfs_sb(s)->bdev_holder = fs_type; 1507 error = btrfs_fill_super(s, fs_devices, data, 1508 flags & MS_SILENT ? 
					 1 : 0);
	}
	if (error) {
		deactivate_locked_super(s);
		goto error_sec_opts;
	}

	fs_info = btrfs_sb(s);
	error = setup_security_options(fs_info, s, &new_sec_opts);
	if (error) {
		deactivate_locked_super(s);
		goto error_sec_opts;
	}

	return dget(s->s_root);

error_close_devices:
	btrfs_close_devices(fs_devices);
error_fs_info:
	free_fs_info(fs_info);
error_sec_opts:
	security_free_mnt_opts(&new_sec_opts);
	return ERR_PTR(error);
}

static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
				     int new_pool_size, int old_pool_size)
{
	if (new_pool_size == old_pool_size)
		return;

	fs_info->thread_pool_size = new_pool_size;

	btrfs_info(fs_info, "resize thread pool %d -> %d",
		   old_pool_size, new_pool_size);

	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
				new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
				new_pool_size);
}

static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}

static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
				       unsigned long old_opts, int flags)
{
	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
	     (flags & MS_RDONLY))) {
		/* wait for any defraggers to finish */
		wait_event(fs_info->transaction_wait,
			   (atomic_read(&fs_info->defrag_running) == 0));
		if (flags & MS_RDONLY)
			sync_filesystem(fs_info->sb);
	}
}

static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
					 unsigned long old_opts)
{
	/*
	 * We need to clean up all defraggable inodes if autodefrag has been
	 * turned off or the fs has been remounted R/O.
	 */
	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
	     (fs_info->sb->s_flags & MS_RDONLY))) {
		btrfs_cleanup_defrag_inodes(fs_info);
	}

	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}

static int btrfs_remount(struct super_block *sb, int *flags, char *data)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_root *root = fs_info->tree_root;
	unsigned old_flags = sb->s_flags;
	unsigned long old_opts = fs_info->mount_opt;
	unsigned long old_compress_type = fs_info->compress_type;
	u64 old_max_inline = fs_info->max_inline;
	u64 old_alloc_start = fs_info->alloc_start;
	int old_thread_pool_size = fs_info->thread_pool_size;
	unsigned int old_metadata_ratio = fs_info->metadata_ratio;
	int ret;

	sync_filesystem(sb);
	btrfs_remount_prepare(fs_info);

	if (data) {
		struct security_mnt_opts new_sec_opts;

		security_init_mnt_opts(&new_sec_opts);
		ret = parse_security_options(data, &new_sec_opts);
		if (ret)
			goto restore;
		ret = setup_security_options(fs_info, sb,
					     &new_sec_opts);
		if (ret) {
			security_free_mnt_opts(&new_sec_opts);
			goto restore;
		}
	}

	ret = btrfs_parse_options(root, data);
	if (ret) {
		ret = -EINVAL;
		goto restore;
	}

	btrfs_remount_begin(fs_info, old_opts, *flags);
	btrfs_resize_thread_pool(fs_info,
		fs_info->thread_pool_size, old_thread_pool_size);

	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		goto out;

	if (*flags & MS_RDONLY) {
		/*
		 * this also happens on 'umount -rf' or on shutdown, when
		 * the filesystem is busy.
		 */
		cancel_work_sync(&fs_info->async_reclaim_work);

		/* wait for the uuid_scan task to finish */
		down(&fs_info->uuid_tree_rescan_sem);
		/* avoid complaints from lockdep et al.
*/ 1649 up(&fs_info->uuid_tree_rescan_sem); 1650 1651 sb->s_flags |= MS_RDONLY; 1652 1653 btrfs_dev_replace_suspend_for_unmount(fs_info); 1654 btrfs_scrub_cancel(fs_info); 1655 btrfs_pause_balance(fs_info); 1656 1657 ret = btrfs_commit_super(root); 1658 if (ret) 1659 goto restore; 1660 } else { 1661 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) { 1662 btrfs_err(fs_info, 1663 "Remounting read-write after error is not allowed"); 1664 ret = -EINVAL; 1665 goto restore; 1666 } 1667 if (fs_info->fs_devices->rw_devices == 0) { 1668 ret = -EACCES; 1669 goto restore; 1670 } 1671 1672 if (fs_info->fs_devices->missing_devices > 1673 fs_info->num_tolerated_disk_barrier_failures && 1674 !(*flags & MS_RDONLY)) { 1675 btrfs_warn(fs_info, 1676 "too many missing devices, writeable remount is not allowed"); 1677 ret = -EACCES; 1678 goto restore; 1679 } 1680 1681 if (btrfs_super_log_root(fs_info->super_copy) != 0) { 1682 ret = -EINVAL; 1683 goto restore; 1684 } 1685 1686 ret = btrfs_cleanup_fs_roots(fs_info); 1687 if (ret) 1688 goto restore; 1689 1690 /* recover relocation */ 1691 mutex_lock(&fs_info->cleaner_mutex); 1692 ret = btrfs_recover_relocation(root); 1693 mutex_unlock(&fs_info->cleaner_mutex); 1694 if (ret) 1695 goto restore; 1696 1697 ret = btrfs_resume_balance_async(fs_info); 1698 if (ret) 1699 goto restore; 1700 1701 ret = btrfs_resume_dev_replace_async(fs_info); 1702 if (ret) { 1703 btrfs_warn(fs_info, "failed to resume dev_replace"); 1704 goto restore; 1705 } 1706 1707 if (!fs_info->uuid_root) { 1708 btrfs_info(fs_info, "creating UUID tree"); 1709 ret = btrfs_create_uuid_tree(fs_info); 1710 if (ret) { 1711 btrfs_warn(fs_info, "failed to create the UUID tree %d", ret); 1712 goto restore; 1713 } 1714 } 1715 sb->s_flags &= ~MS_RDONLY; 1716 } 1717 out: 1718 wake_up_process(fs_info->transaction_kthread); 1719 btrfs_remount_cleanup(fs_info, old_opts); 1720 return 0; 1721 1722 restore: 1723 /* We've hit an error - don't reset MS_RDONLY */ 1724 if (sb->s_flags & MS_RDONLY) 1725 old_flags |= MS_RDONLY; 1726 sb->s_flags = old_flags; 1727 fs_info->mount_opt = old_opts; 1728 fs_info->compress_type = old_compress_type; 1729 fs_info->max_inline = old_max_inline; 1730 mutex_lock(&fs_info->chunk_mutex); 1731 fs_info->alloc_start = old_alloc_start; 1732 mutex_unlock(&fs_info->chunk_mutex); 1733 btrfs_resize_thread_pool(fs_info, 1734 old_thread_pool_size, fs_info->thread_pool_size); 1735 fs_info->metadata_ratio = old_metadata_ratio; 1736 btrfs_remount_cleanup(fs_info, old_opts); 1737 return ret; 1738 } 1739 1740 /* Used to sort the devices by max_avail(descending sort) */ 1741 static int btrfs_cmp_device_free_bytes(const void *dev_info1, 1742 const void *dev_info2) 1743 { 1744 if (((struct btrfs_device_info *)dev_info1)->max_avail > 1745 ((struct btrfs_device_info *)dev_info2)->max_avail) 1746 return -1; 1747 else if (((struct btrfs_device_info *)dev_info1)->max_avail < 1748 ((struct btrfs_device_info *)dev_info2)->max_avail) 1749 return 1; 1750 else 1751 return 0; 1752 } 1753 1754 /* 1755 * sort the devices by max_avail, in which max free extent size of each device 1756 * is stored.(Descending Sort) 1757 */ 1758 static inline void btrfs_descending_sort_devices( 1759 struct btrfs_device_info *devices, 1760 size_t nr_devices) 1761 { 1762 sort(devices, nr_devices, sizeof(struct btrfs_device_info), 1763 btrfs_cmp_device_free_bytes, NULL); 1764 } 1765 1766 /* 1767 * The helper to calc the free space on the devices that can be used to store 1768 * file data. 
1769 */ 1770 static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) 1771 { 1772 struct btrfs_fs_info *fs_info = root->fs_info; 1773 struct btrfs_device_info *devices_info; 1774 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; 1775 struct btrfs_device *device; 1776 u64 skip_space; 1777 u64 type; 1778 u64 avail_space; 1779 u64 used_space; 1780 u64 min_stripe_size; 1781 int min_stripes = 1, num_stripes = 1; 1782 int i = 0, nr_devices; 1783 int ret; 1784 1785 /* 1786 * We aren't under the device list lock, so this is racey-ish, but good 1787 * enough for our purposes. 1788 */ 1789 nr_devices = fs_info->fs_devices->open_devices; 1790 if (!nr_devices) { 1791 smp_mb(); 1792 nr_devices = fs_info->fs_devices->open_devices; 1793 ASSERT(nr_devices); 1794 if (!nr_devices) { 1795 *free_bytes = 0; 1796 return 0; 1797 } 1798 } 1799 1800 devices_info = kmalloc_array(nr_devices, sizeof(*devices_info), 1801 GFP_NOFS); 1802 if (!devices_info) 1803 return -ENOMEM; 1804 1805 /* calc min stripe number for data space alloction */ 1806 type = btrfs_get_alloc_profile(root, 1); 1807 if (type & BTRFS_BLOCK_GROUP_RAID0) { 1808 min_stripes = 2; 1809 num_stripes = nr_devices; 1810 } else if (type & BTRFS_BLOCK_GROUP_RAID1) { 1811 min_stripes = 2; 1812 num_stripes = 2; 1813 } else if (type & BTRFS_BLOCK_GROUP_RAID10) { 1814 min_stripes = 4; 1815 num_stripes = 4; 1816 } 1817 1818 if (type & BTRFS_BLOCK_GROUP_DUP) 1819 min_stripe_size = 2 * BTRFS_STRIPE_LEN; 1820 else 1821 min_stripe_size = BTRFS_STRIPE_LEN; 1822 1823 if (fs_info->alloc_start) 1824 mutex_lock(&fs_devices->device_list_mutex); 1825 rcu_read_lock(); 1826 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { 1827 if (!device->in_fs_metadata || !device->bdev || 1828 device->is_tgtdev_for_dev_replace) 1829 continue; 1830 1831 if (i >= nr_devices) 1832 break; 1833 1834 avail_space = device->total_bytes - device->bytes_used; 1835 1836 /* align with stripe_len */ 1837 avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN); 1838 avail_space *= BTRFS_STRIPE_LEN; 1839 1840 /* 1841 * In order to avoid overwritting the superblock on the drive, 1842 * btrfs starts at an offset of at least 1MB when doing chunk 1843 * allocation. 1844 */ 1845 skip_space = 1024 * 1024; 1846 1847 /* user can set the offset in fs_info->alloc_start. */ 1848 if (fs_info->alloc_start && 1849 fs_info->alloc_start + BTRFS_STRIPE_LEN <= 1850 device->total_bytes) { 1851 rcu_read_unlock(); 1852 skip_space = max(fs_info->alloc_start, skip_space); 1853 1854 /* 1855 * btrfs can not use the free space in 1856 * [0, skip_space - 1], we must subtract it from the 1857 * total. In order to implement it, we account the used 1858 * space in this range first. 1859 */ 1860 ret = btrfs_account_dev_extents_size(device, 0, 1861 skip_space - 1, 1862 &used_space); 1863 if (ret) { 1864 kfree(devices_info); 1865 mutex_unlock(&fs_devices->device_list_mutex); 1866 return ret; 1867 } 1868 1869 rcu_read_lock(); 1870 1871 /* calc the free space in [0, skip_space - 1] */ 1872 skip_space -= used_space; 1873 } 1874 1875 /* 1876 * we can use the free space in [0, skip_space - 1], subtract 1877 * it from the total. 
1878 */ 1879 if (avail_space && avail_space >= skip_space) 1880 avail_space -= skip_space; 1881 else 1882 avail_space = 0; 1883 1884 if (avail_space < min_stripe_size) 1885 continue; 1886 1887 devices_info[i].dev = device; 1888 devices_info[i].max_avail = avail_space; 1889 1890 i++; 1891 } 1892 rcu_read_unlock(); 1893 if (fs_info->alloc_start) 1894 mutex_unlock(&fs_devices->device_list_mutex); 1895 1896 nr_devices = i; 1897 1898 btrfs_descending_sort_devices(devices_info, nr_devices); 1899 1900 i = nr_devices - 1; 1901 avail_space = 0; 1902 while (nr_devices >= min_stripes) { 1903 if (num_stripes > nr_devices) 1904 num_stripes = nr_devices; 1905 1906 if (devices_info[i].max_avail >= min_stripe_size) { 1907 int j; 1908 u64 alloc_size; 1909 1910 avail_space += devices_info[i].max_avail * num_stripes; 1911 alloc_size = devices_info[i].max_avail; 1912 for (j = i + 1 - num_stripes; j <= i; j++) 1913 devices_info[j].max_avail -= alloc_size; 1914 } 1915 i--; 1916 nr_devices--; 1917 } 1918 1919 kfree(devices_info); 1920 *free_bytes = avail_space; 1921 return 0; 1922 } 1923 1924 /* 1925 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles. 1926 * 1927 * If there's a redundant raid level at DATA block groups, use the respective 1928 * multiplier to scale the sizes. 1929 * 1930 * Unused device space usage is based on simulating the chunk allocator 1931 * algorithm that respects the device sizes, order of allocations and the 1932 * 'alloc_start' value, this is a close approximation of the actual use but 1933 * there are other factors that may change the result (like a new metadata 1934 * chunk). 1935 * 1936 * FIXME: not accurate for mixed block groups, total and free/used are ok, 1937 * available appears slightly larger. 1938 */ 1939 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) 1940 { 1941 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 1942 struct btrfs_super_block *disk_super = fs_info->super_copy; 1943 struct list_head *head = &fs_info->space_info; 1944 struct btrfs_space_info *found; 1945 u64 total_used = 0; 1946 u64 total_free_data = 0; 1947 int bits = dentry->d_sb->s_blocksize_bits; 1948 __be32 *fsid = (__be32 *)fs_info->fsid; 1949 unsigned factor = 1; 1950 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; 1951 int ret; 1952 1953 /* 1954 * holding chunk_muext to avoid allocating new chunks, holding 1955 * device_list_mutex to avoid the device being removed 1956 */ 1957 rcu_read_lock(); 1958 list_for_each_entry_rcu(found, head, list) { 1959 if (found->flags & BTRFS_BLOCK_GROUP_DATA) { 1960 int i; 1961 1962 total_free_data += found->disk_total - found->disk_used; 1963 total_free_data -= 1964 btrfs_account_ro_block_groups_free_space(found); 1965 1966 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 1967 if (!list_empty(&found->block_groups[i])) { 1968 switch (i) { 1969 case BTRFS_RAID_DUP: 1970 case BTRFS_RAID_RAID1: 1971 case BTRFS_RAID_RAID10: 1972 factor = 2; 1973 } 1974 } 1975 } 1976 } 1977 1978 total_used += found->disk_used; 1979 } 1980 1981 rcu_read_unlock(); 1982 1983 buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor); 1984 buf->f_blocks >>= bits; 1985 buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits); 1986 1987 /* Account global block reserve as used, it's in logical size already */ 1988 spin_lock(&block_rsv->lock); 1989 buf->f_bfree -= block_rsv->size >> bits; 1990 spin_unlock(&block_rsv->lock); 1991 1992 buf->f_bavail = div_u64(total_free_data, factor); 1993 ret = 

/*
 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
 *
 * If there's a redundant raid level at DATA block groups, use the respective
 * multiplier to scale the sizes.
 *
 * Unused device space usage is based on simulating the chunk allocator
 * algorithm that respects the device sizes, the order of allocations and the
 * 'alloc_start' value. This is a close approximation of the actual use, but
 * there are other factors that may change the result (like a new metadata
 * chunk).
 *
 * FIXME: not accurate for mixed block groups, total and free/used are ok,
 * available appears slightly larger.
 */
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
        struct btrfs_super_block *disk_super = fs_info->super_copy;
        struct list_head *head = &fs_info->space_info;
        struct btrfs_space_info *found;
        u64 total_used = 0;
        u64 total_free_data = 0;
        int bits = dentry->d_sb->s_blocksize_bits;
        __be32 *fsid = (__be32 *)fs_info->fsid;
        unsigned factor = 1;
        struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
        int ret;

        /*
         * We don't take chunk_mutex or device_list_mutex here, so the totals
         * below are only a snapshot and may race with concurrent chunk
         * allocation or device removal.
         */
        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
                        int i;

                        total_free_data += found->disk_total - found->disk_used;
                        total_free_data -=
                                btrfs_account_ro_block_groups_free_space(found);

                        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
                                if (!list_empty(&found->block_groups[i])) {
                                        switch (i) {
                                        case BTRFS_RAID_DUP:
                                        case BTRFS_RAID_RAID1:
                                        case BTRFS_RAID_RAID10:
                                                factor = 2;
                                        }
                                }
                        }
                }

                total_used += found->disk_used;
        }

        rcu_read_unlock();

        buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
        buf->f_blocks >>= bits;
        buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);

        /* Account global block reserve as used, it's in logical size already */
        spin_lock(&block_rsv->lock);
        buf->f_bfree -= block_rsv->size >> bits;
        spin_unlock(&block_rsv->lock);

        buf->f_bavail = div_u64(total_free_data, factor);
        ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
        if (ret)
                return ret;
        buf->f_bavail += div_u64(total_free_data, factor);
        buf->f_bavail = buf->f_bavail >> bits;

        buf->f_type = BTRFS_SUPER_MAGIC;
        buf->f_bsize = dentry->d_sb->s_blocksize;
        buf->f_namelen = BTRFS_NAME_LEN;

        /*
         * We treat it as constant endianness (it doesn't matter _which_)
         * because we want the fsid to come out the same whether mounted
         * on a big-endian or little-endian host.
         */
        buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
        buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
        /* Mask in the root object ID too, to disambiguate subvols */
        buf->f_fsid.val[0] ^= BTRFS_I(d_inode(dentry))->root->objectid >> 32;
        buf->f_fsid.val[1] ^= BTRFS_I(d_inode(dentry))->root->objectid;

        return 0;
}
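
/*
 * Worked example (hypothetical numbers, not from the original source):
 * two 100GiB devices with all data block groups in RAID1 give factor = 2,
 * so f_blocks reports 200GiB / 2 = 100GiB worth of blocks.  If 40GiB of
 * raw disk_used space is accounted, f_bfree is 100GiB - 20GiB minus the
 * global block reserve.  f_bavail adds the free space inside existing
 * data chunks and the unallocated-space estimate from
 * btrfs_calc_avail_data_space(), both scaled down by the same factor.
 */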

static void btrfs_kill_super(struct super_block *sb)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        kill_anon_super(sb);
        free_fs_info(fs_info);
}

static struct file_system_type btrfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "btrfs",
        .mount          = btrfs_mount,
        .kill_sb        = btrfs_kill_super,
        .fs_flags       = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
};
MODULE_ALIAS_FS("btrfs");

static int btrfs_control_open(struct inode *inode, struct file *file)
{
        /*
         * The control file's private_data is used to hold the
         * transaction when it is started and is used to keep
         * track of whether a transaction is already in progress.
         */
        file->private_data = NULL;
        return 0;
}

/*
 * Used by btrfsctl to scan devices when no FS is mounted.
 */
static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
{
        struct btrfs_ioctl_vol_args *vol;
        struct btrfs_fs_devices *fs_devices;
        int ret = -ENOTTY;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        vol = memdup_user((void __user *)arg, sizeof(*vol));
        if (IS_ERR(vol))
                return PTR_ERR(vol);

        switch (cmd) {
        case BTRFS_IOC_SCAN_DEV:
                ret = btrfs_scan_one_device(vol->name, FMODE_READ,
                                            &btrfs_fs_type, &fs_devices);
                break;
        case BTRFS_IOC_DEVICES_READY:
                ret = btrfs_scan_one_device(vol->name, FMODE_READ,
                                            &btrfs_fs_type, &fs_devices);
                if (ret)
                        break;
                /* 0 == all of the filesystem's devices have been registered */
                ret = !(fs_devices->num_devices == fs_devices->total_devices);
                break;
        }

        kfree(vol);
        return ret;
}
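
/*
 * A minimal userspace sketch (illustrative only, not part of this file) of
 * how the control device above is typically driven, e.g. by 'btrfs device
 * scan'.  The struct and ioctl numbers come from the uapi header
 * <linux/btrfs.h>; scan_one() is a hypothetical helper, error handling is
 * minimal and CAP_SYS_ADMIN is required:
 *
 *      #include <fcntl.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/btrfs.h>
 *
 *      int scan_one(const char *path)
 *      {
 *              struct btrfs_ioctl_vol_args args = { 0 };
 *              int fd, ret;
 *
 *              strncpy(args.name, path, BTRFS_PATH_NAME_MAX);
 *              fd = open("/dev/btrfs-control", O_RDWR);
 *              if (fd < 0)
 *                      return -1;
 *              ret = ioctl(fd, BTRFS_IOC_SCAN_DEV, &args);
 *              close(fd);
 *              return ret;
 *      }
 *
 * BTRFS_IOC_DEVICES_READY is issued the same way and returns 0 once every
 * device belonging to the filesystem has been registered.
 */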

static int btrfs_freeze(struct super_block *sb)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = btrfs_sb(sb)->tree_root;

        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                /* no transaction, don't bother */
                if (PTR_ERR(trans) == -ENOENT)
                        return 0;
                return PTR_ERR(trans);
        }
        return btrfs_commit_transaction(trans, root);
}

static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
        struct btrfs_fs_devices *cur_devices;
        struct btrfs_device *dev, *first_dev = NULL;
        struct list_head *head;
        struct rcu_string *name;

        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        cur_devices = fs_info->fs_devices;
        while (cur_devices) {
                head = &cur_devices->devices;
                list_for_each_entry(dev, head, dev_list) {
                        if (dev->missing)
                                continue;
                        if (!dev->name)
                                continue;
                        if (!first_dev || dev->devid < first_dev->devid)
                                first_dev = dev;
                }
                cur_devices = cur_devices->seed;
        }

        if (first_dev) {
                rcu_read_lock();
                name = rcu_dereference(first_dev->name);
                seq_escape(m, name->str, " \t\n\\");
                rcu_read_unlock();
        } else {
                WARN_ON(1);
        }
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
        return 0;
}

static const struct super_operations btrfs_super_ops = {
        .drop_inode     = btrfs_drop_inode,
        .evict_inode    = btrfs_evict_inode,
        .put_super      = btrfs_put_super,
        .sync_fs        = btrfs_sync_fs,
        .show_options   = btrfs_show_options,
        .show_devname   = btrfs_show_devname,
        .write_inode    = btrfs_write_inode,
        .alloc_inode    = btrfs_alloc_inode,
        .destroy_inode  = btrfs_destroy_inode,
        .statfs         = btrfs_statfs,
        .remount_fs     = btrfs_remount,
        .freeze_fs      = btrfs_freeze,
};

static const struct file_operations btrfs_ctl_fops = {
        .open           = btrfs_control_open,
        .unlocked_ioctl = btrfs_control_ioctl,
        .compat_ioctl   = btrfs_control_ioctl,
        .owner          = THIS_MODULE,
        .llseek         = noop_llseek,
};

static struct miscdevice btrfs_misc = {
        .minor          = BTRFS_MINOR,
        .name           = "btrfs-control",
        .fops           = &btrfs_ctl_fops
};

MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
MODULE_ALIAS("devname:btrfs-control");

static int btrfs_interface_init(void)
{
        return misc_register(&btrfs_misc);
}

static void btrfs_interface_exit(void)
{
        if (misc_deregister(&btrfs_misc) < 0)
                printk(KERN_INFO "BTRFS: misc_deregister failed for control device\n");
}

static void btrfs_print_info(void)
{
        printk(KERN_INFO "Btrfs loaded"
#ifdef CONFIG_BTRFS_DEBUG
                        ", debug=on"
#endif
#ifdef CONFIG_BTRFS_ASSERT
                        ", assert=on"
#endif
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
                        ", integrity-checker=on"
#endif
                        "\n");
}

static int btrfs_run_sanity_tests(void)
{
        int ret;

        ret = btrfs_init_test_fs();
        if (ret)
                return ret;

        ret = btrfs_test_free_space_cache();
        if (ret)
                goto out;
        ret = btrfs_test_extent_buffer_operations();
        if (ret)
                goto out;
        ret = btrfs_test_extent_io();
        if (ret)
                goto out;
        ret = btrfs_test_inodes();
        if (ret)
                goto out;
        ret = btrfs_test_qgroups();
out:
        btrfs_destroy_test_fs();
        return ret;
}

static int __init init_btrfs_fs(void)
{
        int err;

        err = btrfs_hash_init();
        if (err)
                return err;

        btrfs_props_init();

        err = btrfs_init_sysfs();
        if (err)
                goto free_hash;

        btrfs_init_compress();

        err = btrfs_init_cachep();
        if (err)
                goto free_compress;

        err = extent_io_init();
        if (err)
                goto free_cachep;

        err = extent_map_init();
        if (err)
                goto free_extent_io;

        err = ordered_data_init();
        if (err)
                goto free_extent_map;

        err = btrfs_delayed_inode_init();
        if (err)
                goto free_ordered_data;

        err = btrfs_auto_defrag_init();
        if (err)
                goto free_delayed_inode;

        err = btrfs_delayed_ref_init();
        if (err)
                goto free_auto_defrag;

        err = btrfs_prelim_ref_init();
        if (err)
                goto free_delayed_ref;

        err = btrfs_end_io_wq_init();
        if (err)
                goto free_prelim_ref;

        err = btrfs_interface_init();
        if (err)
                goto free_end_io_wq;

        btrfs_init_lockdep();

        btrfs_print_info();

        err = btrfs_run_sanity_tests();
        if (err)
                goto unregister_ioctl;

        err = register_filesystem(&btrfs_fs_type);
        if (err)
                goto unregister_ioctl;

        return 0;

unregister_ioctl:
        btrfs_interface_exit();
free_end_io_wq:
        btrfs_end_io_wq_exit();
free_prelim_ref:
        btrfs_prelim_ref_exit();
free_delayed_ref:
        btrfs_delayed_ref_exit();
free_auto_defrag:
        btrfs_auto_defrag_exit();
free_delayed_inode:
        btrfs_delayed_inode_exit();
free_ordered_data:
        ordered_data_exit();
free_extent_map:
        extent_map_exit();
free_extent_io:
        extent_io_exit();
free_cachep:
        btrfs_destroy_cachep();
free_compress:
        btrfs_exit_compress();
        btrfs_exit_sysfs();
free_hash:
        btrfs_hash_exit();
        return err;
}

static void __exit exit_btrfs_fs(void)
{
        btrfs_destroy_cachep();
        btrfs_delayed_ref_exit();
        btrfs_auto_defrag_exit();
        btrfs_delayed_inode_exit();
        btrfs_prelim_ref_exit();
        ordered_data_exit();
        extent_map_exit();
        extent_io_exit();
        btrfs_interface_exit();
        btrfs_end_io_wq_exit();
        unregister_filesystem(&btrfs_fs_type);
        btrfs_exit_sysfs();
        btrfs_cleanup_fs_uuids();
        btrfs_exit_compress();
        btrfs_hash_exit();
}

late_initcall(init_btrfs_fs);
module_exit(exit_btrfs_fs)

MODULE_LICENSE("GPL");