/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct btrfs_fs_info *fs_info;
	struct bio *bio;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->locks in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering a lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
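 *
 * For example, with the keysets below a level 0 leaf of the extent tree
 * gets the lockdep class named "btrfs-extent-00", and a node at the
 * maximum level 7 would get "btrfs-extent-07".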
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return -ENOMEM;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
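	 *
	 * Clearing uptodate forces the next reader to go back to disk and
	 * run this transid check again, which is what we want for a block
	 * that was simply written to the wrong place.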
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
			  csum_type);
		ret = 1;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
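	 *
	 * A bytenr mismatch means the buffer claims to live at a different
	 * disk location than the one it is being written to; a page that is
	 * not uptodate would make us checksum stale or unwritten data.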
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

#define CORRUPT(reason, eb, root, slot)					\
	btrfs_crit(root->fs_info,					\
		   "corrupt %s, %s: block=%llu, root=%llu, slot=%d",	\
		   btrfs_header_level(eb) == 0 ? "leaf" : "node",	\
		   reason, btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	/*
	 * Extent buffers from a relocation tree have an owner field that
	 * corresponds to the subvolume tree they are based on.  So just from
	 * an extent buffer alone we cannot find out the id of the
	 * corresponding subvolume tree, and thus cannot figure out whether
	 * the extent buffer corresponds to the root of the relocation tree
	 * or not.  So skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		struct btrfs_root *check_root;

		key.objectid = btrfs_header_owner(leaf);
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		check_root = btrfs_get_fs_root(fs_info, &key, false);
		/*
		 * The only reason we also check NULL here is that during
		 * open_ctree() some roots have not yet been set up.
		 */
		if (!IS_ERR_OR_NULL(check_root)) {
			struct extent_buffer *eb;

			eb = btrfs_root_node(check_root);
			/* if leaf is the root, then it's fine */
			if (leaf != eb) {
				CORRUPT("non-root leaf's nritems is 0",
					leaf, check_root, 0);
				free_extent_buffer(eb);
				return -EIO;
			}
			free_extent_buffer(eb);
		}
		return 0;
	}

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(fs_info)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
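		 *
		 * For example, since item 0's data is checked above to end
		 * exactly at BTRFS_LEAF_DATA_SIZE, if it starts at offset X,
		 * then item 1's data must end exactly at X, and so on toward
		 * the front of the leaf.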
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent to each other, but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}

static int check_node(struct btrfs_root *root, struct extent_buffer *node)
{
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
	u64 bytenr;
	int ret = 0;

	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
		btrfs_crit(root->fs_info,
			   "corrupt node: block %llu root %llu nritems %lu",
			   node->start, root->objectid, nr);
		return -EIO;
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (!bytenr) {
			CORRUPT("invalid item slot", node, root, slot);
			ret = -EIO;
			goto out;
		}

		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
			CORRUPT("bad key order", node, root, slot);
			ret = -EIO;
			goto out;
		}
	}
out:
	return ret;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/*
	 * the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
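	 * btree_read_extent_buffer_pages() checks EXTENT_BUFFER_CORRUPT and
	 * stops retrying other mirrors once it is set.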
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && check_node(root, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

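	/*
	 * Runs on a worker thread: call the checksumming hook and stash any
	 * failure in async->status so run_one_async_done can end the bio
	 * with the error.
	 */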
	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = async->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
			       async->bio_flags, async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_hook_t *submit_bio_start,
				 extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->fs_info = fs_info;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->status = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
					     int mirror_num, unsigned long bio_flags,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just checksum the bio here; the actual
	 * submission to the device happens later in __btree_submit_bio_done.
	 */
	return btree_csum_one_bio(bio);
}

static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
					    int mirror_num, unsigned long bio_flags,
					    u64 bio_offset)
{
	struct inode *inode = private_data;
	blk_status_t ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

static int check_async_write(unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(bio_flags);
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
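	 * If try_to_release_page() below cannot drop them we return
	 * -EAGAIN and the migration core will retry later.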
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
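		/* the read failed, drop the ref taken by find_create above */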
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;

}

void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic64_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer_fsid(leaf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer_fsid(root->node, fs_info->fsid);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

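	/*
	 * Read the root from disk; every tree except the log tree then gets
	 * BTRFS_ROOT_REF_COWS set and its root item sanity checked.
	 */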
	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* the caller is responsible to call free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;
	struct btrfs_trans_handle *trans;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
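		 * Recheck with the mutex held so that a state change (for
		 * example a concurrent remount) cannot slip in between.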
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
		btrfs_run_delayed_iputs(fs_info);
		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * so we needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex.  So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	/*
	 * Transaction kthread is stopped before us and wakes us up.
	 * However we might have started a new transaction and COWed some
	 * tree blocks when deleting unused block groups for example.  So
	 * make sure we commit the transaction we started to have a clean
	 * shutdown when evicting the btree inode - if it has dirty pages
	 * when we do the final iput() on it, eviction will trigger a
	 * writeback for it which will fail with null pointer dereferences
	 * since work queues and other resources were already released and
	 * destroyed by the time the iput/eviction/writeback is made.
	 */
	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			btrfs_err(fs_info,
				  "cleaner transaction attach returned %ld",
				  PTR_ERR(trans));
	} else {
		int ret;

		ret = btrfs_commit_transaction(trans);
		if (ret)
			btrfs_err(fs_info,
				  "cleaner open transaction commit returned %d",
				  ret);
	}

	return 0;
}

static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
*/ 1960 trans = btrfs_attach_transaction(root); 1961 if (IS_ERR(trans)) { 1962 if (PTR_ERR(trans) != -ENOENT) 1963 cannot_commit = true; 1964 goto sleep; 1965 } 1966 if (transid == trans->transid) { 1967 btrfs_commit_transaction(trans); 1968 } else { 1969 btrfs_end_transaction(trans); 1970 } 1971 sleep: 1972 wake_up_process(fs_info->cleaner_kthread); 1973 mutex_unlock(&fs_info->transaction_kthread_mutex); 1974 1975 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, 1976 &fs_info->fs_state))) 1977 btrfs_cleanup_transaction(fs_info); 1978 set_current_state(TASK_INTERRUPTIBLE); 1979 if (!kthread_should_stop() && 1980 (!btrfs_transaction_blocked(fs_info) || 1981 cannot_commit)) 1982 schedule_timeout(delay); 1983 __set_current_state(TASK_RUNNING); 1984 } while (!kthread_should_stop()); 1985 return 0; 1986 } 1987 1988 /* 1989 * this will find the backup with the highest generation in the 1990 * array of root backups. The index of that backup is returned, 1991 * or -1 if no valid backup is found. 1992 * 1993 * We check to make sure the array is valid by comparing the 1994 * generation of the latest root in the array with the generation 1995 * in the super block. If they don't match, we pitch it. 1996 */ 1997 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) 1998 { 1999 u64 cur; 2000 int newest_index = -1; 2001 struct btrfs_root_backup *root_backup; 2002 int i; 2003 2004 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { 2005 root_backup = info->super_copy->super_roots + i; 2006 cur = btrfs_backup_tree_root_gen(root_backup); 2007 if (cur == newest_gen) 2008 newest_index = i; 2009 } 2010 2011 /* check to see if we actually wrapped around */ 2012 if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { 2013 root_backup = info->super_copy->super_roots; 2014 cur = btrfs_backup_tree_root_gen(root_backup); 2015 if (cur == newest_gen) 2016 newest_index = 0; 2017 } 2018 return newest_index; 2019 } 2020 2021 2022 /* 2023 * find the oldest backup so we know where to store new entries 2024 * in the backup array. This will set the backup_root_index 2025 * field in the fs_info struct 2026 */ 2027 static void find_oldest_super_backup(struct btrfs_fs_info *info, 2028 u64 newest_gen) 2029 { 2030 int newest_index = -1; 2031 2032 newest_index = find_newest_super_backup(info, newest_gen); 2033 /* if there was garbage in there, just move along */ 2034 if (newest_index == -1) { 2035 info->backup_root_index = 0; 2036 } else { 2037 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; 2038 } 2039 } 2040 2041 /* 2042 * copy all the root pointers into the super backup array.
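 * (the slot picked by backup_root_index; as a worked example, assuming
 * BTRFS_NUM_BACKUP_ROOTS == 4 and backup_root_index == 3, the copy
 * lands in slot 3 and the index then wraps to (3 + 1) % 4 == 0.)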
2043 * this will bump the backup pointer by one when it is 2044 * done 2045 */ 2046 static void backup_super_roots(struct btrfs_fs_info *info) 2047 { 2048 int next_backup; 2049 struct btrfs_root_backup *root_backup; 2050 int last_backup; 2051 2052 next_backup = info->backup_root_index; 2053 last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % 2054 BTRFS_NUM_BACKUP_ROOTS; 2055 2056 /* 2057 * just overwrite the last backup if we're at the same generation; 2058 * this happens only at umount 2059 */ 2060 root_backup = info->super_for_commit->super_roots + last_backup; 2061 if (btrfs_backup_tree_root_gen(root_backup) == 2062 btrfs_header_generation(info->tree_root->node)) 2063 next_backup = last_backup; 2064 2065 root_backup = info->super_for_commit->super_roots + next_backup; 2066 2067 /* 2068 * make sure all of our padding and empty slots get zero-filled 2069 * regardless of which ones we use today 2070 */ 2071 memset(root_backup, 0, sizeof(*root_backup)); 2072 2073 info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; 2074 2075 btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); 2076 btrfs_set_backup_tree_root_gen(root_backup, 2077 btrfs_header_generation(info->tree_root->node)); 2078 2079 btrfs_set_backup_tree_root_level(root_backup, 2080 btrfs_header_level(info->tree_root->node)); 2081 2082 btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); 2083 btrfs_set_backup_chunk_root_gen(root_backup, 2084 btrfs_header_generation(info->chunk_root->node)); 2085 btrfs_set_backup_chunk_root_level(root_backup, 2086 btrfs_header_level(info->chunk_root->node)); 2087 2088 btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); 2089 btrfs_set_backup_extent_root_gen(root_backup, 2090 btrfs_header_generation(info->extent_root->node)); 2091 btrfs_set_backup_extent_root_level(root_backup, 2092 btrfs_header_level(info->extent_root->node)); 2093 2094 /* 2095 * we might commit during log recovery, which happens before we set 2096 * the fs_root. Make sure it is valid before we fill it in.
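 * (Hence the info->fs_root && info->fs_root->node test below instead
 * of an unconditional dereference.)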
2097 */ 2098 if (info->fs_root && info->fs_root->node) { 2099 btrfs_set_backup_fs_root(root_backup, 2100 info->fs_root->node->start); 2101 btrfs_set_backup_fs_root_gen(root_backup, 2102 btrfs_header_generation(info->fs_root->node)); 2103 btrfs_set_backup_fs_root_level(root_backup, 2104 btrfs_header_level(info->fs_root->node)); 2105 } 2106 2107 btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); 2108 btrfs_set_backup_dev_root_gen(root_backup, 2109 btrfs_header_generation(info->dev_root->node)); 2110 btrfs_set_backup_dev_root_level(root_backup, 2111 btrfs_header_level(info->dev_root->node)); 2112 2113 btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); 2114 btrfs_set_backup_csum_root_gen(root_backup, 2115 btrfs_header_generation(info->csum_root->node)); 2116 btrfs_set_backup_csum_root_level(root_backup, 2117 btrfs_header_level(info->csum_root->node)); 2118 2119 btrfs_set_backup_total_bytes(root_backup, 2120 btrfs_super_total_bytes(info->super_copy)); 2121 btrfs_set_backup_bytes_used(root_backup, 2122 btrfs_super_bytes_used(info->super_copy)); 2123 btrfs_set_backup_num_devices(root_backup, 2124 btrfs_super_num_devices(info->super_copy)); 2125 2126 /* 2127 * if we don't copy this out to the super_copy, it won't get remembered 2128 * for the next commit 2129 */ 2130 memcpy(&info->super_copy->super_roots, 2131 &info->super_for_commit->super_roots, 2132 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); 2133 } 2134 2135 /* 2136 * this copies info out of the root backup array and back into 2137 * the in-memory super block. It is meant to help iterate through 2138 * the array, so you send it the number of backups you've already 2139 * tried and the last backup index you used. 2140 * 2141 * this returns -1 when it has tried all the backups 2142 */ 2143 static noinline int next_root_backup(struct btrfs_fs_info *info, 2144 struct btrfs_super_block *super, 2145 int *num_backups_tried, int *backup_index) 2146 { 2147 struct btrfs_root_backup *root_backup; 2148 int newest = *backup_index; 2149 2150 if (*num_backups_tried == 0) { 2151 u64 gen = btrfs_super_generation(super); 2152 2153 newest = find_newest_super_backup(info, gen); 2154 if (newest == -1) 2155 return -1; 2156 2157 *backup_index = newest; 2158 *num_backups_tried = 1; 2159 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { 2160 /* we've tried all the backups, all done */ 2161 return -1; 2162 } else { 2163 /* jump to the next oldest backup */ 2164 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % 2165 BTRFS_NUM_BACKUP_ROOTS; 2166 *backup_index = newest; 2167 *num_backups_tried += 1; 2168 } 2169 root_backup = super->super_roots + newest; 2170 2171 btrfs_set_super_generation(super, 2172 btrfs_backup_tree_root_gen(root_backup)); 2173 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); 2174 btrfs_set_super_root_level(super, 2175 btrfs_backup_tree_root_level(root_backup)); 2176 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); 2177 2178 /* 2179 * fixme: the total bytes and num_devices need to match or we should 2180 * need a fsck 2181 */ 2182 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); 2183 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); 2184 return 0; 2185 } 2186 2187 /* helper to cleanup workers */ 2188 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) 2189 { 2190 btrfs_destroy_workqueue(fs_info->fixup_workers); 2191 btrfs_destroy_workqueue(fs_info->delalloc_workers); 2192 
btrfs_destroy_workqueue(fs_info->workers); 2193 btrfs_destroy_workqueue(fs_info->endio_workers); 2194 btrfs_destroy_workqueue(fs_info->endio_raid56_workers); 2195 btrfs_destroy_workqueue(fs_info->endio_repair_workers); 2196 btrfs_destroy_workqueue(fs_info->rmw_workers); 2197 btrfs_destroy_workqueue(fs_info->endio_write_workers); 2198 btrfs_destroy_workqueue(fs_info->endio_freespace_worker); 2199 btrfs_destroy_workqueue(fs_info->submit_workers); 2200 btrfs_destroy_workqueue(fs_info->delayed_workers); 2201 btrfs_destroy_workqueue(fs_info->caching_workers); 2202 btrfs_destroy_workqueue(fs_info->readahead_workers); 2203 btrfs_destroy_workqueue(fs_info->flush_workers); 2204 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); 2205 btrfs_destroy_workqueue(fs_info->extent_workers); 2206 /* 2207 * Now that all other work queues are destroyed, we can safely destroy 2208 * the queues used for metadata I/O, since tasks from those other work 2209 * queues can do metadata I/O operations. 2210 */ 2211 btrfs_destroy_workqueue(fs_info->endio_meta_workers); 2212 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); 2213 } 2214 2215 static void free_root_extent_buffers(struct btrfs_root *root) 2216 { 2217 if (root) { 2218 free_extent_buffer(root->node); 2219 free_extent_buffer(root->commit_root); 2220 root->node = NULL; 2221 root->commit_root = NULL; 2222 } 2223 } 2224 2225 /* helper to cleanup tree roots */ 2226 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) 2227 { 2228 free_root_extent_buffers(info->tree_root); 2229 2230 free_root_extent_buffers(info->dev_root); 2231 free_root_extent_buffers(info->extent_root); 2232 free_root_extent_buffers(info->csum_root); 2233 free_root_extent_buffers(info->quota_root); 2234 free_root_extent_buffers(info->uuid_root); 2235 if (chunk_root) 2236 free_root_extent_buffers(info->chunk_root); 2237 free_root_extent_buffers(info->free_space_root); 2238 } 2239 2240 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) 2241 { 2242 int ret; 2243 struct btrfs_root *gang[8]; 2244 int i; 2245 2246 while (!list_empty(&fs_info->dead_roots)) { 2247 gang[0] = list_entry(fs_info->dead_roots.next, 2248 struct btrfs_root, root_list); 2249 list_del(&gang[0]->root_list); 2250 2251 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) { 2252 btrfs_drop_and_free_fs_root(fs_info, gang[0]); 2253 } else { 2254 free_extent_buffer(gang[0]->node); 2255 free_extent_buffer(gang[0]->commit_root); 2256 btrfs_put_fs_root(gang[0]); 2257 } 2258 } 2259 2260 while (1) { 2261 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, 2262 (void **)gang, 0, 2263 ARRAY_SIZE(gang)); 2264 if (!ret) 2265 break; 2266 for (i = 0; i < ret; i++) 2267 btrfs_drop_and_free_fs_root(fs_info, gang[i]); 2268 } 2269 2270 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { 2271 btrfs_free_log_root_tree(NULL, fs_info); 2272 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); 2273 } 2274 } 2275 2276 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) 2277 { 2278 mutex_init(&fs_info->scrub_lock); 2279 atomic_set(&fs_info->scrubs_running, 0); 2280 atomic_set(&fs_info->scrub_pause_req, 0); 2281 atomic_set(&fs_info->scrubs_paused, 0); 2282 atomic_set(&fs_info->scrub_cancel_req, 0); 2283 init_waitqueue_head(&fs_info->scrub_pause_wait); 2284 fs_info->scrub_workers_refcnt = 0; 2285 } 2286 2287 static void btrfs_init_balance(struct btrfs_fs_info *fs_info) 2288 { 2289 spin_lock_init(&fs_info->balance_lock); 2290 mutex_init(&fs_info->balance_mutex); 2291 
atomic_set(&fs_info->balance_running, 0); 2292 atomic_set(&fs_info->balance_pause_req, 0); 2293 atomic_set(&fs_info->balance_cancel_req, 0); 2294 fs_info->balance_ctl = NULL; 2295 init_waitqueue_head(&fs_info->balance_wait_q); 2296 } 2297 2298 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info) 2299 { 2300 struct inode *inode = fs_info->btree_inode; 2301 2302 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; 2303 set_nlink(inode, 1); 2304 /* 2305 * we set the i_size on the btree inode to the max possible int. 2306 * the real end of the address space is determined by all of 2307 * the devices in the system 2308 */ 2309 inode->i_size = OFFSET_MAX; 2310 inode->i_mapping->a_ops = &btree_aops; 2311 2312 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 2313 extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode); 2314 BTRFS_I(inode)->io_tree.track_uptodate = 0; 2315 extent_map_tree_init(&BTRFS_I(inode)->extent_tree); 2316 2317 BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops; 2318 2319 BTRFS_I(inode)->root = fs_info->tree_root; 2320 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key)); 2321 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 2322 btrfs_insert_inode_hash(inode); 2323 } 2324 2325 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) 2326 { 2327 fs_info->dev_replace.lock_owner = 0; 2328 atomic_set(&fs_info->dev_replace.nesting_level, 0); 2329 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); 2330 rwlock_init(&fs_info->dev_replace.lock); 2331 atomic_set(&fs_info->dev_replace.read_locks, 0); 2332 atomic_set(&fs_info->dev_replace.blocking_readers, 0); 2333 init_waitqueue_head(&fs_info->replace_wait); 2334 init_waitqueue_head(&fs_info->dev_replace.read_lock_wq); 2335 } 2336 2337 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) 2338 { 2339 spin_lock_init(&fs_info->qgroup_lock); 2340 mutex_init(&fs_info->qgroup_ioctl_lock); 2341 fs_info->qgroup_tree = RB_ROOT; 2342 fs_info->qgroup_op_tree = RB_ROOT; 2343 INIT_LIST_HEAD(&fs_info->dirty_qgroups); 2344 fs_info->qgroup_seq = 1; 2345 fs_info->qgroup_ulist = NULL; 2346 fs_info->qgroup_rescan_running = false; 2347 mutex_init(&fs_info->qgroup_rescan_lock); 2348 } 2349 2350 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info, 2351 struct btrfs_fs_devices *fs_devices) 2352 { 2353 int max_active = fs_info->thread_pool_size; 2354 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; 2355 2356 fs_info->workers = 2357 btrfs_alloc_workqueue(fs_info, "worker", 2358 flags | WQ_HIGHPRI, max_active, 16); 2359 2360 fs_info->delalloc_workers = 2361 btrfs_alloc_workqueue(fs_info, "delalloc", 2362 flags, max_active, 2); 2363 2364 fs_info->flush_workers = 2365 btrfs_alloc_workqueue(fs_info, "flush_delalloc", 2366 flags, max_active, 0); 2367 2368 fs_info->caching_workers = 2369 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); 2370 2371 /* 2372 * a higher idle thresh on the submit workers makes it much more 2373 * likely that bios will be send down in a sane order to the 2374 * devices 2375 */ 2376 fs_info->submit_workers = 2377 btrfs_alloc_workqueue(fs_info, "submit", flags, 2378 min_t(u64, fs_devices->num_devices, 2379 max_active), 64); 2380 2381 fs_info->fixup_workers = 2382 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0); 2383 2384 /* 2385 * endios are largely parallel and should have a very 2386 * low idle thresh 2387 */ 2388 fs_info->endio_workers = 2389 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4); 2390 fs_info->endio_meta_workers = 2391 
btrfs_alloc_workqueue(fs_info, "endio-meta", flags, 2392 max_active, 4); 2393 fs_info->endio_meta_write_workers = 2394 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags, 2395 max_active, 2); 2396 fs_info->endio_raid56_workers = 2397 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, 2398 max_active, 4); 2399 fs_info->endio_repair_workers = 2400 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0); 2401 fs_info->rmw_workers = 2402 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); 2403 fs_info->endio_write_workers = 2404 btrfs_alloc_workqueue(fs_info, "endio-write", flags, 2405 max_active, 2); 2406 fs_info->endio_freespace_worker = 2407 btrfs_alloc_workqueue(fs_info, "freespace-write", flags, 2408 max_active, 0); 2409 fs_info->delayed_workers = 2410 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags, 2411 max_active, 0); 2412 fs_info->readahead_workers = 2413 btrfs_alloc_workqueue(fs_info, "readahead", flags, 2414 max_active, 2); 2415 fs_info->qgroup_rescan_workers = 2416 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0); 2417 fs_info->extent_workers = 2418 btrfs_alloc_workqueue(fs_info, "extent-refs", flags, 2419 min_t(u64, fs_devices->num_devices, 2420 max_active), 8); 2421 2422 if (!(fs_info->workers && fs_info->delalloc_workers && 2423 fs_info->submit_workers && fs_info->flush_workers && 2424 fs_info->endio_workers && fs_info->endio_meta_workers && 2425 fs_info->endio_meta_write_workers && 2426 fs_info->endio_repair_workers && 2427 fs_info->endio_write_workers && fs_info->endio_raid56_workers && 2428 fs_info->endio_freespace_worker && fs_info->rmw_workers && 2429 fs_info->caching_workers && fs_info->readahead_workers && 2430 fs_info->fixup_workers && fs_info->delayed_workers && 2431 fs_info->extent_workers && 2432 fs_info->qgroup_rescan_workers)) { 2433 return -ENOMEM; 2434 } 2435 2436 return 0; 2437 } 2438 2439 static int btrfs_replay_log(struct btrfs_fs_info *fs_info, 2440 struct btrfs_fs_devices *fs_devices) 2441 { 2442 int ret; 2443 struct btrfs_root *log_tree_root; 2444 struct btrfs_super_block *disk_super = fs_info->super_copy; 2445 u64 bytenr = btrfs_super_log_root(disk_super); 2446 2447 if (fs_devices->rw_devices == 0) { 2448 btrfs_warn(fs_info, "log replay required on RO media"); 2449 return -EIO; 2450 } 2451 2452 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2453 if (!log_tree_root) 2454 return -ENOMEM; 2455 2456 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); 2457 2458 log_tree_root->node = read_tree_block(fs_info, bytenr, 2459 fs_info->generation + 1); 2460 if (IS_ERR(log_tree_root->node)) { 2461 btrfs_warn(fs_info, "failed to read log tree"); 2462 ret = PTR_ERR(log_tree_root->node); 2463 kfree(log_tree_root); 2464 return ret; 2465 } else if (!extent_buffer_uptodate(log_tree_root->node)) { 2466 btrfs_err(fs_info, "failed to read log tree"); 2467 free_extent_buffer(log_tree_root->node); 2468 kfree(log_tree_root); 2469 return -EIO; 2470 } 2471 /* returns with log_tree_root freed on success */ 2472 ret = btrfs_recover_log_trees(log_tree_root); 2473 if (ret) { 2474 btrfs_handle_fs_error(fs_info, ret, 2475 "Failed to recover log tree"); 2476 free_extent_buffer(log_tree_root->node); 2477 kfree(log_tree_root); 2478 return ret; 2479 } 2480 2481 if (fs_info->sb->s_flags & MS_RDONLY) { 2482 ret = btrfs_commit_super(fs_info); 2483 if (ret) 2484 return ret; 2485 } 2486 2487 return 0; 2488 } 2489 2490 static int btrfs_read_roots(struct btrfs_fs_info *fs_info) 2491 { 2492 struct btrfs_root *tree_root = fs_info->tree_root; 
2493 struct btrfs_root *root; 2494 struct btrfs_key location; 2495 int ret; 2496 2497 BUG_ON(!fs_info->tree_root); 2498 2499 location.objectid = BTRFS_EXTENT_TREE_OBJECTID; 2500 location.type = BTRFS_ROOT_ITEM_KEY; 2501 location.offset = 0; 2502 2503 root = btrfs_read_tree_root(tree_root, &location); 2504 if (IS_ERR(root)) 2505 return PTR_ERR(root); 2506 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2507 fs_info->extent_root = root; 2508 2509 location.objectid = BTRFS_DEV_TREE_OBJECTID; 2510 root = btrfs_read_tree_root(tree_root, &location); 2511 if (IS_ERR(root)) 2512 return PTR_ERR(root); 2513 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2514 fs_info->dev_root = root; 2515 btrfs_init_devices_late(fs_info); 2516 2517 location.objectid = BTRFS_CSUM_TREE_OBJECTID; 2518 root = btrfs_read_tree_root(tree_root, &location); 2519 if (IS_ERR(root)) 2520 return PTR_ERR(root); 2521 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2522 fs_info->csum_root = root; 2523 2524 location.objectid = BTRFS_QUOTA_TREE_OBJECTID; 2525 root = btrfs_read_tree_root(tree_root, &location); 2526 if (!IS_ERR(root)) { 2527 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2528 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 2529 fs_info->quota_root = root; 2530 } 2531 2532 location.objectid = BTRFS_UUID_TREE_OBJECTID; 2533 root = btrfs_read_tree_root(tree_root, &location); 2534 if (IS_ERR(root)) { 2535 ret = PTR_ERR(root); 2536 if (ret != -ENOENT) 2537 return ret; 2538 } else { 2539 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2540 fs_info->uuid_root = root; 2541 } 2542 2543 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 2544 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID; 2545 root = btrfs_read_tree_root(tree_root, &location); 2546 if (IS_ERR(root)) 2547 return PTR_ERR(root); 2548 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2549 fs_info->free_space_root = root; 2550 } 2551 2552 return 0; 2553 } 2554 2555 int open_ctree(struct super_block *sb, 2556 struct btrfs_fs_devices *fs_devices, 2557 char *options) 2558 { 2559 u32 sectorsize; 2560 u32 nodesize; 2561 u32 stripesize; 2562 u64 generation; 2563 u64 features; 2564 struct btrfs_key location; 2565 struct buffer_head *bh; 2566 struct btrfs_super_block *disk_super; 2567 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 2568 struct btrfs_root *tree_root; 2569 struct btrfs_root *chunk_root; 2570 int ret; 2571 int err = -EINVAL; 2572 int num_backups_tried = 0; 2573 int backup_index = 0; 2574 int max_active; 2575 int clear_free_space_tree = 0; 2576 2577 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2578 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); 2579 if (!tree_root || !chunk_root) { 2580 err = -ENOMEM; 2581 goto fail; 2582 } 2583 2584 ret = init_srcu_struct(&fs_info->subvol_srcu); 2585 if (ret) { 2586 err = ret; 2587 goto fail; 2588 } 2589 2590 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); 2591 if (ret) { 2592 err = ret; 2593 goto fail_srcu; 2594 } 2595 fs_info->dirty_metadata_batch = PAGE_SIZE * 2596 (1 + ilog2(nr_cpu_ids)); 2597 2598 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); 2599 if (ret) { 2600 err = ret; 2601 goto fail_dirty_metadata_bytes; 2602 } 2603 2604 ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL); 2605 if (ret) { 2606 err = ret; 2607 goto fail_delalloc_bytes; 2608 } 2609 2610 fs_info->btree_inode = new_inode(sb); 2611 if (!fs_info->btree_inode) { 2612 err = -ENOMEM; 2613 goto fail_bio_counter; 2614 } 2615 2616 
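/*
 * The btree inode's page cache backs all metadata I/O, so memory
 * allocations for it must not recurse into filesystem reclaim; the
 * GFP_NOFS mask set just below is what prevents that.
 */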
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); 2617 2618 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); 2619 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); 2620 INIT_LIST_HEAD(&fs_info->trans_list); 2621 INIT_LIST_HEAD(&fs_info->dead_roots); 2622 INIT_LIST_HEAD(&fs_info->delayed_iputs); 2623 INIT_LIST_HEAD(&fs_info->delalloc_roots); 2624 INIT_LIST_HEAD(&fs_info->caching_block_groups); 2625 spin_lock_init(&fs_info->delalloc_root_lock); 2626 spin_lock_init(&fs_info->trans_lock); 2627 spin_lock_init(&fs_info->fs_roots_radix_lock); 2628 spin_lock_init(&fs_info->delayed_iput_lock); 2629 spin_lock_init(&fs_info->defrag_inodes_lock); 2630 spin_lock_init(&fs_info->tree_mod_seq_lock); 2631 spin_lock_init(&fs_info->super_lock); 2632 spin_lock_init(&fs_info->qgroup_op_lock); 2633 spin_lock_init(&fs_info->buffer_lock); 2634 spin_lock_init(&fs_info->unused_bgs_lock); 2635 rwlock_init(&fs_info->tree_mod_log_lock); 2636 mutex_init(&fs_info->unused_bg_unpin_mutex); 2637 mutex_init(&fs_info->delete_unused_bgs_mutex); 2638 mutex_init(&fs_info->reloc_mutex); 2639 mutex_init(&fs_info->delalloc_root_mutex); 2640 mutex_init(&fs_info->cleaner_delayed_iput_mutex); 2641 seqlock_init(&fs_info->profiles_lock); 2642 2643 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 2644 INIT_LIST_HEAD(&fs_info->space_info); 2645 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); 2646 INIT_LIST_HEAD(&fs_info->unused_bgs); 2647 btrfs_mapping_init(&fs_info->mapping_tree); 2648 btrfs_init_block_rsv(&fs_info->global_block_rsv, 2649 BTRFS_BLOCK_RSV_GLOBAL); 2650 btrfs_init_block_rsv(&fs_info->delalloc_block_rsv, 2651 BTRFS_BLOCK_RSV_DELALLOC); 2652 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); 2653 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); 2654 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); 2655 btrfs_init_block_rsv(&fs_info->delayed_block_rsv, 2656 BTRFS_BLOCK_RSV_DELOPS); 2657 atomic_set(&fs_info->nr_async_submits, 0); 2658 atomic_set(&fs_info->async_delalloc_pages, 0); 2659 atomic_set(&fs_info->async_submit_draining, 0); 2660 atomic_set(&fs_info->nr_async_bios, 0); 2661 atomic_set(&fs_info->defrag_running, 0); 2662 atomic_set(&fs_info->qgroup_op_seq, 0); 2663 atomic_set(&fs_info->reada_works_cnt, 0); 2664 atomic64_set(&fs_info->tree_mod_seq, 0); 2665 fs_info->sb = sb; 2666 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; 2667 fs_info->metadata_ratio = 0; 2668 fs_info->defrag_inodes = RB_ROOT; 2669 atomic64_set(&fs_info->free_chunk_space, 0); 2670 fs_info->tree_mod_log = RB_ROOT; 2671 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; 2672 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */ 2673 /* readahead state */ 2674 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); 2675 spin_lock_init(&fs_info->reada_lock); 2676 2677 fs_info->thread_pool_size = min_t(unsigned long, 2678 num_online_cpus() + 2, 8); 2679 2680 INIT_LIST_HEAD(&fs_info->ordered_roots); 2681 spin_lock_init(&fs_info->ordered_root_lock); 2682 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), 2683 GFP_KERNEL); 2684 if (!fs_info->delayed_root) { 2685 err = -ENOMEM; 2686 goto fail_iput; 2687 } 2688 btrfs_init_delayed_root(fs_info->delayed_root); 2689 2690 btrfs_init_scrub(fs_info); 2691 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 2692 fs_info->check_integrity_print_mask = 0; 2693 #endif 2694 btrfs_init_balance(fs_info); 2695 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work); 2696 2697 sb->s_blocksize = 
BTRFS_BDEV_BLOCKSIZE; 2698 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); 2699 2700 btrfs_init_btree_inode(fs_info); 2701 2702 spin_lock_init(&fs_info->block_group_cache_lock); 2703 fs_info->block_group_cache_tree = RB_ROOT; 2704 fs_info->first_logical_byte = (u64)-1; 2705 2706 extent_io_tree_init(&fs_info->freed_extents[0], NULL); 2707 extent_io_tree_init(&fs_info->freed_extents[1], NULL); 2708 fs_info->pinned_extents = &fs_info->freed_extents[0]; 2709 set_bit(BTRFS_FS_BARRIER, &fs_info->flags); 2710 2711 mutex_init(&fs_info->ordered_operations_mutex); 2712 mutex_init(&fs_info->tree_log_mutex); 2713 mutex_init(&fs_info->chunk_mutex); 2714 mutex_init(&fs_info->transaction_kthread_mutex); 2715 mutex_init(&fs_info->cleaner_mutex); 2716 mutex_init(&fs_info->volume_mutex); 2717 mutex_init(&fs_info->ro_block_group_mutex); 2718 init_rwsem(&fs_info->commit_root_sem); 2719 init_rwsem(&fs_info->cleanup_work_sem); 2720 init_rwsem(&fs_info->subvol_sem); 2721 sema_init(&fs_info->uuid_tree_rescan_sem, 1); 2722 2723 btrfs_init_dev_replace_locks(fs_info); 2724 btrfs_init_qgroup(fs_info); 2725 2726 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); 2727 btrfs_init_free_cluster(&fs_info->data_alloc_cluster); 2728 2729 init_waitqueue_head(&fs_info->transaction_throttle); 2730 init_waitqueue_head(&fs_info->transaction_wait); 2731 init_waitqueue_head(&fs_info->transaction_blocked_wait); 2732 init_waitqueue_head(&fs_info->async_submit_wait); 2733 2734 INIT_LIST_HEAD(&fs_info->pinned_chunks); 2735 2736 /* Usable values until the real ones are cached from the superblock */ 2737 fs_info->nodesize = 4096; 2738 fs_info->sectorsize = 4096; 2739 fs_info->stripesize = 4096; 2740 2741 ret = btrfs_alloc_stripe_hash_table(fs_info); 2742 if (ret) { 2743 err = ret; 2744 goto fail_alloc; 2745 } 2746 2747 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); 2748 2749 invalidate_bdev(fs_devices->latest_bdev); 2750 2751 /* 2752 * Read super block and check the signature bytes only 2753 */ 2754 bh = btrfs_read_dev_super(fs_devices->latest_bdev); 2755 if (IS_ERR(bh)) { 2756 err = PTR_ERR(bh); 2757 goto fail_alloc; 2758 } 2759 2760 /* 2761 * We want to check superblock checksum, the type is stored inside. 2762 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). 2763 */ 2764 if (btrfs_check_super_csum(fs_info, bh->b_data)) { 2765 btrfs_err(fs_info, "superblock checksum mismatch"); 2766 err = -EINVAL; 2767 brelse(bh); 2768 goto fail_alloc; 2769 } 2770 2771 /* 2772 * super_copy is zeroed at allocation time and we never touch the 2773 * following bytes up to INFO_SIZE, the checksum is calculated from 2774 * the whole block of INFO_SIZE 2775 */ 2776 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); 2777 memcpy(fs_info->super_for_commit, fs_info->super_copy, 2778 sizeof(*fs_info->super_for_commit)); 2779 brelse(bh); 2780 2781 memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); 2782 2783 ret = btrfs_check_super_valid(fs_info); 2784 if (ret) { 2785 btrfs_err(fs_info, "superblock contains fatal errors"); 2786 err = -EINVAL; 2787 goto fail_alloc; 2788 } 2789 2790 disk_super = fs_info->super_copy; 2791 if (!btrfs_super_root(disk_super)) 2792 goto fail_alloc; 2793 2794 /* check FS state, whether FS is broken. 
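 * A super block carrying BTRFS_SUPER_FLAG_ERROR means a previous
 * mount hit a fatal error; mirror that into the in-memory
 * BTRFS_FS_STATE_ERROR bit so the rest of the code can test it
 * cheaply.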
*/ 2795 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) 2796 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); 2797 2798 /* 2799 * run through our array of backup supers and setup 2800 * our ring pointer to the oldest one 2801 */ 2802 generation = btrfs_super_generation(disk_super); 2803 find_oldest_super_backup(fs_info, generation); 2804 2805 /* 2806 * In the long term, we'll store the compression type in the super 2807 * block, and it'll be used for per file compression control. 2808 */ 2809 fs_info->compress_type = BTRFS_COMPRESS_ZLIB; 2810 2811 ret = btrfs_parse_options(fs_info, options, sb->s_flags); 2812 if (ret) { 2813 err = ret; 2814 goto fail_alloc; 2815 } 2816 2817 features = btrfs_super_incompat_flags(disk_super) & 2818 ~BTRFS_FEATURE_INCOMPAT_SUPP; 2819 if (features) { 2820 btrfs_err(fs_info, 2821 "cannot mount because of unsupported optional features (%llx)", 2822 features); 2823 err = -EINVAL; 2824 goto fail_alloc; 2825 } 2826 2827 features = btrfs_super_incompat_flags(disk_super); 2828 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; 2829 if (fs_info->compress_type == BTRFS_COMPRESS_LZO) 2830 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; 2831 2832 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) 2833 btrfs_info(fs_info, "has skinny extents"); 2834 2835 /* 2836 * flag our filesystem as having big metadata blocks if 2837 * they are bigger than the page size 2838 */ 2839 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { 2840 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) 2841 btrfs_info(fs_info, 2842 "flagging fs with big metadata feature"); 2843 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; 2844 } 2845 2846 nodesize = btrfs_super_nodesize(disk_super); 2847 sectorsize = btrfs_super_sectorsize(disk_super); 2848 stripesize = sectorsize; 2849 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); 2850 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); 2851 2852 /* Cache block sizes */ 2853 fs_info->nodesize = nodesize; 2854 fs_info->sectorsize = sectorsize; 2855 fs_info->stripesize = stripesize; 2856 2857 /* 2858 * mixed block groups end up with duplicate but slightly offset 2859 * extent buffers for the same range. It leads to corruptions 2860 */ 2861 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && 2862 (sectorsize != nodesize)) { 2863 btrfs_err(fs_info, 2864 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups", 2865 nodesize, sectorsize); 2866 goto fail_alloc; 2867 } 2868 2869 /* 2870 * Needn't use the lock because there is no other task which will 2871 * update the flag. 
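 * (Concurrent updaters of the incompat flags normally take
 * fs_info->super_lock; during early mount we are single-threaded, so
 * the plain btrfs_set_super_incompat_flags() below is safe.)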
2872 */ 2873 btrfs_set_super_incompat_flags(disk_super, features); 2874 2875 features = btrfs_super_compat_ro_flags(disk_super) & 2876 ~BTRFS_FEATURE_COMPAT_RO_SUPP; 2877 if (!(sb->s_flags & MS_RDONLY) && features) { 2878 btrfs_err(fs_info, 2879 "cannot mount read-write because of unsupported optional features (%llx)", 2880 features); 2881 err = -EINVAL; 2882 goto fail_alloc; 2883 } 2884 2885 max_active = fs_info->thread_pool_size; 2886 2887 ret = btrfs_init_workqueues(fs_info, fs_devices); 2888 if (ret) { 2889 err = ret; 2890 goto fail_sb_buffer; 2891 } 2892 2893 sb->s_bdi->congested_fn = btrfs_congested_fn; 2894 sb->s_bdi->congested_data = fs_info; 2895 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; 2896 sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE; 2897 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); 2898 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); 2899 2900 sb->s_blocksize = sectorsize; 2901 sb->s_blocksize_bits = blksize_bits(sectorsize); 2902 2903 mutex_lock(&fs_info->chunk_mutex); 2904 ret = btrfs_read_sys_array(fs_info); 2905 mutex_unlock(&fs_info->chunk_mutex); 2906 if (ret) { 2907 btrfs_err(fs_info, "failed to read the system array: %d", ret); 2908 goto fail_sb_buffer; 2909 } 2910 2911 generation = btrfs_super_chunk_root_generation(disk_super); 2912 2913 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); 2914 2915 chunk_root->node = read_tree_block(fs_info, 2916 btrfs_super_chunk_root(disk_super), 2917 generation); 2918 if (IS_ERR(chunk_root->node) || 2919 !extent_buffer_uptodate(chunk_root->node)) { 2920 btrfs_err(fs_info, "failed to read chunk root"); 2921 if (!IS_ERR(chunk_root->node)) 2922 free_extent_buffer(chunk_root->node); 2923 chunk_root->node = NULL; 2924 goto fail_tree_roots; 2925 } 2926 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); 2927 chunk_root->commit_root = btrfs_root_node(chunk_root); 2928 2929 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, 2930 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); 2931 2932 ret = btrfs_read_chunk_tree(fs_info); 2933 if (ret) { 2934 btrfs_err(fs_info, "failed to read chunk tree: %d", ret); 2935 goto fail_tree_roots; 2936 } 2937 2938 /* 2939 * keep the device that is marked to be the target device for the 2940 * dev_replace procedure 2941 */ 2942 btrfs_close_extra_devices(fs_devices, 0); 2943 2944 if (!fs_devices->latest_bdev) { 2945 btrfs_err(fs_info, "failed to read devices"); 2946 goto fail_tree_roots; 2947 } 2948 2949 retry_root_backup: 2950 generation = btrfs_super_generation(disk_super); 2951 2952 tree_root->node = read_tree_block(fs_info, 2953 btrfs_super_root(disk_super), 2954 generation); 2955 if (IS_ERR(tree_root->node) || 2956 !extent_buffer_uptodate(tree_root->node)) { 2957 btrfs_warn(fs_info, "failed to read tree root"); 2958 if (!IS_ERR(tree_root->node)) 2959 free_extent_buffer(tree_root->node); 2960 tree_root->node = NULL; 2961 goto recovery_tree_root; 2962 } 2963 2964 btrfs_set_root_node(&tree_root->root_item, tree_root->node); 2965 tree_root->commit_root = btrfs_root_node(tree_root); 2966 btrfs_set_root_refs(&tree_root->root_item, 1); 2967 2968 mutex_lock(&tree_root->objectid_mutex); 2969 ret = btrfs_find_highest_objectid(tree_root, 2970 &tree_root->highest_objectid); 2971 if (ret) { 2972 mutex_unlock(&tree_root->objectid_mutex); 2973 goto recovery_tree_root; 2974 } 2975 2976 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID); 2977 2978 mutex_unlock(&tree_root->objectid_mutex); 2979 2980 ret 
= btrfs_read_roots(fs_info); 2981 if (ret) 2982 goto recovery_tree_root; 2983 2984 fs_info->generation = generation; 2985 fs_info->last_trans_committed = generation; 2986 2987 ret = btrfs_recover_balance(fs_info); 2988 if (ret) { 2989 btrfs_err(fs_info, "failed to recover balance: %d", ret); 2990 goto fail_block_groups; 2991 } 2992 2993 ret = btrfs_init_dev_stats(fs_info); 2994 if (ret) { 2995 btrfs_err(fs_info, "failed to init dev_stats: %d", ret); 2996 goto fail_block_groups; 2997 } 2998 2999 ret = btrfs_init_dev_replace(fs_info); 3000 if (ret) { 3001 btrfs_err(fs_info, "failed to init dev_replace: %d", ret); 3002 goto fail_block_groups; 3003 } 3004 3005 btrfs_close_extra_devices(fs_devices, 1); 3006 3007 ret = btrfs_sysfs_add_fsid(fs_devices, NULL); 3008 if (ret) { 3009 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", 3010 ret); 3011 goto fail_block_groups; 3012 } 3013 3014 ret = btrfs_sysfs_add_device(fs_devices); 3015 if (ret) { 3016 btrfs_err(fs_info, "failed to init sysfs device interface: %d", 3017 ret); 3018 goto fail_fsdev_sysfs; 3019 } 3020 3021 ret = btrfs_sysfs_add_mounted(fs_info); 3022 if (ret) { 3023 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); 3024 goto fail_fsdev_sysfs; 3025 } 3026 3027 ret = btrfs_init_space_info(fs_info); 3028 if (ret) { 3029 btrfs_err(fs_info, "failed to initialize space info: %d", ret); 3030 goto fail_sysfs; 3031 } 3032 3033 ret = btrfs_read_block_groups(fs_info); 3034 if (ret) { 3035 btrfs_err(fs_info, "failed to read block groups: %d", ret); 3036 goto fail_sysfs; 3037 } 3038 3039 if (!(sb->s_flags & MS_RDONLY) && !btrfs_check_rw_degradable(fs_info)) { 3040 btrfs_warn(fs_info, 3041 "writeable mount is not allowed due to too many missing devices"); 3042 goto fail_sysfs; 3043 } 3044 3045 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, 3046 "btrfs-cleaner"); 3047 if (IS_ERR(fs_info->cleaner_kthread)) 3048 goto fail_sysfs; 3049 3050 fs_info->transaction_kthread = kthread_run(transaction_kthread, 3051 tree_root, 3052 "btrfs-transaction"); 3053 if (IS_ERR(fs_info->transaction_kthread)) 3054 goto fail_cleaner; 3055 3056 if (!btrfs_test_opt(fs_info, NOSSD) && 3057 !fs_info->fs_devices->rotating) { 3058 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations"); 3059 } 3060 3061 /* 3062 * Mount does not set all options immediately, we can do it now and do 3063 * not have to wait for transaction commit 3064 */ 3065 btrfs_apply_pending_changes(fs_info); 3066 3067 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 3068 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) { 3069 ret = btrfsic_mount(fs_info, fs_devices, 3070 btrfs_test_opt(fs_info, 3071 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? 
3072 1 : 0, 3073 fs_info->check_integrity_print_mask); 3074 if (ret) 3075 btrfs_warn(fs_info, 3076 "failed to initialize integrity check module: %d", 3077 ret); 3078 } 3079 #endif 3080 ret = btrfs_read_qgroup_config(fs_info); 3081 if (ret) 3082 goto fail_trans_kthread; 3083 3084 /* do not make disk changes in broken FS or nologreplay is given */ 3085 if (btrfs_super_log_root(disk_super) != 0 && 3086 !btrfs_test_opt(fs_info, NOLOGREPLAY)) { 3087 ret = btrfs_replay_log(fs_info, fs_devices); 3088 if (ret) { 3089 err = ret; 3090 goto fail_qgroup; 3091 } 3092 } 3093 3094 ret = btrfs_find_orphan_roots(fs_info); 3095 if (ret) 3096 goto fail_qgroup; 3097 3098 if (!(sb->s_flags & MS_RDONLY)) { 3099 ret = btrfs_cleanup_fs_roots(fs_info); 3100 if (ret) 3101 goto fail_qgroup; 3102 3103 mutex_lock(&fs_info->cleaner_mutex); 3104 ret = btrfs_recover_relocation(tree_root); 3105 mutex_unlock(&fs_info->cleaner_mutex); 3106 if (ret < 0) { 3107 btrfs_warn(fs_info, "failed to recover relocation: %d", 3108 ret); 3109 err = -EINVAL; 3110 goto fail_qgroup; 3111 } 3112 } 3113 3114 location.objectid = BTRFS_FS_TREE_OBJECTID; 3115 location.type = BTRFS_ROOT_ITEM_KEY; 3116 location.offset = 0; 3117 3118 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); 3119 if (IS_ERR(fs_info->fs_root)) { 3120 err = PTR_ERR(fs_info->fs_root); 3121 goto fail_qgroup; 3122 } 3123 3124 if (sb->s_flags & MS_RDONLY) 3125 return 0; 3126 3127 if (btrfs_test_opt(fs_info, CLEAR_CACHE) && 3128 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 3129 clear_free_space_tree = 1; 3130 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && 3131 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { 3132 btrfs_warn(fs_info, "free space tree is invalid"); 3133 clear_free_space_tree = 1; 3134 } 3135 3136 if (clear_free_space_tree) { 3137 btrfs_info(fs_info, "clearing free space tree"); 3138 ret = btrfs_clear_free_space_tree(fs_info); 3139 if (ret) { 3140 btrfs_warn(fs_info, 3141 "failed to clear free space tree: %d", ret); 3142 close_ctree(fs_info); 3143 return ret; 3144 } 3145 } 3146 3147 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && 3148 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { 3149 btrfs_info(fs_info, "creating free space tree"); 3150 ret = btrfs_create_free_space_tree(fs_info); 3151 if (ret) { 3152 btrfs_warn(fs_info, 3153 "failed to create free space tree: %d", ret); 3154 close_ctree(fs_info); 3155 return ret; 3156 } 3157 } 3158 3159 down_read(&fs_info->cleanup_work_sem); 3160 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || 3161 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { 3162 up_read(&fs_info->cleanup_work_sem); 3163 close_ctree(fs_info); 3164 return ret; 3165 } 3166 up_read(&fs_info->cleanup_work_sem); 3167 3168 ret = btrfs_resume_balance_async(fs_info); 3169 if (ret) { 3170 btrfs_warn(fs_info, "failed to resume balance: %d", ret); 3171 close_ctree(fs_info); 3172 return ret; 3173 } 3174 3175 ret = btrfs_resume_dev_replace_async(fs_info); 3176 if (ret) { 3177 btrfs_warn(fs_info, "failed to resume device replace: %d", ret); 3178 close_ctree(fs_info); 3179 return ret; 3180 } 3181 3182 btrfs_qgroup_rescan_resume(fs_info); 3183 3184 if (!fs_info->uuid_root) { 3185 btrfs_info(fs_info, "creating UUID tree"); 3186 ret = btrfs_create_uuid_tree(fs_info); 3187 if (ret) { 3188 btrfs_warn(fs_info, 3189 "failed to create the UUID tree: %d", ret); 3190 close_ctree(fs_info); 3191 return ret; 3192 } 3193 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) || 3194 fs_info->generation != 3195 
btrfs_super_uuid_tree_generation(disk_super)) { 3196 btrfs_info(fs_info, "checking UUID tree"); 3197 ret = btrfs_check_uuid_tree(fs_info); 3198 if (ret) { 3199 btrfs_warn(fs_info, 3200 "failed to check the UUID tree: %d", ret); 3201 close_ctree(fs_info); 3202 return ret; 3203 } 3204 } else { 3205 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); 3206 } 3207 set_bit(BTRFS_FS_OPEN, &fs_info->flags); 3208 3209 /* 3210 * backuproot only affect mount behavior, and if open_ctree succeeded, 3211 * no need to keep the flag 3212 */ 3213 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); 3214 3215 return 0; 3216 3217 fail_qgroup: 3218 btrfs_free_qgroup_config(fs_info); 3219 fail_trans_kthread: 3220 kthread_stop(fs_info->transaction_kthread); 3221 btrfs_cleanup_transaction(fs_info); 3222 btrfs_free_fs_roots(fs_info); 3223 fail_cleaner: 3224 kthread_stop(fs_info->cleaner_kthread); 3225 3226 /* 3227 * make sure we're done with the btree inode before we stop our 3228 * kthreads 3229 */ 3230 filemap_write_and_wait(fs_info->btree_inode->i_mapping); 3231 3232 fail_sysfs: 3233 btrfs_sysfs_remove_mounted(fs_info); 3234 3235 fail_fsdev_sysfs: 3236 btrfs_sysfs_remove_fsid(fs_info->fs_devices); 3237 3238 fail_block_groups: 3239 btrfs_put_block_group_cache(fs_info); 3240 3241 fail_tree_roots: 3242 free_root_pointers(fs_info, 1); 3243 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 3244 3245 fail_sb_buffer: 3246 btrfs_stop_all_workers(fs_info); 3247 btrfs_free_block_groups(fs_info); 3248 fail_alloc: 3249 fail_iput: 3250 btrfs_mapping_tree_free(&fs_info->mapping_tree); 3251 3252 iput(fs_info->btree_inode); 3253 fail_bio_counter: 3254 percpu_counter_destroy(&fs_info->bio_counter); 3255 fail_delalloc_bytes: 3256 percpu_counter_destroy(&fs_info->delalloc_bytes); 3257 fail_dirty_metadata_bytes: 3258 percpu_counter_destroy(&fs_info->dirty_metadata_bytes); 3259 fail_srcu: 3260 cleanup_srcu_struct(&fs_info->subvol_srcu); 3261 fail: 3262 btrfs_free_stripe_hash_table(fs_info); 3263 btrfs_close_devices(fs_info->fs_devices); 3264 return err; 3265 3266 recovery_tree_root: 3267 if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) 3268 goto fail_tree_roots; 3269 3270 free_root_pointers(fs_info, 0); 3271 3272 /* don't use the log in recovery mode, it won't be valid */ 3273 btrfs_set_super_log_root(disk_super, 0); 3274 3275 /* we can't trust the free space cache either */ 3276 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); 3277 3278 ret = next_root_backup(fs_info, fs_info->super_copy, 3279 &num_backups_tried, &backup_index); 3280 if (ret == -1) 3281 goto fail_block_groups; 3282 goto retry_root_backup; 3283 } 3284 3285 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) 3286 { 3287 if (uptodate) { 3288 set_buffer_uptodate(bh); 3289 } else { 3290 struct btrfs_device *device = (struct btrfs_device *) 3291 bh->b_private; 3292 3293 btrfs_warn_rl_in_rcu(device->fs_info, 3294 "lost page write due to IO error on %s", 3295 rcu_str_deref(device->name)); 3296 /* note, we don't set_buffer_write_io_error because we have 3297 * our own ways of dealing with the IO errors 3298 */ 3299 clear_buffer_uptodate(bh); 3300 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); 3301 } 3302 unlock_buffer(bh); 3303 put_bh(bh); 3304 } 3305 3306 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, 3307 struct buffer_head **bh_ret) 3308 { 3309 struct buffer_head *bh; 3310 struct btrfs_super_block *super; 3311 u64 bytenr; 3312 3313 bytenr = btrfs_sb_offset(copy_num); 3314 if (bytenr + 
BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode)) 3315 return -EINVAL; 3316 3317 bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE); 3318 /* 3319 * If we fail to read from the underlying devices, as of now 3320 * the best option we have is to mark it EIO. 3321 */ 3322 if (!bh) 3323 return -EIO; 3324 3325 super = (struct btrfs_super_block *)bh->b_data; 3326 if (btrfs_super_bytenr(super) != bytenr || 3327 btrfs_super_magic(super) != BTRFS_MAGIC) { 3328 brelse(bh); 3329 return -EINVAL; 3330 } 3331 3332 *bh_ret = bh; 3333 return 0; 3334 } 3335 3336 3337 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) 3338 { 3339 struct buffer_head *bh; 3340 struct buffer_head *latest = NULL; 3341 struct btrfs_super_block *super; 3342 int i; 3343 u64 transid = 0; 3344 int ret = -EINVAL; 3345 3346 /* we would like to check all the supers, but that would make 3347 * a btrfs mount succeed after a mkfs from a different FS. 3348 * So, we need to add a special mount option to scan for 3349 * later supers, using BTRFS_SUPER_MIRROR_MAX instead 3350 */ 3351 for (i = 0; i < 1; i++) { 3352 ret = btrfs_read_dev_one_super(bdev, i, &bh); 3353 if (ret) 3354 continue; 3355 3356 super = (struct btrfs_super_block *)bh->b_data; 3357 3358 if (!latest || btrfs_super_generation(super) > transid) { 3359 brelse(latest); 3360 latest = bh; 3361 transid = btrfs_super_generation(super); 3362 } else { 3363 brelse(bh); 3364 } 3365 } 3366 3367 if (!latest) 3368 return ERR_PTR(ret); 3369 3370 return latest; 3371 } 3372 3373 /* 3374 * Write superblock @sb to the @device. Do not wait for completion, all the 3375 * buffer heads we write are pinned. 3376 * 3377 * Write @max_mirrors copies of the superblock, where 0 means default that fit 3378 * the expected device size at commit time. Note that max_mirrors must be 3379 * same for write and wait phases. 3380 * 3381 * Return number of errors when buffer head is not found or submission fails. 3382 */ 3383 static int write_dev_supers(struct btrfs_device *device, 3384 struct btrfs_super_block *sb, int max_mirrors) 3385 { 3386 struct buffer_head *bh; 3387 int i; 3388 int ret; 3389 int errors = 0; 3390 u32 crc; 3391 u64 bytenr; 3392 3393 if (max_mirrors == 0) 3394 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 3395 3396 for (i = 0; i < max_mirrors; i++) { 3397 bytenr = btrfs_sb_offset(i); 3398 if (bytenr + BTRFS_SUPER_INFO_SIZE >= 3399 device->commit_total_bytes) 3400 break; 3401 3402 btrfs_set_super_bytenr(sb, bytenr); 3403 3404 crc = ~(u32)0; 3405 crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, 3406 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); 3407 btrfs_csum_final(crc, sb->csum); 3408 3409 /* One reference for us, and we leave it for the caller */ 3410 bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, 3411 BTRFS_SUPER_INFO_SIZE); 3412 if (!bh) { 3413 btrfs_err(device->fs_info, 3414 "couldn't get super buffer head for bytenr %llu", 3415 bytenr); 3416 errors++; 3417 continue; 3418 } 3419 3420 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); 3421 3422 /* one reference for submit_bh */ 3423 get_bh(bh); 3424 3425 set_buffer_uptodate(bh); 3426 lock_buffer(bh); 3427 bh->b_end_io = btrfs_end_buffer_write_sync; 3428 bh->b_private = device; 3429 3430 /* 3431 * we fua the first super. The others we allow 3432 * to go down lazy. 
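 * REQ_FUA forces the primary copy onto stable media before the write
 * completes, so at least one super block is durable even if we crash
 * while the remaining mirrors still sit in a volatile device cache.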
3433 */ 3434 if (i == 0) { 3435 ret = btrfsic_submit_bh(REQ_OP_WRITE, 3436 REQ_SYNC | REQ_FUA | REQ_META | REQ_PRIO, bh); 3437 } else { 3438 ret = btrfsic_submit_bh(REQ_OP_WRITE, 3439 REQ_SYNC | REQ_META | REQ_PRIO, bh); 3440 } 3441 if (ret) 3442 errors++; 3443 } 3444 return errors < i ? 0 : -1; 3445 } 3446 3447 /* 3448 * Wait for write completion of superblocks done by write_dev_supers, 3449 * @max_mirrors same for write and wait phases. 3450 * 3451 * Return number of errors when buffer head is not found or not marked up to 3452 * date. 3453 */ 3454 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) 3455 { 3456 struct buffer_head *bh; 3457 int i; 3458 int errors = 0; 3459 u64 bytenr; 3460 3461 if (max_mirrors == 0) 3462 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 3463 3464 for (i = 0; i < max_mirrors; i++) { 3465 bytenr = btrfs_sb_offset(i); 3466 if (bytenr + BTRFS_SUPER_INFO_SIZE >= 3467 device->commit_total_bytes) 3468 break; 3469 3470 bh = __find_get_block(device->bdev, 3471 bytenr / BTRFS_BDEV_BLOCKSIZE, 3472 BTRFS_SUPER_INFO_SIZE); 3473 if (!bh) { 3474 errors++; 3475 continue; 3476 } 3477 wait_on_buffer(bh); 3478 if (!buffer_uptodate(bh)) 3479 errors++; 3480 3481 /* drop our reference */ 3482 brelse(bh); 3483 3484 /* drop the reference from the writing run */ 3485 brelse(bh); 3486 } 3487 3488 return errors < i ? 0 : -1; 3489 } 3490 3491 /* 3492 * endio for the write_dev_flush, this will wake anyone waiting 3493 * for the barrier when it is done 3494 */ 3495 static void btrfs_end_empty_barrier(struct bio *bio) 3496 { 3497 complete(bio->bi_private); 3498 } 3499 3500 /* 3501 * Submit a flush request to the device if it supports it. Error handling is 3502 * done in the waiting counterpart. 3503 */ 3504 static void write_dev_flush(struct btrfs_device *device) 3505 { 3506 struct request_queue *q = bdev_get_queue(device->bdev); 3507 struct bio *bio = device->flush_bio; 3508 3509 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) 3510 return; 3511 3512 bio_reset(bio); 3513 bio->bi_end_io = btrfs_end_empty_barrier; 3514 bio_set_dev(bio, device->bdev); 3515 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; 3516 init_completion(&device->flush_wait); 3517 bio->bi_private = &device->flush_wait; 3518 3519 btrfsic_submit_bio(bio); 3520 device->flush_bio_sent = 1; 3521 } 3522 3523 /* 3524 * If the flush bio has been submitted by write_dev_flush, wait for it. 
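 * Splitting submit and wait lets barrier_all_devices() push one flush
 * to every device first and only then sleep, so the flushes proceed
 * in parallel instead of serially per device.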
3525 */ 3526 static blk_status_t wait_dev_flush(struct btrfs_device *device) 3527 { 3528 struct bio *bio = device->flush_bio; 3529 3530 if (!device->flush_bio_sent) 3531 return BLK_STS_OK; 3532 3533 device->flush_bio_sent = 0; 3534 wait_for_completion_io(&device->flush_wait); 3535 3536 return bio->bi_status; 3537 } 3538 3539 static int check_barrier_error(struct btrfs_fs_info *fs_info) 3540 { 3541 if (!btrfs_check_rw_degradable(fs_info)) 3542 return -EIO; 3543 return 0; 3544 } 3545 3546 /* 3547 * send an empty flush down to each device in parallel, 3548 * then wait for them 3549 */ 3550 static int barrier_all_devices(struct btrfs_fs_info *info) 3551 { 3552 struct list_head *head; 3553 struct btrfs_device *dev; 3554 int errors_wait = 0; 3555 blk_status_t ret; 3556 3557 /* send down all the barriers */ 3558 head = &info->fs_devices->devices; 3559 list_for_each_entry_rcu(dev, head, dev_list) { 3560 if (dev->missing) 3561 continue; 3562 if (!dev->bdev) 3563 continue; 3564 if (!dev->in_fs_metadata || !dev->writeable) 3565 continue; 3566 3567 write_dev_flush(dev); 3568 dev->last_flush_error = BLK_STS_OK; 3569 } 3570 3571 /* wait for all the barriers */ 3572 list_for_each_entry_rcu(dev, head, dev_list) { 3573 if (dev->missing) 3574 continue; 3575 if (!dev->bdev) { 3576 errors_wait++; 3577 continue; 3578 } 3579 if (!dev->in_fs_metadata || !dev->writeable) 3580 continue; 3581 3582 ret = wait_dev_flush(dev); 3583 if (ret) { 3584 dev->last_flush_error = ret; 3585 btrfs_dev_stat_inc_and_print(dev, 3586 BTRFS_DEV_STAT_FLUSH_ERRS); 3587 errors_wait++; 3588 } 3589 } 3590 3591 if (errors_wait) { 3592 /* 3593 * At some point we need the status of all disks 3594 * to arrive at the volume status. So error checking 3595 * is being pushed to a separate loop. 3596 */ 3597 return check_barrier_error(info); 3598 } 3599 return 0; 3600 } 3601 3602 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) 3603 { 3604 int raid_type; 3605 int min_tolerated = INT_MAX; 3606 3607 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || 3608 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) 3609 min_tolerated = min(min_tolerated, 3610 btrfs_raid_array[BTRFS_RAID_SINGLE]. 3611 tolerated_failures); 3612 3613 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { 3614 if (raid_type == BTRFS_RAID_SINGLE) 3615 continue; 3616 if (!(flags & btrfs_raid_group[raid_type])) 3617 continue; 3618 min_tolerated = min(min_tolerated, 3619 btrfs_raid_array[raid_type]. 
3620 tolerated_failures); 3621 } 3622 3623 if (min_tolerated == INT_MAX) { 3624 pr_warn("BTRFS: unknown raid flag: %llu", flags); 3625 min_tolerated = 0; 3626 } 3627 3628 return min_tolerated; 3629 } 3630 3631 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) 3632 { 3633 struct list_head *head; 3634 struct btrfs_device *dev; 3635 struct btrfs_super_block *sb; 3636 struct btrfs_dev_item *dev_item; 3637 int ret; 3638 int do_barriers; 3639 int max_errors; 3640 int total_errors = 0; 3641 u64 flags; 3642 3643 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); 3644 backup_super_roots(fs_info); 3645 3646 sb = fs_info->super_for_commit; 3647 dev_item = &sb->dev_item; 3648 3649 mutex_lock(&fs_info->fs_devices->device_list_mutex); 3650 head = &fs_info->fs_devices->devices; 3651 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1; 3652 3653 if (do_barriers) { 3654 ret = barrier_all_devices(fs_info); 3655 if (ret) { 3656 mutex_unlock( 3657 &fs_info->fs_devices->device_list_mutex); 3658 btrfs_handle_fs_error(fs_info, ret, 3659 "errors while submitting device barriers."); 3660 return ret; 3661 } 3662 } 3663 3664 list_for_each_entry_rcu(dev, head, dev_list) { 3665 if (!dev->bdev) { 3666 total_errors++; 3667 continue; 3668 } 3669 if (!dev->in_fs_metadata || !dev->writeable) 3670 continue; 3671 3672 btrfs_set_stack_device_generation(dev_item, 0); 3673 btrfs_set_stack_device_type(dev_item, dev->type); 3674 btrfs_set_stack_device_id(dev_item, dev->devid); 3675 btrfs_set_stack_device_total_bytes(dev_item, 3676 dev->commit_total_bytes); 3677 btrfs_set_stack_device_bytes_used(dev_item, 3678 dev->commit_bytes_used); 3679 btrfs_set_stack_device_io_align(dev_item, dev->io_align); 3680 btrfs_set_stack_device_io_width(dev_item, dev->io_width); 3681 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); 3682 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); 3683 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE); 3684 3685 flags = btrfs_super_flags(sb); 3686 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); 3687 3688 ret = write_dev_supers(dev, sb, max_mirrors); 3689 if (ret) 3690 total_errors++; 3691 } 3692 if (total_errors > max_errors) { 3693 btrfs_err(fs_info, "%d errors while writing supers", 3694 total_errors); 3695 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 3696 3697 /* FUA is masked off if unsupported and can't be the reason */ 3698 btrfs_handle_fs_error(fs_info, -EIO, 3699 "%d errors while writing supers", 3700 total_errors); 3701 return -EIO; 3702 } 3703 3704 total_errors = 0; 3705 list_for_each_entry_rcu(dev, head, dev_list) { 3706 if (!dev->bdev) 3707 continue; 3708 if (!dev->in_fs_metadata || !dev->writeable) 3709 continue; 3710 3711 ret = wait_dev_supers(dev, max_mirrors); 3712 if (ret) 3713 total_errors++; 3714 } 3715 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 3716 if (total_errors > max_errors) { 3717 btrfs_handle_fs_error(fs_info, -EIO, 3718 "%d errors while writing supers", 3719 total_errors); 3720 return -EIO; 3721 } 3722 return 0; 3723 } 3724 3725 /* Drop a fs root from the radix tree and free it. 
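 * Once the root's refcount has dropped to zero, the synchronize_srcu()
 * below makes sure in-flight subvol_srcu readers are done with the
 * root before it is freed.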
*/ 3726 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, 3727 struct btrfs_root *root) 3728 { 3729 spin_lock(&fs_info->fs_roots_radix_lock); 3730 radix_tree_delete(&fs_info->fs_roots_radix, 3731 (unsigned long)root->root_key.objectid); 3732 spin_unlock(&fs_info->fs_roots_radix_lock); 3733 3734 if (btrfs_root_refs(&root->root_item) == 0) 3735 synchronize_srcu(&fs_info->subvol_srcu); 3736 3737 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { 3738 btrfs_free_log(NULL, root); 3739 if (root->reloc_root) { 3740 free_extent_buffer(root->reloc_root->node); 3741 free_extent_buffer(root->reloc_root->commit_root); 3742 btrfs_put_fs_root(root->reloc_root); 3743 root->reloc_root = NULL; 3744 } 3745 } 3746 3747 if (root->free_ino_pinned) 3748 __btrfs_remove_free_space_cache(root->free_ino_pinned); 3749 if (root->free_ino_ctl) 3750 __btrfs_remove_free_space_cache(root->free_ino_ctl); 3751 free_fs_root(root); 3752 } 3753 3754 static void free_fs_root(struct btrfs_root *root) 3755 { 3756 iput(root->ino_cache_inode); 3757 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); 3758 btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv); 3759 root->orphan_block_rsv = NULL; 3760 if (root->anon_dev) 3761 free_anon_bdev(root->anon_dev); 3762 if (root->subv_writers) 3763 btrfs_free_subvolume_writers(root->subv_writers); 3764 free_extent_buffer(root->node); 3765 free_extent_buffer(root->commit_root); 3766 kfree(root->free_ino_ctl); 3767 kfree(root->free_ino_pinned); 3768 kfree(root->name); 3769 btrfs_put_fs_root(root); 3770 } 3771 3772 void btrfs_free_fs_root(struct btrfs_root *root) 3773 { 3774 free_fs_root(root); 3775 } 3776 3777 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) 3778 { 3779 u64 root_objectid = 0; 3780 struct btrfs_root *gang[8]; 3781 int i = 0; 3782 int err = 0; 3783 unsigned int ret = 0; 3784 int index; 3785 3786 while (1) { 3787 index = srcu_read_lock(&fs_info->subvol_srcu); 3788 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, 3789 (void **)gang, root_objectid, 3790 ARRAY_SIZE(gang)); 3791 if (!ret) { 3792 srcu_read_unlock(&fs_info->subvol_srcu, index); 3793 break; 3794 } 3795 root_objectid = gang[ret - 1]->root_key.objectid + 1; 3796 3797 for (i = 0; i < ret; i++) { 3798 /* Avoid to grab roots in dead_roots */ 3799 if (btrfs_root_refs(&gang[i]->root_item) == 0) { 3800 gang[i] = NULL; 3801 continue; 3802 } 3803 /* grab all the search result for later use */ 3804 gang[i] = btrfs_grab_fs_root(gang[i]); 3805 } 3806 srcu_read_unlock(&fs_info->subvol_srcu, index); 3807 3808 for (i = 0; i < ret; i++) { 3809 if (!gang[i]) 3810 continue; 3811 root_objectid = gang[i]->root_key.objectid; 3812 err = btrfs_orphan_cleanup(gang[i]); 3813 if (err) 3814 break; 3815 btrfs_put_fs_root(gang[i]); 3816 } 3817 root_objectid++; 3818 } 3819 3820 /* release the uncleaned roots due to error */ 3821 for (; i < ret; i++) { 3822 if (gang[i]) 3823 btrfs_put_fs_root(gang[i]); 3824 } 3825 return err; 3826 } 3827 3828 int btrfs_commit_super(struct btrfs_fs_info *fs_info) 3829 { 3830 struct btrfs_root *root = fs_info->tree_root; 3831 struct btrfs_trans_handle *trans; 3832 3833 mutex_lock(&fs_info->cleaner_mutex); 3834 btrfs_run_delayed_iputs(fs_info); 3835 mutex_unlock(&fs_info->cleaner_mutex); 3836 wake_up_process(fs_info->cleaner_kthread); 3837 3838 /* wait until ongoing cleanup work done */ 3839 down_write(&fs_info->cleanup_work_sem); 3840 up_write(&fs_info->cleanup_work_sem); 3841 3842 trans = btrfs_join_transaction(root); 3843 if (IS_ERR(trans)) 3844 return PTR_ERR(trans); 3845 return 
3846 }
3847 
3848 void close_ctree(struct btrfs_fs_info *fs_info)
3849 {
3850 	struct btrfs_root *root = fs_info->tree_root;
3851 	int ret;
3852 
3853 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3854 
3855 	/* wait for the qgroup rescan worker to stop */
3856 	btrfs_qgroup_wait_for_completion(fs_info, false);
3857 
3858 	/* wait for the uuid_scan task to finish */
3859 	down(&fs_info->uuid_tree_rescan_sem);
3860 	/* avoid complaints from lockdep et al., set sem back to initial state */
3861 	up(&fs_info->uuid_tree_rescan_sem);
3862 
3863 	/* pause restriper - we want to resume on mount */
3864 	btrfs_pause_balance(fs_info);
3865 
3866 	btrfs_dev_replace_suspend_for_unmount(fs_info);
3867 
3868 	btrfs_scrub_cancel(fs_info);
3869 
3870 	/* wait for any defraggers to finish */
3871 	wait_event(fs_info->transaction_wait,
3872 		   (atomic_read(&fs_info->defrag_running) == 0));
3873 
3874 	/* clear out the rbtree of defraggable inodes */
3875 	btrfs_cleanup_defrag_inodes(fs_info);
3876 
3877 	cancel_work_sync(&fs_info->async_reclaim_work);
3878 
3879 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3880 		/*
3881 		 * If the cleaner thread is stopped while there are block
3882 		 * groups queued for removal, the deletion will be skipped,
3883 		 * so do one final pass here before stopping it.
3884 		 */
3885 		btrfs_delete_unused_bgs(fs_info);
3886 
3887 		ret = btrfs_commit_super(fs_info);
3888 		if (ret)
3889 			btrfs_err(fs_info, "commit super ret %d", ret);
3890 	}
3891 
3892 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3893 		btrfs_error_commit_super(fs_info);
3894 
3895 	kthread_stop(fs_info->transaction_kthread);
3896 	kthread_stop(fs_info->cleaner_kthread);
3897 
3898 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3899 
3900 	btrfs_free_qgroup_config(fs_info);
3901 
3902 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3903 		btrfs_info(fs_info, "at unmount delalloc count %lld",
3904 			   percpu_counter_sum(&fs_info->delalloc_bytes));
3905 	}
3906 
3907 	btrfs_sysfs_remove_mounted(fs_info);
3908 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3909 
3910 	btrfs_free_fs_roots(fs_info);
3911 
3912 	btrfs_put_block_group_cache(fs_info);
3913 
3914 	/*
3915 	 * we must make sure there are no read requests to
3916 	 * submit after we stop all the workers.
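	 * Invalidating the btree inode's page cache below drops the cached
	 * extent buffer pages, so no new read bios should be triggered once
	 * the endio workers are gone.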
3917 */ 3918 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 3919 btrfs_stop_all_workers(fs_info); 3920 3921 btrfs_free_block_groups(fs_info); 3922 3923 clear_bit(BTRFS_FS_OPEN, &fs_info->flags); 3924 free_root_pointers(fs_info, 1); 3925 3926 iput(fs_info->btree_inode); 3927 3928 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 3929 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) 3930 btrfsic_unmount(fs_info->fs_devices); 3931 #endif 3932 3933 btrfs_close_devices(fs_info->fs_devices); 3934 btrfs_mapping_tree_free(&fs_info->mapping_tree); 3935 3936 percpu_counter_destroy(&fs_info->dirty_metadata_bytes); 3937 percpu_counter_destroy(&fs_info->delalloc_bytes); 3938 percpu_counter_destroy(&fs_info->bio_counter); 3939 cleanup_srcu_struct(&fs_info->subvol_srcu); 3940 3941 btrfs_free_stripe_hash_table(fs_info); 3942 3943 __btrfs_free_block_rsv(root->orphan_block_rsv); 3944 root->orphan_block_rsv = NULL; 3945 3946 while (!list_empty(&fs_info->pinned_chunks)) { 3947 struct extent_map *em; 3948 3949 em = list_first_entry(&fs_info->pinned_chunks, 3950 struct extent_map, list); 3951 list_del_init(&em->list); 3952 free_extent_map(em); 3953 } 3954 } 3955 3956 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, 3957 int atomic) 3958 { 3959 int ret; 3960 struct inode *btree_inode = buf->pages[0]->mapping->host; 3961 3962 ret = extent_buffer_uptodate(buf); 3963 if (!ret) 3964 return ret; 3965 3966 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, 3967 parent_transid, atomic); 3968 if (ret == -EAGAIN) 3969 return ret; 3970 return !ret; 3971 } 3972 3973 void btrfs_mark_buffer_dirty(struct extent_buffer *buf) 3974 { 3975 struct btrfs_fs_info *fs_info; 3976 struct btrfs_root *root; 3977 u64 transid = btrfs_header_generation(buf); 3978 int was_dirty; 3979 3980 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 3981 /* 3982 * This is a fast path so only do this check if we have sanity tests 3983 * enabled. Normal people shouldn't be marking dummy buffers as dirty 3984 * outside of the sanity tests. 
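	 * A dummy eb is not attached to the btree inode's mapping either, so
	 * the mapping-based root lookup just below could not be used for it
	 * anyway.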
3985 	 */
3986 	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
3987 		return;
3988 #endif
3989 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3990 	fs_info = root->fs_info;
3991 	btrfs_assert_tree_locked(buf);
3992 	if (transid != fs_info->generation)
3993 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
3994 			buf->start, transid, fs_info->generation);
3995 	was_dirty = set_extent_buffer_dirty(buf);
3996 	if (!was_dirty)
3997 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3998 					 buf->len,
3999 					 fs_info->dirty_metadata_batch);
4000 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4001 	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
4002 		btrfs_print_leaf(buf);
4003 		ASSERT(0);
4004 	}
4005 #endif
4006 }
4007 
4008 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4009 					int flush_delayed)
4010 {
4011 	/*
4012 	 * looks as though older kernels can get into trouble with
4013 	 * this code; they end up stuck in balance_dirty_pages forever
4014 	 */
4015 	int ret;
4016 
4017 	if (current->flags & PF_MEMALLOC)
4018 		return;
4019 
4020 	if (flush_delayed)
4021 		btrfs_balance_delayed_items(fs_info);
4022 
4023 	ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4024 				     BTRFS_DIRTY_METADATA_THRESH);
4025 	if (ret > 0) {
4026 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4027 	}
4028 }
4029 
4030 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4031 {
4032 	__btrfs_btree_balance_dirty(fs_info, 1);
4033 }
4034 
4035 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4036 {
4037 	__btrfs_btree_balance_dirty(fs_info, 0);
4038 }
4039 
4040 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
4041 {
4042 	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4043 	struct btrfs_fs_info *fs_info = root->fs_info;
4044 
4045 	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
4046 }
4047 
4048 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
4049 {
4050 	struct btrfs_super_block *sb = fs_info->super_copy;
4051 	u64 nodesize = btrfs_super_nodesize(sb);
4052 	u64 sectorsize = btrfs_super_sectorsize(sb);
4053 	int ret = 0;
4054 
4055 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
4056 		btrfs_err(fs_info, "no valid FS found");
4057 		ret = -EINVAL;
4058 	}
4059 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
4060 		btrfs_warn(fs_info, "unrecognized super flag: %llu",
4061 			   btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
4062 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
4063 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
4064 			  btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
4065 		ret = -EINVAL;
4066 	}
4067 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
4068 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
4069 			  btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
4070 		ret = -EINVAL;
4071 	}
4072 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
4073 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
4074 			  btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
4075 		ret = -EINVAL;
4076 	}
4077 
4078 	/*
4079 	 * Check sectorsize and nodesize first; the other checks will need them.
4080 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
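	 * (The upper bound for both checks is BTRFS_MAX_METADATA_BLOCKSIZE,
	 * i.e. 64K.)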
4081 	 */
4082 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
4083 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4084 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
4085 		ret = -EINVAL;
4086 	}
4087 	/* Only PAGE_SIZE is supported for now */
4088 	if (sectorsize != PAGE_SIZE) {
4089 		btrfs_err(fs_info,
4090 			  "sectorsize %llu not supported yet, only %lu is supported",
4091 			  sectorsize, PAGE_SIZE);
4092 		ret = -EINVAL;
4093 	}
4094 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
4095 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
4096 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
4097 		ret = -EINVAL;
4098 	}
4099 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
4100 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
4101 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
4102 		ret = -EINVAL;
4103 	}
4104 
4105 	/* Root alignment check */
4106 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
4107 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
4108 			   btrfs_super_root(sb));
4109 		ret = -EINVAL;
4110 	}
4111 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
4112 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
4113 			   btrfs_super_chunk_root(sb));
4114 		ret = -EINVAL;
4115 	}
4116 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
4117 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
4118 			   btrfs_super_log_root(sb));
4119 		ret = -EINVAL;
4120 	}
4121 
4122 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
4123 		btrfs_err(fs_info,
4124 			  "dev_item UUID does not match fsid: %pU != %pU",
4125 			  fs_info->fsid, sb->dev_item.fsid);
4126 		ret = -EINVAL;
4127 	}
4128 
4129 	/*
4130 	 * A hint to catch really bogus numbers, bitflips and the like; more
4131 	 * exact checks are done later.
4132 	 */
4133 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
4134 		btrfs_err(fs_info, "bytes_used is too small %llu",
4135 			  btrfs_super_bytes_used(sb));
4136 		ret = -EINVAL;
4137 	}
4138 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
4139 		btrfs_err(fs_info, "invalid stripesize %u",
4140 			  btrfs_super_stripesize(sb));
4141 		ret = -EINVAL;
4142 	}
4143 	if (btrfs_super_num_devices(sb) > (1UL << 31))
4144 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
4145 			   btrfs_super_num_devices(sb));
4146 	if (btrfs_super_num_devices(sb) == 0) {
4147 		btrfs_err(fs_info, "number of devices is 0");
4148 		ret = -EINVAL;
4149 	}
4150 
4151 	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4152 		btrfs_err(fs_info, "super offset mismatch %llu != %u",
4153 			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4154 		ret = -EINVAL;
4155 	}
4156 
4157 	/*
4158 	 * Obvious sys_chunk_array corruption: it must hold at least one key
4159 	 * and one chunk.
4160 	 */
4161 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4162 		btrfs_err(fs_info, "system chunk array too big %u > %u",
4163 			  btrfs_super_sys_array_size(sb),
4164 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4165 		ret = -EINVAL;
4166 	}
4167 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4168 			+ sizeof(struct btrfs_chunk)) {
4169 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
4170 			  btrfs_super_sys_array_size(sb),
4171 			  sizeof(struct btrfs_disk_key)
4172 			  + sizeof(struct btrfs_chunk));
4173 		ret = -EINVAL;
4174 	}
4175 
4176 	/*
4177 	 * The generation is a global counter; we'll trust it more than the
4178 	 * others, but it's still possible that it's the one that's wrong.
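	 * Both of the generation comparisons below therefore only warn and
	 * never fail the validation on their own.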
4179 */ 4180 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) 4181 btrfs_warn(fs_info, 4182 "suspicious: generation < chunk_root_generation: %llu < %llu", 4183 btrfs_super_generation(sb), 4184 btrfs_super_chunk_root_generation(sb)); 4185 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) 4186 && btrfs_super_cache_generation(sb) != (u64)-1) 4187 btrfs_warn(fs_info, 4188 "suspicious: generation < cache_generation: %llu < %llu", 4189 btrfs_super_generation(sb), 4190 btrfs_super_cache_generation(sb)); 4191 4192 return ret; 4193 } 4194 4195 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) 4196 { 4197 mutex_lock(&fs_info->cleaner_mutex); 4198 btrfs_run_delayed_iputs(fs_info); 4199 mutex_unlock(&fs_info->cleaner_mutex); 4200 4201 down_write(&fs_info->cleanup_work_sem); 4202 up_write(&fs_info->cleanup_work_sem); 4203 4204 /* cleanup FS via transaction */ 4205 btrfs_cleanup_transaction(fs_info); 4206 } 4207 4208 static void btrfs_destroy_ordered_extents(struct btrfs_root *root) 4209 { 4210 struct btrfs_ordered_extent *ordered; 4211 4212 spin_lock(&root->ordered_extent_lock); 4213 /* 4214 * This will just short circuit the ordered completion stuff which will 4215 * make sure the ordered extent gets properly cleaned up. 4216 */ 4217 list_for_each_entry(ordered, &root->ordered_extents, 4218 root_extent_list) 4219 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); 4220 spin_unlock(&root->ordered_extent_lock); 4221 } 4222 4223 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) 4224 { 4225 struct btrfs_root *root; 4226 struct list_head splice; 4227 4228 INIT_LIST_HEAD(&splice); 4229 4230 spin_lock(&fs_info->ordered_root_lock); 4231 list_splice_init(&fs_info->ordered_roots, &splice); 4232 while (!list_empty(&splice)) { 4233 root = list_first_entry(&splice, struct btrfs_root, 4234 ordered_root); 4235 list_move_tail(&root->ordered_root, 4236 &fs_info->ordered_roots); 4237 4238 spin_unlock(&fs_info->ordered_root_lock); 4239 btrfs_destroy_ordered_extents(root); 4240 4241 cond_resched(); 4242 spin_lock(&fs_info->ordered_root_lock); 4243 } 4244 spin_unlock(&fs_info->ordered_root_lock); 4245 } 4246 4247 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 4248 struct btrfs_fs_info *fs_info) 4249 { 4250 struct rb_node *node; 4251 struct btrfs_delayed_ref_root *delayed_refs; 4252 struct btrfs_delayed_ref_node *ref; 4253 int ret = 0; 4254 4255 delayed_refs = &trans->delayed_refs; 4256 4257 spin_lock(&delayed_refs->lock); 4258 if (atomic_read(&delayed_refs->num_entries) == 0) { 4259 spin_unlock(&delayed_refs->lock); 4260 btrfs_info(fs_info, "delayed_refs has NO entry"); 4261 return ret; 4262 } 4263 4264 while ((node = rb_first(&delayed_refs->href_root)) != NULL) { 4265 struct btrfs_delayed_ref_head *head; 4266 struct btrfs_delayed_ref_node *tmp; 4267 bool pin_bytes = false; 4268 4269 head = rb_entry(node, struct btrfs_delayed_ref_head, 4270 href_node); 4271 if (!mutex_trylock(&head->mutex)) { 4272 refcount_inc(&head->node.refs); 4273 spin_unlock(&delayed_refs->lock); 4274 4275 mutex_lock(&head->mutex); 4276 mutex_unlock(&head->mutex); 4277 btrfs_put_delayed_ref(&head->node); 4278 spin_lock(&delayed_refs->lock); 4279 continue; 4280 } 4281 spin_lock(&head->lock); 4282 list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list, 4283 list) { 4284 ref->in_tree = 0; 4285 list_del(&ref->list); 4286 if (!list_empty(&ref->add_list)) 4287 list_del(&ref->add_list); 4288 atomic_dec(&delayed_refs->num_entries); 4289 
btrfs_put_delayed_ref(ref); 4290 } 4291 if (head->must_insert_reserved) 4292 pin_bytes = true; 4293 btrfs_free_delayed_extent_op(head->extent_op); 4294 delayed_refs->num_heads--; 4295 if (head->processing == 0) 4296 delayed_refs->num_heads_ready--; 4297 atomic_dec(&delayed_refs->num_entries); 4298 head->node.in_tree = 0; 4299 rb_erase(&head->href_node, &delayed_refs->href_root); 4300 spin_unlock(&head->lock); 4301 spin_unlock(&delayed_refs->lock); 4302 mutex_unlock(&head->mutex); 4303 4304 if (pin_bytes) 4305 btrfs_pin_extent(fs_info, head->node.bytenr, 4306 head->node.num_bytes, 1); 4307 btrfs_put_delayed_ref(&head->node); 4308 cond_resched(); 4309 spin_lock(&delayed_refs->lock); 4310 } 4311 4312 spin_unlock(&delayed_refs->lock); 4313 4314 return ret; 4315 } 4316 4317 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) 4318 { 4319 struct btrfs_inode *btrfs_inode; 4320 struct list_head splice; 4321 4322 INIT_LIST_HEAD(&splice); 4323 4324 spin_lock(&root->delalloc_lock); 4325 list_splice_init(&root->delalloc_inodes, &splice); 4326 4327 while (!list_empty(&splice)) { 4328 btrfs_inode = list_first_entry(&splice, struct btrfs_inode, 4329 delalloc_inodes); 4330 4331 list_del_init(&btrfs_inode->delalloc_inodes); 4332 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 4333 &btrfs_inode->runtime_flags); 4334 spin_unlock(&root->delalloc_lock); 4335 4336 btrfs_invalidate_inodes(btrfs_inode->root); 4337 4338 spin_lock(&root->delalloc_lock); 4339 } 4340 4341 spin_unlock(&root->delalloc_lock); 4342 } 4343 4344 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info) 4345 { 4346 struct btrfs_root *root; 4347 struct list_head splice; 4348 4349 INIT_LIST_HEAD(&splice); 4350 4351 spin_lock(&fs_info->delalloc_root_lock); 4352 list_splice_init(&fs_info->delalloc_roots, &splice); 4353 while (!list_empty(&splice)) { 4354 root = list_first_entry(&splice, struct btrfs_root, 4355 delalloc_root); 4356 list_del_init(&root->delalloc_root); 4357 root = btrfs_grab_fs_root(root); 4358 BUG_ON(!root); 4359 spin_unlock(&fs_info->delalloc_root_lock); 4360 4361 btrfs_destroy_delalloc_inodes(root); 4362 btrfs_put_fs_root(root); 4363 4364 spin_lock(&fs_info->delalloc_root_lock); 4365 } 4366 spin_unlock(&fs_info->delalloc_root_lock); 4367 } 4368 4369 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, 4370 struct extent_io_tree *dirty_pages, 4371 int mark) 4372 { 4373 int ret; 4374 struct extent_buffer *eb; 4375 u64 start = 0; 4376 u64 end; 4377 4378 while (1) { 4379 ret = find_first_extent_bit(dirty_pages, start, &start, &end, 4380 mark, NULL); 4381 if (ret) 4382 break; 4383 4384 clear_extent_bits(dirty_pages, start, end, mark); 4385 while (start <= end) { 4386 eb = find_extent_buffer(fs_info, start); 4387 start += fs_info->nodesize; 4388 if (!eb) 4389 continue; 4390 wait_on_extent_buffer_writeback(eb); 4391 4392 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, 4393 &eb->bflags)) 4394 clear_extent_buffer_dirty(eb); 4395 free_extent_buffer_stale(eb); 4396 } 4397 } 4398 4399 return ret; 4400 } 4401 4402 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, 4403 struct extent_io_tree *pinned_extents) 4404 { 4405 struct extent_io_tree *unpin; 4406 u64 start; 4407 u64 end; 4408 int ret; 4409 bool loop = true; 4410 4411 unpin = pinned_extents; 4412 again: 4413 while (1) { 4414 ret = find_first_extent_bit(unpin, 0, &start, &end, 4415 EXTENT_DIRTY, NULL); 4416 if (ret) 4417 break; 4418 4419 clear_extent_dirty(unpin, start, end); 4420 btrfs_error_unpin_extent_range(fs_info, start, 
end);
4421 		cond_resched();
4422 	}
4423 
4424 	if (loop) {
4425 		if (unpin == &fs_info->freed_extents[0])
4426 			unpin = &fs_info->freed_extents[1];
4427 		else
4428 			unpin = &fs_info->freed_extents[0];
4429 		loop = false;
4430 		goto again;
4431 	}
4432 
4433 	return 0;
4434 }
4435 
4436 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4437 {
4438 	struct inode *inode;
4439 
4440 	inode = cache->io_ctl.inode;
4441 	if (inode) {
4442 		invalidate_inode_pages2(inode->i_mapping);
4443 		BTRFS_I(inode)->generation = 0;
4444 		cache->io_ctl.inode = NULL;
4445 		iput(inode);
4446 	}
4447 	btrfs_put_block_group(cache);
4448 }
4449 
4450 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4451 			     struct btrfs_fs_info *fs_info)
4452 {
4453 	struct btrfs_block_group_cache *cache;
4454 
4455 	spin_lock(&cur_trans->dirty_bgs_lock);
4456 	while (!list_empty(&cur_trans->dirty_bgs)) {
4457 		cache = list_first_entry(&cur_trans->dirty_bgs,
4458 					 struct btrfs_block_group_cache,
4459 					 dirty_list);
4460 		if (!cache) {
4461 			btrfs_err(fs_info, "orphan block group on dirty_bgs list");
4462 			spin_unlock(&cur_trans->dirty_bgs_lock);
4463 			return;
4464 		}
4465 
4466 		if (!list_empty(&cache->io_list)) {
4467 			spin_unlock(&cur_trans->dirty_bgs_lock);
4468 			list_del_init(&cache->io_list);
4469 			btrfs_cleanup_bg_io(cache);
4470 			spin_lock(&cur_trans->dirty_bgs_lock);
4471 		}
4472 
4473 		list_del_init(&cache->dirty_list);
4474 		spin_lock(&cache->lock);
4475 		cache->disk_cache_state = BTRFS_DC_ERROR;
4476 		spin_unlock(&cache->lock);
4477 
4478 		spin_unlock(&cur_trans->dirty_bgs_lock);
4479 		btrfs_put_block_group(cache);
4480 		spin_lock(&cur_trans->dirty_bgs_lock);
4481 	}
4482 	spin_unlock(&cur_trans->dirty_bgs_lock);
4483 
4484 	while (!list_empty(&cur_trans->io_bgs)) {
4485 		cache = list_first_entry(&cur_trans->io_bgs,
4486 					 struct btrfs_block_group_cache,
4487 					 io_list);
4488 		if (!cache) {
4489 			btrfs_err(fs_info, "orphan block group on io_bgs list");
4490 			return;
4491 		}
4492 
4493 		list_del_init(&cache->io_list);
4494 		spin_lock(&cache->lock);
4495 		cache->disk_cache_state = BTRFS_DC_ERROR;
4496 		spin_unlock(&cache->lock);
4497 		btrfs_cleanup_bg_io(cache);
4498 	}
4499 }
4500 
4501 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4502 				   struct btrfs_fs_info *fs_info)
4503 {
4504 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4505 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4506 	ASSERT(list_empty(&cur_trans->io_bgs));
4507 
4508 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4509 
4510 	cur_trans->state = TRANS_STATE_COMMIT_START;
4511 	wake_up(&fs_info->transaction_blocked_wait);
4512 
4513 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4514 	wake_up(&fs_info->transaction_wait);
4515 
4516 	btrfs_destroy_delayed_inodes(fs_info);
4517 	btrfs_assert_delayed_root_empty(fs_info);
4518 
4519 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4520 				     EXTENT_DIRTY);
4521 	btrfs_destroy_pinned_extent(fs_info,
4522 				    fs_info->pinned_extents);
4523 
4524 	cur_trans->state = TRANS_STATE_COMPLETED;
4525 	wake_up(&cur_trans->commit_wait);
4526 }
4527 
4528 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4529 {
4530 	struct btrfs_transaction *t;
4531 
4532 	mutex_lock(&fs_info->transaction_kthread_mutex);
4533 
4534 	spin_lock(&fs_info->trans_lock);
4535 	while (!list_empty(&fs_info->trans_list)) {
4536 		t = list_first_entry(&fs_info->trans_list,
4537 				     struct btrfs_transaction, list);
4538 		if (t->state >= TRANS_STATE_COMMIT_START) {
4539 			refcount_inc(&t->use_count);
4540 			spin_unlock(&fs_info->trans_lock);
4541 			btrfs_wait_for_commit(fs_info, t->transid);
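			/* drop the extra reference taken before the wait */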
4542 btrfs_put_transaction(t); 4543 spin_lock(&fs_info->trans_lock); 4544 continue; 4545 } 4546 if (t == fs_info->running_transaction) { 4547 t->state = TRANS_STATE_COMMIT_DOING; 4548 spin_unlock(&fs_info->trans_lock); 4549 /* 4550 * We wait for 0 num_writers since we don't hold a trans 4551 * handle open currently for this transaction. 4552 */ 4553 wait_event(t->writer_wait, 4554 atomic_read(&t->num_writers) == 0); 4555 } else { 4556 spin_unlock(&fs_info->trans_lock); 4557 } 4558 btrfs_cleanup_one_transaction(t, fs_info); 4559 4560 spin_lock(&fs_info->trans_lock); 4561 if (t == fs_info->running_transaction) 4562 fs_info->running_transaction = NULL; 4563 list_del_init(&t->list); 4564 spin_unlock(&fs_info->trans_lock); 4565 4566 btrfs_put_transaction(t); 4567 trace_btrfs_transaction_commit(fs_info->tree_root); 4568 spin_lock(&fs_info->trans_lock); 4569 } 4570 spin_unlock(&fs_info->trans_lock); 4571 btrfs_destroy_all_ordered_extents(fs_info); 4572 btrfs_destroy_delayed_inodes(fs_info); 4573 btrfs_assert_delayed_root_empty(fs_info); 4574 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents); 4575 btrfs_destroy_all_delalloc_inodes(fs_info); 4576 mutex_unlock(&fs_info->transaction_kthread_mutex); 4577 4578 return 0; 4579 } 4580 4581 static struct btrfs_fs_info *btree_fs_info(void *private_data) 4582 { 4583 struct inode *inode = private_data; 4584 return btrfs_sb(inode->i_sb); 4585 } 4586 4587 static const struct extent_io_ops btree_extent_io_ops = { 4588 /* mandatory callbacks */ 4589 .submit_bio_hook = btree_submit_bio_hook, 4590 .readpage_end_io_hook = btree_readpage_end_io_hook, 4591 /* note we're sharing with inode.c for the merge bio hook */ 4592 .merge_bio_hook = btrfs_merge_bio_hook, 4593 .readpage_io_failed_hook = btree_io_failed_hook, 4594 .set_range_writeback = btrfs_set_range_writeback, 4595 .tree_fs_info = btree_fs_info, 4596 4597 /* optional callbacks */ 4598 }; 4599