/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02110-1301, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "qgroup.h"
#include "print-tree.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represents a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer got by COWing the block */
	struct extent_buffer *eb;
	/* level of the tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represents a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};
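/*
 * Note: the backref cache is an in-memory graph of tree blocks.  Each
 * backref_node is keyed by block bytenr in cache->rb_root, and each
 * backref_edge links a child block to one of its referencing parents:
 *
 *	edge->node[LOWER] -> child block
 *	edge->node[UPPER] -> parent block
 *	edge->list[LOWER] is linked into child->upper
 *	edge->list[UPPER] is linked into parent->lower
 *
 * so walking child->upper visits all parents of a block, and walking
 * parent->lower visits all cached children.
 */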
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represents a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}
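/*
 * Note: the allocators below use GFP_NOFS because relocation always runs
 * in filesystem context, and they maintain the per-cache counters
 * (nr_nodes, nr_edges) that back the leak asserts in
 * backref_cache_cleanup() above.
 */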
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}

/*
 * walk up backref nodes until we reach the node that represents a
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}
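/*
 * Note: free_extent_buffer() below only drops this node's reference to
 * the buffer; the extent buffer itself may stay cached in the page cache.
 */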
static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf-node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}
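/*
 * Note: update_backref_cache() below returns 1 when the cache was
 * re-keyed for a new transaction, which signals callers that any
 * bytenr-based lookups cached across the commit must be redone.
 */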
/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookups. a transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors while processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}

static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so
	 * the backref node for the fs tree root is useless for
	 * relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
	    root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}
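/*
 * Note: find_inline_backref() below walks the inline refs stored right
 * after the extent item.  The on-disk layout it expects is roughly:
 *
 *	EXTENT_ITEM_KEY:   [btrfs_extent_item][btrfs_tree_block_info][inline refs...]
 *	METADATA_ITEM_KEY: [btrfs_extent_item][inline refs...]
 *
 * (skinny metadata items encode the block level in the key instead of
 * carrying a separate btrfs_tree_block_info).
 */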
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif

static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
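/*
 * Note: the backref "tree" built below is upside down relative to the
 * b-trees on disk: its root is the block being relocated and its leaves
 * are the btrfs roots that directly or indirectly reference it, e.g.
 *
 *	        block @ bytenr           <- node returned to the caller
 *	       /              \
 *	  parent block    parent block   <- found via extent tree backrefs
 *	       |               |
 *	   fs root A       fs root B     <- backref_node->root is set here
 */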
/*
 * build a backref tree for a given tree block. the root of the backref
 * tree corresponds to the tree block, and the leaves of the backref tree
 * correspond to the roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when a tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = READA_FORWARD;
	path2->reada = READA_FORWARD;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	ASSERT(ret);
	ASSERT(path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we
		 * need to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline backref */
			struct btrfs_extent_inline_ref *iref;
			int type;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				err = -EINVAL;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);

			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}
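		/*
		 * Note: the #ifdef block below keeps compatibility with
		 * the old (v0) extent tree format, where backrefs were
		 * stored as separate BTRFS_EXTENT_REF_V0_KEY items rather
		 * than typed inline/keyed references.
		 */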
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backrefs of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				ASSERT(root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/*
		 * search the tree to find upper level blocks that
		 * reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		if (btrfs_node_blockptr(eb, path2->slots[level]) !=
		    cur->bytenr) {
			btrfs_err(root->fs_info,
	"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
				  cur->bytenr, level - 1, root->objectid,
				  node_key->objectid, node_key->type,
				  node_key->offset);
			err = -ENOENT;
			goto out;
		}
		lower = cur;
		need_check = true;
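		/*
		 * Note: walk up path2 linking one lower->upper edge per
		 * level.  The walk stops at the first block that is
		 * already in the cache (rb_node found), because every
		 * block above a cached block is guaranteed to be cached
		 * as well.
		 */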
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				ASSERT(btrfs_root_bytenr(&root->root_item) ==
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to the pending list if we
				 * need to check its backrefs. we only do
				 * this once while walking up a tree as we
				 * will catch anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything went well, connect backref nodes and insert backref
	 * nodes into the cache.
	 */
	ASSERT(node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		if (!upper->checked) {
			/*
			 * Still want to blow up for developers since this is a
			 * logic bug.
			 */
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		if (cowonly != upper->cowonly) {
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}

		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
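	/*
	 * Note: "useless" nodes are blocks whose only referencing root
	 * should be ignored (see should_ignore_root()).  Leaf-level nodes
	 * are freed outright, while upper level nodes are kept detached
	 * so the negative result stays cached for later lookups.
	 */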
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookups.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		ASSERT(list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
		}
		while (!list_empty(&list)) {
			edge = list_first_entry(&list, struct backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);

			/*
			 * Lower is no longer linked to any upper backref
			 * nodes and isn't in the cache, we can free it
			 * ourselves.
			 */
			if (list_empty(&lower->upper) &&
			    RB_EMPTY_NODE(&lower->rb_node))
				list_add(&lower->list, &useless);

			if (!RB_EMPTY_NODE(&upper->rb_node))
				continue;

			/* Add this guy's upper edges to the list to process */
			list_for_each_entry(edge, &upper->upper, list[LOWER])
				list_add_tail(&edge->list[UPPER], &list);
			if (list_empty(&upper->upper))
				list_add(&upper->list, &useless);
		}

		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
			if (lower == node)
				node = NULL;
			free_backref_node(cache, lower);
		}

		free_backref_node(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	return node;
}
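/*
 * Note: clone_backref_node() below is invoked when a snapshot is created
 * while relocation is in progress (from the relocation post-snapshot
 * hook), so the cache stays consistent with the brand new root that now
 * shares the source tree's blocks.
 */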
/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(fs_info, -EEXIST,
			    "Duplicate root found for start=%llu while inserting into relocation tree",
			    node->bytenr);
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
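/*
 * Note: the reloc_root_tree mapping is keyed by the bytenr of the reloc
 * tree's root node, so whenever that root node is COWed the entry has to
 * be re-keyed; that is what __update_reloc_root() below is for.
 */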
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
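/*
 * Note: a reloc tree is a COW copy of an fs root made under the special
 * objectid BTRFS_TREE_RELOC_OBJECTID; the source root's objectid is
 * remembered in root_key.offset, which is how the two trees are matched
 * up again later.
 */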
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
		/*
		 * Set the last_snapshot field to the generation of the
		 * commit root - like this ctree.c:btrfs_block_can_be_shared()
		 * behaves correctly (returns true) both when the relocation
		 * root is created inside the critical section of a
		 * transaction commit (through
		 * transaction.c:qgroup_account_snapshot()) and when it's
		 * created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, and all tree blocks
		 * modified after the reloc tree was created have the
		 * RELOC flag set in their headers. so it's OK to not
		 * update the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}

/*
 * create a reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	root->reloc_root = reloc_root;
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!root->reloc_root)
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		root->reloc_root = NULL;
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);

out:
	return 0;
}
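/*
 * Note: find_next_inode() below relies on igrab(), which takes a
 * reference and returns NULL when the inode is being torn down; every
 * inode it returns must eventually be released with iput() (or
 * btrfs_add_delayed_iput()).
 */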
/*
 * helper to find the first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}

static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}
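/*
 * Note: rc->data_inode holds the pre-copied data for the block group
 * being relocated, laid out so that file offsets map linearly onto the
 * old block group; index_cnt on that inode stores the block group's
 * start bytenr, hence the subtraction in get_new_location() below.
 */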
/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in an fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(BTRFS_I(inode),
						key.offset, end, 1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}
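		/*
		 * Note: rewriting disk_bytenr means the extent tree must
		 * be updated as well: take a reference on the new extent
		 * first, then drop the reference on the old one, both
		 * keyed by (owner, inode objectid, adjusted file offset).
		 */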
		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in an fs tree with the new blocks in the
 * reloc tree. tree blocks that haven't been modified since the reloc
 * tree was created can be replaced.
 *
 * if a block was replaced, the level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
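	/*
	 * Note: the loop below runs twice.  The first pass descends with
	 * cow == 0 just to locate a swappable block; once one is found,
	 * cow is set to 1 and we restart from the root so that every
	 * block on the way down is COWed before pointers are swapped.
	 */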
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			} else if (!extent_buffer_uptodate(eb)) {
				ret = -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * Inform qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *    and tree block numbers, if the current trans doesn't
		 *    free the data reloc tree inode.
		 */
		ret = btrfs_qgroup_trace_subtree(trans, src, parent,
				btrfs_header_generation(parent),
				btrfs_header_level(parent));
		if (ret < 0)
			break;
		ret = btrfs_qgroup_trace_subtree(trans, dest,
				path->nodes[level],
				btrfs_header_generation(path->nodes[level]),
				btrfs_header_level(path->nodes[level]));
		if (ret < 0)
			break;
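		/*
		 * Note: the four ref updates below keep both blocks
		 * referenced at all times: each block first gains a
		 * backref from its new parent, and only then is the
		 * backref from its old parent dropped.
		 */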
1944 */ 1945 btrfs_set_node_blockptr(parent, slot, new_bytenr); 1946 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen); 1947 btrfs_mark_buffer_dirty(parent); 1948 1949 btrfs_set_node_blockptr(path->nodes[level], 1950 path->slots[level], old_bytenr); 1951 btrfs_set_node_ptr_generation(path->nodes[level], 1952 path->slots[level], old_ptr_gen); 1953 btrfs_mark_buffer_dirty(path->nodes[level]); 1954 1955 ret = btrfs_inc_extent_ref(trans, fs_info, old_bytenr, 1956 blocksize, path->nodes[level]->start, 1957 src->root_key.objectid, level - 1, 0); 1958 BUG_ON(ret); 1959 ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr, 1960 blocksize, 0, dest->root_key.objectid, 1961 level - 1, 0); 1962 BUG_ON(ret); 1963 1964 ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize, 1965 path->nodes[level]->start, 1966 src->root_key.objectid, level - 1, 0); 1967 BUG_ON(ret); 1968 1969 ret = btrfs_free_extent(trans, fs_info, old_bytenr, blocksize, 1970 0, dest->root_key.objectid, level - 1, 1971 0); 1972 BUG_ON(ret); 1973 1974 btrfs_unlock_up_safe(path, 0); 1975 1976 ret = level; 1977 break; 1978 } 1979 btrfs_tree_unlock(parent); 1980 free_extent_buffer(parent); 1981 return ret; 1982 } 1983 1984 /* 1985 * helper to find next relocated block in reloc tree 1986 */ 1987 static noinline_for_stack 1988 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, 1989 int *level) 1990 { 1991 struct extent_buffer *eb; 1992 int i; 1993 u64 last_snapshot; 1994 u32 nritems; 1995 1996 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 1997 1998 for (i = 0; i < *level; i++) { 1999 free_extent_buffer(path->nodes[i]); 2000 path->nodes[i] = NULL; 2001 } 2002 2003 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { 2004 eb = path->nodes[i]; 2005 nritems = btrfs_header_nritems(eb); 2006 while (path->slots[i] + 1 < nritems) { 2007 path->slots[i]++; 2008 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= 2009 last_snapshot) 2010 continue; 2011 2012 *level = i; 2013 return 0; 2014 } 2015 free_extent_buffer(path->nodes[i]); 2016 path->nodes[i] = NULL; 2017 } 2018 return 1; 2019 } 2020 2021 /* 2022 * walk down reloc tree to find relocated block of lowest level 2023 */ 2024 static noinline_for_stack 2025 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path, 2026 int *level) 2027 { 2028 struct btrfs_fs_info *fs_info = root->fs_info; 2029 struct extent_buffer *eb = NULL; 2030 int i; 2031 u64 bytenr; 2032 u64 ptr_gen = 0; 2033 u64 last_snapshot; 2034 u32 nritems; 2035 2036 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2037 2038 for (i = *level; i > 0; i--) { 2039 eb = path->nodes[i]; 2040 nritems = btrfs_header_nritems(eb); 2041 while (path->slots[i] < nritems) { 2042 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); 2043 if (ptr_gen > last_snapshot) 2044 break; 2045 path->slots[i]++; 2046 } 2047 if (path->slots[i] >= nritems) { 2048 if (i == *level) 2049 break; 2050 *level = i + 1; 2051 return 0; 2052 } 2053 if (i == 1) { 2054 *level = i; 2055 return 0; 2056 } 2057 2058 bytenr = btrfs_node_blockptr(eb, path->slots[i]); 2059 eb = read_tree_block(fs_info, bytenr, ptr_gen); 2060 if (IS_ERR(eb)) { 2061 return PTR_ERR(eb); 2062 } else if (!extent_buffer_uptodate(eb)) { 2063 free_extent_buffer(eb); 2064 return -EIO; 2065 } 2066 BUG_ON(btrfs_header_level(eb) != i - 1); 2067 path->nodes[i - 1] = eb; 2068 path->slots[i - 1] = 0; 2069 } 2070 return 1; 2071 } 2072 2073 /* 2074 * invalidate extent cache for file extents whose key in range of 2075 * [min_key, 
max_key) 2076 */ 2077 static int invalidate_extent_cache(struct btrfs_root *root, 2078 struct btrfs_key *min_key, 2079 struct btrfs_key *max_key) 2080 { 2081 struct btrfs_fs_info *fs_info = root->fs_info; 2082 struct inode *inode = NULL; 2083 u64 objectid; 2084 u64 start, end; 2085 u64 ino; 2086 2087 objectid = min_key->objectid; 2088 while (1) { 2089 cond_resched(); 2090 iput(inode); 2091 2092 if (objectid > max_key->objectid) 2093 break; 2094 2095 inode = find_next_inode(root, objectid); 2096 if (!inode) 2097 break; 2098 ino = btrfs_ino(BTRFS_I(inode)); 2099 2100 if (ino > max_key->objectid) { 2101 iput(inode); 2102 break; 2103 } 2104 2105 objectid = ino + 1; 2106 if (!S_ISREG(inode->i_mode)) 2107 continue; 2108 2109 if (unlikely(min_key->objectid == ino)) { 2110 if (min_key->type > BTRFS_EXTENT_DATA_KEY) 2111 continue; 2112 if (min_key->type < BTRFS_EXTENT_DATA_KEY) 2113 start = 0; 2114 else { 2115 start = min_key->offset; 2116 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize)); 2117 } 2118 } else { 2119 start = 0; 2120 } 2121 2122 if (unlikely(max_key->objectid == ino)) { 2123 if (max_key->type < BTRFS_EXTENT_DATA_KEY) 2124 continue; 2125 if (max_key->type > BTRFS_EXTENT_DATA_KEY) { 2126 end = (u64)-1; 2127 } else { 2128 if (max_key->offset == 0) 2129 continue; 2130 end = max_key->offset; 2131 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize)); 2132 end--; 2133 } 2134 } else { 2135 end = (u64)-1; 2136 } 2137 2138 /* the lock_extent waits for readpage to complete */ 2139 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 2140 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1); 2141 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 2142 } 2143 return 0; 2144 } 2145 2146 static int find_next_key(struct btrfs_path *path, int level, 2147 struct btrfs_key *key) 2148 2149 { 2150 while (level < BTRFS_MAX_LEVEL) { 2151 if (!path->nodes[level]) 2152 break; 2153 if (path->slots[level] + 1 < 2154 btrfs_header_nritems(path->nodes[level])) { 2155 btrfs_node_key_to_cpu(path->nodes[level], key, 2156 path->slots[level] + 1); 2157 return 0; 2158 } 2159 level++; 2160 } 2161 return 1; 2162 } 2163 2164 /* 2165 * merge the relocated tree blocks in reloc tree with corresponding 2166 * fs tree. 
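 *
 * A rough sketch of the merge loop below (helper names as in this
 * file, resume-from-drop_progress details omitted):
 *
 *   while (1) {
 *           walk_down_reloc_tree();  find the next block COWed after
 *                                    the last snapshot
 *           replace_path();          splice it into the fs tree
 *           walk_up_reloc_tree();    advance to the next subtree
 *           record drop_progress and restart the transaction;
 *   }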
2167 */ 2168 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, 2169 struct btrfs_root *root) 2170 { 2171 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2172 LIST_HEAD(inode_list); 2173 struct btrfs_key key; 2174 struct btrfs_key next_key; 2175 struct btrfs_trans_handle *trans = NULL; 2176 struct btrfs_root *reloc_root; 2177 struct btrfs_root_item *root_item; 2178 struct btrfs_path *path; 2179 struct extent_buffer *leaf; 2180 int level; 2181 int max_level; 2182 int replaced = 0; 2183 int ret; 2184 int err = 0; 2185 u32 min_reserved; 2186 2187 path = btrfs_alloc_path(); 2188 if (!path) 2189 return -ENOMEM; 2190 path->reada = READA_FORWARD; 2191 2192 reloc_root = root->reloc_root; 2193 root_item = &reloc_root->root_item; 2194 2195 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 2196 level = btrfs_root_level(root_item); 2197 extent_buffer_get(reloc_root->node); 2198 path->nodes[level] = reloc_root->node; 2199 path->slots[level] = 0; 2200 } else { 2201 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 2202 2203 level = root_item->drop_level; 2204 BUG_ON(level == 0); 2205 path->lowest_level = level; 2206 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); 2207 path->lowest_level = 0; 2208 if (ret < 0) { 2209 btrfs_free_path(path); 2210 return ret; 2211 } 2212 2213 btrfs_node_key_to_cpu(path->nodes[level], &next_key, 2214 path->slots[level]); 2215 WARN_ON(memcmp(&key, &next_key, sizeof(key))); 2216 2217 btrfs_unlock_up_safe(path, 0); 2218 } 2219 2220 min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2221 memset(&next_key, 0, sizeof(next_key)); 2222 2223 while (1) { 2224 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved, 2225 BTRFS_RESERVE_FLUSH_ALL); 2226 if (ret) { 2227 err = ret; 2228 goto out; 2229 } 2230 trans = btrfs_start_transaction(root, 0); 2231 if (IS_ERR(trans)) { 2232 err = PTR_ERR(trans); 2233 trans = NULL; 2234 goto out; 2235 } 2236 trans->block_rsv = rc->block_rsv; 2237 2238 replaced = 0; 2239 max_level = level; 2240 2241 ret = walk_down_reloc_tree(reloc_root, path, &level); 2242 if (ret < 0) { 2243 err = ret; 2244 goto out; 2245 } 2246 if (ret > 0) 2247 break; 2248 2249 if (!find_next_key(path, level, &key) && 2250 btrfs_comp_cpu_keys(&next_key, &key) >= 0) { 2251 ret = 0; 2252 } else { 2253 ret = replace_path(trans, root, reloc_root, path, 2254 &next_key, level, max_level); 2255 } 2256 if (ret < 0) { 2257 err = ret; 2258 goto out; 2259 } 2260 2261 if (ret > 0) { 2262 level = ret; 2263 btrfs_node_key_to_cpu(path->nodes[level], &key, 2264 path->slots[level]); 2265 replaced = 1; 2266 } 2267 2268 ret = walk_up_reloc_tree(reloc_root, path, &level); 2269 if (ret > 0) 2270 break; 2271 2272 BUG_ON(level == 0); 2273 /* 2274 * save the merging progress in the drop_progress. 2275 * this is OK since root refs == 1 in this case. 2276 */ 2277 btrfs_node_key(path->nodes[level], &root_item->drop_progress, 2278 path->slots[level]); 2279 root_item->drop_level = level; 2280 2281 btrfs_end_transaction_throttle(trans); 2282 trans = NULL; 2283 2284 btrfs_btree_balance_dirty(fs_info); 2285 2286 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2287 invalidate_extent_cache(root, &key, &next_key); 2288 } 2289 2290 /* 2291 * handle the case where only one block in the fs tree needs to be 2292 * relocated and the block is the tree root.
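 * COWing the root once is enough for that: the COW goes through the
 * relocation-aware COW path (see btrfs_reloc_cow_block), which picks
 * up the new location, so no explicit pointer swap is needed here.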
2293 */ 2294 leaf = btrfs_lock_root_node(root); 2295 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf); 2296 btrfs_tree_unlock(leaf); 2297 free_extent_buffer(leaf); 2298 if (ret < 0) 2299 err = ret; 2300 out: 2301 btrfs_free_path(path); 2302 2303 if (err == 0) { 2304 memset(&root_item->drop_progress, 0, 2305 sizeof(root_item->drop_progress)); 2306 root_item->drop_level = 0; 2307 btrfs_set_root_refs(root_item, 0); 2308 btrfs_update_reloc_root(trans, root); 2309 } 2310 2311 if (trans) 2312 btrfs_end_transaction_throttle(trans); 2313 2314 btrfs_btree_balance_dirty(fs_info); 2315 2316 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2317 invalidate_extent_cache(root, &key, &next_key); 2318 2319 return err; 2320 } 2321 2322 static noinline_for_stack 2323 int prepare_to_merge(struct reloc_control *rc, int err) 2324 { 2325 struct btrfs_root *root = rc->extent_root; 2326 struct btrfs_fs_info *fs_info = root->fs_info; 2327 struct btrfs_root *reloc_root; 2328 struct btrfs_trans_handle *trans; 2329 LIST_HEAD(reloc_roots); 2330 u64 num_bytes = 0; 2331 int ret; 2332 2333 mutex_lock(&fs_info->reloc_mutex); 2334 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2335 rc->merging_rsv_size += rc->nodes_relocated * 2; 2336 mutex_unlock(&fs_info->reloc_mutex); 2337 2338 again: 2339 if (!err) { 2340 num_bytes = rc->merging_rsv_size; 2341 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes, 2342 BTRFS_RESERVE_FLUSH_ALL); 2343 if (ret) 2344 err = ret; 2345 } 2346 2347 trans = btrfs_join_transaction(rc->extent_root); 2348 if (IS_ERR(trans)) { 2349 if (!err) 2350 btrfs_block_rsv_release(fs_info, rc->block_rsv, 2351 num_bytes); 2352 return PTR_ERR(trans); 2353 } 2354 2355 if (!err) { 2356 if (num_bytes != rc->merging_rsv_size) { 2357 btrfs_end_transaction(trans); 2358 btrfs_block_rsv_release(fs_info, rc->block_rsv, 2359 num_bytes); 2360 goto again; 2361 } 2362 } 2363 2364 rc->merge_reloc_tree = 1; 2365 2366 while (!list_empty(&rc->reloc_roots)) { 2367 reloc_root = list_entry(rc->reloc_roots.next, 2368 struct btrfs_root, root_list); 2369 list_del_init(&reloc_root->root_list); 2370 2371 root = read_fs_root(fs_info, reloc_root->root_key.offset); 2372 BUG_ON(IS_ERR(root)); 2373 BUG_ON(root->reloc_root != reloc_root); 2374 2375 /* 2376 * set reference count to 1, so btrfs_recover_relocation 2377 * knows it should resume merging 2378 */ 2379 if (!err) 2380 btrfs_set_root_refs(&reloc_root->root_item, 1); 2381 btrfs_update_reloc_root(trans, root); 2382 2383 list_add(&reloc_root->root_list, &reloc_roots); 2384 } 2385 2386 list_splice(&reloc_roots, &rc->reloc_roots); 2387 2388 if (!err) 2389 btrfs_commit_transaction(trans); 2390 else 2391 btrfs_end_transaction(trans); 2392 return err; 2393 } 2394 2395 static noinline_for_stack 2396 void free_reloc_roots(struct list_head *list) 2397 { 2398 struct btrfs_root *reloc_root; 2399 2400 while (!list_empty(list)) { 2401 reloc_root = list_entry(list->next, struct btrfs_root, 2402 root_list); 2403 free_extent_buffer(reloc_root->node); 2404 free_extent_buffer(reloc_root->commit_root); 2405 reloc_root->node = NULL; 2406 reloc_root->commit_root = NULL; 2407 __del_reloc_root(reloc_root); 2408 } 2409 } 2410 2411 static noinline_for_stack 2412 void merge_reloc_roots(struct reloc_control *rc) 2413 { 2414 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2415 struct btrfs_root *root; 2416 struct btrfs_root *reloc_root; 2417 LIST_HEAD(reloc_roots); 2418 int found = 0; 2419 int ret = 0; 2420 again: 2421 root = rc->extent_root; 2422 2423 /* 2424 * this
serializes us with btrfs_record_root_in_transaction, 2425 * we have to make sure nobody is in the middle of 2426 * adding their roots to the list while we are 2427 * doing this splice 2428 */ 2429 mutex_lock(&fs_info->reloc_mutex); 2430 list_splice_init(&rc->reloc_roots, &reloc_roots); 2431 mutex_unlock(&fs_info->reloc_mutex); 2432 2433 while (!list_empty(&reloc_roots)) { 2434 found = 1; 2435 reloc_root = list_entry(reloc_roots.next, 2436 struct btrfs_root, root_list); 2437 2438 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 2439 root = read_fs_root(fs_info, 2440 reloc_root->root_key.offset); 2441 BUG_ON(IS_ERR(root)); 2442 BUG_ON(root->reloc_root != reloc_root); 2443 2444 ret = merge_reloc_root(rc, root); 2445 if (ret) { 2446 if (list_empty(&reloc_root->root_list)) 2447 list_add_tail(&reloc_root->root_list, 2448 &reloc_roots); 2449 goto out; 2450 } 2451 } else { 2452 list_del_init(&reloc_root->root_list); 2453 } 2454 2455 ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1); 2456 if (ret < 0) { 2457 if (list_empty(&reloc_root->root_list)) 2458 list_add_tail(&reloc_root->root_list, 2459 &reloc_roots); 2460 goto out; 2461 } 2462 } 2463 2464 if (found) { 2465 found = 0; 2466 goto again; 2467 } 2468 out: 2469 if (ret) { 2470 btrfs_handle_fs_error(fs_info, ret, NULL); 2471 if (!list_empty(&reloc_roots)) 2472 free_reloc_roots(&reloc_roots); 2473 2474 /* new reloc root may be added */ 2475 mutex_lock(&fs_info->reloc_mutex); 2476 list_splice_init(&rc->reloc_roots, &reloc_roots); 2477 mutex_unlock(&fs_info->reloc_mutex); 2478 if (!list_empty(&reloc_roots)) 2479 free_reloc_roots(&reloc_roots); 2480 } 2481 2482 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2483 } 2484 2485 static void free_block_list(struct rb_root *blocks) 2486 { 2487 struct tree_block *block; 2488 struct rb_node *rb_node; 2489 while ((rb_node = rb_first(blocks))) { 2490 block = rb_entry(rb_node, struct tree_block, rb_node); 2491 rb_erase(rb_node, blocks); 2492 kfree(block); 2493 } 2494 } 2495 2496 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2497 struct btrfs_root *reloc_root) 2498 { 2499 struct btrfs_fs_info *fs_info = reloc_root->fs_info; 2500 struct btrfs_root *root; 2501 2502 if (reloc_root->last_trans == trans->transid) 2503 return 0; 2504 2505 root = read_fs_root(fs_info, reloc_root->root_key.offset); 2506 BUG_ON(IS_ERR(root)); 2507 BUG_ON(root->reloc_root != reloc_root); 2508 2509 return btrfs_record_root_in_trans(trans, root); 2510 } 2511 2512 static noinline_for_stack 2513 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2514 struct reloc_control *rc, 2515 struct backref_node *node, 2516 struct backref_edge *edges[]) 2517 { 2518 struct backref_node *next; 2519 struct btrfs_root *root; 2520 int index = 0; 2521 2522 next = node; 2523 while (1) { 2524 cond_resched(); 2525 next = walk_up_backref(next, edges, &index); 2526 root = next->root; 2527 BUG_ON(!root); 2528 BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state)); 2529 2530 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2531 record_reloc_root_in_trans(trans, root); 2532 break; 2533 } 2534 2535 btrfs_record_root_in_trans(trans, root); 2536 root = root->reloc_root; 2537 2538 if (next->new_bytenr != root->node->start) { 2539 BUG_ON(next->new_bytenr); 2540 BUG_ON(!list_empty(&next->list)); 2541 next->new_bytenr = root->node->start; 2542 next->root = root; 2543 list_add_tail(&next->list, 2544 &rc->backref_cache.changed); 2545 __mark_block_processed(rc, next); 2546 break; 2547 } 2548 2549 
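		/*
		 * Reaching this point means new_bytenr already matches the
		 * reloc root's node, i.e. the node was set up before; that
		 * is not expected on this path, so warn and fall back to
		 * walking down the backref edges.
		 */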
WARN_ON(1); 2550 root = NULL; 2551 next = walk_down_backref(edges, &index); 2552 if (!next || next->level <= node->level) 2553 break; 2554 } 2555 if (!root) 2556 return NULL; 2557 2558 next = node; 2559 /* setup backref node path for btrfs_reloc_cow_block */ 2560 while (1) { 2561 rc->backref_cache.path[next->level] = next; 2562 if (--index < 0) 2563 break; 2564 next = edges[index]->node[UPPER]; 2565 } 2566 return root; 2567 } 2568 2569 /* 2570 * select a tree root for relocation. return NULL if the block 2571 * is reference counted. we should use do_relocation() in this 2572 * case. return a tree root pointer if the block isn't reference 2573 * counted. return -ENOENT if the block is the root of a reloc tree. 2574 */ 2575 static noinline_for_stack 2576 struct btrfs_root *select_one_root(struct backref_node *node) 2577 { 2578 struct backref_node *next; 2579 struct btrfs_root *root; 2580 struct btrfs_root *fs_root = NULL; 2581 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2582 int index = 0; 2583 2584 next = node; 2585 while (1) { 2586 cond_resched(); 2587 next = walk_up_backref(next, edges, &index); 2588 root = next->root; 2589 BUG_ON(!root); 2590 2591 /* no other choice for a non-reference counted tree */ 2592 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 2593 return root; 2594 2595 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) 2596 fs_root = root; 2597 2598 if (next != node) 2599 return NULL; 2600 2601 next = walk_down_backref(edges, &index); 2602 if (!next || next->level <= node->level) 2603 break; 2604 } 2605 2606 if (!fs_root) 2607 return ERR_PTR(-ENOENT); 2608 return fs_root; 2609 } 2610 2611 static noinline_for_stack 2612 u64 calcu_metadata_size(struct reloc_control *rc, 2613 struct backref_node *node, int reserve) 2614 { 2615 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2616 struct backref_node *next = node; 2617 struct backref_edge *edge; 2618 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2619 u64 num_bytes = 0; 2620 int index = 0; 2621 2622 BUG_ON(reserve && node->processed); 2623 2624 while (next) { 2625 cond_resched(); 2626 while (1) { 2627 if (next->processed && (reserve || next != node)) 2628 break; 2629 2630 num_bytes += fs_info->nodesize; 2631 2632 if (list_empty(&next->upper)) 2633 break; 2634 2635 edge = list_entry(next->upper.next, 2636 struct backref_edge, list[LOWER]); 2637 edges[index++] = edge; 2638 next = edge->node[UPPER]; 2639 } 2640 next = walk_down_backref(edges, &index); 2641 } 2642 return num_bytes; 2643 } 2644 2645 static int reserve_metadata_space(struct btrfs_trans_handle *trans, 2646 struct reloc_control *rc, 2647 struct backref_node *node) 2648 { 2649 struct btrfs_root *root = rc->extent_root; 2650 struct btrfs_fs_info *fs_info = root->fs_info; 2651 u64 num_bytes; 2652 int ret; 2653 u64 tmp; 2654 2655 num_bytes = calcu_metadata_size(rc, node, 1) * 2; 2656 2657 trans->block_rsv = rc->block_rsv; 2658 rc->reserved_bytes += num_bytes; 2659 2660 /* 2661 * We are under a transaction here so we can only do limited flushing. 2662 * If we get an enospc just kick back -EAGAIN so we know to drop the 2663 * transaction and try to refill when we can flush all the things. 2664 */ 2665 ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes, 2666 BTRFS_RESERVE_FLUSH_LIMIT); 2667 if (ret) { 2668 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES; 2669 while (tmp <= rc->reserved_bytes) 2670 tmp <<= 1; 2671 /* 2672 * only one thread can access block_rsv at this point, 2673 * so we don't need to hold a lock to protect block_rsv. 2674 * we expand the reservation size here to allow enough 2675 * space for relocation, and we will return earlier in 2676 * the enospc case. (For example, with a 16K nodesize, tmp above starts at 16K * 256 = 4M and doubles while it stays at or below rc->reserved_bytes.) 2677 */ 2678 rc->block_rsv->size = tmp + fs_info->nodesize * 2679 RELOCATION_RESERVED_NODES; 2680 return -EAGAIN; 2681 } 2682 2683 return 0; 2684 } 2685 2686 /* 2687 * relocate a tree block, and then update pointers in upper level 2688 * blocks that reference the block to point to the new location. 2689 * 2690 * if called by link_to_upper, the block has already been relocated. 2691 * in that case this function just updates pointers. 2692 */ 2693 static int do_relocation(struct btrfs_trans_handle *trans, 2694 struct reloc_control *rc, 2695 struct backref_node *node, 2696 struct btrfs_key *key, 2697 struct btrfs_path *path, int lowest) 2698 { 2699 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2700 struct backref_node *upper; 2701 struct backref_edge *edge; 2702 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2703 struct btrfs_root *root; 2704 struct extent_buffer *eb; 2705 u32 blocksize; 2706 u64 bytenr; 2707 u64 generation; 2708 int slot; 2709 int ret; 2710 int err = 0; 2711 2712 BUG_ON(lowest && node->eb); 2713 2714 path->lowest_level = node->level + 1; 2715 rc->backref_cache.path[node->level] = node; 2716 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2717 cond_resched(); 2718 2719 upper = edge->node[UPPER]; 2720 root = select_reloc_root(trans, rc, upper, edges); 2721 BUG_ON(!root); 2722 2723 if (upper->eb && !upper->locked) { 2724 if (!lowest) { 2725 ret = btrfs_bin_search(upper->eb, key, 2726 upper->level, &slot); 2727 BUG_ON(ret); 2728 bytenr = btrfs_node_blockptr(upper->eb, slot); 2729 if (node->eb->start == bytenr) 2730 goto next; 2731 } 2732 drop_node_buffer(upper); 2733 } 2734 2735 if (!upper->eb) { 2736 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2737 if (ret) { 2738 if (ret < 0) 2739 err = ret; 2740 else 2741 err = -ENOENT; 2742 2743 btrfs_release_path(path); 2744 break; 2745 } 2746 2747 if (!upper->eb) { 2748 upper->eb = path->nodes[upper->level]; 2749 path->nodes[upper->level] = NULL; 2750 } else { 2751 BUG_ON(upper->eb != path->nodes[upper->level]); 2752 } 2753 2754 upper->locked = 1; 2755 path->locks[upper->level] = 0; 2756 2757 slot = path->slots[upper->level]; 2758 btrfs_release_path(path); 2759 } else { 2760 ret = btrfs_bin_search(upper->eb, key, upper->level, 2761 &slot); 2762 BUG_ON(ret); 2763 } 2764 2765 bytenr = btrfs_node_blockptr(upper->eb, slot); 2766 if (lowest) { 2767 if (bytenr != node->bytenr) { 2768 btrfs_err(root->fs_info, 2769 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", 2770 bytenr, node->bytenr, slot, 2771 upper->eb->start); 2772 err = -EIO; 2773 goto next; 2774 } 2775 } else { 2776 if (node->eb->start == bytenr) 2777 goto next; 2778 } 2779 2780 blocksize = root->fs_info->nodesize; 2781 generation = btrfs_node_ptr_generation(upper->eb, slot); 2782 eb = read_tree_block(fs_info, bytenr, generation); 2783 if (IS_ERR(eb)) { 2784 err = PTR_ERR(eb); 2785 goto next; 2786 } else if (!extent_buffer_uptodate(eb)) { 2787 free_extent_buffer(eb); 2788 err = -EIO; 2789 goto next; 2790 } 2791 btrfs_tree_lock(eb); 2792 btrfs_set_lock_blocking(eb); 2793 2794 if (!node->eb) { 2795 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2796 slot, &eb); 2797 btrfs_tree_unlock(eb); 2798 free_extent_buffer(eb); 2799 if (ret < 0) { 2800 err = ret; 2801 goto next; 2802 } 2803 BUG_ON(node->eb != eb); 2804 } else { 2805 btrfs_set_node_blockptr(upper->eb, slot, 2806 node->eb->start); 2807
btrfs_set_node_ptr_generation(upper->eb, slot, 2808 trans->transid); 2809 btrfs_mark_buffer_dirty(upper->eb); 2810 2811 ret = btrfs_inc_extent_ref(trans, root->fs_info, 2812 node->eb->start, blocksize, 2813 upper->eb->start, 2814 btrfs_header_owner(upper->eb), 2815 node->level, 0); 2816 BUG_ON(ret); 2817 2818 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); 2819 BUG_ON(ret); 2820 } 2821 next: 2822 if (!upper->pending) 2823 drop_node_buffer(upper); 2824 else 2825 unlock_node_buffer(upper); 2826 if (err) 2827 break; 2828 } 2829 2830 if (!err && node->pending) { 2831 drop_node_buffer(node); 2832 list_move_tail(&node->list, &rc->backref_cache.changed); 2833 node->pending = 0; 2834 } 2835 2836 path->lowest_level = 0; 2837 BUG_ON(err == -ENOSPC); 2838 return err; 2839 } 2840 2841 static int link_to_upper(struct btrfs_trans_handle *trans, 2842 struct reloc_control *rc, 2843 struct backref_node *node, 2844 struct btrfs_path *path) 2845 { 2846 struct btrfs_key key; 2847 2848 btrfs_node_key_to_cpu(node->eb, &key, 0); 2849 return do_relocation(trans, rc, node, &key, path, 0); 2850 } 2851 2852 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2853 struct reloc_control *rc, 2854 struct btrfs_path *path, int err) 2855 { 2856 LIST_HEAD(list); 2857 struct backref_cache *cache = &rc->backref_cache; 2858 struct backref_node *node; 2859 int level; 2860 int ret; 2861 2862 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2863 while (!list_empty(&cache->pending[level])) { 2864 node = list_entry(cache->pending[level].next, 2865 struct backref_node, list); 2866 list_move_tail(&node->list, &list); 2867 BUG_ON(!node->pending); 2868 2869 if (!err) { 2870 ret = link_to_upper(trans, rc, node, path); 2871 if (ret < 0) 2872 err = ret; 2873 } 2874 } 2875 list_splice_init(&list, &cache->pending[level]); 2876 } 2877 return err; 2878 } 2879 2880 static void mark_block_processed(struct reloc_control *rc, 2881 u64 bytenr, u32 blocksize) 2882 { 2883 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, 2884 EXTENT_DIRTY); 2885 } 2886 2887 static void __mark_block_processed(struct reloc_control *rc, 2888 struct backref_node *node) 2889 { 2890 u32 blocksize; 2891 if (node->level == 0 || 2892 in_block_group(node->bytenr, rc->block_group)) { 2893 blocksize = rc->extent_root->fs_info->nodesize; 2894 mark_block_processed(rc, node->bytenr, blocksize); 2895 } 2896 node->processed = 1; 2897 } 2898 2899 /* 2900 * mark a block and all blocks that directly/indirectly reference the 2901 * block as processed.
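 *
 * The walk below mirrors calcu_metadata_size(): follow the first
 * 'upper' edge from each node, stash the remaining edges in edges[],
 * and revisit them later via walk_down_backref().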
2902 */ 2903 static void update_processed_blocks(struct reloc_control *rc, 2904 struct backref_node *node) 2905 { 2906 struct backref_node *next = node; 2907 struct backref_edge *edge; 2908 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2909 int index = 0; 2910 2911 while (next) { 2912 cond_resched(); 2913 while (1) { 2914 if (next->processed) 2915 break; 2916 2917 __mark_block_processed(rc, next); 2918 2919 if (list_empty(&next->upper)) 2920 break; 2921 2922 edge = list_entry(next->upper.next, 2923 struct backref_edge, list[LOWER]); 2924 edges[index++] = edge; 2925 next = edge->node[UPPER]; 2926 } 2927 next = walk_down_backref(edges, &index); 2928 } 2929 } 2930 2931 static int tree_block_processed(u64 bytenr, struct reloc_control *rc) 2932 { 2933 u32 blocksize = rc->extent_root->fs_info->nodesize; 2934 2935 if (test_range_bit(&rc->processed_blocks, bytenr, 2936 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2937 return 1; 2938 return 0; 2939 } 2940 2941 static int get_tree_block_key(struct btrfs_fs_info *fs_info, 2942 struct tree_block *block) 2943 { 2944 struct extent_buffer *eb; 2945 2946 BUG_ON(block->key_ready); 2947 eb = read_tree_block(fs_info, block->bytenr, block->key.offset); 2948 if (IS_ERR(eb)) { 2949 return PTR_ERR(eb); 2950 } else if (!extent_buffer_uptodate(eb)) { 2951 free_extent_buffer(eb); 2952 return -EIO; 2953 } 2954 WARN_ON(btrfs_header_level(eb) != block->level); 2955 if (block->level == 0) 2956 btrfs_item_key_to_cpu(eb, &block->key, 0); 2957 else 2958 btrfs_node_key_to_cpu(eb, &block->key, 0); 2959 free_extent_buffer(eb); 2960 block->key_ready = 1; 2961 return 0; 2962 } 2963 2964 /* 2965 * helper function to relocate a tree block 2966 */ 2967 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2968 struct reloc_control *rc, 2969 struct backref_node *node, 2970 struct btrfs_key *key, 2971 struct btrfs_path *path) 2972 { 2973 struct btrfs_root *root; 2974 int ret = 0; 2975 2976 if (!node) 2977 return 0; 2978 2979 BUG_ON(node->processed); 2980 root = select_one_root(node); 2981 if (root == ERR_PTR(-ENOENT)) { 2982 update_processed_blocks(rc, node); 2983 goto out; 2984 } 2985 2986 if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 2987 ret = reserve_metadata_space(trans, rc, node); 2988 if (ret) 2989 goto out; 2990 } 2991 2992 if (root) { 2993 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { 2994 BUG_ON(node->new_bytenr); 2995 BUG_ON(!list_empty(&node->list)); 2996 btrfs_record_root_in_trans(trans, root); 2997 root = root->reloc_root; 2998 node->new_bytenr = root->node->start; 2999 node->root = root; 3000 list_add_tail(&node->list, &rc->backref_cache.changed); 3001 } else { 3002 path->lowest_level = node->level; 3003 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 3004 btrfs_release_path(path); 3005 if (ret > 0) 3006 ret = 0; 3007 } 3008 if (!ret) 3009 update_processed_blocks(rc, node); 3010 } else { 3011 ret = do_relocation(trans, rc, node, key, path, 1); 3012 } 3013 out: 3014 if (ret || node->level == 0 || node->cowonly) 3015 remove_backref_node(&rc->backref_cache, node); 3016 return ret; 3017 } 3018 3019 /* 3020 * relocate a list of blocks 3021 */ 3022 static noinline_for_stack 3023 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 3024 struct reloc_control *rc, struct rb_root *blocks) 3025 { 3026 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3027 struct backref_node *node; 3028 struct btrfs_path *path; 3029 struct tree_block *block; 3030 struct rb_node *rb_node; 3031 int ret; 3032 int err = 0; 3033 3034 path = 
btrfs_alloc_path(); 3035 if (!path) { 3036 err = -ENOMEM; 3037 goto out_free_blocks; 3038 } 3039 3040 rb_node = rb_first(blocks); 3041 while (rb_node) { 3042 block = rb_entry(rb_node, struct tree_block, rb_node); 3043 if (!block->key_ready) 3044 readahead_tree_block(fs_info, block->bytenr); 3045 rb_node = rb_next(rb_node); 3046 } 3047 3048 rb_node = rb_first(blocks); 3049 while (rb_node) { 3050 block = rb_entry(rb_node, struct tree_block, rb_node); 3051 if (!block->key_ready) { 3052 err = get_tree_block_key(fs_info, block); 3053 if (err) 3054 goto out_free_path; 3055 } 3056 rb_node = rb_next(rb_node); 3057 } 3058 3059 rb_node = rb_first(blocks); 3060 while (rb_node) { 3061 block = rb_entry(rb_node, struct tree_block, rb_node); 3062 3063 node = build_backref_tree(rc, &block->key, 3064 block->level, block->bytenr); 3065 if (IS_ERR(node)) { 3066 err = PTR_ERR(node); 3067 goto out; 3068 } 3069 3070 ret = relocate_tree_block(trans, rc, node, &block->key, 3071 path); 3072 if (ret < 0) { 3073 if (ret != -EAGAIN || rb_node == rb_first(blocks)) 3074 err = ret; 3075 goto out; 3076 } 3077 rb_node = rb_next(rb_node); 3078 } 3079 out: 3080 err = finish_pending_nodes(trans, rc, path, err); 3081 3082 out_free_path: 3083 btrfs_free_path(path); 3084 out_free_blocks: 3085 free_block_list(blocks); 3086 return err; 3087 } 3088 3089 static noinline_for_stack 3090 int prealloc_file_extent_cluster(struct inode *inode, 3091 struct file_extent_cluster *cluster) 3092 { 3093 u64 alloc_hint = 0; 3094 u64 start; 3095 u64 end; 3096 u64 offset = BTRFS_I(inode)->index_cnt; 3097 u64 num_bytes; 3098 int nr = 0; 3099 int ret = 0; 3100 u64 prealloc_start = cluster->start - offset; 3101 u64 prealloc_end = cluster->end - offset; 3102 u64 cur_offset; 3103 struct extent_changeset *data_reserved = NULL; 3104 3105 BUG_ON(cluster->start != cluster->boundary[0]); 3106 inode_lock(inode); 3107 3108 ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start, 3109 prealloc_end + 1 - prealloc_start); 3110 if (ret) 3111 goto out; 3112 3113 cur_offset = prealloc_start; 3114 while (nr < cluster->nr) { 3115 start = cluster->boundary[nr] - offset; 3116 if (nr + 1 < cluster->nr) 3117 end = cluster->boundary[nr + 1] - 1 - offset; 3118 else 3119 end = cluster->end - offset; 3120 3121 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3122 num_bytes = end + 1 - start; 3123 if (cur_offset < start) 3124 btrfs_free_reserved_data_space(inode, data_reserved, 3125 cur_offset, start - cur_offset); 3126 ret = btrfs_prealloc_file_range(inode, 0, start, 3127 num_bytes, num_bytes, 3128 end + 1, &alloc_hint); 3129 cur_offset = end + 1; 3130 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3131 if (ret) 3132 break; 3133 nr++; 3134 } 3135 if (cur_offset < prealloc_end) 3136 btrfs_free_reserved_data_space(inode, data_reserved, 3137 cur_offset, prealloc_end + 1 - cur_offset); 3138 out: 3139 inode_unlock(inode); 3140 extent_changeset_free(data_reserved); 3141 return ret; 3142 } 3143 3144 static noinline_for_stack 3145 int setup_extent_mapping(struct inode *inode, u64 start, u64 end, 3146 u64 block_start) 3147 { 3148 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3149 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 3150 struct extent_map *em; 3151 int ret = 0; 3152 3153 em = alloc_extent_map(); 3154 if (!em) 3155 return -ENOMEM; 3156 3157 em->start = start; 3158 em->len = end + 1 - start; 3159 em->block_len = em->len; 3160 em->block_start = block_start; 3161 em->bdev = fs_info->fs_devices->latest_bdev; 3162 
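	/*
	 * Pin the new mapping and insert it; the loop below drops any
	 * overlapping cached mappings until add_extent_mapping()
	 * succeeds.
	 */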
set_bit(EXTENT_FLAG_PINNED, &em->flags); 3163 3164 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3165 while (1) { 3166 write_lock(&em_tree->lock); 3167 ret = add_extent_mapping(em_tree, em, 0); 3168 write_unlock(&em_tree->lock); 3169 if (ret != -EEXIST) { 3170 free_extent_map(em); 3171 break; 3172 } 3173 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); 3174 } 3175 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3176 return ret; 3177 } 3178 3179 static int relocate_file_extent_cluster(struct inode *inode, 3180 struct file_extent_cluster *cluster) 3181 { 3182 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3183 u64 page_start; 3184 u64 page_end; 3185 u64 offset = BTRFS_I(inode)->index_cnt; 3186 unsigned long index; 3187 unsigned long last_index; 3188 struct page *page; 3189 struct file_ra_state *ra; 3190 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 3191 int nr = 0; 3192 int ret = 0; 3193 3194 if (!cluster->nr) 3195 return 0; 3196 3197 ra = kzalloc(sizeof(*ra), GFP_NOFS); 3198 if (!ra) 3199 return -ENOMEM; 3200 3201 ret = prealloc_file_extent_cluster(inode, cluster); 3202 if (ret) 3203 goto out; 3204 3205 file_ra_state_init(ra, inode->i_mapping); 3206 3207 ret = setup_extent_mapping(inode, cluster->start - offset, 3208 cluster->end - offset, cluster->start); 3209 if (ret) 3210 goto out; 3211 3212 index = (cluster->start - offset) >> PAGE_SHIFT; 3213 last_index = (cluster->end - offset) >> PAGE_SHIFT; 3214 while (index <= last_index) { 3215 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), 3216 PAGE_SIZE); 3217 if (ret) 3218 goto out; 3219 3220 page = find_lock_page(inode->i_mapping, index); 3221 if (!page) { 3222 page_cache_sync_readahead(inode->i_mapping, 3223 ra, NULL, index, 3224 last_index + 1 - index); 3225 page = find_or_create_page(inode->i_mapping, index, 3226 mask); 3227 if (!page) { 3228 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3229 PAGE_SIZE); 3230 ret = -ENOMEM; 3231 goto out; 3232 } 3233 } 3234 3235 if (PageReadahead(page)) { 3236 page_cache_async_readahead(inode->i_mapping, 3237 ra, NULL, page, index, 3238 last_index + 1 - index); 3239 } 3240 3241 if (!PageUptodate(page)) { 3242 btrfs_readpage(NULL, page); 3243 lock_page(page); 3244 if (!PageUptodate(page)) { 3245 unlock_page(page); 3246 put_page(page); 3247 btrfs_delalloc_release_metadata(BTRFS_I(inode), 3248 PAGE_SIZE); 3249 ret = -EIO; 3250 goto out; 3251 } 3252 } 3253 3254 page_start = page_offset(page); 3255 page_end = page_start + PAGE_SIZE - 1; 3256 3257 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); 3258 3259 set_page_extent_mapped(page); 3260 3261 if (nr < cluster->nr && 3262 page_start + offset == cluster->boundary[nr]) { 3263 set_extent_bits(&BTRFS_I(inode)->io_tree, 3264 page_start, page_end, 3265 EXTENT_BOUNDARY); 3266 nr++; 3267 } 3268 3269 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0); 3270 set_page_dirty(page); 3271 3272 unlock_extent(&BTRFS_I(inode)->io_tree, 3273 page_start, page_end); 3274 unlock_page(page); 3275 put_page(page); 3276 3277 index++; 3278 balance_dirty_pages_ratelimited(inode->i_mapping); 3279 btrfs_throttle(fs_info); 3280 } 3281 WARN_ON(nr != cluster->nr); 3282 out: 3283 kfree(ra); 3284 return ret; 3285 } 3286 3287 static noinline_for_stack 3288 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3289 struct file_extent_cluster *cluster) 3290 { 3291 int ret; 3292 3293 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3294 ret = relocate_file_extent_cluster(inode, cluster); 3295 
if (ret) 3296 return ret; 3297 cluster->nr = 0; 3298 } 3299 3300 if (!cluster->nr) 3301 cluster->start = extent_key->objectid; 3302 else 3303 BUG_ON(cluster->nr >= MAX_EXTENTS); 3304 cluster->end = extent_key->objectid + extent_key->offset - 1; 3305 cluster->boundary[cluster->nr] = extent_key->objectid; 3306 cluster->nr++; 3307 3308 if (cluster->nr >= MAX_EXTENTS) { 3309 ret = relocate_file_extent_cluster(inode, cluster); 3310 if (ret) 3311 return ret; 3312 cluster->nr = 0; 3313 } 3314 return 0; 3315 } 3316 3317 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3318 static int get_ref_objectid_v0(struct reloc_control *rc, 3319 struct btrfs_path *path, 3320 struct btrfs_key *extent_key, 3321 u64 *ref_objectid, int *path_change) 3322 { 3323 struct btrfs_key key; 3324 struct extent_buffer *leaf; 3325 struct btrfs_extent_ref_v0 *ref0; 3326 int ret; 3327 int slot; 3328 3329 leaf = path->nodes[0]; 3330 slot = path->slots[0]; 3331 while (1) { 3332 if (slot >= btrfs_header_nritems(leaf)) { 3333 ret = btrfs_next_leaf(rc->extent_root, path); 3334 if (ret < 0) 3335 return ret; 3336 BUG_ON(ret > 0); 3337 leaf = path->nodes[0]; 3338 slot = path->slots[0]; 3339 if (path_change) 3340 *path_change = 1; 3341 } 3342 btrfs_item_key_to_cpu(leaf, &key, slot); 3343 if (key.objectid != extent_key->objectid) 3344 return -ENOENT; 3345 3346 if (key.type != BTRFS_EXTENT_REF_V0_KEY) { 3347 slot++; 3348 continue; 3349 } 3350 ref0 = btrfs_item_ptr(leaf, slot, 3351 struct btrfs_extent_ref_v0); 3352 *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0); 3353 break; 3354 } 3355 return 0; 3356 } 3357 #endif 3358 3359 /* 3360 * helper to add a tree block to the list. 3361 * the major work is getting the generation and level of the block 3362 */ 3363 static int add_tree_block(struct reloc_control *rc, 3364 struct btrfs_key *extent_key, 3365 struct btrfs_path *path, 3366 struct rb_root *blocks) 3367 { 3368 struct extent_buffer *eb; 3369 struct btrfs_extent_item *ei; 3370 struct btrfs_tree_block_info *bi; 3371 struct tree_block *block; 3372 struct rb_node *rb_node; 3373 u32 item_size; 3374 int level = -1; 3375 u64 generation; 3376 3377 eb = path->nodes[0]; 3378 item_size = btrfs_item_size_nr(eb, path->slots[0]); 3379 3380 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3381 item_size >= sizeof(*ei) + sizeof(*bi)) { 3382 ei = btrfs_item_ptr(eb, path->slots[0], 3383 struct btrfs_extent_item); 3384 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3385 bi = (struct btrfs_tree_block_info *)(ei + 1); 3386 level = btrfs_tree_block_level(eb, bi); 3387 } else { 3388 level = (int)extent_key->offset; 3389 } 3390 generation = btrfs_extent_generation(eb, ei); 3391 } else { 3392 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3393 u64 ref_owner; 3394 int ret; 3395 3396 BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 3397 ret = get_ref_objectid_v0(rc, path, extent_key, 3398 &ref_owner, NULL); 3399 if (ret < 0) 3400 return ret; 3401 BUG_ON(ref_owner >= BTRFS_MAX_LEVEL); 3402 level = (int)ref_owner; 3403 /* FIXME: get real generation */ 3404 generation = 0; 3405 #else 3406 BUG(); 3407 #endif 3408 } 3409 3410 btrfs_release_path(path); 3411 3412 BUG_ON(level == -1); 3413 3414 block = kmalloc(sizeof(*block), GFP_NOFS); 3415 if (!block) 3416 return -ENOMEM; 3417 3418 block->bytenr = extent_key->objectid; 3419 block->key.objectid = rc->extent_root->fs_info->nodesize; 3420 block->key.offset = generation; 3421 block->level = level; 3422 block->key_ready = 0; 3423 3424 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3425 if (rb_node) 3426 
backref_tree_panic(rb_node, -EEXIST, block->bytenr); 3427 3428 return 0; 3429 } 3430 3431 /* 3432 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3433 */ 3434 static int __add_tree_block(struct reloc_control *rc, 3435 u64 bytenr, u32 blocksize, 3436 struct rb_root *blocks) 3437 { 3438 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3439 struct btrfs_path *path; 3440 struct btrfs_key key; 3441 int ret; 3442 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 3443 3444 if (tree_block_processed(bytenr, rc)) 3445 return 0; 3446 3447 if (tree_search(blocks, bytenr)) 3448 return 0; 3449 3450 path = btrfs_alloc_path(); 3451 if (!path) 3452 return -ENOMEM; 3453 again: 3454 key.objectid = bytenr; 3455 if (skinny) { 3456 key.type = BTRFS_METADATA_ITEM_KEY; 3457 key.offset = (u64)-1; 3458 } else { 3459 key.type = BTRFS_EXTENT_ITEM_KEY; 3460 key.offset = blocksize; 3461 } 3462 3463 path->search_commit_root = 1; 3464 path->skip_locking = 1; 3465 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3466 if (ret < 0) 3467 goto out; 3468 3469 if (ret > 0 && skinny) { 3470 if (path->slots[0]) { 3471 path->slots[0]--; 3472 btrfs_item_key_to_cpu(path->nodes[0], &key, 3473 path->slots[0]); 3474 if (key.objectid == bytenr && 3475 (key.type == BTRFS_METADATA_ITEM_KEY || 3476 (key.type == BTRFS_EXTENT_ITEM_KEY && 3477 key.offset == blocksize))) 3478 ret = 0; 3479 } 3480 3481 if (ret) { 3482 skinny = false; 3483 btrfs_release_path(path); 3484 goto again; 3485 } 3486 } 3487 if (ret) { 3488 ASSERT(ret == 1); 3489 btrfs_print_leaf(path->nodes[0]); 3490 btrfs_err(fs_info, 3491 "tree block extent item (%llu) is not found in extent tree", 3492 bytenr); 3493 WARN_ON(1); 3494 ret = -EINVAL; 3495 goto out; 3496 } 3497 3498 ret = add_tree_block(rc, &key, path, blocks); 3499 out: 3500 btrfs_free_path(path); 3501 return ret; 3502 } 3503 3504 /* 3505 * helper to check if the block uses full backrefs for the pointers in it 3506 */ 3507 static int block_use_full_backref(struct reloc_control *rc, 3508 struct extent_buffer *eb) 3509 { 3510 u64 flags; 3511 int ret; 3512 3513 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) || 3514 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) 3515 return 1; 3516 3517 ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info, 3518 eb->start, btrfs_header_level(eb), 1, 3519 NULL, &flags); 3520 BUG_ON(ret); 3521 3522 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) 3523 ret = 1; 3524 else 3525 ret = 0; 3526 return ret; 3527 } 3528 3529 static int delete_block_group_cache(struct btrfs_fs_info *fs_info, 3530 struct btrfs_block_group_cache *block_group, 3531 struct inode *inode, 3532 u64 ino) 3533 { 3534 struct btrfs_key key; 3535 struct btrfs_root *root = fs_info->tree_root; 3536 struct btrfs_trans_handle *trans; 3537 int ret = 0; 3538 3539 if (inode) 3540 goto truncate; 3541 3542 key.objectid = ino; 3543 key.type = BTRFS_INODE_ITEM_KEY; 3544 key.offset = 0; 3545 3546 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 3547 if (IS_ERR(inode) || is_bad_inode(inode)) { 3548 if (!IS_ERR(inode)) 3549 iput(inode); 3550 return -ENOENT; 3551 } 3552 3553 truncate: 3554 ret = btrfs_check_trunc_cache_free_space(fs_info, 3555 &fs_info->global_block_rsv); 3556 if (ret) 3557 goto out; 3558 3559 trans = btrfs_join_transaction(root); 3560 if (IS_ERR(trans)) { 3561 ret = PTR_ERR(trans); 3562 goto out; 3563 } 3564 3565 ret = btrfs_truncate_free_space_cache(trans, block_group, inode); 3566 3567 btrfs_end_transaction(trans); 3568
btrfs_btree_balance_dirty(fs_info); 3569 out: 3570 iput(inode); 3571 return ret; 3572 } 3573 3574 /* 3575 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY 3576 * this function scans the fs tree to find blocks that reference the data extent 3577 */ 3578 static int find_data_references(struct reloc_control *rc, 3579 struct btrfs_key *extent_key, 3580 struct extent_buffer *leaf, 3581 struct btrfs_extent_data_ref *ref, 3582 struct rb_root *blocks) 3583 { 3584 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3585 struct btrfs_path *path; 3586 struct tree_block *block; 3587 struct btrfs_root *root; 3588 struct btrfs_file_extent_item *fi; 3589 struct rb_node *rb_node; 3590 struct btrfs_key key; 3591 u64 ref_root; 3592 u64 ref_objectid; 3593 u64 ref_offset; 3594 u32 ref_count; 3595 u32 nritems; 3596 int err = 0; 3597 int added = 0; 3598 int counted; 3599 int ret; 3600 3601 ref_root = btrfs_extent_data_ref_root(leaf, ref); 3602 ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref); 3603 ref_offset = btrfs_extent_data_ref_offset(leaf, ref); 3604 ref_count = btrfs_extent_data_ref_count(leaf, ref); 3605 3606 /* 3607 * This is an extent belonging to the free space cache, let's just delete 3608 * it and redo the search. 3609 */ 3610 if (ref_root == BTRFS_ROOT_TREE_OBJECTID) { 3611 ret = delete_block_group_cache(fs_info, rc->block_group, 3612 NULL, ref_objectid); 3613 if (ret != -ENOENT) 3614 return ret; 3615 ret = 0; 3616 } 3617 3618 path = btrfs_alloc_path(); 3619 if (!path) 3620 return -ENOMEM; 3621 path->reada = READA_FORWARD; 3622 3623 root = read_fs_root(fs_info, ref_root); 3624 if (IS_ERR(root)) { 3625 err = PTR_ERR(root); 3626 goto out; 3627 } 3628 3629 key.objectid = ref_objectid; 3630 key.type = BTRFS_EXTENT_DATA_KEY; 3631 if (ref_offset > ((u64)-1 << 32)) 3632 key.offset = 0; 3633 else 3634 key.offset = ref_offset; 3635 3636 path->search_commit_root = 1; 3637 path->skip_locking = 1; 3638 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3639 if (ret < 0) { 3640 err = ret; 3641 goto out; 3642 } 3643 3644 leaf = path->nodes[0]; 3645 nritems = btrfs_header_nritems(leaf); 3646 /* 3647 * the references in tree blocks that use full backrefs 3648 * are not counted 3649 */ 3650 if (block_use_full_backref(rc, leaf)) 3651 counted = 0; 3652 else 3653 counted = 1; 3654 rb_node = tree_search(blocks, leaf->start); 3655 if (rb_node) { 3656 if (counted) 3657 added = 1; 3658 else 3659 path->slots[0] = nritems; 3660 } 3661 3662 while (ref_count > 0) { 3663 while (path->slots[0] >= nritems) { 3664 ret = btrfs_next_leaf(root, path); 3665 if (ret < 0) { 3666 err = ret; 3667 goto out; 3668 } 3669 if (WARN_ON(ret > 0)) 3670 goto out; 3671 3672 leaf = path->nodes[0]; 3673 nritems = btrfs_header_nritems(leaf); 3674 added = 0; 3675 3676 if (block_use_full_backref(rc, leaf)) 3677 counted = 0; 3678 else 3679 counted = 1; 3680 rb_node = tree_search(blocks, leaf->start); 3681 if (rb_node) { 3682 if (counted) 3683 added = 1; 3684 else 3685 path->slots[0] = nritems; 3686 } 3687 } 3688 3689 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3690 if (WARN_ON(key.objectid != ref_objectid || 3691 key.type != BTRFS_EXTENT_DATA_KEY)) 3692 break; 3693 3694 fi = btrfs_item_ptr(leaf, path->slots[0], 3695 struct btrfs_file_extent_item); 3696 3697 if (btrfs_file_extent_type(leaf, fi) == 3698 BTRFS_FILE_EXTENT_INLINE) 3699 goto next; 3700 3701 if (btrfs_file_extent_disk_bytenr(leaf, fi) != 3702 extent_key->objectid) 3703 goto next; 3704 3705 key.offset -= btrfs_file_extent_offset(leaf, fi); 3706 if
(key.offset != ref_offset) 3707 goto next; 3708 3709 if (counted) 3710 ref_count--; 3711 if (added) 3712 goto next; 3713 3714 if (!tree_block_processed(leaf->start, rc)) { 3715 block = kmalloc(sizeof(*block), GFP_NOFS); 3716 if (!block) { 3717 err = -ENOMEM; 3718 break; 3719 } 3720 block->bytenr = leaf->start; 3721 btrfs_item_key_to_cpu(leaf, &block->key, 0); 3722 block->level = 0; 3723 block->key_ready = 1; 3724 rb_node = tree_insert(blocks, block->bytenr, 3725 &block->rb_node); 3726 if (rb_node) 3727 backref_tree_panic(rb_node, -EEXIST, 3728 block->bytenr); 3729 } 3730 if (counted) 3731 added = 1; 3732 else 3733 path->slots[0] = nritems; 3734 next: 3735 path->slots[0]++; 3736 3737 } 3738 out: 3739 btrfs_free_path(path); 3740 return err; 3741 } 3742 3743 /* 3744 * helper to find all tree blocks that reference a given data extent 3745 */ 3746 static noinline_for_stack 3747 int add_data_references(struct reloc_control *rc, 3748 struct btrfs_key *extent_key, 3749 struct btrfs_path *path, 3750 struct rb_root *blocks) 3751 { 3752 struct btrfs_key key; 3753 struct extent_buffer *eb; 3754 struct btrfs_extent_data_ref *dref; 3755 struct btrfs_extent_inline_ref *iref; 3756 unsigned long ptr; 3757 unsigned long end; 3758 u32 blocksize = rc->extent_root->fs_info->nodesize; 3759 int ret = 0; 3760 int err = 0; 3761 3762 eb = path->nodes[0]; 3763 ptr = btrfs_item_ptr_offset(eb, path->slots[0]); 3764 end = ptr + btrfs_item_size_nr(eb, path->slots[0]); 3765 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3766 if (ptr + sizeof(struct btrfs_extent_item_v0) == end) 3767 ptr = end; 3768 else 3769 #endif 3770 ptr += sizeof(struct btrfs_extent_item); 3771 3772 while (ptr < end) { 3773 iref = (struct btrfs_extent_inline_ref *)ptr; 3774 key.type = btrfs_get_extent_inline_ref_type(eb, iref, 3775 BTRFS_REF_TYPE_DATA); 3776 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3777 key.offset = btrfs_extent_inline_ref_offset(eb, iref); 3778 ret = __add_tree_block(rc, key.offset, blocksize, 3779 blocks); 3780 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3781 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 3782 ret = find_data_references(rc, extent_key, 3783 eb, dref, blocks); 3784 } else { 3785 ret = -EINVAL; 3786 btrfs_err(rc->extent_root->fs_info, 3787 "extent %llu slot %d has an invalid inline ref type", 3788 eb->start, path->slots[0]); 3789 } 3790 if (ret) { 3791 err = ret; 3792 goto out; 3793 } 3794 ptr += btrfs_extent_inline_ref_size(key.type); 3795 } 3796 WARN_ON(ptr > end); 3797 3798 while (1) { 3799 cond_resched(); 3800 eb = path->nodes[0]; 3801 if (path->slots[0] >= btrfs_header_nritems(eb)) { 3802 ret = btrfs_next_leaf(rc->extent_root, path); 3803 if (ret < 0) { 3804 err = ret; 3805 break; 3806 } 3807 if (ret > 0) 3808 break; 3809 eb = path->nodes[0]; 3810 } 3811 3812 btrfs_item_key_to_cpu(eb, &key, path->slots[0]); 3813 if (key.objectid != extent_key->objectid) 3814 break; 3815 3816 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3817 if (key.type == BTRFS_SHARED_DATA_REF_KEY || 3818 key.type == BTRFS_EXTENT_REF_V0_KEY) { 3819 #else 3820 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); 3821 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3822 #endif 3823 ret = __add_tree_block(rc, key.offset, blocksize, 3824 blocks); 3825 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3826 dref = btrfs_item_ptr(eb, path->slots[0], 3827 struct btrfs_extent_data_ref); 3828 ret = find_data_references(rc, extent_key, 3829 eb, dref, blocks); 3830 } else { 3831 ret = 0; 3832 } 3833 if (ret) { 3834 err = ret; 3835 break; 3836 } 3837 
path->slots[0]++; 3838 } 3839 out: 3840 btrfs_release_path(path); 3841 if (err) 3842 free_block_list(blocks); 3843 return err; 3844 } 3845 3846 /* 3847 * helper to find next unprocessed extent 3848 */ 3849 static noinline_for_stack 3850 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, 3851 struct btrfs_key *extent_key) 3852 { 3853 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3854 struct btrfs_key key; 3855 struct extent_buffer *leaf; 3856 u64 start, end, last; 3857 int ret; 3858 3859 last = rc->block_group->key.objectid + rc->block_group->key.offset; 3860 while (1) { 3861 cond_resched(); 3862 if (rc->search_start >= last) { 3863 ret = 1; 3864 break; 3865 } 3866 3867 key.objectid = rc->search_start; 3868 key.type = BTRFS_EXTENT_ITEM_KEY; 3869 key.offset = 0; 3870 3871 path->search_commit_root = 1; 3872 path->skip_locking = 1; 3873 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3874 0, 0); 3875 if (ret < 0) 3876 break; 3877 next: 3878 leaf = path->nodes[0]; 3879 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3880 ret = btrfs_next_leaf(rc->extent_root, path); 3881 if (ret != 0) 3882 break; 3883 leaf = path->nodes[0]; 3884 } 3885 3886 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3887 if (key.objectid >= last) { 3888 ret = 1; 3889 break; 3890 } 3891 3892 if (key.type != BTRFS_EXTENT_ITEM_KEY && 3893 key.type != BTRFS_METADATA_ITEM_KEY) { 3894 path->slots[0]++; 3895 goto next; 3896 } 3897 3898 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3899 key.objectid + key.offset <= rc->search_start) { 3900 path->slots[0]++; 3901 goto next; 3902 } 3903 3904 if (key.type == BTRFS_METADATA_ITEM_KEY && 3905 key.objectid + fs_info->nodesize <= 3906 rc->search_start) { 3907 path->slots[0]++; 3908 goto next; 3909 } 3910 3911 ret = find_first_extent_bit(&rc->processed_blocks, 3912 key.objectid, &start, &end, 3913 EXTENT_DIRTY, NULL); 3914 3915 if (ret == 0 && start <= key.objectid) { 3916 btrfs_release_path(path); 3917 rc->search_start = end + 1; 3918 } else { 3919 if (key.type == BTRFS_EXTENT_ITEM_KEY) 3920 rc->search_start = key.objectid + key.offset; 3921 else 3922 rc->search_start = key.objectid + 3923 fs_info->nodesize; 3924 memcpy(extent_key, &key, sizeof(key)); 3925 return 0; 3926 } 3927 } 3928 btrfs_release_path(path); 3929 return ret; 3930 } 3931 3932 static void set_reloc_control(struct reloc_control *rc) 3933 { 3934 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3935 3936 mutex_lock(&fs_info->reloc_mutex); 3937 fs_info->reloc_ctl = rc; 3938 mutex_unlock(&fs_info->reloc_mutex); 3939 } 3940 3941 static void unset_reloc_control(struct reloc_control *rc) 3942 { 3943 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3944 3945 mutex_lock(&fs_info->reloc_mutex); 3946 fs_info->reloc_ctl = NULL; 3947 mutex_unlock(&fs_info->reloc_mutex); 3948 } 3949 3950 static int check_extent_flags(u64 flags) 3951 { 3952 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3953 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3954 return 1; 3955 if (!(flags & BTRFS_EXTENT_FLAG_DATA) && 3956 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3957 return 1; 3958 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3959 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 3960 return 1; 3961 return 0; 3962 } 3963 3964 static noinline_for_stack 3965 int prepare_to_relocate(struct reloc_control *rc) 3966 { 3967 struct btrfs_trans_handle *trans; 3968 int ret; 3969 3970 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, 3971 BTRFS_BLOCK_RSV_TEMP); 3972 if (!rc->block_rsv) 3973 return -ENOMEM; 3974 3975 
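	/*
	 * Reset the per-pass relocation state; rc->block_rsv is sized
	 * below for RELOCATION_RESERVED_NODES tree blocks and is
	 * refilled before each pass over the block group.
	 */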
memset(&rc->cluster, 0, sizeof(rc->cluster)); 3976 rc->search_start = rc->block_group->key.objectid; 3977 rc->extents_found = 0; 3978 rc->nodes_relocated = 0; 3979 rc->merging_rsv_size = 0; 3980 rc->reserved_bytes = 0; 3981 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * 3982 RELOCATION_RESERVED_NODES; 3983 ret = btrfs_block_rsv_refill(rc->extent_root, 3984 rc->block_rsv, rc->block_rsv->size, 3985 BTRFS_RESERVE_FLUSH_ALL); 3986 if (ret) 3987 return ret; 3988 3989 rc->create_reloc_tree = 1; 3990 set_reloc_control(rc); 3991 3992 trans = btrfs_join_transaction(rc->extent_root); 3993 if (IS_ERR(trans)) { 3994 unset_reloc_control(rc); 3995 /* 3996 * extent tree is not a ref_cow tree and has no reloc_root to 3997 * cleanup. And callers are responsible to free the above 3998 * block rsv. 3999 */ 4000 return PTR_ERR(trans); 4001 } 4002 btrfs_commit_transaction(trans); 4003 return 0; 4004 } 4005 4006 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 4007 { 4008 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 4009 struct rb_root blocks = RB_ROOT; 4010 struct btrfs_key key; 4011 struct btrfs_trans_handle *trans = NULL; 4012 struct btrfs_path *path; 4013 struct btrfs_extent_item *ei; 4014 u64 flags; 4015 u32 item_size; 4016 int ret; 4017 int err = 0; 4018 int progress = 0; 4019 4020 path = btrfs_alloc_path(); 4021 if (!path) 4022 return -ENOMEM; 4023 path->reada = READA_FORWARD; 4024 4025 ret = prepare_to_relocate(rc); 4026 if (ret) { 4027 err = ret; 4028 goto out_free; 4029 } 4030 4031 while (1) { 4032 rc->reserved_bytes = 0; 4033 ret = btrfs_block_rsv_refill(rc->extent_root, 4034 rc->block_rsv, rc->block_rsv->size, 4035 BTRFS_RESERVE_FLUSH_ALL); 4036 if (ret) { 4037 err = ret; 4038 break; 4039 } 4040 progress++; 4041 trans = btrfs_start_transaction(rc->extent_root, 0); 4042 if (IS_ERR(trans)) { 4043 err = PTR_ERR(trans); 4044 trans = NULL; 4045 break; 4046 } 4047 restart: 4048 if (update_backref_cache(trans, &rc->backref_cache)) { 4049 btrfs_end_transaction(trans); 4050 continue; 4051 } 4052 4053 ret = find_next_extent(rc, path, &key); 4054 if (ret < 0) 4055 err = ret; 4056 if (ret != 0) 4057 break; 4058 4059 rc->extents_found++; 4060 4061 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 4062 struct btrfs_extent_item); 4063 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 4064 if (item_size >= sizeof(*ei)) { 4065 flags = btrfs_extent_flags(path->nodes[0], ei); 4066 ret = check_extent_flags(flags); 4067 BUG_ON(ret); 4068 4069 } else { 4070 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 4071 u64 ref_owner; 4072 int path_change = 0; 4073 4074 BUG_ON(item_size != 4075 sizeof(struct btrfs_extent_item_v0)); 4076 ret = get_ref_objectid_v0(rc, path, &key, &ref_owner, 4077 &path_change); 4078 if (ret < 0) { 4079 err = ret; 4080 break; 4081 } 4082 if (ref_owner < BTRFS_FIRST_FREE_OBJECTID) 4083 flags = BTRFS_EXTENT_FLAG_TREE_BLOCK; 4084 else 4085 flags = BTRFS_EXTENT_FLAG_DATA; 4086 4087 if (path_change) { 4088 btrfs_release_path(path); 4089 4090 path->search_commit_root = 1; 4091 path->skip_locking = 1; 4092 ret = btrfs_search_slot(NULL, rc->extent_root, 4093 &key, path, 0, 0); 4094 if (ret < 0) { 4095 err = ret; 4096 break; 4097 } 4098 BUG_ON(ret > 0); 4099 } 4100 #else 4101 BUG(); 4102 #endif 4103 } 4104 4105 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 4106 ret = add_tree_block(rc, &key, path, &blocks); 4107 } else if (rc->stage == UPDATE_DATA_PTRS && 4108 (flags & BTRFS_EXTENT_FLAG_DATA)) { 4109 ret = add_data_references(rc, &key, path, &blocks); 4110 
} else { 4111 btrfs_release_path(path); 4112 ret = 0; 4113 } 4114 if (ret < 0) { 4115 err = ret; 4116 break; 4117 } 4118 4119 if (!RB_EMPTY_ROOT(&blocks)) { 4120 ret = relocate_tree_blocks(trans, rc, &blocks); 4121 if (ret < 0) { 4122 /* 4123 * if we fail to relocate tree blocks, force an update 4124 * of the backref cache when committing the transaction. 4125 */ 4126 rc->backref_cache.last_trans = trans->transid - 1; 4127 4128 if (ret != -EAGAIN) { 4129 err = ret; 4130 break; 4131 } 4132 rc->extents_found--; 4133 rc->search_start = key.objectid; 4134 } 4135 } 4136 4137 btrfs_end_transaction_throttle(trans); 4138 btrfs_btree_balance_dirty(fs_info); 4139 trans = NULL; 4140 4141 if (rc->stage == MOVE_DATA_EXTENTS && 4142 (flags & BTRFS_EXTENT_FLAG_DATA)) { 4143 rc->found_file_extent = 1; 4144 ret = relocate_data_extent(rc->data_inode, 4145 &key, &rc->cluster); 4146 if (ret < 0) { 4147 err = ret; 4148 break; 4149 } 4150 } 4151 } 4152 if (trans && progress && err == -ENOSPC) { 4153 ret = btrfs_force_chunk_alloc(trans, fs_info, 4154 rc->block_group->flags); 4155 if (ret == 1) { 4156 err = 0; 4157 progress = 0; 4158 goto restart; 4159 } 4160 } 4161 4162 btrfs_release_path(path); 4163 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); 4164 4165 if (trans) { 4166 btrfs_end_transaction_throttle(trans); 4167 btrfs_btree_balance_dirty(fs_info); 4168 } 4169 4170 if (!err) { 4171 ret = relocate_file_extent_cluster(rc->data_inode, 4172 &rc->cluster); 4173 if (ret < 0) 4174 err = ret; 4175 } 4176 4177 rc->create_reloc_tree = 0; 4178 set_reloc_control(rc); 4179 4180 backref_cache_cleanup(&rc->backref_cache); 4181 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1); 4182 4183 err = prepare_to_merge(rc, err); 4184 4185 merge_reloc_roots(rc); 4186 4187 rc->merge_reloc_tree = 0; 4188 unset_reloc_control(rc); 4189 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1); 4190 4191 /* get rid of pinned extents */ 4192 trans = btrfs_join_transaction(rc->extent_root); 4193 if (IS_ERR(trans)) { 4194 err = PTR_ERR(trans); 4195 goto out_free; 4196 } 4197 btrfs_commit_transaction(trans); 4198 out_free: 4199 btrfs_free_block_rsv(fs_info, rc->block_rsv); 4200 btrfs_free_path(path); 4201 return err; 4202 } 4203 4204 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 4205 struct btrfs_root *root, u64 objectid) 4206 { 4207 struct btrfs_path *path; 4208 struct btrfs_inode_item *item; 4209 struct extent_buffer *leaf; 4210 int ret; 4211 4212 path = btrfs_alloc_path(); 4213 if (!path) 4214 return -ENOMEM; 4215 4216 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 4217 if (ret) 4218 goto out; 4219 4220 leaf = path->nodes[0]; 4221 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 4222 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 4223 btrfs_set_inode_generation(leaf, item, 1); 4224 btrfs_set_inode_size(leaf, item, 0); 4225 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 4226 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 4227 BTRFS_INODE_PREALLOC); 4228 btrfs_mark_buffer_dirty(leaf); 4229 out: 4230 btrfs_free_path(path); 4231 return ret; 4232 } 4233 4234 /* 4235 * helper to create inode for data relocation.
/*
 * helper to create inode for data relocation.
 * the inode is in data relocation tree and its link count is 0
 */
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 objectid;
	int err = 0;

	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	err = btrfs_find_free_objectid(root, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid);
	BUG_ON(err);

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}

static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
{
	struct reloc_control *rc;

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return NULL;

	INIT_LIST_HEAD(&rc->reloc_roots);
	backref_cache_init(&rc->backref_cache);
	mapping_tree_init(&rc->reloc_root_tree);
	extent_io_tree_init(&rc->processed_blocks, NULL);
	return rc;
}

/*
 * Print the block group being relocated
 */
static void describe_relocation(struct btrfs_fs_info *fs_info,
				struct btrfs_block_group_cache *block_group)
{
	char buf[128];		/* prefixed by a '|' that'll be dropped */
	u64 flags = block_group->flags;

	/* Shouldn't happen */
	if (!flags) {
		strcpy(buf, "|NONE");
	} else {
		char *bp = buf;

#define DESCRIBE_FLAG(f, d) \
		if (flags & BTRFS_BLOCK_GROUP_##f) { \
			bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", d); \
			flags &= ~BTRFS_BLOCK_GROUP_##f; \
		}
		DESCRIBE_FLAG(DATA, "data");
		DESCRIBE_FLAG(SYSTEM, "system");
		DESCRIBE_FLAG(METADATA, "metadata");
		DESCRIBE_FLAG(RAID0, "raid0");
		DESCRIBE_FLAG(RAID1, "raid1");
		DESCRIBE_FLAG(DUP, "dup");
		DESCRIBE_FLAG(RAID10, "raid10");
		DESCRIBE_FLAG(RAID5, "raid5");
		DESCRIBE_FLAG(RAID6, "raid6");
		if (flags)
			snprintf(bp, buf - bp + sizeof(buf), "|0x%llx", flags);
#undef DESCRIBE_FLAG
	}

	btrfs_info(fs_info,
		   "relocating block group %llu flags %s",
		   block_group->key.objectid, buf + 1);
}

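/*
 * For illustration only: expanding DESCRIBE_FLAG(DATA, "data") above
 * yields
 *
 *	if (flags & BTRFS_BLOCK_GROUP_DATA) {
 *		bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", "data");
 *		flags &= ~BTRFS_BLOCK_GROUP_DATA;
 *	}
 *
 * "buf - bp + sizeof(buf)" is the space left in buf: bp starts at buf, so
 * the expression starts at sizeof(buf) and shrinks as bp advances. A
 * data/raid1 block group thus builds "|data|raid1", which btrfs_info()
 * prints from buf + 1 as "data|raid1".
 */
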
/*
 * function to relocate all extents in a block group.
 */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;
	int err = 0;

	rc = alloc_reloc_control(fs_info);
	if (!rc)
		return -ENOMEM;

	rc->extent_root = extent_root;

	rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!rc->block_group);

	ret = btrfs_inc_block_group_ro(fs_info, rc->block_group);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(fs_info, rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	describe_relocation(fs_info, rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX,
				 rc->block_group->key.objectid,
				 rc->block_group->key.offset);

	while (1) {
		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (rc->extents_found == 0)
			break;

		btrfs_info(fs_info, "found %llu extents", rc->extents_found);

		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret) {
				err = ret;
				goto out;
			}
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
	btrfs_put_block_group(rc->block_group);
	kfree(rc);
	return err;
}

static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
	       sizeof(root->root_item.drop_progress));
	root->root_item.drop_level = 0;
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}

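/*
 * For illustration, the two data relocation stages as driven by the loop
 * in btrfs_relocate_block_group() above (derived from the code, not an
 * authoritative diagram):
 *
 *	MOVE_DATA_EXTENTS               UPDATE_DATA_PTRS
 *	copy data into the reloc  --->  rewrite file extent items to
 *	inode, cluster by cluster       point at the new locations
 *
 * The transition happens once a pass has seen a file extent
 * (rc->found_file_extent) and all ordered extents against the reloc
 * inode have completed; the outer loop exits only when a pass finds no
 * extents at all (rc->extents_found == 0).
 */
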
/*
 * recover relocation interrupted by system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_fs_root(root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = read_fs_root(fs_info,
					       reloc_root->root_key.offset);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	rc->extent_root = fs_info->extent_root;

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		err = PTR_ERR(trans);
		goto out_free;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			goto out_free;
		}

		err = __add_reloc_root(reloc_root);
		BUG_ON(err < 0); /* -ENOMEM or logic error */
		fs_root->reloc_root = reloc_root;
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_free;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	err = btrfs_commit_transaction(trans);
out_free:
	kfree(rc);
out:
	if (!list_empty(&reloc_roots))
		free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

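	/*
	 * create_reloc_inode() puts the data relocation inode on the
	 * orphan list, so a successful recovery must finish by reaping
	 * that orphan from the data relocation tree.
	 */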
	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
		if (IS_ERR(fs_root))
			err = PTR_ERR(fs_root);
		else
			err = btrfs_orphan_cleanup(fs_root);
	}
	return err;
}

/*
 * helper to add ordered checksums for data relocation.
 *
 * cloning the existing checksums properly handles nodatasum extents, and
 * it saves the CPU time that would be spent recalculating the checksums.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}

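/*
 * A worked example of the offset math above (the numbers are made up for
 * illustration): suppose the old extent starts at disk_bytenr 1MiB and
 * the relocated copy (ordered->start) begins at 9MiB. A csum item found
 * at sums->bytenr == 1MiB + 64KiB covers data 64KiB into the extent, so
 *
 *	new_bytenr = 9MiB + (1MiB + 64KiB - 1MiB) = 9MiB + 64KiB
 *
 * i.e. each csum keeps its relative offset inside the new location.
 */
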
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		drop_node_buffer(node);
		extent_buffer_get(cow);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}

/*
 * called before creating snapshot. it calculates metadata reservation
 * required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct reloc_control *rc;

	root = pending->root;
	if (!root->reloc_root)
		return;

	rc = root->fs_info->reloc_ctl;
	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case: half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create a snapshot, cowing the fs
	 * tree may use more space than it frees, so we need to
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}

/*
 * called after snapshot is created. migrate block reservation
 * and create reloc root for the newly created snapshot
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc;
	int ret;

	if (!root->reloc_root)
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, 1);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}
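
/*
 * For illustration only (the figure is an assumption, not taken from this
 * file): if merging has relocated 8MiB of tree nodes, the worst case
 * merge cost is 2 * 8MiB, half to COW the reloc tree and half to COW the
 * fs tree. Each snapshot taken while merging then reserves another
 * nodes_relocated (8MiB) via btrfs_reloc_pre_snapshot(), since COWing
 * the snapshotted fs tree may consume more space than dropping the reloc
 * tree later frees.
 */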