/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represents a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of the tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represents a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
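/*
 * Note on edge linkage (a summary of the conventions used below, not new
 * behaviour): an edge connects exactly two nodes.  edge->node[LOWER] is
 * the child block and edge->node[UPPER] the parent block.
 * edge->list[LOWER] is linked into the child's 'upper' list and
 * edge->list[UPPER] into the parent's 'lower' list, so the same edge can
 * be reached from either endpoint:
 *
 *	edge->node[LOWER] = lower;
 *	edge->node[UPPER] = upper;
 *	list_add_tail(&edge->list[LOWER], &lower->upper);
 *	list_add_tail(&edge->list[UPPER], &upper->lower);
 */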
struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * maps address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represents a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
	unsigned int commit_transaction:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}

static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

/*
 * walk up backref nodes until we reach the node that represents the
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
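/*
 * Note: walk_up_backref() and walk_down_backref() together implement a
 * depth-first walk over all root-to-block reference paths.  The edges[]
 * array acts as an explicit stack (at most BTRFS_MAX_LEVEL - 1 entries,
 * one per level), so a typical caller loops roughly like this sketch:
 *
 *	int index = 0;
 *	next = walk_up_backref(node, edges, &index);
 *	while (next) {
 *		... process the root 'next' reached by this path ...
 *		next = walk_down_backref(edges, &index);
 *		if (next)
 *			next = walk_up_backref(next, edges, &index);
 *	}
 */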
static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	BUG_ON(rb_node);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. a transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}


static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!root->ref_cows)
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}
/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &key);
}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (root->ref_cows &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif

static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}

	bi = (struct btrfs_tree_block_info *)(ei + 1);
	*ptr = (unsigned long)(bi + 1);
	*end = (unsigned long)ei + item_size;
	return 0;
}
/*
 * build backref tree for a given tree block. the root of the backref tree
 * corresponds to the tree block, the leaves of the backref tree correspond
 * to the roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when a tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = 1;
	path2->reada = 2;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(!ret || !path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		BUG_ON(!list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		BUG_ON(!list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to pending list if we need
		 * to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backref of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				BUG_ON(!root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!root->ref_cows)
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			BUG_ON(btrfs_root_bytenr(&root->root_item) !=
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/*
		 * searching the tree to find upper level blocks
		 * that reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
			cur->bytenr);

		lower = cur;
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!root->ref_cows)
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to pending list if we
				 * need to check its backrefs. only blocks
				 * at 'cur->level + 1' are added to the
				 * tail of pending list. this guarantees
				 * we check backrefs from lower level
				 * blocks to upper level blocks.
				 */
				if (!upper->checked &&
				    level == cur->level + 1) {
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else
					INIT_LIST_HEAD(&edge->list[UPPER]);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything went well, connect backref nodes and insert backref
	 * nodes into the cache.
	 */
	BUG_ON(!node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		BUG_ON(rb_node);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		BUG_ON(!upper->checked);
		BUG_ON(cowonly != upper->cowonly);
		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			BUG_ON(rb_node);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		BUG_ON(!list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, upper);
			list_del_init(&lower->upper);
		}
		upper = node;
		INIT_LIST_HEAD(&list);
		while (upper) {
			if (RB_EMPTY_NODE(&upper->rb_node)) {
				list_splice_tail(&upper->upper, &list);
				free_backref_node(cache, upper);
			}

			if (list_empty(&list))
				break;

			edge = list_entry(list.next, struct backref_edge,
					  list[LOWER]);
			list_del(&edge->list[LOWER]);
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);
		}
		return ERR_PTR(err);
	}
	BUG_ON(node && node->detached);
	return node;
}
/*
 * helper to add backref node for the newly created snapshot.
 * the backref node is created by cloning backref node that
 * corresponds to root of source tree
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	BUG_ON(rb_node);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __add_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	BUG_ON(!node);

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	BUG_ON(rb_node);

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
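/*
 * Note: the reloc root mapping tree is keyed by the start bytenr of the
 * reloc root's current root node, which changes whenever that root is
 * COWed; __update_reloc_root() below re-keys the mapping accordingly.
 * Only reloc_root_tree.lock protects the rb-tree itself, so lookups and
 * updates always happen under that spinlock.
 */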
/*
 * helper to update/delete the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, int del)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	BUG_ON((struct btrfs_root *)node->data != root);

	if (!del) {
		spin_lock(&rc->reloc_root_tree.lock);
		node->bytenr = root->node->start;
		rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
				      node->bytenr, &node->rb_node);
		spin_unlock(&rc->reloc_root_tree.lock);
		BUG_ON(rb_node);
	} else {
		list_del_init(&root->root_list);
		kfree(node);
	}
	return 0;
}

static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);

		btrfs_set_root_last_snapshot(&root->root_item,
					     trans->transid - 1);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}

/*
 * create a reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	int clear_rsv = 0;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->block_rsv) {
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = NULL;

	__add_reloc_root(reloc_root);
	root->reloc_root = reloc_root;
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int del = 0;
	int ret;

	if (!root->reloc_root)
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (root->fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		root->reloc_root = NULL;
		del = 1;
	}

	__update_reloc_root(reloc_root, del);

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, root->fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);

out:
	return 0;
}

/*
 * helper to find the first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(&entry->vfs_inode))
			node = node->rb_left;
		else if (objectid > btrfs_ino(&entry->vfs_inode))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(&entry->vfs_inode) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
				       bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = 1;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(inode) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(inode) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    root->sectorsize));
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end,
						      GFP_NOFS);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(inode, key.offset, end,
							1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, GFP_NOFS);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret > 0) {
			WARN_ON(1);
			continue;
		}
		BUG_ON(ret < 0);

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset);
		BUG_ON(ret);
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return 0;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = btrfs_level_size(dest, level - 1);
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (new_bytenr > 0 && new_bytenr == old_bytenr) {
			WARN_ON(1);
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(dest, old_bytenr, blocksize,
					     old_ptr_gen);
			BUG_ON(!eb);
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
					   path->nodes[level]->start,
					   src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
					   0, dest->root_key.objectid, level - 1,
					   0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
					path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
					0, dest->root_key.objectid, level - 1,
					0);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
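/*
 * Note: both replace_file_extents() and replace_path() follow the same
 * reference counting discipline: references for the new locations are
 * added via btrfs_inc_extent_ref() before the references to the old
 * locations are dropped via btrfs_free_extent(), so every extent stays
 * referenced at each point inside the transaction.  In replace_path()
 * the child pointers of the fs tree parent and the reloc tree parent
 * are exchanged, so each tree gains a ref to the block it now points to
 * and drops the ref to the block it gave away.
 */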
/*
 * helper to find the next relocated block in the reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}

/*
 * walk down the reloc tree to find the relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 blocksize;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		blocksize = btrfs_level_size(root, i - 1);
		eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}

/*
 * invalidate extent cache for file extents whose keys are in the range
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(inode);

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, root->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
		btrfs_drop_extent_cache(inode, start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

/*
 * merge the relocated tree blocks in the reloc tree with the
 * corresponding fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	LIST_HEAD(inode_list);
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long nr;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		BUG_ON(IS_ERR(trans));
		trans->block_rsv = rc->block_rsv;

		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved);
		if (ret) {
			BUG_ON(ret != -EAGAIN);
			ret = btrfs_commit_transaction(trans, root);
			BUG_ON(ret);
			continue;
		}

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		nr = trans->blocks_used;
		btrfs_end_transaction_throttle(trans, root);

		btrfs_btree_balance_dirty(root, nr);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to
	 * be relocated and that block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);

	if (err == 0) {
		memset(&root_item->drop_progress, 0,
		       sizeof(root_item->drop_progress));
		root_item->drop_level = 0;
		btrfs_set_root_refs(root_item, 0);
		btrfs_update_reloc_root(trans, root);
	}

	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);

	btrfs_btree_balance_dirty(root, nr);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return err;
}

static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&root->fs_info->reloc_mutex);
	rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&root->fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans, rc->extent_root);
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = read_fs_root(reloc_root->fs_info,
				    reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);

		list_add(&reloc_root->root_list, &reloc_roots);
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		btrfs_commit_transaction(trans, rc->extent_root);
	else
		btrfs_end_transaction(trans, rc->extent_root);
	return err;
}
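/*
 * Note on the sizing above: the merging reservation covers COWing one
 * full tree path, '(BTRFS_MAX_LEVEL - 1) * 2' nodes, plus twice
 * rc->nodes_relocated.  The doubling appears to account for both copies
 * of each relocated node (the fs tree side and the reloc tree side)
 * being COWed during the merge; the exact worst case follows the code
 * above rather than any stated formula.
 */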
2234 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 2235 root = read_fs_root(reloc_root->fs_info, 2236 reloc_root->root_key.offset); 2237 BUG_ON(IS_ERR(root)); 2238 BUG_ON(root->reloc_root != reloc_root); 2239 2240 ret = merge_reloc_root(rc, root); 2241 BUG_ON(ret); 2242 } else { 2243 list_del_init(&reloc_root->root_list); 2244 } 2245 btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0); 2246 } 2247 2248 if (found) { 2249 found = 0; 2250 goto again; 2251 } 2252 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2253 return 0; 2254 } 2255 2256 static void free_block_list(struct rb_root *blocks) 2257 { 2258 struct tree_block *block; 2259 struct rb_node *rb_node; 2260 while ((rb_node = rb_first(blocks))) { 2261 block = rb_entry(rb_node, struct tree_block, rb_node); 2262 rb_erase(rb_node, blocks); 2263 kfree(block); 2264 } 2265 } 2266 2267 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2268 struct btrfs_root *reloc_root) 2269 { 2270 struct btrfs_root *root; 2271 2272 if (reloc_root->last_trans == trans->transid) 2273 return 0; 2274 2275 root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset); 2276 BUG_ON(IS_ERR(root)); 2277 BUG_ON(root->reloc_root != reloc_root); 2278 2279 return btrfs_record_root_in_trans(trans, root); 2280 } 2281 2282 static noinline_for_stack 2283 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2284 struct reloc_control *rc, 2285 struct backref_node *node, 2286 struct backref_edge *edges[], int *nr) 2287 { 2288 struct backref_node *next; 2289 struct btrfs_root *root; 2290 int index = 0; 2291 2292 next = node; 2293 while (1) { 2294 cond_resched(); 2295 next = walk_up_backref(next, edges, &index); 2296 root = next->root; 2297 BUG_ON(!root); 2298 BUG_ON(!root->ref_cows); 2299 2300 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2301 record_reloc_root_in_trans(trans, root); 2302 break; 2303 } 2304 2305 btrfs_record_root_in_trans(trans, root); 2306 root = root->reloc_root; 2307 2308 if (next->new_bytenr != root->node->start) { 2309 BUG_ON(next->new_bytenr); 2310 BUG_ON(!list_empty(&next->list)); 2311 next->new_bytenr = root->node->start; 2312 next->root = root; 2313 list_add_tail(&next->list, 2314 &rc->backref_cache.changed); 2315 __mark_block_processed(rc, next); 2316 break; 2317 } 2318 2319 WARN_ON(1); 2320 root = NULL; 2321 next = walk_down_backref(edges, &index); 2322 if (!next || next->level <= node->level) 2323 break; 2324 } 2325 if (!root) 2326 return NULL; 2327 2328 *nr = index; 2329 next = node; 2330 /* setup backref node path for btrfs_reloc_cow_block */ 2331 while (1) { 2332 rc->backref_cache.path[next->level] = next; 2333 if (--index < 0) 2334 break; 2335 next = edges[index]->node[UPPER]; 2336 } 2337 return root; 2338 } 2339 2340 /* 2341 * select a tree root for relocation. return NULL if the block 2342 * is reference counted. we should use do_relocation() in this 2343 * case. return a tree root pointer if the block isn't reference 2344 * counted. return -ENOENT if the block is root of reloc tree. 
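 * for example, relocate_tree_block() below treats the -ENOENT case
 * (the block is the root of a reloc tree) as already handled and just
 * marks it processed instead of relocating it again.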
2345  */
2346 static noinline_for_stack
2347 struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
2348 				   struct backref_node *node)
2349 {
2350 	struct backref_node *next;
2351 	struct btrfs_root *root;
2352 	struct btrfs_root *fs_root = NULL;
2353 	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2354 	int index = 0;
2355 
2356 	next = node;
2357 	while (1) {
2358 		cond_resched();
2359 		next = walk_up_backref(next, edges, &index);
2360 		root = next->root;
2361 		BUG_ON(!root);
2362 
2363 		/* no other choice for non-reference counted tree */
2364 		if (!root->ref_cows)
2365 			return root;
2366 
2367 		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2368 			fs_root = root;
2369 
2370 		if (next != node)
2371 			return NULL;
2372 
2373 		next = walk_down_backref(edges, &index);
2374 		if (!next || next->level <= node->level)
2375 			break;
2376 	}
2377 
2378 	if (!fs_root)
2379 		return ERR_PTR(-ENOENT);
2380 	return fs_root;
2381 }
2382 
2383 static noinline_for_stack
2384 u64 calcu_metadata_size(struct reloc_control *rc,
2385 			struct backref_node *node, int reserve)
2386 {
2387 	struct backref_node *next = node;
2388 	struct backref_edge *edge;
2389 	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2390 	u64 num_bytes = 0;
2391 	int index = 0;
2392 
2393 	BUG_ON(reserve && node->processed);
2394 
2395 	while (next) {
2396 		cond_resched();
2397 		while (1) {
2398 			if (next->processed && (reserve || next != node))
2399 				break;
2400 
2401 			num_bytes += btrfs_level_size(rc->extent_root,
2402 						      next->level);
2403 
2404 			if (list_empty(&next->upper))
2405 				break;
2406 
2407 			edge = list_entry(next->upper.next,
2408 					  struct backref_edge, list[LOWER]);
2409 			edges[index++] = edge;
2410 			next = edge->node[UPPER];
2411 		}
2412 		next = walk_down_backref(edges, &index);
2413 	}
2414 	return num_bytes;
2415 }
2416 
2417 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2418 				  struct reloc_control *rc,
2419 				  struct backref_node *node)
2420 {
2421 	struct btrfs_root *root = rc->extent_root;
2422 	u64 num_bytes;
2423 	int ret;
2424 
2425 	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2426 
2427 	trans->block_rsv = rc->block_rsv;
2428 	ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
2429 	if (ret) {
2430 		if (ret == -EAGAIN)
2431 			rc->commit_transaction = 1;
2432 		return ret;
2433 	}
2434 
2435 	return 0;
2436 }
2437 
2438 static void release_metadata_space(struct reloc_control *rc,
2439 				   struct backref_node *node)
2440 {
2441 	u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2;
2442 	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes);
2443 }
2444 
2445 /*
2446  * relocate a tree block, and then update pointers in upper level
2447  * blocks that reference the block to point to the new location.
2448  *
2449  * if called by link_to_upper, the block has already been relocated.
2450  * in that case this function just updates pointers.
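 * the 'lowest' argument says whether 'node' itself still has to be
 * COWed: relocate_tree_block() passes 1 to relocate the block,
 * link_to_upper() passes 0 to only update the parent pointers.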
2451 */ 2452 static int do_relocation(struct btrfs_trans_handle *trans, 2453 struct reloc_control *rc, 2454 struct backref_node *node, 2455 struct btrfs_key *key, 2456 struct btrfs_path *path, int lowest) 2457 { 2458 struct backref_node *upper; 2459 struct backref_edge *edge; 2460 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2461 struct btrfs_root *root; 2462 struct extent_buffer *eb; 2463 u32 blocksize; 2464 u64 bytenr; 2465 u64 generation; 2466 int nr; 2467 int slot; 2468 int ret; 2469 int err = 0; 2470 2471 BUG_ON(lowest && node->eb); 2472 2473 path->lowest_level = node->level + 1; 2474 rc->backref_cache.path[node->level] = node; 2475 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2476 cond_resched(); 2477 2478 upper = edge->node[UPPER]; 2479 root = select_reloc_root(trans, rc, upper, edges, &nr); 2480 BUG_ON(!root); 2481 2482 if (upper->eb && !upper->locked) { 2483 if (!lowest) { 2484 ret = btrfs_bin_search(upper->eb, key, 2485 upper->level, &slot); 2486 BUG_ON(ret); 2487 bytenr = btrfs_node_blockptr(upper->eb, slot); 2488 if (node->eb->start == bytenr) 2489 goto next; 2490 } 2491 drop_node_buffer(upper); 2492 } 2493 2494 if (!upper->eb) { 2495 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2496 if (ret < 0) { 2497 err = ret; 2498 break; 2499 } 2500 BUG_ON(ret > 0); 2501 2502 if (!upper->eb) { 2503 upper->eb = path->nodes[upper->level]; 2504 path->nodes[upper->level] = NULL; 2505 } else { 2506 BUG_ON(upper->eb != path->nodes[upper->level]); 2507 } 2508 2509 upper->locked = 1; 2510 path->locks[upper->level] = 0; 2511 2512 slot = path->slots[upper->level]; 2513 btrfs_release_path(path); 2514 } else { 2515 ret = btrfs_bin_search(upper->eb, key, upper->level, 2516 &slot); 2517 BUG_ON(ret); 2518 } 2519 2520 bytenr = btrfs_node_blockptr(upper->eb, slot); 2521 if (lowest) { 2522 BUG_ON(bytenr != node->bytenr); 2523 } else { 2524 if (node->eb->start == bytenr) 2525 goto next; 2526 } 2527 2528 blocksize = btrfs_level_size(root, node->level); 2529 generation = btrfs_node_ptr_generation(upper->eb, slot); 2530 eb = read_tree_block(root, bytenr, blocksize, generation); 2531 if (!eb) { 2532 err = -EIO; 2533 goto next; 2534 } 2535 btrfs_tree_lock(eb); 2536 btrfs_set_lock_blocking(eb); 2537 2538 if (!node->eb) { 2539 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2540 slot, &eb); 2541 btrfs_tree_unlock(eb); 2542 free_extent_buffer(eb); 2543 if (ret < 0) { 2544 err = ret; 2545 goto next; 2546 } 2547 BUG_ON(node->eb != eb); 2548 } else { 2549 btrfs_set_node_blockptr(upper->eb, slot, 2550 node->eb->start); 2551 btrfs_set_node_ptr_generation(upper->eb, slot, 2552 trans->transid); 2553 btrfs_mark_buffer_dirty(upper->eb); 2554 2555 ret = btrfs_inc_extent_ref(trans, root, 2556 node->eb->start, blocksize, 2557 upper->eb->start, 2558 btrfs_header_owner(upper->eb), 2559 node->level, 0); 2560 BUG_ON(ret); 2561 2562 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); 2563 BUG_ON(ret); 2564 } 2565 next: 2566 if (!upper->pending) 2567 drop_node_buffer(upper); 2568 else 2569 unlock_node_buffer(upper); 2570 if (err) 2571 break; 2572 } 2573 2574 if (!err && node->pending) { 2575 drop_node_buffer(node); 2576 list_move_tail(&node->list, &rc->backref_cache.changed); 2577 node->pending = 0; 2578 } 2579 2580 path->lowest_level = 0; 2581 BUG_ON(err == -ENOSPC); 2582 return err; 2583 } 2584 2585 static int link_to_upper(struct btrfs_trans_handle *trans, 2586 struct reloc_control *rc, 2587 struct backref_node *node, 2588 struct btrfs_path *path) 2589 { 2590 struct btrfs_key key; 2591 2592 
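	/* use the first key in the block to search for its parents */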
btrfs_node_key_to_cpu(node->eb, &key, 0); 2593 return do_relocation(trans, rc, node, &key, path, 0); 2594 } 2595 2596 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2597 struct reloc_control *rc, 2598 struct btrfs_path *path, int err) 2599 { 2600 LIST_HEAD(list); 2601 struct backref_cache *cache = &rc->backref_cache; 2602 struct backref_node *node; 2603 int level; 2604 int ret; 2605 2606 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2607 while (!list_empty(&cache->pending[level])) { 2608 node = list_entry(cache->pending[level].next, 2609 struct backref_node, list); 2610 list_move_tail(&node->list, &list); 2611 BUG_ON(!node->pending); 2612 2613 if (!err) { 2614 ret = link_to_upper(trans, rc, node, path); 2615 if (ret < 0) 2616 err = ret; 2617 } 2618 } 2619 list_splice_init(&list, &cache->pending[level]); 2620 } 2621 return err; 2622 } 2623 2624 static void mark_block_processed(struct reloc_control *rc, 2625 u64 bytenr, u32 blocksize) 2626 { 2627 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, 2628 EXTENT_DIRTY, GFP_NOFS); 2629 } 2630 2631 static void __mark_block_processed(struct reloc_control *rc, 2632 struct backref_node *node) 2633 { 2634 u32 blocksize; 2635 if (node->level == 0 || 2636 in_block_group(node->bytenr, rc->block_group)) { 2637 blocksize = btrfs_level_size(rc->extent_root, node->level); 2638 mark_block_processed(rc, node->bytenr, blocksize); 2639 } 2640 node->processed = 1; 2641 } 2642 2643 /* 2644 * mark a block and all blocks directly/indirectly reference the block 2645 * as processed. 2646 */ 2647 static void update_processed_blocks(struct reloc_control *rc, 2648 struct backref_node *node) 2649 { 2650 struct backref_node *next = node; 2651 struct backref_edge *edge; 2652 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2653 int index = 0; 2654 2655 while (next) { 2656 cond_resched(); 2657 while (1) { 2658 if (next->processed) 2659 break; 2660 2661 __mark_block_processed(rc, next); 2662 2663 if (list_empty(&next->upper)) 2664 break; 2665 2666 edge = list_entry(next->upper.next, 2667 struct backref_edge, list[LOWER]); 2668 edges[index++] = edge; 2669 next = edge->node[UPPER]; 2670 } 2671 next = walk_down_backref(edges, &index); 2672 } 2673 } 2674 2675 static int tree_block_processed(u64 bytenr, u32 blocksize, 2676 struct reloc_control *rc) 2677 { 2678 if (test_range_bit(&rc->processed_blocks, bytenr, 2679 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2680 return 1; 2681 return 0; 2682 } 2683 2684 static int get_tree_block_key(struct reloc_control *rc, 2685 struct tree_block *block) 2686 { 2687 struct extent_buffer *eb; 2688 2689 BUG_ON(block->key_ready); 2690 eb = read_tree_block(rc->extent_root, block->bytenr, 2691 block->key.objectid, block->key.offset); 2692 BUG_ON(!eb); 2693 WARN_ON(btrfs_header_level(eb) != block->level); 2694 if (block->level == 0) 2695 btrfs_item_key_to_cpu(eb, &block->key, 0); 2696 else 2697 btrfs_node_key_to_cpu(eb, &block->key, 0); 2698 free_extent_buffer(eb); 2699 block->key_ready = 1; 2700 return 0; 2701 } 2702 2703 static int reada_tree_block(struct reloc_control *rc, 2704 struct tree_block *block) 2705 { 2706 BUG_ON(block->key_ready); 2707 readahead_tree_block(rc->extent_root, block->bytenr, 2708 block->key.objectid, block->key.offset); 2709 return 0; 2710 } 2711 2712 /* 2713 * helper function to relocate a tree block 2714 */ 2715 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2716 struct reloc_control *rc, 2717 struct backref_node *node, 2718 struct btrfs_key *key, 2719 struct 
btrfs_path *path) 2720 { 2721 struct btrfs_root *root; 2722 int release = 0; 2723 int ret = 0; 2724 2725 if (!node) 2726 return 0; 2727 2728 BUG_ON(node->processed); 2729 root = select_one_root(trans, node); 2730 if (root == ERR_PTR(-ENOENT)) { 2731 update_processed_blocks(rc, node); 2732 goto out; 2733 } 2734 2735 if (!root || root->ref_cows) { 2736 ret = reserve_metadata_space(trans, rc, node); 2737 if (ret) 2738 goto out; 2739 release = 1; 2740 } 2741 2742 if (root) { 2743 if (root->ref_cows) { 2744 BUG_ON(node->new_bytenr); 2745 BUG_ON(!list_empty(&node->list)); 2746 btrfs_record_root_in_trans(trans, root); 2747 root = root->reloc_root; 2748 node->new_bytenr = root->node->start; 2749 node->root = root; 2750 list_add_tail(&node->list, &rc->backref_cache.changed); 2751 } else { 2752 path->lowest_level = node->level; 2753 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2754 btrfs_release_path(path); 2755 if (ret > 0) 2756 ret = 0; 2757 } 2758 if (!ret) 2759 update_processed_blocks(rc, node); 2760 } else { 2761 ret = do_relocation(trans, rc, node, key, path, 1); 2762 } 2763 out: 2764 if (ret || node->level == 0 || node->cowonly) { 2765 if (release) 2766 release_metadata_space(rc, node); 2767 remove_backref_node(&rc->backref_cache, node); 2768 } 2769 return ret; 2770 } 2771 2772 /* 2773 * relocate a list of blocks 2774 */ 2775 static noinline_for_stack 2776 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2777 struct reloc_control *rc, struct rb_root *blocks) 2778 { 2779 struct backref_node *node; 2780 struct btrfs_path *path; 2781 struct tree_block *block; 2782 struct rb_node *rb_node; 2783 int ret; 2784 int err = 0; 2785 2786 path = btrfs_alloc_path(); 2787 if (!path) 2788 return -ENOMEM; 2789 2790 rb_node = rb_first(blocks); 2791 while (rb_node) { 2792 block = rb_entry(rb_node, struct tree_block, rb_node); 2793 if (!block->key_ready) 2794 reada_tree_block(rc, block); 2795 rb_node = rb_next(rb_node); 2796 } 2797 2798 rb_node = rb_first(blocks); 2799 while (rb_node) { 2800 block = rb_entry(rb_node, struct tree_block, rb_node); 2801 if (!block->key_ready) 2802 get_tree_block_key(rc, block); 2803 rb_node = rb_next(rb_node); 2804 } 2805 2806 rb_node = rb_first(blocks); 2807 while (rb_node) { 2808 block = rb_entry(rb_node, struct tree_block, rb_node); 2809 2810 node = build_backref_tree(rc, &block->key, 2811 block->level, block->bytenr); 2812 if (IS_ERR(node)) { 2813 err = PTR_ERR(node); 2814 goto out; 2815 } 2816 2817 ret = relocate_tree_block(trans, rc, node, &block->key, 2818 path); 2819 if (ret < 0) { 2820 if (ret != -EAGAIN || rb_node == rb_first(blocks)) 2821 err = ret; 2822 goto out; 2823 } 2824 rb_node = rb_next(rb_node); 2825 } 2826 out: 2827 free_block_list(blocks); 2828 err = finish_pending_nodes(trans, rc, path, err); 2829 2830 btrfs_free_path(path); 2831 return err; 2832 } 2833 2834 static noinline_for_stack 2835 int prealloc_file_extent_cluster(struct inode *inode, 2836 struct file_extent_cluster *cluster) 2837 { 2838 u64 alloc_hint = 0; 2839 u64 start; 2840 u64 end; 2841 u64 offset = BTRFS_I(inode)->index_cnt; 2842 u64 num_bytes; 2843 int nr = 0; 2844 int ret = 0; 2845 2846 BUG_ON(cluster->start != cluster->boundary[0]); 2847 mutex_lock(&inode->i_mutex); 2848 2849 ret = btrfs_check_data_free_space(inode, cluster->end + 2850 1 - cluster->start); 2851 if (ret) 2852 goto out; 2853 2854 while (nr < cluster->nr) { 2855 start = cluster->boundary[nr] - offset; 2856 if (nr + 1 < cluster->nr) 2857 end = cluster->boundary[nr + 1] - 1 - offset; 2858 else 2859 end = 
cluster->end - offset; 2860 2861 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2862 num_bytes = end + 1 - start; 2863 ret = btrfs_prealloc_file_range(inode, 0, start, 2864 num_bytes, num_bytes, 2865 end + 1, &alloc_hint); 2866 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2867 if (ret) 2868 break; 2869 nr++; 2870 } 2871 btrfs_free_reserved_data_space(inode, cluster->end + 2872 1 - cluster->start); 2873 out: 2874 mutex_unlock(&inode->i_mutex); 2875 return ret; 2876 } 2877 2878 static noinline_for_stack 2879 int setup_extent_mapping(struct inode *inode, u64 start, u64 end, 2880 u64 block_start) 2881 { 2882 struct btrfs_root *root = BTRFS_I(inode)->root; 2883 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 2884 struct extent_map *em; 2885 int ret = 0; 2886 2887 em = alloc_extent_map(); 2888 if (!em) 2889 return -ENOMEM; 2890 2891 em->start = start; 2892 em->len = end + 1 - start; 2893 em->block_len = em->len; 2894 em->block_start = block_start; 2895 em->bdev = root->fs_info->fs_devices->latest_bdev; 2896 set_bit(EXTENT_FLAG_PINNED, &em->flags); 2897 2898 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2899 while (1) { 2900 write_lock(&em_tree->lock); 2901 ret = add_extent_mapping(em_tree, em); 2902 write_unlock(&em_tree->lock); 2903 if (ret != -EEXIST) { 2904 free_extent_map(em); 2905 break; 2906 } 2907 btrfs_drop_extent_cache(inode, start, end, 0); 2908 } 2909 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2910 return ret; 2911 } 2912 2913 static int relocate_file_extent_cluster(struct inode *inode, 2914 struct file_extent_cluster *cluster) 2915 { 2916 u64 page_start; 2917 u64 page_end; 2918 u64 offset = BTRFS_I(inode)->index_cnt; 2919 unsigned long index; 2920 unsigned long last_index; 2921 struct page *page; 2922 struct file_ra_state *ra; 2923 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 2924 int nr = 0; 2925 int ret = 0; 2926 2927 if (!cluster->nr) 2928 return 0; 2929 2930 ra = kzalloc(sizeof(*ra), GFP_NOFS); 2931 if (!ra) 2932 return -ENOMEM; 2933 2934 ret = prealloc_file_extent_cluster(inode, cluster); 2935 if (ret) 2936 goto out; 2937 2938 file_ra_state_init(ra, inode->i_mapping); 2939 2940 ret = setup_extent_mapping(inode, cluster->start - offset, 2941 cluster->end - offset, cluster->start); 2942 if (ret) 2943 goto out; 2944 2945 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 2946 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 2947 while (index <= last_index) { 2948 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); 2949 if (ret) 2950 goto out; 2951 2952 page = find_lock_page(inode->i_mapping, index); 2953 if (!page) { 2954 page_cache_sync_readahead(inode->i_mapping, 2955 ra, NULL, index, 2956 last_index + 1 - index); 2957 page = find_or_create_page(inode->i_mapping, index, 2958 mask); 2959 if (!page) { 2960 btrfs_delalloc_release_metadata(inode, 2961 PAGE_CACHE_SIZE); 2962 ret = -ENOMEM; 2963 goto out; 2964 } 2965 } 2966 2967 if (PageReadahead(page)) { 2968 page_cache_async_readahead(inode->i_mapping, 2969 ra, NULL, page, index, 2970 last_index + 1 - index); 2971 } 2972 2973 if (!PageUptodate(page)) { 2974 btrfs_readpage(NULL, page); 2975 lock_page(page); 2976 if (!PageUptodate(page)) { 2977 unlock_page(page); 2978 page_cache_release(page); 2979 btrfs_delalloc_release_metadata(inode, 2980 PAGE_CACHE_SIZE); 2981 ret = -EIO; 2982 goto out; 2983 } 2984 } 2985 2986 page_start = (u64)page->index << PAGE_CACHE_SHIFT; 2987 page_end = page_start + PAGE_CACHE_SIZE - 1; 2988 
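		/*
		 * the page is dirtied under the extent lock; the first page of
		 * each cluster extent also gets EXTENT_BOUNDARY set so the
		 * extent boundaries are preserved at writeback time.
		 */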
2989 lock_extent(&BTRFS_I(inode)->io_tree, 2990 page_start, page_end, GFP_NOFS); 2991 2992 set_page_extent_mapped(page); 2993 2994 if (nr < cluster->nr && 2995 page_start + offset == cluster->boundary[nr]) { 2996 set_extent_bits(&BTRFS_I(inode)->io_tree, 2997 page_start, page_end, 2998 EXTENT_BOUNDARY, GFP_NOFS); 2999 nr++; 3000 } 3001 3002 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); 3003 set_page_dirty(page); 3004 3005 unlock_extent(&BTRFS_I(inode)->io_tree, 3006 page_start, page_end, GFP_NOFS); 3007 unlock_page(page); 3008 page_cache_release(page); 3009 3010 index++; 3011 balance_dirty_pages_ratelimited(inode->i_mapping); 3012 btrfs_throttle(BTRFS_I(inode)->root); 3013 } 3014 WARN_ON(nr != cluster->nr); 3015 out: 3016 kfree(ra); 3017 return ret; 3018 } 3019 3020 static noinline_for_stack 3021 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 3022 struct file_extent_cluster *cluster) 3023 { 3024 int ret; 3025 3026 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3027 ret = relocate_file_extent_cluster(inode, cluster); 3028 if (ret) 3029 return ret; 3030 cluster->nr = 0; 3031 } 3032 3033 if (!cluster->nr) 3034 cluster->start = extent_key->objectid; 3035 else 3036 BUG_ON(cluster->nr >= MAX_EXTENTS); 3037 cluster->end = extent_key->objectid + extent_key->offset - 1; 3038 cluster->boundary[cluster->nr] = extent_key->objectid; 3039 cluster->nr++; 3040 3041 if (cluster->nr >= MAX_EXTENTS) { 3042 ret = relocate_file_extent_cluster(inode, cluster); 3043 if (ret) 3044 return ret; 3045 cluster->nr = 0; 3046 } 3047 return 0; 3048 } 3049 3050 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3051 static int get_ref_objectid_v0(struct reloc_control *rc, 3052 struct btrfs_path *path, 3053 struct btrfs_key *extent_key, 3054 u64 *ref_objectid, int *path_change) 3055 { 3056 struct btrfs_key key; 3057 struct extent_buffer *leaf; 3058 struct btrfs_extent_ref_v0 *ref0; 3059 int ret; 3060 int slot; 3061 3062 leaf = path->nodes[0]; 3063 slot = path->slots[0]; 3064 while (1) { 3065 if (slot >= btrfs_header_nritems(leaf)) { 3066 ret = btrfs_next_leaf(rc->extent_root, path); 3067 if (ret < 0) 3068 return ret; 3069 BUG_ON(ret > 0); 3070 leaf = path->nodes[0]; 3071 slot = path->slots[0]; 3072 if (path_change) 3073 *path_change = 1; 3074 } 3075 btrfs_item_key_to_cpu(leaf, &key, slot); 3076 if (key.objectid != extent_key->objectid) 3077 return -ENOENT; 3078 3079 if (key.type != BTRFS_EXTENT_REF_V0_KEY) { 3080 slot++; 3081 continue; 3082 } 3083 ref0 = btrfs_item_ptr(leaf, slot, 3084 struct btrfs_extent_ref_v0); 3085 *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0); 3086 break; 3087 } 3088 return 0; 3089 } 3090 #endif 3091 3092 /* 3093 * helper to add a tree block to the list. 
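 * ('blocks' is the rb tree of tree blocks pending relocation)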
3094  * the major work is getting the generation and level of the block
3095  */
3096 static int add_tree_block(struct reloc_control *rc,
3097 			  struct btrfs_key *extent_key,
3098 			  struct btrfs_path *path,
3099 			  struct rb_root *blocks)
3100 {
3101 	struct extent_buffer *eb;
3102 	struct btrfs_extent_item *ei;
3103 	struct btrfs_tree_block_info *bi;
3104 	struct tree_block *block;
3105 	struct rb_node *rb_node;
3106 	u32 item_size;
3107 	int level = -1;
3108 	int generation;
3109 
3110 	eb = path->nodes[0];
3111 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
3112 
3113 	if (item_size >= sizeof(*ei) + sizeof(*bi)) {
3114 		ei = btrfs_item_ptr(eb, path->slots[0],
3115 				    struct btrfs_extent_item);
3116 		bi = (struct btrfs_tree_block_info *)(ei + 1);
3117 		generation = btrfs_extent_generation(eb, ei);
3118 		level = btrfs_tree_block_level(eb, bi);
3119 	} else {
3120 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3121 		u64 ref_owner;
3122 		int ret;
3123 
3124 		BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3125 		ret = get_ref_objectid_v0(rc, path, extent_key,
3126 					  &ref_owner, NULL);
3127 		if (ret < 0)
3128 			return ret;
3129 		BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
3130 		level = (int)ref_owner;
3131 		/* FIXME: get real generation */
3132 		generation = 0;
3133 #else
3134 		BUG();
3135 #endif
3136 	}
3137 
3138 	btrfs_release_path(path);
3139 
3140 	BUG_ON(level == -1);
3141 
3142 	block = kmalloc(sizeof(*block), GFP_NOFS);
3143 	if (!block)
3144 		return -ENOMEM;
3145 
3146 	block->bytenr = extent_key->objectid;
3147 	block->key.objectid = extent_key->offset;
3148 	block->key.offset = generation;
3149 	block->level = level;
3150 	block->key_ready = 0;
3151 
3152 	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
3153 	BUG_ON(rb_node);
3154 
3155 	return 0;
3156 }
3157 
3158 /*
3159  * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3160  */
3161 static int __add_tree_block(struct reloc_control *rc,
3162 			    u64 bytenr, u32 blocksize,
3163 			    struct rb_root *blocks)
3164 {
3165 	struct btrfs_path *path;
3166 	struct btrfs_key key;
3167 	int ret;
3168 
3169 	if (tree_block_processed(bytenr, blocksize, rc))
3170 		return 0;
3171 
3172 	if (tree_search(blocks, bytenr))
3173 		return 0;
3174 
3175 	path = btrfs_alloc_path();
3176 	if (!path)
3177 		return -ENOMEM;
3178 
3179 	key.objectid = bytenr;
3180 	key.type = BTRFS_EXTENT_ITEM_KEY;
3181 	key.offset = blocksize;
3182 
3183 	path->search_commit_root = 1;
3184 	path->skip_locking = 1;
3185 	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3186 	if (ret < 0)
3187 		goto out;
3188 	BUG_ON(ret);
3189 
3190 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3191 	ret = add_tree_block(rc, &key, path, blocks);
3192 out:
3193 	btrfs_free_path(path);
3194 	return ret;
3195 }
3196 
3197 /*
3198  * helper to check if the block uses full backrefs for pointers in it
3199  */
3200 static int block_use_full_backref(struct reloc_control *rc,
3201 				  struct extent_buffer *eb)
3202 {
3203 	u64 flags;
3204 	int ret;
3205 
3206 	if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
3207 	    btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
3208 		return 1;
3209 
3210 	ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
3211 				       eb->start, eb->len, NULL, &flags);
3212 	BUG_ON(ret);
3213 
3214 	if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
3215 		ret = 1;
3216 	else
3217 		ret = 0;
3218 	return ret;
3219 }
3220 
3221 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3222 				    struct inode *inode, u64 ino)
3223 {
3224 	struct btrfs_key key;
3225 	struct btrfs_path *path;
3226 	struct btrfs_root *root = fs_info->tree_root;
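	/* 'ino' is only looked up when the caller passes inode == NULL */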
3227 	struct btrfs_trans_handle *trans;
3228 	unsigned long nr;
3229 	int ret = 0;
3230 
3231 	if (inode)
3232 		goto truncate;
3233 
3234 	key.objectid = ino;
3235 	key.type = BTRFS_INODE_ITEM_KEY;
3236 	key.offset = 0;
3237 
3238 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
3239 	if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
3240 		if (inode && !IS_ERR(inode))
3241 			iput(inode);
3242 		return -ENOENT;
3243 	}
3244 
3245 truncate:
3246 	path = btrfs_alloc_path();
3247 	if (!path) {
3248 		ret = -ENOMEM;
3249 		goto out;
3250 	}
3251 
3252 	trans = btrfs_join_transaction(root);
3253 	if (IS_ERR(trans)) {
3254 		btrfs_free_path(path);
3255 		ret = PTR_ERR(trans);
3256 		goto out;
3257 	}
3258 
3259 	ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
3260 
3261 	btrfs_free_path(path);
3262 	nr = trans->blocks_used;
3263 	btrfs_end_transaction(trans, root);
3264 	btrfs_btree_balance_dirty(root, nr);
3265 out:
3266 	iput(inode);
3267 	return ret;
3268 }
3269 
3270 /*
3271  * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
3272  * this function scans the fs tree to find blocks that reference the data extent
3273  */
3274 static int find_data_references(struct reloc_control *rc,
3275 				struct btrfs_key *extent_key,
3276 				struct extent_buffer *leaf,
3277 				struct btrfs_extent_data_ref *ref,
3278 				struct rb_root *blocks)
3279 {
3280 	struct btrfs_path *path;
3281 	struct tree_block *block;
3282 	struct btrfs_root *root;
3283 	struct btrfs_file_extent_item *fi;
3284 	struct rb_node *rb_node;
3285 	struct btrfs_key key;
3286 	u64 ref_root;
3287 	u64 ref_objectid;
3288 	u64 ref_offset;
3289 	u32 ref_count;
3290 	u32 nritems;
3291 	int err = 0;
3292 	int added = 0;
3293 	int counted;
3294 	int ret;
3295 
3296 	ref_root = btrfs_extent_data_ref_root(leaf, ref);
3297 	ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
3298 	ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
3299 	ref_count = btrfs_extent_data_ref_count(leaf, ref);
3300 
3301 	/*
3302 	 * This is an extent belonging to the free space cache, let's just
3303 	 * delete it and redo the search.
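 * (in this kernel the free space cache is the only data rooted in the
 * root tree, so ref_root == BTRFS_ROOT_TREE_OBJECTID identifies it)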
3304 	 */
3305 	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
3306 		ret = delete_block_group_cache(rc->extent_root->fs_info,
3307 					       NULL, ref_objectid);
3308 		if (ret != -ENOENT)
3309 			return ret;
3310 		ret = 0;
3311 	}
3312 
3313 	path = btrfs_alloc_path();
3314 	if (!path)
3315 		return -ENOMEM;
3316 	path->reada = 1;
3317 
3318 	root = read_fs_root(rc->extent_root->fs_info, ref_root);
3319 	if (IS_ERR(root)) {
3320 		err = PTR_ERR(root);
3321 		goto out;
3322 	}
3323 
3324 	key.objectid = ref_objectid;
3325 	key.type = BTRFS_EXTENT_DATA_KEY;
3326 	if (ref_offset > ((u64)-1 << 32))
3327 		key.offset = 0;
3328 	else
3329 		key.offset = ref_offset;
3330 
3331 	path->search_commit_root = 1;
3332 	path->skip_locking = 1;
3333 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3334 	if (ret < 0) {
3335 		err = ret;
3336 		goto out;
3337 	}
3338 
3339 	leaf = path->nodes[0];
3340 	nritems = btrfs_header_nritems(leaf);
3341 	/*
3342 	 * the references in tree blocks that use full backrefs
3343 	 * are not counted in ref_count
3344 	 */
3345 	if (block_use_full_backref(rc, leaf))
3346 		counted = 0;
3347 	else
3348 		counted = 1;
3349 	rb_node = tree_search(blocks, leaf->start);
3350 	if (rb_node) {
3351 		if (counted)
3352 			added = 1;
3353 		else
3354 			path->slots[0] = nritems;
3355 	}
3356 
3357 	while (ref_count > 0) {
3358 		while (path->slots[0] >= nritems) {
3359 			ret = btrfs_next_leaf(root, path);
3360 			if (ret < 0) {
3361 				err = ret;
3362 				goto out;
3363 			}
3364 			if (ret > 0) {
3365 				WARN_ON(1);
3366 				goto out;
3367 			}
3368 
3369 			leaf = path->nodes[0];
3370 			nritems = btrfs_header_nritems(leaf);
3371 			added = 0;
3372 
3373 			if (block_use_full_backref(rc, leaf))
3374 				counted = 0;
3375 			else
3376 				counted = 1;
3377 			rb_node = tree_search(blocks, leaf->start);
3378 			if (rb_node) {
3379 				if (counted)
3380 					added = 1;
3381 				else
3382 					path->slots[0] = nritems;
3383 			}
3384 		}
3385 
3386 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3387 		if (key.objectid != ref_objectid ||
3388 		    key.type != BTRFS_EXTENT_DATA_KEY) {
3389 			WARN_ON(1);
3390 			break;
3391 		}
3392 
3393 		fi = btrfs_item_ptr(leaf, path->slots[0],
3394 				    struct btrfs_file_extent_item);
3395 
3396 		if (btrfs_file_extent_type(leaf, fi) ==
3397 		    BTRFS_FILE_EXTENT_INLINE)
3398 			goto next;
3399 
3400 		if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
3401 		    extent_key->objectid)
3402 			goto next;
3403 
3404 		key.offset -= btrfs_file_extent_offset(leaf, fi);
3405 		if (key.offset != ref_offset)
3406 			goto next;
3407 
3408 		if (counted)
3409 			ref_count--;
3410 		if (added)
3411 			goto next;
3412 
3413 		if (!tree_block_processed(leaf->start, leaf->len, rc)) {
3414 			block = kmalloc(sizeof(*block), GFP_NOFS);
3415 			if (!block) {
3416 				err = -ENOMEM;
3417 				break;
3418 			}
3419 			block->bytenr = leaf->start;
3420 			btrfs_item_key_to_cpu(leaf, &block->key, 0);
3421 			block->level = 0;
3422 			block->key_ready = 1;
3423 			rb_node = tree_insert(blocks, block->bytenr,
3424 					      &block->rb_node);
3425 			BUG_ON(rb_node);
3426 		}
3427 		if (counted)
3428 			added = 1;
3429 		else
3430 			path->slots[0] = nritems;
3431 next:
3432 		path->slots[0]++;
3433 
3434 	}
3435 out:
3436 	btrfs_free_path(path);
3437 	return err;
3438 }
3439 
3440 /*
3441  * helper to find all tree blocks that reference a given data extent
3442  */
3443 static noinline_for_stack
3444 int add_data_references(struct reloc_control *rc,
3445 			struct btrfs_key *extent_key,
3446 			struct btrfs_path *path,
3447 			struct rb_root *blocks)
3448 {
3449 	struct btrfs_key key;
3450 	struct extent_buffer *eb;
3451 	struct btrfs_extent_data_ref *dref;
3452 	struct btrfs_extent_inline_ref *iref;
3453 	unsigned long ptr;
3454 	unsigned long end;
3455 	u32 blocksize =
		btrfs_level_size(rc->extent_root, 0);
3456 	int ret;
3457 	int err = 0;
3458 
3459 	eb = path->nodes[0];
3460 	ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3461 	end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3462 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3463 	if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
3464 		ptr = end;
3465 	else
3466 #endif
3467 		ptr += sizeof(struct btrfs_extent_item);
3468 
3469 	while (ptr < end) {
3470 		iref = (struct btrfs_extent_inline_ref *)ptr;
3471 		key.type = btrfs_extent_inline_ref_type(eb, iref);
3472 		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3473 			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3474 			ret = __add_tree_block(rc, key.offset, blocksize,
3475 					       blocks);
3476 		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3477 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
3478 			ret = find_data_references(rc, extent_key,
3479 						   eb, dref, blocks);
3480 		} else {
3481 			BUG();
3482 		}
3483 		ptr += btrfs_extent_inline_ref_size(key.type);
3484 	}
3485 	WARN_ON(ptr > end);
3486 
3487 	while (1) {
3488 		cond_resched();
3489 		eb = path->nodes[0];
3490 		if (path->slots[0] >= btrfs_header_nritems(eb)) {
3491 			ret = btrfs_next_leaf(rc->extent_root, path);
3492 			if (ret < 0) {
3493 				err = ret;
3494 				break;
3495 			}
3496 			if (ret > 0)
3497 				break;
3498 			eb = path->nodes[0];
3499 		}
3500 
3501 		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3502 		if (key.objectid != extent_key->objectid)
3503 			break;
3504 
3505 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3506 		if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
3507 		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
3508 #else
3509 		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
3510 		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3511 #endif
3512 			ret = __add_tree_block(rc, key.offset, blocksize,
3513 					       blocks);
3514 		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3515 			dref = btrfs_item_ptr(eb, path->slots[0],
3516 					      struct btrfs_extent_data_ref);
3517 			ret = find_data_references(rc, extent_key,
3518 						   eb, dref, blocks);
3519 		} else {
3520 			ret = 0;
3521 		}
3522 		if (ret) {
3523 			err = ret;
3524 			break;
3525 		}
3526 		path->slots[0]++;
3527 	}
3528 	btrfs_release_path(path);
3529 	if (err)
3530 		free_block_list(blocks);
3531 	return err;
3532 }
3533 
3534 /*
3535  * helper to find the next unprocessed extent
3536  */
3537 static noinline_for_stack
3538 int find_next_extent(struct btrfs_trans_handle *trans,
3539 		     struct reloc_control *rc, struct btrfs_path *path,
3540 		     struct btrfs_key *extent_key)
3541 {
3542 	struct btrfs_key key;
3543 	struct extent_buffer *leaf;
3544 	u64 start, end, last;
3545 	int ret;
3546 
3547 	last = rc->block_group->key.objectid + rc->block_group->key.offset;
3548 	while (1) {
3549 		cond_resched();
3550 		if (rc->search_start >= last) {
3551 			ret = 1;
3552 			break;
3553 		}
3554 
3555 		key.objectid = rc->search_start;
3556 		key.type = BTRFS_EXTENT_ITEM_KEY;
3557 		key.offset = 0;
3558 
3559 		path->search_commit_root = 1;
3560 		path->skip_locking = 1;
3561 		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3562 					0, 0);
3563 		if (ret < 0)
3564 			break;
3565 next:
3566 		leaf = path->nodes[0];
3567 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3568 			ret = btrfs_next_leaf(rc->extent_root, path);
3569 			if (ret != 0)
3570 				break;
3571 			leaf = path->nodes[0];
3572 		}
3573 
3574 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3575 		if (key.objectid >= last) {
3576 			ret = 1;
3577 			break;
3578 		}
3579 
3580 		if (key.type != BTRFS_EXTENT_ITEM_KEY ||
3581 		    key.objectid + key.offset <= rc->search_start) {
3582 			path->slots[0]++;
3583 			goto next;
3584 		}
3585 
3586 		ret =
find_first_extent_bit(&rc->processed_blocks, 3587 key.objectid, &start, &end, 3588 EXTENT_DIRTY); 3589 3590 if (ret == 0 && start <= key.objectid) { 3591 btrfs_release_path(path); 3592 rc->search_start = end + 1; 3593 } else { 3594 rc->search_start = key.objectid + key.offset; 3595 memcpy(extent_key, &key, sizeof(key)); 3596 return 0; 3597 } 3598 } 3599 btrfs_release_path(path); 3600 return ret; 3601 } 3602 3603 static void set_reloc_control(struct reloc_control *rc) 3604 { 3605 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3606 3607 mutex_lock(&fs_info->reloc_mutex); 3608 fs_info->reloc_ctl = rc; 3609 mutex_unlock(&fs_info->reloc_mutex); 3610 } 3611 3612 static void unset_reloc_control(struct reloc_control *rc) 3613 { 3614 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3615 3616 mutex_lock(&fs_info->reloc_mutex); 3617 fs_info->reloc_ctl = NULL; 3618 mutex_unlock(&fs_info->reloc_mutex); 3619 } 3620 3621 static int check_extent_flags(u64 flags) 3622 { 3623 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3624 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3625 return 1; 3626 if (!(flags & BTRFS_EXTENT_FLAG_DATA) && 3627 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3628 return 1; 3629 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3630 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 3631 return 1; 3632 return 0; 3633 } 3634 3635 static noinline_for_stack 3636 int prepare_to_relocate(struct reloc_control *rc) 3637 { 3638 struct btrfs_trans_handle *trans; 3639 int ret; 3640 3641 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root); 3642 if (!rc->block_rsv) 3643 return -ENOMEM; 3644 3645 /* 3646 * reserve some space for creating reloc trees. 3647 * btrfs_init_reloc_root will use them when there 3648 * is no reservation in transaction handle. 3649 */ 3650 ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv, 3651 rc->extent_root->nodesize * 256); 3652 if (ret) 3653 return ret; 3654 3655 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3656 rc->search_start = rc->block_group->key.objectid; 3657 rc->extents_found = 0; 3658 rc->nodes_relocated = 0; 3659 rc->merging_rsv_size = 0; 3660 3661 rc->create_reloc_tree = 1; 3662 set_reloc_control(rc); 3663 3664 trans = btrfs_join_transaction(rc->extent_root); 3665 BUG_ON(IS_ERR(trans)); 3666 btrfs_commit_transaction(trans, rc->extent_root); 3667 return 0; 3668 } 3669 3670 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3671 { 3672 struct rb_root blocks = RB_ROOT; 3673 struct btrfs_key key; 3674 struct btrfs_trans_handle *trans = NULL; 3675 struct btrfs_path *path; 3676 struct btrfs_extent_item *ei; 3677 unsigned long nr; 3678 u64 flags; 3679 u32 item_size; 3680 int ret; 3681 int err = 0; 3682 int progress = 0; 3683 3684 path = btrfs_alloc_path(); 3685 if (!path) 3686 return -ENOMEM; 3687 path->reada = 1; 3688 3689 ret = prepare_to_relocate(rc); 3690 if (ret) { 3691 err = ret; 3692 goto out_free; 3693 } 3694 3695 while (1) { 3696 progress++; 3697 trans = btrfs_start_transaction(rc->extent_root, 0); 3698 BUG_ON(IS_ERR(trans)); 3699 restart: 3700 if (update_backref_cache(trans, &rc->backref_cache)) { 3701 btrfs_end_transaction(trans, rc->extent_root); 3702 continue; 3703 } 3704 3705 ret = find_next_extent(trans, rc, path, &key); 3706 if (ret < 0) 3707 err = ret; 3708 if (ret != 0) 3709 break; 3710 3711 rc->extents_found++; 3712 3713 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3714 struct btrfs_extent_item); 3715 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 3716 if (item_size >= sizeof(*ei)) { 3717 flags = 
btrfs_extent_flags(path->nodes[0], ei); 3718 ret = check_extent_flags(flags); 3719 BUG_ON(ret); 3720 3721 } else { 3722 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3723 u64 ref_owner; 3724 int path_change = 0; 3725 3726 BUG_ON(item_size != 3727 sizeof(struct btrfs_extent_item_v0)); 3728 ret = get_ref_objectid_v0(rc, path, &key, &ref_owner, 3729 &path_change); 3730 if (ref_owner < BTRFS_FIRST_FREE_OBJECTID) 3731 flags = BTRFS_EXTENT_FLAG_TREE_BLOCK; 3732 else 3733 flags = BTRFS_EXTENT_FLAG_DATA; 3734 3735 if (path_change) { 3736 btrfs_release_path(path); 3737 3738 path->search_commit_root = 1; 3739 path->skip_locking = 1; 3740 ret = btrfs_search_slot(NULL, rc->extent_root, 3741 &key, path, 0, 0); 3742 if (ret < 0) { 3743 err = ret; 3744 break; 3745 } 3746 BUG_ON(ret > 0); 3747 } 3748 #else 3749 BUG(); 3750 #endif 3751 } 3752 3753 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 3754 ret = add_tree_block(rc, &key, path, &blocks); 3755 } else if (rc->stage == UPDATE_DATA_PTRS && 3756 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3757 ret = add_data_references(rc, &key, path, &blocks); 3758 } else { 3759 btrfs_release_path(path); 3760 ret = 0; 3761 } 3762 if (ret < 0) { 3763 err = ret; 3764 break; 3765 } 3766 3767 if (!RB_EMPTY_ROOT(&blocks)) { 3768 ret = relocate_tree_blocks(trans, rc, &blocks); 3769 if (ret < 0) { 3770 if (ret != -EAGAIN) { 3771 err = ret; 3772 break; 3773 } 3774 rc->extents_found--; 3775 rc->search_start = key.objectid; 3776 } 3777 } 3778 3779 ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5); 3780 if (ret < 0) { 3781 if (ret != -EAGAIN) { 3782 err = ret; 3783 WARN_ON(1); 3784 break; 3785 } 3786 rc->commit_transaction = 1; 3787 } 3788 3789 if (rc->commit_transaction) { 3790 rc->commit_transaction = 0; 3791 ret = btrfs_commit_transaction(trans, rc->extent_root); 3792 BUG_ON(ret); 3793 } else { 3794 nr = trans->blocks_used; 3795 btrfs_end_transaction_throttle(trans, rc->extent_root); 3796 btrfs_btree_balance_dirty(rc->extent_root, nr); 3797 } 3798 trans = NULL; 3799 3800 if (rc->stage == MOVE_DATA_EXTENTS && 3801 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3802 rc->found_file_extent = 1; 3803 ret = relocate_data_extent(rc->data_inode, 3804 &key, &rc->cluster); 3805 if (ret < 0) { 3806 err = ret; 3807 break; 3808 } 3809 } 3810 } 3811 if (trans && progress && err == -ENOSPC) { 3812 ret = btrfs_force_chunk_alloc(trans, rc->extent_root, 3813 rc->block_group->flags); 3814 if (ret == 0) { 3815 err = 0; 3816 progress = 0; 3817 goto restart; 3818 } 3819 } 3820 3821 btrfs_release_path(path); 3822 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, 3823 GFP_NOFS); 3824 3825 if (trans) { 3826 nr = trans->blocks_used; 3827 btrfs_end_transaction_throttle(trans, rc->extent_root); 3828 btrfs_btree_balance_dirty(rc->extent_root, nr); 3829 } 3830 3831 if (!err) { 3832 ret = relocate_file_extent_cluster(rc->data_inode, 3833 &rc->cluster); 3834 if (ret < 0) 3835 err = ret; 3836 } 3837 3838 rc->create_reloc_tree = 0; 3839 set_reloc_control(rc); 3840 3841 backref_cache_cleanup(&rc->backref_cache); 3842 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 3843 3844 err = prepare_to_merge(rc, err); 3845 3846 merge_reloc_roots(rc); 3847 3848 rc->merge_reloc_tree = 0; 3849 unset_reloc_control(rc); 3850 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 3851 3852 /* get rid of pinned extents */ 3853 trans = btrfs_join_transaction(rc->extent_root); 3854 if (IS_ERR(trans)) 3855 err = PTR_ERR(trans); 3856 else 3857 btrfs_commit_transaction(trans, rc->extent_root); 3858 out_free: 
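	/* rc->block_rsv was allocated in prepare_to_relocate() */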
3859 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); 3860 btrfs_free_path(path); 3861 return err; 3862 } 3863 3864 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 3865 struct btrfs_root *root, u64 objectid) 3866 { 3867 struct btrfs_path *path; 3868 struct btrfs_inode_item *item; 3869 struct extent_buffer *leaf; 3870 int ret; 3871 3872 path = btrfs_alloc_path(); 3873 if (!path) 3874 return -ENOMEM; 3875 3876 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 3877 if (ret) 3878 goto out; 3879 3880 leaf = path->nodes[0]; 3881 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 3882 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item)); 3883 btrfs_set_inode_generation(leaf, item, 1); 3884 btrfs_set_inode_size(leaf, item, 0); 3885 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 3886 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 3887 BTRFS_INODE_PREALLOC); 3888 btrfs_mark_buffer_dirty(leaf); 3889 btrfs_release_path(path); 3890 out: 3891 btrfs_free_path(path); 3892 return ret; 3893 } 3894 3895 /* 3896 * helper to create inode for data relocation. 3897 * the inode is in data relocation tree and its link count is 0 3898 */ 3899 static noinline_for_stack 3900 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 3901 struct btrfs_block_group_cache *group) 3902 { 3903 struct inode *inode = NULL; 3904 struct btrfs_trans_handle *trans; 3905 struct btrfs_root *root; 3906 struct btrfs_key key; 3907 unsigned long nr; 3908 u64 objectid = BTRFS_FIRST_FREE_OBJECTID; 3909 int err = 0; 3910 3911 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); 3912 if (IS_ERR(root)) 3913 return ERR_CAST(root); 3914 3915 trans = btrfs_start_transaction(root, 6); 3916 if (IS_ERR(trans)) 3917 return ERR_CAST(trans); 3918 3919 err = btrfs_find_free_objectid(root, &objectid); 3920 if (err) 3921 goto out; 3922 3923 err = __insert_orphan_inode(trans, root, objectid); 3924 BUG_ON(err); 3925 3926 key.objectid = objectid; 3927 key.type = BTRFS_INODE_ITEM_KEY; 3928 key.offset = 0; 3929 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); 3930 BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); 3931 BTRFS_I(inode)->index_cnt = group->key.objectid; 3932 3933 err = btrfs_orphan_add(trans, inode); 3934 out: 3935 nr = trans->blocks_used; 3936 btrfs_end_transaction(trans, root); 3937 btrfs_btree_balance_dirty(root, nr); 3938 if (err) { 3939 if (inode) 3940 iput(inode); 3941 inode = ERR_PTR(err); 3942 } 3943 return inode; 3944 } 3945 3946 static struct reloc_control *alloc_reloc_control(void) 3947 { 3948 struct reloc_control *rc; 3949 3950 rc = kzalloc(sizeof(*rc), GFP_NOFS); 3951 if (!rc) 3952 return NULL; 3953 3954 INIT_LIST_HEAD(&rc->reloc_roots); 3955 backref_cache_init(&rc->backref_cache); 3956 mapping_tree_init(&rc->reloc_root_tree); 3957 extent_io_tree_init(&rc->processed_blocks, NULL); 3958 return rc; 3959 } 3960 3961 /* 3962 * function to relocate all extents in a block group. 
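 * the group is marked read-only first, so all new allocations (COWs,
 * preallocated data) land in other block groups.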
3963 */ 3964 int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) 3965 { 3966 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3967 struct reloc_control *rc; 3968 struct inode *inode; 3969 struct btrfs_path *path; 3970 int ret; 3971 int rw = 0; 3972 int err = 0; 3973 3974 rc = alloc_reloc_control(); 3975 if (!rc) 3976 return -ENOMEM; 3977 3978 rc->extent_root = extent_root; 3979 3980 rc->block_group = btrfs_lookup_block_group(fs_info, group_start); 3981 BUG_ON(!rc->block_group); 3982 3983 if (!rc->block_group->ro) { 3984 ret = btrfs_set_block_group_ro(extent_root, rc->block_group); 3985 if (ret) { 3986 err = ret; 3987 goto out; 3988 } 3989 rw = 1; 3990 } 3991 3992 path = btrfs_alloc_path(); 3993 if (!path) { 3994 err = -ENOMEM; 3995 goto out; 3996 } 3997 3998 inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group, 3999 path); 4000 btrfs_free_path(path); 4001 4002 if (!IS_ERR(inode)) 4003 ret = delete_block_group_cache(fs_info, inode, 0); 4004 else 4005 ret = PTR_ERR(inode); 4006 4007 if (ret && ret != -ENOENT) { 4008 err = ret; 4009 goto out; 4010 } 4011 4012 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); 4013 if (IS_ERR(rc->data_inode)) { 4014 err = PTR_ERR(rc->data_inode); 4015 rc->data_inode = NULL; 4016 goto out; 4017 } 4018 4019 printk(KERN_INFO "btrfs: relocating block group %llu flags %llu\n", 4020 (unsigned long long)rc->block_group->key.objectid, 4021 (unsigned long long)rc->block_group->flags); 4022 4023 btrfs_start_delalloc_inodes(fs_info->tree_root, 0); 4024 btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0); 4025 4026 while (1) { 4027 mutex_lock(&fs_info->cleaner_mutex); 4028 4029 btrfs_clean_old_snapshots(fs_info->tree_root); 4030 ret = relocate_block_group(rc); 4031 4032 mutex_unlock(&fs_info->cleaner_mutex); 4033 if (ret < 0) { 4034 err = ret; 4035 goto out; 4036 } 4037 4038 if (rc->extents_found == 0) 4039 break; 4040 4041 printk(KERN_INFO "btrfs: found %llu extents\n", 4042 (unsigned long long)rc->extents_found); 4043 4044 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { 4045 btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1); 4046 invalidate_mapping_pages(rc->data_inode->i_mapping, 4047 0, -1); 4048 rc->stage = UPDATE_DATA_PTRS; 4049 } 4050 } 4051 4052 filemap_write_and_wait_range(fs_info->btree_inode->i_mapping, 4053 rc->block_group->key.objectid, 4054 rc->block_group->key.objectid + 4055 rc->block_group->key.offset - 1); 4056 4057 WARN_ON(rc->block_group->pinned > 0); 4058 WARN_ON(rc->block_group->reserved > 0); 4059 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); 4060 out: 4061 if (err && rw) 4062 btrfs_set_block_group_rw(extent_root, rc->block_group); 4063 iput(rc->data_inode); 4064 btrfs_put_block_group(rc->block_group); 4065 kfree(rc); 4066 return err; 4067 } 4068 4069 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 4070 { 4071 struct btrfs_trans_handle *trans; 4072 int ret; 4073 4074 trans = btrfs_start_transaction(root->fs_info->tree_root, 0); 4075 BUG_ON(IS_ERR(trans)); 4076 4077 memset(&root->root_item.drop_progress, 0, 4078 sizeof(root->root_item.drop_progress)); 4079 root->root_item.drop_level = 0; 4080 btrfs_set_root_refs(&root->root_item, 0); 4081 ret = btrfs_update_root(trans, root->fs_info->tree_root, 4082 &root->root_key, &root->root_item); 4083 BUG_ON(ret); 4084 4085 ret = btrfs_end_transaction(trans, root->fs_info->tree_root); 4086 BUG_ON(ret); 4087 return 0; 4088 } 4089 4090 /* 4091 * recover relocation interrupted by system crash. 
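 * (called during mount)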
4092 * 4093 * this function resumes merging reloc trees with corresponding fs trees. 4094 * this is important for keeping the sharing of tree blocks 4095 */ 4096 int btrfs_recover_relocation(struct btrfs_root *root) 4097 { 4098 LIST_HEAD(reloc_roots); 4099 struct btrfs_key key; 4100 struct btrfs_root *fs_root; 4101 struct btrfs_root *reloc_root; 4102 struct btrfs_path *path; 4103 struct extent_buffer *leaf; 4104 struct reloc_control *rc = NULL; 4105 struct btrfs_trans_handle *trans; 4106 int ret; 4107 int err = 0; 4108 4109 path = btrfs_alloc_path(); 4110 if (!path) 4111 return -ENOMEM; 4112 path->reada = -1; 4113 4114 key.objectid = BTRFS_TREE_RELOC_OBJECTID; 4115 key.type = BTRFS_ROOT_ITEM_KEY; 4116 key.offset = (u64)-1; 4117 4118 while (1) { 4119 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, 4120 path, 0, 0); 4121 if (ret < 0) { 4122 err = ret; 4123 goto out; 4124 } 4125 if (ret > 0) { 4126 if (path->slots[0] == 0) 4127 break; 4128 path->slots[0]--; 4129 } 4130 leaf = path->nodes[0]; 4131 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4132 btrfs_release_path(path); 4133 4134 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || 4135 key.type != BTRFS_ROOT_ITEM_KEY) 4136 break; 4137 4138 reloc_root = btrfs_read_fs_root_no_radix(root, &key); 4139 if (IS_ERR(reloc_root)) { 4140 err = PTR_ERR(reloc_root); 4141 goto out; 4142 } 4143 4144 list_add(&reloc_root->root_list, &reloc_roots); 4145 4146 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 4147 fs_root = read_fs_root(root->fs_info, 4148 reloc_root->root_key.offset); 4149 if (IS_ERR(fs_root)) { 4150 ret = PTR_ERR(fs_root); 4151 if (ret != -ENOENT) { 4152 err = ret; 4153 goto out; 4154 } 4155 mark_garbage_root(reloc_root); 4156 } 4157 } 4158 4159 if (key.offset == 0) 4160 break; 4161 4162 key.offset--; 4163 } 4164 btrfs_release_path(path); 4165 4166 if (list_empty(&reloc_roots)) 4167 goto out; 4168 4169 rc = alloc_reloc_control(); 4170 if (!rc) { 4171 err = -ENOMEM; 4172 goto out; 4173 } 4174 4175 rc->extent_root = root->fs_info->extent_root; 4176 4177 set_reloc_control(rc); 4178 4179 trans = btrfs_join_transaction(rc->extent_root); 4180 if (IS_ERR(trans)) { 4181 unset_reloc_control(rc); 4182 err = PTR_ERR(trans); 4183 goto out_free; 4184 } 4185 4186 rc->merge_reloc_tree = 1; 4187 4188 while (!list_empty(&reloc_roots)) { 4189 reloc_root = list_entry(reloc_roots.next, 4190 struct btrfs_root, root_list); 4191 list_del(&reloc_root->root_list); 4192 4193 if (btrfs_root_refs(&reloc_root->root_item) == 0) { 4194 list_add_tail(&reloc_root->root_list, 4195 &rc->reloc_roots); 4196 continue; 4197 } 4198 4199 fs_root = read_fs_root(root->fs_info, 4200 reloc_root->root_key.offset); 4201 BUG_ON(IS_ERR(fs_root)); 4202 4203 __add_reloc_root(reloc_root); 4204 fs_root->reloc_root = reloc_root; 4205 } 4206 4207 btrfs_commit_transaction(trans, rc->extent_root); 4208 4209 merge_reloc_roots(rc); 4210 4211 unset_reloc_control(rc); 4212 4213 trans = btrfs_join_transaction(rc->extent_root); 4214 if (IS_ERR(trans)) 4215 err = PTR_ERR(trans); 4216 else 4217 btrfs_commit_transaction(trans, rc->extent_root); 4218 out_free: 4219 kfree(rc); 4220 out: 4221 while (!list_empty(&reloc_roots)) { 4222 reloc_root = list_entry(reloc_roots.next, 4223 struct btrfs_root, root_list); 4224 list_del(&reloc_root->root_list); 4225 free_extent_buffer(reloc_root->node); 4226 free_extent_buffer(reloc_root->commit_root); 4227 kfree(reloc_root); 4228 } 4229 btrfs_free_path(path); 4230 4231 if (err == 0) { 4232 /* cleanup orphan inode in data relocation tree */ 4233 fs_root 
= read_fs_root(root->fs_info, 4234 BTRFS_DATA_RELOC_TREE_OBJECTID); 4235 if (IS_ERR(fs_root)) 4236 err = PTR_ERR(fs_root); 4237 else 4238 err = btrfs_orphan_cleanup(fs_root); 4239 } 4240 return err; 4241 } 4242 4243 /* 4244 * helper to add ordered checksum for data relocation. 4245 * 4246 * cloning checksum properly handles the nodatasum extents. 4247 * it also saves CPU time to re-calculate the checksum. 4248 */ 4249 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) 4250 { 4251 struct btrfs_ordered_sum *sums; 4252 struct btrfs_sector_sum *sector_sum; 4253 struct btrfs_ordered_extent *ordered; 4254 struct btrfs_root *root = BTRFS_I(inode)->root; 4255 size_t offset; 4256 int ret; 4257 u64 disk_bytenr; 4258 LIST_HEAD(list); 4259 4260 ordered = btrfs_lookup_ordered_extent(inode, file_pos); 4261 BUG_ON(ordered->file_offset != file_pos || ordered->len != len); 4262 4263 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; 4264 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, 4265 disk_bytenr + len - 1, &list, 0); 4266 4267 while (!list_empty(&list)) { 4268 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 4269 list_del_init(&sums->list); 4270 4271 sector_sum = sums->sums; 4272 sums->bytenr = ordered->start; 4273 4274 offset = 0; 4275 while (offset < sums->len) { 4276 sector_sum->bytenr += ordered->start - disk_bytenr; 4277 sector_sum++; 4278 offset += root->sectorsize; 4279 } 4280 4281 btrfs_add_ordered_sum(inode, ordered, sums); 4282 } 4283 btrfs_put_ordered_extent(ordered); 4284 return ret; 4285 } 4286 4287 void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 4288 struct btrfs_root *root, struct extent_buffer *buf, 4289 struct extent_buffer *cow) 4290 { 4291 struct reloc_control *rc; 4292 struct backref_node *node; 4293 int first_cow = 0; 4294 int level; 4295 int ret; 4296 4297 rc = root->fs_info->reloc_ctl; 4298 if (!rc) 4299 return; 4300 4301 BUG_ON(rc->stage == UPDATE_DATA_PTRS && 4302 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); 4303 4304 level = btrfs_header_level(buf); 4305 if (btrfs_header_generation(buf) <= 4306 btrfs_root_last_snapshot(&root->root_item)) 4307 first_cow = 1; 4308 4309 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID && 4310 rc->create_reloc_tree) { 4311 WARN_ON(!first_cow && level == 0); 4312 4313 node = rc->backref_cache.path[level]; 4314 BUG_ON(node->bytenr != buf->start && 4315 node->new_bytenr != buf->start); 4316 4317 drop_node_buffer(node); 4318 extent_buffer_get(cow); 4319 node->eb = cow; 4320 node->new_bytenr = cow->start; 4321 4322 if (!node->pending) { 4323 list_move_tail(&node->list, 4324 &rc->backref_cache.pending[level]); 4325 node->pending = 1; 4326 } 4327 4328 if (first_cow) 4329 __mark_block_processed(rc, node); 4330 4331 if (first_cow && level > 0) 4332 rc->nodes_relocated += buf->len; 4333 } 4334 4335 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) { 4336 ret = replace_file_extents(trans, rc, root, cow); 4337 BUG_ON(ret); 4338 } 4339 } 4340 4341 /* 4342 * called before creating snapshot. 
it calculates the metadata reservation
4343  * required for relocating tree blocks in the snapshot
4344  */
4345 void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
4346 			      struct btrfs_pending_snapshot *pending,
4347 			      u64 *bytes_to_reserve)
4348 {
4349 	struct btrfs_root *root;
4350 	struct reloc_control *rc;
4351 
4352 	root = pending->root;
4353 	if (!root->reloc_root)
4354 		return;
4355 
4356 	rc = root->fs_info->reloc_ctl;
4357 	if (!rc->merge_reloc_tree)
4358 		return;
4359 
4360 	root = root->reloc_root;
4361 	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4362 	/*
4363 	 * relocation is in the stage of merging trees. the space
4364 	 * used by merging a reloc tree is twice the size of
4365 	 * relocated tree nodes in the worst case. half for cowing
4366 	 * the reloc tree, half for cowing the fs tree. the space
4367 	 * used by cowing the reloc tree will be freed after the
4368 	 * tree is dropped. if we create a snapshot, cowing the fs
4369 	 * tree may use more space than it frees, so we need to
4370 	 * reserve extra space.
4371 	 */
4372 	*bytes_to_reserve += rc->nodes_relocated;
4373 }
4374 
4375 /*
4376  * called after snapshot is created. migrate block reservation
4377  * and create reloc root for the newly created snapshot
4378  */
4379 void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4380 			       struct btrfs_pending_snapshot *pending)
4381 {
4382 	struct btrfs_root *root = pending->root;
4383 	struct btrfs_root *reloc_root;
4384 	struct btrfs_root *new_root;
4385 	struct reloc_control *rc;
4386 	int ret;
4387 
4388 	if (!root->reloc_root)
4389 		return;
4390 
4391 	rc = root->fs_info->reloc_ctl;
4392 	rc->merging_rsv_size += rc->nodes_relocated;
4393 
4394 	if (rc->merge_reloc_tree) {
4395 		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4396 					      rc->block_rsv,
4397 					      rc->nodes_relocated);
4398 		BUG_ON(ret);
4399 	}
4400 
4401 	new_root = pending->snap;
4402 	reloc_root = create_reloc_root(trans, root->reloc_root,
4403 				       new_root->root_key.objectid);
4404 
4405 	__add_reloc_root(reloc_root);
4406 	new_root->reloc_root = reloc_root;
4407 
4408 	if (rc->create_reloc_tree) {
4409 		ret = clone_backref_node(trans, rc, root, reloc_root);
4410 		BUG_ON(ret);
4411 	}
4412 }
4413 
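
/*
 * overview of the entry points exported by this file (a sketch of the
 * call sites, not code compiled here):
 *
 *	btrfs_relocate_block_group(extent_root, group_start);
 *		called from the chunk relocation code to empty one block group
 *	btrfs_recover_relocation(root);
 *		called during mount to resume an interrupted relocation
 *	btrfs_reloc_clone_csums(), btrfs_reloc_cow_block(),
 *	btrfs_reloc_pre_snapshot(), btrfs_reloc_post_snapshot();
 *		hooks called from the ordered I/O, COW and snapshot paths
 */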