/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represents a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of the tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been COWed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represents a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER 0
#define UPPER 1
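
/*
 * Note on edge indexing: an edge connects a child block (node[LOWER])
 * to a parent block (node[UPPER]).  list[LOWER] links the edge into
 * the child's ->upper list and list[UPPER] links it into the parent's
 * ->lower list, so the same index selects both the endpoint and the
 * list head on that endpoint.  Walking from a node to one of its
 * parents thus looks like this (illustrative sketch; see
 * walk_up_backref() below for the real use):
 *
 *	edge = list_entry(node->upper.next, struct backref_edge,
 *			  list[LOWER]);
 *	parent = edge->node[UPPER];
 */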

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been COWed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been COWed in the current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represents a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
	unsigned int commit_transaction:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS 0
#define UPDATE_DATA_PTRS 1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}
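
/*
 * The allocators below keep nr_nodes/nr_edges in sync so that
 * backref_cache_cleanup() can assert that draining the detached and
 * leaf lists really released every node and edge in the cache.
 */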

static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

/*
 * walk up backref nodes until we reach a node that represents a
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}
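
/*
 * drop_backref_node() above requires that all parent edges are
 * already gone; remove_backref_node() below is the full-service
 * variant that first detaches the node from its parents, turning a
 * parent into a leaf (or dropping it too, if it isn't in the rb tree)
 * as a side effect.
 */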

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	BUG_ON(rb_node);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}

static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!root->ref_cows)
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}
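
/*
 * The last_snapshot test above works because create_reloc_root()
 * (below) leaves a freshly created reloc root with last_snapshot set
 * to transid - 1 of the creating transaction.  A match against the
 * running transaction therefore means the reloc tree was created in
 * the current transaction and commit-root backref lookups cannot see
 * it yet, so the fs root must not be ignored.
 */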

/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
				       u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &key);
}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (root->ref_cows &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif

static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}

	bi = (struct btrfs_tree_block_info *)(ei + 1);
	*ptr = (unsigned long)(bi + 1);
	*end = (unsigned long)ei + item_size;
	return 0;
}
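
/*
 * Layout assumed by find_inline_backref() for a tree block extent
 * item in the non-V0 format:
 *
 *	[ btrfs_extent_item | btrfs_tree_block_info | inline refs ... ]
 *	^ei                  ^bi                     ^*ptr         ^*end
 *
 * The caller iterates from *ptr to *end, advancing by
 * btrfs_extent_inline_ref_size() per reference.
 */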

/*
 * build backref tree for a given tree block. the root of the backref
 * tree corresponds to the tree block, leaves of the backref tree
 * correspond to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given
 * block to find upper level blocks that reference the block, and then
 * check backrefs of these upper level blocks recursively. the
 * recursion stops when a tree root is reached or backrefs for the
 * block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know
 * backrefs for all upper level blocks that directly/indirectly
 * reference the block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(!ret || !path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		BUG_ON(!list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		BUG_ON(!list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we
		 * need to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}
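
		/*
		 * Dispatch on the reference type from here on: a
		 * SHARED_BLOCK_REF's offset is the parent block's
		 * bytenr, so the parent backref node can be created
		 * (or found in the cache) directly, while a
		 * TREE_BLOCK_REF's offset only names the owning root,
		 * which has to be read and searched to locate the
		 * parent block.
		 */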

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backref of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				BUG_ON(!root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block
				 * aren't cached, add the block to the
				 * pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!root->ref_cows)
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			BUG_ON(btrfs_root_bytenr(&root->root_item) !=
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;
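
		/*
		 * note: setting path2->lowest_level to cur->level + 1
		 * below makes btrfs_search_slot() stop at the parent
		 * level, so path2->nodes[level] ends up being the
		 * upper block whose slot points at cur->bytenr;
		 * node_key (a key inside the block being relocated)
		 * steers the search down the right branch.
		 */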

		/*
		 * search the tree to find upper level blocks that
		 * reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
			cur->bytenr);

		lower = cur;
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!root->ref_cows)
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to the pending list if
				 * we need to check its backrefs. only
				 * blocks at 'cur->level + 1' are added
				 * to the tail of the pending list. this
				 * guarantees we check backrefs from
				 * lower level blocks to upper level
				 * blocks.
				 */
				if (!upper->checked &&
				    level == cur->level + 1) {
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else
					INIT_LIST_HEAD(&edge->list[UPPER]);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(root, path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(rc->extent_root, path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}
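
	/*
	 * At this point the discovery pass is complete: every block on
	 * a path from the target block up to a tree root has a backref
	 * node, and the pending list has been drained.  The second
	 * pass below wires the nodes together and publishes the
	 * non-COW-only ones in the cache's rb tree.
	 */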

	/*
	 * everything went well, connect backref nodes and insert backref
	 * nodes into the cache.
	 */
	BUG_ON(!node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		BUG_ON(rb_node);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		BUG_ON(!upper->checked);
		BUG_ON(cowonly != upper->cowonly);
		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			BUG_ON(rb_node);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		BUG_ON(!list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, upper);
			list_del_init(&lower->upper);
		}
		upper = node;
		INIT_LIST_HEAD(&list);
		while (upper) {
			if (RB_EMPTY_NODE(&upper->rb_node)) {
				list_splice_tail(&upper->upper, &list);
				free_backref_node(cache, upper);
			}

			if (list_empty(&list))
				break;

			edge = list_entry(list.next, struct backref_edge,
					  list[LOWER]);
			list_del(&edge->list[LOWER]);
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);
		}
		return ERR_PTR(err);
	}
	BUG_ON(node && node->detached);
	return node;
}
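
/*
 * A typical caller pattern (sketch only; the real callers appear
 * later in this file, when individual tree blocks are relocated):
 *
 *	node = build_backref_tree(rc, &block->key, block->level,
 *				  block->bytenr);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	// walk node->upper edges to find and update referencing blocks
 */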

/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	BUG_ON(rb_node);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __add_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	BUG_ON(!node);

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	BUG_ON(rb_node);

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
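
/*
 * Note the asymmetry between the two helpers: __add_reloc_root()
 * indexes the mapping by the reloc root's current node (root->node),
 * while __update_reloc_root() below looks the mapping up by
 * root->commit_root, since by the time it runs the root node may
 * already have been COWed in the current transaction.
 */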

/*
 * helper to update/delete the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, int del)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	BUG_ON((struct btrfs_root *)node->data != root);

	if (!del) {
		spin_lock(&rc->reloc_root_tree.lock);
		node->bytenr = root->node->start;
		rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
				      node->bytenr, &node->rb_node);
		spin_unlock(&rc->reloc_root_tree.lock);
		BUG_ON(rb_node);
	} else {
		list_del_init(&root->root_list);
		kfree(node);
	}
	return 0;
}

static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 objectid)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);

		btrfs_set_root_last_snapshot(&root->root_item,
					     trans->transid - 1);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have the RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}
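
/*
 * Reloc roots all share objectid BTRFS_TREE_RELOC_OBJECTID and are
 * distinguished by root_key.offset, which holds the objectid of the
 * fs tree they mirror; that is why read_fs_root() can recover the fs
 * tree from reloc_root->root_key.offset elsewhere in this file.
 */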

/*
 * create reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	int clear_rsv = 0;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->block_rsv) {
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = NULL;

	__add_reloc_root(reloc_root);
	root->reloc_root = reloc_root;
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int del = 0;
	int ret;

	if (!root->reloc_root)
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (root->fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		root->reloc_root = NULL;
		del = 1;
	}

	__update_reloc_root(reloc_root, del);

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, root->fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);
	return 0;
}

/*
 * helper to find the first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < entry->vfs_inode.i_ino)
			node = node->rb_left;
		else if (objectid > entry->vfs_inode.i_ino)
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= entry->vfs_inode.i_ino) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = entry->vfs_inode.i_ino + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}

static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}
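
/*
 * get_new_location() below relies on a convention established when
 * the relocation data inode is set up elsewhere in this file:
 * BTRFS_I(reloc_inode)->index_cnt holds the start bytenr of the
 * block group being relocated, so 'bytenr - index_cnt' is the file
 * offset at which the data for 'bytenr' was copied into the inode.
 */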

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
				       bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = 1;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && inode->i_ino < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && inode->i_ino == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    root->sectorsize));
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end,
						      GFP_NOFS);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(inode, key.offset, end,
							1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, GFP_NOFS);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret > 0) {
			WARN_ON(1);
			continue;
		}
		BUG_ON(ret < 0);

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset);
		BUG_ON(ret);
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return 0;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = btrfs_level_size(dest, level - 1);
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (new_bytenr > 0 && new_bytenr == old_bytenr) {
			WARN_ON(1);
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(dest, old_bytenr, blocksize,
					     old_ptr_gen);
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(src, path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
					   path->nodes[level]->start,
					   src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
					   0, dest->root_key.objectid, level - 1,
					   0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
					path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
					0, dest->root_key.objectid, level - 1,
					0);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}

/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}
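
/*
 * Both reloc tree walkers prune on block pointer generation: a
 * pointer whose generation is <= last_snapshot leads into a subtree
 * the reloc tree still shares with the fs tree (nothing was relocated
 * under it), so that subtree can be skipped entirely.
 */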

/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 blocksize;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		blocksize = btrfs_level_size(root, i - 1);
		eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}

/*
 * invalidate extent cache for file extents whose keys are in the
 * range of [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;

		if (inode->i_ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = inode->i_ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == inode->i_ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, root->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == inode->i_ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
		btrfs_drop_extent_cache(inode, start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}
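
/*
 * merge_reloc_root() below runs as a series of small transactions:
 * each iteration reserves roughly two tree-heights' worth of nodes
 * (enough for one replace_path() pass, which COWs a path in both the
 * fs tree and the reloc tree), and a failed reservation (-EAGAIN)
 * triggers a transaction commit before retrying.  Progress is
 * persisted in drop_progress so an interrupted merge can resume.
 */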

/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	LIST_HEAD(inode_list);
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long nr;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		BUG_ON(IS_ERR(trans));
		trans->block_rsv = rc->block_rsv;

		ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
					    min_reserved, 0);
		if (ret) {
			BUG_ON(ret != -EAGAIN);
			ret = btrfs_commit_transaction(trans, root);
			BUG_ON(ret);
			continue;
		}

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		nr = trans->blocks_used;
		btrfs_end_transaction_throttle(trans, root);

		btrfs_btree_balance_dirty(root, nr);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to
	 * be relocated and that block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);

	if (err == 0) {
		memset(&root_item->drop_progress, 0,
		       sizeof(root_item->drop_progress));
		root_item->drop_level = 0;
		btrfs_set_root_refs(root_item, 0);
		btrfs_update_reloc_root(trans, root);
	}

	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);

	btrfs_btree_balance_dirty(root, nr);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return err;
}

static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&root->fs_info->trans_mutex);
	rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&root->fs_info->trans_mutex);
again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(NULL, root, rc->block_rsv,
					  num_bytes);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root, 1);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans, rc->extent_root);
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = read_fs_root(reloc_root->fs_info,
				    reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);

		list_add(&reloc_root->root_list, &reloc_roots);
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		btrfs_commit_transaction(trans, rc->extent_root);
	else
		btrfs_end_transaction(trans, rc->extent_root);
	return err;
}

static noinline_for_stack
int merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret;
again:
	root = rc->extent_root;
	mutex_lock(&root->fs_info->trans_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&root->fs_info->trans_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			root = read_fs_root(reloc_root->fs_info,
					    reloc_root->root_key.offset);
			BUG_ON(IS_ERR(root));
			BUG_ON(root->reloc_root != reloc_root);
2223 2224 ret = merge_reloc_root(rc, root); 2225 BUG_ON(ret); 2226 } else { 2227 list_del_init(&reloc_root->root_list); 2228 } 2229 btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0); 2230 } 2231 2232 if (found) { 2233 found = 0; 2234 goto again; 2235 } 2236 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2237 return 0; 2238 } 2239 2240 static void free_block_list(struct rb_root *blocks) 2241 { 2242 struct tree_block *block; 2243 struct rb_node *rb_node; 2244 while ((rb_node = rb_first(blocks))) { 2245 block = rb_entry(rb_node, struct tree_block, rb_node); 2246 rb_erase(rb_node, blocks); 2247 kfree(block); 2248 } 2249 } 2250 2251 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, 2252 struct btrfs_root *reloc_root) 2253 { 2254 struct btrfs_root *root; 2255 2256 if (reloc_root->last_trans == trans->transid) 2257 return 0; 2258 2259 root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset); 2260 BUG_ON(IS_ERR(root)); 2261 BUG_ON(root->reloc_root != reloc_root); 2262 2263 return btrfs_record_root_in_trans(trans, root); 2264 } 2265 2266 static noinline_for_stack 2267 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2268 struct reloc_control *rc, 2269 struct backref_node *node, 2270 struct backref_edge *edges[], int *nr) 2271 { 2272 struct backref_node *next; 2273 struct btrfs_root *root; 2274 int index = 0; 2275 2276 next = node; 2277 while (1) { 2278 cond_resched(); 2279 next = walk_up_backref(next, edges, &index); 2280 root = next->root; 2281 BUG_ON(!root); 2282 BUG_ON(!root->ref_cows); 2283 2284 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2285 record_reloc_root_in_trans(trans, root); 2286 break; 2287 } 2288 2289 btrfs_record_root_in_trans(trans, root); 2290 root = root->reloc_root; 2291 2292 if (next->new_bytenr != root->node->start) { 2293 BUG_ON(next->new_bytenr); 2294 BUG_ON(!list_empty(&next->list)); 2295 next->new_bytenr = root->node->start; 2296 next->root = root; 2297 list_add_tail(&next->list, 2298 &rc->backref_cache.changed); 2299 __mark_block_processed(rc, next); 2300 break; 2301 } 2302 2303 WARN_ON(1); 2304 root = NULL; 2305 next = walk_down_backref(edges, &index); 2306 if (!next || next->level <= node->level) 2307 break; 2308 } 2309 if (!root) 2310 return NULL; 2311 2312 *nr = index; 2313 next = node; 2314 /* setup backref node path for btrfs_reloc_cow_block */ 2315 while (1) { 2316 rc->backref_cache.path[next->level] = next; 2317 if (--index < 0) 2318 break; 2319 next = edges[index]->node[UPPER]; 2320 } 2321 return root; 2322 } 2323 2324 /* 2325 * select a tree root for relocation. return NULL if the block 2326 * is reference counted. we should use do_relocation() in this 2327 * case. return a tree root pointer if the block isn't reference 2328 * counted. return -ENOENT if the block is root of reloc tree. 
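* in the -ENOENT case the caller simply marks the block processed,
* see relocate_tree_block().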
2329 */ 2330 static noinline_for_stack 2331 struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans, 2332 struct backref_node *node) 2333 { 2334 struct backref_node *next; 2335 struct btrfs_root *root; 2336 struct btrfs_root *fs_root = NULL; 2337 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2338 int index = 0; 2339 2340 next = node; 2341 while (1) { 2342 cond_resched(); 2343 next = walk_up_backref(next, edges, &index); 2344 root = next->root; 2345 BUG_ON(!root); 2346 2347 /* no other choice for non-reference counted tree */ 2348 if (!root->ref_cows) 2349 return root; 2350 2351 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) 2352 fs_root = root; 2353 2354 if (next != node) 2355 return NULL; 2356 2357 next = walk_down_backref(edges, &index); 2358 if (!next || next->level <= node->level) 2359 break; 2360 } 2361 2362 if (!fs_root) 2363 return ERR_PTR(-ENOENT); 2364 return fs_root; 2365 } 2366 2367 static noinline_for_stack 2368 u64 calcu_metadata_size(struct reloc_control *rc, 2369 struct backref_node *node, int reserve) 2370 { 2371 struct backref_node *next = node; 2372 struct backref_edge *edge; 2373 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2374 u64 num_bytes = 0; 2375 int index = 0; 2376 2377 BUG_ON(reserve && node->processed); 2378 2379 while (next) { 2380 cond_resched(); 2381 while (1) { 2382 if (next->processed && (reserve || next != node)) 2383 break; 2384 2385 num_bytes += btrfs_level_size(rc->extent_root, 2386 next->level); 2387 2388 if (list_empty(&next->upper)) 2389 break; 2390 2391 edge = list_entry(next->upper.next, 2392 struct backref_edge, list[LOWER]); 2393 edges[index++] = edge; 2394 next = edge->node[UPPER]; 2395 } 2396 next = walk_down_backref(edges, &index); 2397 } 2398 return num_bytes; 2399 } 2400 2401 static int reserve_metadata_space(struct btrfs_trans_handle *trans, 2402 struct reloc_control *rc, 2403 struct backref_node *node) 2404 { 2405 struct btrfs_root *root = rc->extent_root; 2406 u64 num_bytes; 2407 int ret; 2408 2409 num_bytes = calcu_metadata_size(rc, node, 1) * 2; 2410 2411 trans->block_rsv = rc->block_rsv; 2412 ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes); 2413 if (ret) { 2414 if (ret == -EAGAIN) 2415 rc->commit_transaction = 1; 2416 return ret; 2417 } 2418 2419 return 0; 2420 } 2421 2422 static void release_metadata_space(struct reloc_control *rc, 2423 struct backref_node *node) 2424 { 2425 u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2; 2426 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes); 2427 } 2428 2429 /* 2430 * relocate a tree block, and then update pointers in upper level 2431 * blocks that reference the block to point to the new location. 2432 * 2433 * if called by link_to_upper, the block has already been relocated. 2434 * in that case this function just updates pointers.
2435 */ 2436 static int do_relocation(struct btrfs_trans_handle *trans, 2437 struct reloc_control *rc, 2438 struct backref_node *node, 2439 struct btrfs_key *key, 2440 struct btrfs_path *path, int lowest) 2441 { 2442 struct backref_node *upper; 2443 struct backref_edge *edge; 2444 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2445 struct btrfs_root *root; 2446 struct extent_buffer *eb; 2447 u32 blocksize; 2448 u64 bytenr; 2449 u64 generation; 2450 int nr; 2451 int slot; 2452 int ret; 2453 int err = 0; 2454 2455 BUG_ON(lowest && node->eb); 2456 2457 path->lowest_level = node->level + 1; 2458 rc->backref_cache.path[node->level] = node; 2459 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2460 cond_resched(); 2461 2462 upper = edge->node[UPPER]; 2463 root = select_reloc_root(trans, rc, upper, edges, &nr); 2464 BUG_ON(!root); 2465 2466 if (upper->eb && !upper->locked) { 2467 if (!lowest) { 2468 ret = btrfs_bin_search(upper->eb, key, 2469 upper->level, &slot); 2470 BUG_ON(ret); 2471 bytenr = btrfs_node_blockptr(upper->eb, slot); 2472 if (node->eb->start == bytenr) 2473 goto next; 2474 } 2475 drop_node_buffer(upper); 2476 } 2477 2478 if (!upper->eb) { 2479 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2480 if (ret < 0) { 2481 err = ret; 2482 break; 2483 } 2484 BUG_ON(ret > 0); 2485 2486 if (!upper->eb) { 2487 upper->eb = path->nodes[upper->level]; 2488 path->nodes[upper->level] = NULL; 2489 } else { 2490 BUG_ON(upper->eb != path->nodes[upper->level]); 2491 } 2492 2493 upper->locked = 1; 2494 path->locks[upper->level] = 0; 2495 2496 slot = path->slots[upper->level]; 2497 btrfs_release_path(NULL, path); 2498 } else { 2499 ret = btrfs_bin_search(upper->eb, key, upper->level, 2500 &slot); 2501 BUG_ON(ret); 2502 } 2503 2504 bytenr = btrfs_node_blockptr(upper->eb, slot); 2505 if (lowest) { 2506 BUG_ON(bytenr != node->bytenr); 2507 } else { 2508 if (node->eb->start == bytenr) 2509 goto next; 2510 } 2511 2512 blocksize = btrfs_level_size(root, node->level); 2513 generation = btrfs_node_ptr_generation(upper->eb, slot); 2514 eb = read_tree_block(root, bytenr, blocksize, generation); 2515 btrfs_tree_lock(eb); 2516 btrfs_set_lock_blocking(eb); 2517 2518 if (!node->eb) { 2519 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2520 slot, &eb); 2521 btrfs_tree_unlock(eb); 2522 free_extent_buffer(eb); 2523 if (ret < 0) { 2524 err = ret; 2525 goto next; 2526 } 2527 BUG_ON(node->eb != eb); 2528 } else { 2529 btrfs_set_node_blockptr(upper->eb, slot, 2530 node->eb->start); 2531 btrfs_set_node_ptr_generation(upper->eb, slot, 2532 trans->transid); 2533 btrfs_mark_buffer_dirty(upper->eb); 2534 2535 ret = btrfs_inc_extent_ref(trans, root, 2536 node->eb->start, blocksize, 2537 upper->eb->start, 2538 btrfs_header_owner(upper->eb), 2539 node->level, 0); 2540 BUG_ON(ret); 2541 2542 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); 2543 BUG_ON(ret); 2544 } 2545 next: 2546 if (!upper->pending) 2547 drop_node_buffer(upper); 2548 else 2549 unlock_node_buffer(upper); 2550 if (err) 2551 break; 2552 } 2553 2554 if (!err && node->pending) { 2555 drop_node_buffer(node); 2556 list_move_tail(&node->list, &rc->backref_cache.changed); 2557 node->pending = 0; 2558 } 2559 2560 path->lowest_level = 0; 2561 BUG_ON(err == -ENOSPC); 2562 return err; 2563 } 2564 2565 static int link_to_upper(struct btrfs_trans_handle *trans, 2566 struct reloc_control *rc, 2567 struct backref_node *node, 2568 struct btrfs_path *path) 2569 { 2570 struct btrfs_key key; 2571 2572 btrfs_node_key_to_cpu(node->eb, &key, 0); 2573 return 
do_relocation(trans, rc, node, &key, path, 0); 2574 } 2575 2576 static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2577 struct reloc_control *rc, 2578 struct btrfs_path *path, int err) 2579 { 2580 LIST_HEAD(list); 2581 struct backref_cache *cache = &rc->backref_cache; 2582 struct backref_node *node; 2583 int level; 2584 int ret; 2585 2586 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2587 while (!list_empty(&cache->pending[level])) { 2588 node = list_entry(cache->pending[level].next, 2589 struct backref_node, list); 2590 list_move_tail(&node->list, &list); 2591 BUG_ON(!node->pending); 2592 2593 if (!err) { 2594 ret = link_to_upper(trans, rc, node, path); 2595 if (ret < 0) 2596 err = ret; 2597 } 2598 } 2599 list_splice_init(&list, &cache->pending[level]); 2600 } 2601 return err; 2602 } 2603 2604 static void mark_block_processed(struct reloc_control *rc, 2605 u64 bytenr, u32 blocksize) 2606 { 2607 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, 2608 EXTENT_DIRTY, GFP_NOFS); 2609 } 2610 2611 static void __mark_block_processed(struct reloc_control *rc, 2612 struct backref_node *node) 2613 { 2614 u32 blocksize; 2615 if (node->level == 0 || 2616 in_block_group(node->bytenr, rc->block_group)) { 2617 blocksize = btrfs_level_size(rc->extent_root, node->level); 2618 mark_block_processed(rc, node->bytenr, blocksize); 2619 } 2620 node->processed = 1; 2621 } 2622 2623 /* 2624 * mark a block and all blocks that directly/indirectly reference the 2625 * block as processed. 2626 */ 2627 static void update_processed_blocks(struct reloc_control *rc, 2628 struct backref_node *node) 2629 { 2630 struct backref_node *next = node; 2631 struct backref_edge *edge; 2632 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2633 int index = 0; 2634 2635 while (next) { 2636 cond_resched(); 2637 while (1) { 2638 if (next->processed) 2639 break; 2640 2641 __mark_block_processed(rc, next); 2642 2643 if (list_empty(&next->upper)) 2644 break; 2645 2646 edge = list_entry(next->upper.next, 2647 struct backref_edge, list[LOWER]); 2648 edges[index++] = edge; 2649 next = edge->node[UPPER]; 2650 } 2651 next = walk_down_backref(edges, &index); 2652 } 2653 } 2654 2655 static int tree_block_processed(u64 bytenr, u32 blocksize, 2656 struct reloc_control *rc) 2657 { 2658 if (test_range_bit(&rc->processed_blocks, bytenr, 2659 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) 2660 return 1; 2661 return 0; 2662 } 2663 2664 static int get_tree_block_key(struct reloc_control *rc, 2665 struct tree_block *block) 2666 { 2667 struct extent_buffer *eb; 2668 2669 BUG_ON(block->key_ready); 2670 eb = read_tree_block(rc->extent_root, block->bytenr, 2671 block->key.objectid, block->key.offset); 2672 WARN_ON(btrfs_header_level(eb) != block->level); 2673 if (block->level == 0) 2674 btrfs_item_key_to_cpu(eb, &block->key, 0); 2675 else 2676 btrfs_node_key_to_cpu(eb, &block->key, 0); 2677 free_extent_buffer(eb); 2678 block->key_ready = 1; 2679 return 0; 2680 } 2681 2682 static int reada_tree_block(struct reloc_control *rc, 2683 struct tree_block *block) 2684 { 2685 BUG_ON(block->key_ready); 2686 readahead_tree_block(rc->extent_root, block->bytenr, 2687 block->key.objectid, block->key.offset); 2688 return 0; 2689 } 2690 2691 /* 2692 * helper function to relocate a tree block 2693 */ 2694 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2695 struct reloc_control *rc, 2696 struct backref_node *node, 2697 struct btrfs_key *key, 2698 struct btrfs_path *path) 2699 { 2700 struct btrfs_root *root; 2701 int release
= 0; 2702 int ret = 0; 2703 2704 if (!node) 2705 return 0; 2706 2707 BUG_ON(node->processed); 2708 root = select_one_root(trans, node); 2709 if (root == ERR_PTR(-ENOENT)) { 2710 update_processed_blocks(rc, node); 2711 goto out; 2712 } 2713 2714 if (!root || root->ref_cows) { 2715 ret = reserve_metadata_space(trans, rc, node); 2716 if (ret) 2717 goto out; 2718 release = 1; 2719 } 2720 2721 if (root) { 2722 if (root->ref_cows) { 2723 BUG_ON(node->new_bytenr); 2724 BUG_ON(!list_empty(&node->list)); 2725 btrfs_record_root_in_trans(trans, root); 2726 root = root->reloc_root; 2727 node->new_bytenr = root->node->start; 2728 node->root = root; 2729 list_add_tail(&node->list, &rc->backref_cache.changed); 2730 } else { 2731 path->lowest_level = node->level; 2732 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2733 btrfs_release_path(root, path); 2734 if (ret > 0) 2735 ret = 0; 2736 } 2737 if (!ret) 2738 update_processed_blocks(rc, node); 2739 } else { 2740 ret = do_relocation(trans, rc, node, key, path, 1); 2741 } 2742 out: 2743 if (ret || node->level == 0 || node->cowonly) { 2744 if (release) 2745 release_metadata_space(rc, node); 2746 remove_backref_node(&rc->backref_cache, node); 2747 } 2748 return ret; 2749 } 2750 2751 /* 2752 * relocate a list of blocks 2753 */ 2754 static noinline_for_stack 2755 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2756 struct reloc_control *rc, struct rb_root *blocks) 2757 { 2758 struct backref_node *node; 2759 struct btrfs_path *path; 2760 struct tree_block *block; 2761 struct rb_node *rb_node; 2762 int ret; 2763 int err = 0; 2764 2765 path = btrfs_alloc_path(); 2766 if (!path) 2767 return -ENOMEM; 2768 2769 rb_node = rb_first(blocks); 2770 while (rb_node) { 2771 block = rb_entry(rb_node, struct tree_block, rb_node); 2772 if (!block->key_ready) 2773 reada_tree_block(rc, block); 2774 rb_node = rb_next(rb_node); 2775 } 2776 2777 rb_node = rb_first(blocks); 2778 while (rb_node) { 2779 block = rb_entry(rb_node, struct tree_block, rb_node); 2780 if (!block->key_ready) 2781 get_tree_block_key(rc, block); 2782 rb_node = rb_next(rb_node); 2783 } 2784 2785 rb_node = rb_first(blocks); 2786 while (rb_node) { 2787 block = rb_entry(rb_node, struct tree_block, rb_node); 2788 2789 node = build_backref_tree(rc, &block->key, 2790 block->level, block->bytenr); 2791 if (IS_ERR(node)) { 2792 err = PTR_ERR(node); 2793 goto out; 2794 } 2795 2796 ret = relocate_tree_block(trans, rc, node, &block->key, 2797 path); 2798 if (ret < 0) { 2799 if (ret != -EAGAIN || rb_node == rb_first(blocks)) 2800 err = ret; 2801 goto out; 2802 } 2803 rb_node = rb_next(rb_node); 2804 } 2805 out: 2806 free_block_list(blocks); 2807 err = finish_pending_nodes(trans, rc, path, err); 2808 2809 btrfs_free_path(path); 2810 return err; 2811 } 2812 2813 static noinline_for_stack 2814 int prealloc_file_extent_cluster(struct inode *inode, 2815 struct file_extent_cluster *cluster) 2816 { 2817 u64 alloc_hint = 0; 2818 u64 start; 2819 u64 end; 2820 u64 offset = BTRFS_I(inode)->index_cnt; 2821 u64 num_bytes; 2822 int nr = 0; 2823 int ret = 0; 2824 2825 BUG_ON(cluster->start != cluster->boundary[0]); 2826 mutex_lock(&inode->i_mutex); 2827 2828 ret = btrfs_check_data_free_space(inode, cluster->end + 2829 1 - cluster->start); 2830 if (ret) 2831 goto out; 2832 2833 while (nr < cluster->nr) { 2834 start = cluster->boundary[nr] - offset; 2835 if (nr + 1 < cluster->nr) 2836 end = cluster->boundary[nr + 1] - 1 - offset; 2837 else 2838 end = cluster->end - offset; 2839 2840 lock_extent(&BTRFS_I(inode)->io_tree, 
start, end, GFP_NOFS); 2841 num_bytes = end + 1 - start; 2842 ret = btrfs_prealloc_file_range(inode, 0, start, 2843 num_bytes, num_bytes, 2844 end + 1, &alloc_hint); 2845 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2846 if (ret) 2847 break; 2848 nr++; 2849 } 2850 btrfs_free_reserved_data_space(inode, cluster->end + 2851 1 - cluster->start); 2852 out: 2853 mutex_unlock(&inode->i_mutex); 2854 return ret; 2855 } 2856 2857 static noinline_for_stack 2858 int setup_extent_mapping(struct inode *inode, u64 start, u64 end, 2859 u64 block_start) 2860 { 2861 struct btrfs_root *root = BTRFS_I(inode)->root; 2862 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 2863 struct extent_map *em; 2864 int ret = 0; 2865 2866 em = alloc_extent_map(GFP_NOFS); 2867 if (!em) 2868 return -ENOMEM; 2869 2870 em->start = start; 2871 em->len = end + 1 - start; 2872 em->block_len = em->len; 2873 em->block_start = block_start; 2874 em->bdev = root->fs_info->fs_devices->latest_bdev; 2875 set_bit(EXTENT_FLAG_PINNED, &em->flags); 2876 2877 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2878 while (1) { 2879 write_lock(&em_tree->lock); 2880 ret = add_extent_mapping(em_tree, em); 2881 write_unlock(&em_tree->lock); 2882 if (ret != -EEXIST) { 2883 free_extent_map(em); 2884 break; 2885 } 2886 btrfs_drop_extent_cache(inode, start, end, 0); 2887 } 2888 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2889 return ret; 2890 } 2891 2892 static int relocate_file_extent_cluster(struct inode *inode, 2893 struct file_extent_cluster *cluster) 2894 { 2895 u64 page_start; 2896 u64 page_end; 2897 u64 offset = BTRFS_I(inode)->index_cnt; 2898 unsigned long index; 2899 unsigned long last_index; 2900 struct page *page; 2901 struct file_ra_state *ra; 2902 int nr = 0; 2903 int ret = 0; 2904 2905 if (!cluster->nr) 2906 return 0; 2907 2908 ra = kzalloc(sizeof(*ra), GFP_NOFS); 2909 if (!ra) 2910 return -ENOMEM; 2911 2912 ret = prealloc_file_extent_cluster(inode, cluster); 2913 if (ret) 2914 goto out; 2915 2916 file_ra_state_init(ra, inode->i_mapping); 2917 2918 ret = setup_extent_mapping(inode, cluster->start - offset, 2919 cluster->end - offset, cluster->start); 2920 if (ret) 2921 goto out; 2922 2923 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 2924 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 2925 while (index <= last_index) { 2926 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); 2927 if (ret) 2928 goto out; 2929 2930 page = find_lock_page(inode->i_mapping, index); 2931 if (!page) { 2932 page_cache_sync_readahead(inode->i_mapping, 2933 ra, NULL, index, 2934 last_index + 1 - index); 2935 page = grab_cache_page(inode->i_mapping, index); 2936 if (!page) { 2937 btrfs_delalloc_release_metadata(inode, 2938 PAGE_CACHE_SIZE); 2939 ret = -ENOMEM; 2940 goto out; 2941 } 2942 } 2943 2944 if (PageReadahead(page)) { 2945 page_cache_async_readahead(inode->i_mapping, 2946 ra, NULL, page, index, 2947 last_index + 1 - index); 2948 } 2949 2950 if (!PageUptodate(page)) { 2951 btrfs_readpage(NULL, page); 2952 lock_page(page); 2953 if (!PageUptodate(page)) { 2954 unlock_page(page); 2955 page_cache_release(page); 2956 btrfs_delalloc_release_metadata(inode, 2957 PAGE_CACHE_SIZE); 2958 ret = -EIO; 2959 goto out; 2960 } 2961 } 2962 2963 page_start = (u64)page->index << PAGE_CACHE_SHIFT; 2964 page_end = page_start + PAGE_CACHE_SIZE - 1; 2965 2966 lock_extent(&BTRFS_I(inode)->io_tree, 2967 page_start, page_end, GFP_NOFS); 2968 2969 set_page_extent_mapped(page); 2970 2971 if (nr < 
cluster->nr && 2972 page_start + offset == cluster->boundary[nr]) { 2973 set_extent_bits(&BTRFS_I(inode)->io_tree, 2974 page_start, page_end, 2975 EXTENT_BOUNDARY, GFP_NOFS); 2976 nr++; 2977 } 2978 2979 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); 2980 set_page_dirty(page); 2981 2982 unlock_extent(&BTRFS_I(inode)->io_tree, 2983 page_start, page_end, GFP_NOFS); 2984 unlock_page(page); 2985 page_cache_release(page); 2986 2987 index++; 2988 balance_dirty_pages_ratelimited(inode->i_mapping); 2989 btrfs_throttle(BTRFS_I(inode)->root); 2990 } 2991 WARN_ON(nr != cluster->nr); 2992 out: 2993 kfree(ra); 2994 return ret; 2995 } 2996 2997 static noinline_for_stack 2998 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key, 2999 struct file_extent_cluster *cluster) 3000 { 3001 int ret; 3002 3003 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 3004 ret = relocate_file_extent_cluster(inode, cluster); 3005 if (ret) 3006 return ret; 3007 cluster->nr = 0; 3008 } 3009 3010 if (!cluster->nr) 3011 cluster->start = extent_key->objectid; 3012 else 3013 BUG_ON(cluster->nr >= MAX_EXTENTS); 3014 cluster->end = extent_key->objectid + extent_key->offset - 1; 3015 cluster->boundary[cluster->nr] = extent_key->objectid; 3016 cluster->nr++; 3017 3018 if (cluster->nr >= MAX_EXTENTS) { 3019 ret = relocate_file_extent_cluster(inode, cluster); 3020 if (ret) 3021 return ret; 3022 cluster->nr = 0; 3023 } 3024 return 0; 3025 } 3026 3027 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3028 static int get_ref_objectid_v0(struct reloc_control *rc, 3029 struct btrfs_path *path, 3030 struct btrfs_key *extent_key, 3031 u64 *ref_objectid, int *path_change) 3032 { 3033 struct btrfs_key key; 3034 struct extent_buffer *leaf; 3035 struct btrfs_extent_ref_v0 *ref0; 3036 int ret; 3037 int slot; 3038 3039 leaf = path->nodes[0]; 3040 slot = path->slots[0]; 3041 while (1) { 3042 if (slot >= btrfs_header_nritems(leaf)) { 3043 ret = btrfs_next_leaf(rc->extent_root, path); 3044 if (ret < 0) 3045 return ret; 3046 BUG_ON(ret > 0); 3047 leaf = path->nodes[0]; 3048 slot = path->slots[0]; 3049 if (path_change) 3050 *path_change = 1; 3051 } 3052 btrfs_item_key_to_cpu(leaf, &key, slot); 3053 if (key.objectid != extent_key->objectid) 3054 return -ENOENT; 3055 3056 if (key.type != BTRFS_EXTENT_REF_V0_KEY) { 3057 slot++; 3058 continue; 3059 } 3060 ref0 = btrfs_item_ptr(leaf, slot, 3061 struct btrfs_extent_ref_v0); 3062 *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0); 3063 break; 3064 } 3065 return 0; 3066 } 3067 #endif 3068 3069 /* 3070 * helper to add a tree block to the list. 
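* the block is inserted into the rb tree keyed by bytenr; its real key
* is filled in later by get_tree_block_key().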
3071 * the major work is getting the generation and level of the block 3072 */ 3073 static int add_tree_block(struct reloc_control *rc, 3074 struct btrfs_key *extent_key, 3075 struct btrfs_path *path, 3076 struct rb_root *blocks) 3077 { 3078 struct extent_buffer *eb; 3079 struct btrfs_extent_item *ei; 3080 struct btrfs_tree_block_info *bi; 3081 struct tree_block *block; 3082 struct rb_node *rb_node; 3083 u32 item_size; 3084 int level = -1; 3085 int generation; 3086 3087 eb = path->nodes[0]; 3088 item_size = btrfs_item_size_nr(eb, path->slots[0]); 3089 3090 if (item_size >= sizeof(*ei) + sizeof(*bi)) { 3091 ei = btrfs_item_ptr(eb, path->slots[0], 3092 struct btrfs_extent_item); 3093 bi = (struct btrfs_tree_block_info *)(ei + 1); 3094 generation = btrfs_extent_generation(eb, ei); 3095 level = btrfs_tree_block_level(eb, bi); 3096 } else { 3097 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3098 u64 ref_owner; 3099 int ret; 3100 3101 BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 3102 ret = get_ref_objectid_v0(rc, path, extent_key, 3103 &ref_owner, NULL); 3104 if (ret < 0) 3105 return ret; 3106 BUG_ON(ref_owner >= BTRFS_MAX_LEVEL); 3107 level = (int)ref_owner; 3108 /* FIXME: get real generation */ 3109 generation = 0; 3110 #else 3111 BUG(); 3112 #endif 3113 } 3114 3115 btrfs_release_path(rc->extent_root, path); 3116 3117 BUG_ON(level == -1); 3118 3119 block = kmalloc(sizeof(*block), GFP_NOFS); 3120 if (!block) 3121 return -ENOMEM; 3122 3123 block->bytenr = extent_key->objectid; 3124 block->key.objectid = extent_key->offset; 3125 block->key.offset = generation; 3126 block->level = level; 3127 block->key_ready = 0; 3128 3129 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3130 BUG_ON(rb_node); 3131 3132 return 0; 3133 } 3134 3135 /* 3136 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3137 */ 3138 static int __add_tree_block(struct reloc_control *rc, 3139 u64 bytenr, u32 blocksize, 3140 struct rb_root *blocks) 3141 { 3142 struct btrfs_path *path; 3143 struct btrfs_key key; 3144 int ret; 3145 3146 if (tree_block_processed(bytenr, blocksize, rc)) 3147 return 0; 3148 3149 if (tree_search(blocks, bytenr)) 3150 return 0; 3151 3152 path = btrfs_alloc_path(); 3153 if (!path) 3154 return -ENOMEM; 3155 3156 key.objectid = bytenr; 3157 key.type = BTRFS_EXTENT_ITEM_KEY; 3158 key.offset = blocksize; 3159 3160 path->search_commit_root = 1; 3161 path->skip_locking = 1; 3162 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3163 if (ret < 0) 3164 goto out; 3165 BUG_ON(ret); 3166 3167 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3168 ret = add_tree_block(rc, &key, path, blocks); 3169 out: 3170 btrfs_free_path(path); 3171 return ret; 3172 } 3173 3174 /* 3175 * helper to check if the block uses full backrefs for pointers in it 3176 */ 3177 static int block_use_full_backref(struct reloc_control *rc, 3178 struct extent_buffer *eb) 3179 { 3180 u64 flags; 3181 int ret; 3182 3183 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) || 3184 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) 3185 return 1; 3186 3187 ret = btrfs_lookup_extent_info(NULL, rc->extent_root, 3188 eb->start, eb->len, NULL, &flags); 3189 BUG_ON(ret); 3190 3191 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) 3192 ret = 1; 3193 else 3194 ret = 0; 3195 return ret; 3196 } 3197 3198 static int delete_block_group_cache(struct btrfs_fs_info *fs_info, 3199 struct inode *inode, u64 ino) 3200 { 3201 struct btrfs_key key; 3202 struct btrfs_path *path; 3203 struct btrfs_root *root =
fs_info->tree_root; 3204 struct btrfs_trans_handle *trans; 3205 unsigned long nr; 3206 int ret = 0; 3207 3208 if (inode) 3209 goto truncate; 3210 3211 key.objectid = ino; 3212 key.type = BTRFS_INODE_ITEM_KEY; 3213 key.offset = 0; 3214 3215 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 3216 if (!inode || IS_ERR(inode) || is_bad_inode(inode)) { 3217 if (inode && !IS_ERR(inode)) 3218 iput(inode); 3219 return -ENOENT; 3220 } 3221 3222 truncate: 3223 path = btrfs_alloc_path(); 3224 if (!path) { 3225 ret = -ENOMEM; 3226 goto out; 3227 } 3228 3229 trans = btrfs_join_transaction(root, 0); 3230 if (IS_ERR(trans)) { 3231 btrfs_free_path(path); 3232 ret = PTR_ERR(trans); 3233 goto out; 3234 } 3235 3236 ret = btrfs_truncate_free_space_cache(root, trans, path, inode); 3237 3238 btrfs_free_path(path); 3239 nr = trans->blocks_used; 3240 btrfs_end_transaction(trans, root); 3241 btrfs_btree_balance_dirty(root, nr); 3242 out: 3243 iput(inode); 3244 return ret; 3245 } 3246 3247 /* 3248 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY 3249 * this function scans the fs tree to find blocks that reference the data extent 3250 */ 3251 static int find_data_references(struct reloc_control *rc, 3252 struct btrfs_key *extent_key, 3253 struct extent_buffer *leaf, 3254 struct btrfs_extent_data_ref *ref, 3255 struct rb_root *blocks) 3256 { 3257 struct btrfs_path *path; 3258 struct tree_block *block; 3259 struct btrfs_root *root; 3260 struct btrfs_file_extent_item *fi; 3261 struct rb_node *rb_node; 3262 struct btrfs_key key; 3263 u64 ref_root; 3264 u64 ref_objectid; 3265 u64 ref_offset; 3266 u32 ref_count; 3267 u32 nritems; 3268 int err = 0; 3269 int added = 0; 3270 int counted; 3271 int ret; 3272 3273 ref_root = btrfs_extent_data_ref_root(leaf, ref); 3274 ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref); 3275 ref_offset = btrfs_extent_data_ref_offset(leaf, ref); 3276 ref_count = btrfs_extent_data_ref_count(leaf, ref); 3277 3278 /* 3279 * This is an extent belonging to the free space cache, let's just delete 3280 * it and redo the search.
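* free space cache inodes are owned by the root tree, which is what
* the check below detects.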
3281 */ 3282 if (ref_root == BTRFS_ROOT_TREE_OBJECTID) { 3283 ret = delete_block_group_cache(rc->extent_root->fs_info, 3284 NULL, ref_objectid); 3285 if (ret != -ENOENT) 3286 return ret; 3287 ret = 0; 3288 } 3289 3290 path = btrfs_alloc_path(); 3291 if (!path) 3292 return -ENOMEM; 3293 3294 root = read_fs_root(rc->extent_root->fs_info, ref_root); 3295 if (IS_ERR(root)) { 3296 err = PTR_ERR(root); 3297 goto out; 3298 } 3299 3300 key.objectid = ref_objectid; 3301 key.offset = ref_offset; 3302 key.type = BTRFS_EXTENT_DATA_KEY; 3303 3304 path->search_commit_root = 1; 3305 path->skip_locking = 1; 3306 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3307 if (ret < 0) { 3308 err = ret; 3309 goto out; 3310 } 3311 3312 leaf = path->nodes[0]; 3313 nritems = btrfs_header_nritems(leaf); 3314 /* 3315 * the references in tree blocks that use full backrefs 3316 * are not counted 3317 */ 3318 if (block_use_full_backref(rc, leaf)) 3319 counted = 0; 3320 else 3321 counted = 1; 3322 rb_node = tree_search(blocks, leaf->start); 3323 if (rb_node) { 3324 if (counted) 3325 added = 1; 3326 else 3327 path->slots[0] = nritems; 3328 } 3329 3330 while (ref_count > 0) { 3331 while (path->slots[0] >= nritems) { 3332 ret = btrfs_next_leaf(root, path); 3333 if (ret < 0) { 3334 err = ret; 3335 goto out; 3336 } 3337 if (ret > 0) { 3338 WARN_ON(1); 3339 goto out; 3340 } 3341 3342 leaf = path->nodes[0]; 3343 nritems = btrfs_header_nritems(leaf); 3344 added = 0; 3345 3346 if (block_use_full_backref(rc, leaf)) 3347 counted = 0; 3348 else 3349 counted = 1; 3350 rb_node = tree_search(blocks, leaf->start); 3351 if (rb_node) { 3352 if (counted) 3353 added = 1; 3354 else 3355 path->slots[0] = nritems; 3356 } 3357 } 3358 3359 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3360 if (key.objectid != ref_objectid || 3361 key.type != BTRFS_EXTENT_DATA_KEY) { 3362 WARN_ON(1); 3363 break; 3364 } 3365 3366 fi = btrfs_item_ptr(leaf, path->slots[0], 3367 struct btrfs_file_extent_item); 3368 3369 if (btrfs_file_extent_type(leaf, fi) == 3370 BTRFS_FILE_EXTENT_INLINE) 3371 goto next; 3372 3373 if (btrfs_file_extent_disk_bytenr(leaf, fi) != 3374 extent_key->objectid) 3375 goto next; 3376 3377 key.offset -= btrfs_file_extent_offset(leaf, fi); 3378 if (key.offset != ref_offset) 3379 goto next; 3380 3381 if (counted) 3382 ref_count--; 3383 if (added) 3384 goto next; 3385 3386 if (!tree_block_processed(leaf->start, leaf->len, rc)) { 3387 block = kmalloc(sizeof(*block), GFP_NOFS); 3388 if (!block) { 3389 err = -ENOMEM; 3390 break; 3391 } 3392 block->bytenr = leaf->start; 3393 btrfs_item_key_to_cpu(leaf, &block->key, 0); 3394 block->level = 0; 3395 block->key_ready = 1; 3396 rb_node = tree_insert(blocks, block->bytenr, 3397 &block->rb_node); 3398 BUG_ON(rb_node); 3399 } 3400 if (counted) 3401 added = 1; 3402 else 3403 path->slots[0] = nritems; 3404 next: 3405 path->slots[0]++; 3406 3407 } 3408 out: 3409 btrfs_free_path(path); 3410 return err; 3411 } 3412 3413 /* 3414 * helper to find all tree blocks that reference a given data extent 3415 */ 3416 static noinline_for_stack 3417 int add_data_references(struct reloc_control *rc, 3418 struct btrfs_key *extent_key, 3419 struct btrfs_path *path, 3420 struct rb_root *blocks) 3421 { 3422 struct btrfs_key key; 3423 struct extent_buffer *eb; 3424 struct btrfs_extent_data_ref *dref; 3425 struct btrfs_extent_inline_ref *iref; 3426 unsigned long ptr; 3427 unsigned long end; 3428 u32 blocksize = btrfs_level_size(rc->extent_root, 0); 3429 int ret; 3430 int err = 0; 3431 3432 eb = path->nodes[0]; 3433 ptr
= btrfs_item_ptr_offset(eb, path->slots[0]); 3434 end = ptr + btrfs_item_size_nr(eb, path->slots[0]); 3435 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3436 if (ptr + sizeof(struct btrfs_extent_item_v0) == end) 3437 ptr = end; 3438 else 3439 #endif 3440 ptr += sizeof(struct btrfs_extent_item); 3441 3442 while (ptr < end) { 3443 iref = (struct btrfs_extent_inline_ref *)ptr; 3444 key.type = btrfs_extent_inline_ref_type(eb, iref); 3445 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3446 key.offset = btrfs_extent_inline_ref_offset(eb, iref); 3447 ret = __add_tree_block(rc, key.offset, blocksize, 3448 blocks); 3449 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3450 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 3451 ret = find_data_references(rc, extent_key, 3452 eb, dref, blocks); 3453 } else { 3454 BUG(); 3455 } 3456 ptr += btrfs_extent_inline_ref_size(key.type); 3457 } 3458 WARN_ON(ptr > end); 3459 3460 while (1) { 3461 cond_resched(); 3462 eb = path->nodes[0]; 3463 if (path->slots[0] >= btrfs_header_nritems(eb)) { 3464 ret = btrfs_next_leaf(rc->extent_root, path); 3465 if (ret < 0) { 3466 err = ret; 3467 break; 3468 } 3469 if (ret > 0) 3470 break; 3471 eb = path->nodes[0]; 3472 } 3473 3474 btrfs_item_key_to_cpu(eb, &key, path->slots[0]); 3475 if (key.objectid != extent_key->objectid) 3476 break; 3477 3478 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3479 if (key.type == BTRFS_SHARED_DATA_REF_KEY || 3480 key.type == BTRFS_EXTENT_REF_V0_KEY) { 3481 #else 3482 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); 3483 if (key.type == BTRFS_SHARED_DATA_REF_KEY) { 3484 #endif 3485 ret = __add_tree_block(rc, key.offset, blocksize, 3486 blocks); 3487 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { 3488 dref = btrfs_item_ptr(eb, path->slots[0], 3489 struct btrfs_extent_data_ref); 3490 ret = find_data_references(rc, extent_key, 3491 eb, dref, blocks); 3492 } else { 3493 ret = 0; 3494 } 3495 if (ret) { 3496 err = ret; 3497 break; 3498 } 3499 path->slots[0]++; 3500 } 3501 btrfs_release_path(rc->extent_root, path); 3502 if (err) 3503 free_block_list(blocks); 3504 return err; 3505 } 3506 3507 /* 3508 * helper to find the next unprocessed extent 3509 */ 3510 static noinline_for_stack 3511 int find_next_extent(struct btrfs_trans_handle *trans, 3512 struct reloc_control *rc, struct btrfs_path *path, 3513 struct btrfs_key *extent_key) 3514 { 3515 struct btrfs_key key; 3516 struct extent_buffer *leaf; 3517 u64 start, end, last; 3518 int ret; 3519 3520 last = rc->block_group->key.objectid + rc->block_group->key.offset; 3521 while (1) { 3522 cond_resched(); 3523 if (rc->search_start >= last) { 3524 ret = 1; 3525 break; 3526 } 3527 3528 key.objectid = rc->search_start; 3529 key.type = BTRFS_EXTENT_ITEM_KEY; 3530 key.offset = 0; 3531 3532 path->search_commit_root = 1; 3533 path->skip_locking = 1; 3534 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3535 0, 0); 3536 if (ret < 0) 3537 break; 3538 next: 3539 leaf = path->nodes[0]; 3540 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3541 ret = btrfs_next_leaf(rc->extent_root, path); 3542 if (ret != 0) 3543 break; 3544 leaf = path->nodes[0]; 3545 } 3546 3547 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3548 if (key.objectid >= last) { 3549 ret = 1; 3550 break; 3551 } 3552 3553 if (key.type != BTRFS_EXTENT_ITEM_KEY || 3554 key.objectid + key.offset <= rc->search_start) { 3555 path->slots[0]++; 3556 goto next; 3557 } 3558 3559 ret = find_first_extent_bit(&rc->processed_blocks, 3560 key.objectid, &start, &end, 3561 EXTENT_DIRTY); 3562 3563 if (ret == 0 && start <=
key.objectid) { 3564 btrfs_release_path(rc->extent_root, path); 3565 rc->search_start = end + 1; 3566 } else { 3567 rc->search_start = key.objectid + key.offset; 3568 memcpy(extent_key, &key, sizeof(key)); 3569 return 0; 3570 } 3571 } 3572 btrfs_release_path(rc->extent_root, path); 3573 return ret; 3574 } 3575 3576 static void set_reloc_control(struct reloc_control *rc) 3577 { 3578 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3579 mutex_lock(&fs_info->trans_mutex); 3580 fs_info->reloc_ctl = rc; 3581 mutex_unlock(&fs_info->trans_mutex); 3582 } 3583 3584 static void unset_reloc_control(struct reloc_control *rc) 3585 { 3586 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3587 mutex_lock(&fs_info->trans_mutex); 3588 fs_info->reloc_ctl = NULL; 3589 mutex_unlock(&fs_info->trans_mutex); 3590 } 3591 3592 static int check_extent_flags(u64 flags) 3593 { 3594 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3595 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3596 return 1; 3597 if (!(flags & BTRFS_EXTENT_FLAG_DATA) && 3598 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) 3599 return 1; 3600 if ((flags & BTRFS_EXTENT_FLAG_DATA) && 3601 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 3602 return 1; 3603 return 0; 3604 } 3605 3606 static noinline_for_stack 3607 int prepare_to_relocate(struct reloc_control *rc) 3608 { 3609 struct btrfs_trans_handle *trans; 3610 int ret; 3611 3612 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root); 3613 if (!rc->block_rsv) 3614 return -ENOMEM; 3615 3616 /* 3617 * reserve some space for creating reloc trees. 3618 * btrfs_init_reloc_root will use them when there 3619 * is no reservation in transaction handle. 3620 */ 3621 ret = btrfs_block_rsv_add(NULL, rc->extent_root, rc->block_rsv, 3622 rc->extent_root->nodesize * 256); 3623 if (ret) 3624 return ret; 3625 3626 rc->block_rsv->refill_used = 1; 3627 btrfs_add_durable_block_rsv(rc->extent_root->fs_info, rc->block_rsv); 3628 3629 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3630 rc->search_start = rc->block_group->key.objectid; 3631 rc->extents_found = 0; 3632 rc->nodes_relocated = 0; 3633 rc->merging_rsv_size = 0; 3634 3635 rc->create_reloc_tree = 1; 3636 set_reloc_control(rc); 3637 3638 trans = btrfs_join_transaction(rc->extent_root, 1); 3639 BUG_ON(IS_ERR(trans)); 3640 btrfs_commit_transaction(trans, rc->extent_root); 3641 return 0; 3642 } 3643 3644 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3645 { 3646 struct rb_root blocks = RB_ROOT; 3647 struct btrfs_key key; 3648 struct btrfs_trans_handle *trans = NULL; 3649 struct btrfs_path *path; 3650 struct btrfs_extent_item *ei; 3651 unsigned long nr; 3652 u64 flags; 3653 u32 item_size; 3654 int ret; 3655 int err = 0; 3656 3657 path = btrfs_alloc_path(); 3658 if (!path) 3659 return -ENOMEM; 3660 3661 ret = prepare_to_relocate(rc); 3662 if (ret) { 3663 err = ret; 3664 goto out_free; 3665 } 3666 3667 while (1) { 3668 trans = btrfs_start_transaction(rc->extent_root, 0); 3669 BUG_ON(IS_ERR(trans)); 3670 3671 if (update_backref_cache(trans, &rc->backref_cache)) { 3672 btrfs_end_transaction(trans, rc->extent_root); 3673 continue; 3674 } 3675 3676 ret = find_next_extent(trans, rc, path, &key); 3677 if (ret < 0) 3678 err = ret; 3679 if (ret != 0) 3680 break; 3681 3682 rc->extents_found++; 3683 3684 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3685 struct btrfs_extent_item); 3686 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 3687 if (item_size >= sizeof(*ei)) { 3688 flags = btrfs_extent_flags(path->nodes[0], ei); 3689 ret = 
check_extent_flags(flags); 3690 BUG_ON(ret); 3691 3692 } else { 3693 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 3694 u64 ref_owner; 3695 int path_change = 0; 3696 3697 BUG_ON(item_size != 3698 sizeof(struct btrfs_extent_item_v0)); 3699 ret = get_ref_objectid_v0(rc, path, &key, &ref_owner, 3700 &path_change); 3701 if (ref_owner < BTRFS_FIRST_FREE_OBJECTID) 3702 flags = BTRFS_EXTENT_FLAG_TREE_BLOCK; 3703 else 3704 flags = BTRFS_EXTENT_FLAG_DATA; 3705 3706 if (path_change) { 3707 btrfs_release_path(rc->extent_root, path); 3708 3709 path->search_commit_root = 1; 3710 path->skip_locking = 1; 3711 ret = btrfs_search_slot(NULL, rc->extent_root, 3712 &key, path, 0, 0); 3713 if (ret < 0) { 3714 err = ret; 3715 break; 3716 } 3717 BUG_ON(ret > 0); 3718 } 3719 #else 3720 BUG(); 3721 #endif 3722 } 3723 3724 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 3725 ret = add_tree_block(rc, &key, path, &blocks); 3726 } else if (rc->stage == UPDATE_DATA_PTRS && 3727 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3728 ret = add_data_references(rc, &key, path, &blocks); 3729 } else { 3730 btrfs_release_path(rc->extent_root, path); 3731 ret = 0; 3732 } 3733 if (ret < 0) { 3734 err = ret; 3735 break; 3736 } 3737 3738 if (!RB_EMPTY_ROOT(&blocks)) { 3739 ret = relocate_tree_blocks(trans, rc, &blocks); 3740 if (ret < 0) { 3741 if (ret != -EAGAIN) { 3742 err = ret; 3743 break; 3744 } 3745 rc->extents_found--; 3746 rc->search_start = key.objectid; 3747 } 3748 } 3749 3750 ret = btrfs_block_rsv_check(trans, rc->extent_root, 3751 rc->block_rsv, 0, 5); 3752 if (ret < 0) { 3753 if (ret != -EAGAIN) { 3754 err = ret; 3755 WARN_ON(1); 3756 break; 3757 } 3758 rc->commit_transaction = 1; 3759 } 3760 3761 if (rc->commit_transaction) { 3762 rc->commit_transaction = 0; 3763 ret = btrfs_commit_transaction(trans, rc->extent_root); 3764 BUG_ON(ret); 3765 } else { 3766 nr = trans->blocks_used; 3767 btrfs_end_transaction_throttle(trans, rc->extent_root); 3768 btrfs_btree_balance_dirty(rc->extent_root, nr); 3769 } 3770 trans = NULL; 3771 3772 if (rc->stage == MOVE_DATA_EXTENTS && 3773 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3774 rc->found_file_extent = 1; 3775 ret = relocate_data_extent(rc->data_inode, 3776 &key, &rc->cluster); 3777 if (ret < 0) { 3778 err = ret; 3779 break; 3780 } 3781 } 3782 } 3783 3784 btrfs_release_path(rc->extent_root, path); 3785 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, 3786 GFP_NOFS); 3787 3788 if (trans) { 3789 nr = trans->blocks_used; 3790 btrfs_end_transaction_throttle(trans, rc->extent_root); 3791 btrfs_btree_balance_dirty(rc->extent_root, nr); 3792 } 3793 3794 if (!err) { 3795 ret = relocate_file_extent_cluster(rc->data_inode, 3796 &rc->cluster); 3797 if (ret < 0) 3798 err = ret; 3799 } 3800 3801 rc->create_reloc_tree = 0; 3802 set_reloc_control(rc); 3803 3804 backref_cache_cleanup(&rc->backref_cache); 3805 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 3806 3807 err = prepare_to_merge(rc, err); 3808 3809 merge_reloc_roots(rc); 3810 3811 rc->merge_reloc_tree = 0; 3812 unset_reloc_control(rc); 3813 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 3814 3815 /* get rid of pinned extents */ 3816 trans = btrfs_join_transaction(rc->extent_root, 1); 3817 if (IS_ERR(trans)) 3818 err = PTR_ERR(trans); 3819 else 3820 btrfs_commit_transaction(trans, rc->extent_root); 3821 out_free: 3822 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); 3823 btrfs_free_path(path); 3824 return err; 3825 } 3826 3827 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 3828 struct 
btrfs_root *root, u64 objectid) 3829 { 3830 struct btrfs_path *path; 3831 struct btrfs_inode_item *item; 3832 struct extent_buffer *leaf; 3833 int ret; 3834 3835 path = btrfs_alloc_path(); 3836 if (!path) 3837 return -ENOMEM; 3838 3839 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 3840 if (ret) 3841 goto out; 3842 3843 leaf = path->nodes[0]; 3844 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 3845 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item)); 3846 btrfs_set_inode_generation(leaf, item, 1); 3847 btrfs_set_inode_size(leaf, item, 0); 3848 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 3849 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 3850 BTRFS_INODE_PREALLOC); 3851 btrfs_mark_buffer_dirty(leaf); 3852 btrfs_release_path(root, path); 3853 out: 3854 btrfs_free_path(path); 3855 return ret; 3856 } 3857 3858 /* 3859 * helper to create inode for data relocation. 3860 * the inode is in data relocation tree and its link count is 0 3861 */ 3862 static noinline_for_stack 3863 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 3864 struct btrfs_block_group_cache *group) 3865 { 3866 struct inode *inode = NULL; 3867 struct btrfs_trans_handle *trans; 3868 struct btrfs_root *root; 3869 struct btrfs_key key; 3870 unsigned long nr; 3871 u64 objectid = BTRFS_FIRST_FREE_OBJECTID; 3872 int err = 0; 3873 3874 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID); 3875 if (IS_ERR(root)) 3876 return ERR_CAST(root); 3877 3878 trans = btrfs_start_transaction(root, 6); 3879 if (IS_ERR(trans)) 3880 return ERR_CAST(trans); 3881 3882 err = btrfs_find_free_objectid(trans, root, objectid, &objectid); 3883 if (err) 3884 goto out; 3885 3886 err = __insert_orphan_inode(trans, root, objectid); 3887 BUG_ON(err); 3888 3889 key.objectid = objectid; 3890 key.type = BTRFS_INODE_ITEM_KEY; 3891 key.offset = 0; 3892 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); 3893 BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); 3894 BTRFS_I(inode)->index_cnt = group->key.objectid; 3895 3896 err = btrfs_orphan_add(trans, inode); 3897 out: 3898 nr = trans->blocks_used; 3899 btrfs_end_transaction(trans, root); 3900 btrfs_btree_balance_dirty(root, nr); 3901 if (err) { 3902 if (inode) 3903 iput(inode); 3904 inode = ERR_PTR(err); 3905 } 3906 return inode; 3907 } 3908 3909 static struct reloc_control *alloc_reloc_control(void) 3910 { 3911 struct reloc_control *rc; 3912 3913 rc = kzalloc(sizeof(*rc), GFP_NOFS); 3914 if (!rc) 3915 return NULL; 3916 3917 INIT_LIST_HEAD(&rc->reloc_roots); 3918 backref_cache_init(&rc->backref_cache); 3919 mapping_tree_init(&rc->reloc_root_tree); 3920 extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS); 3921 return rc; 3922 } 3923 3924 /* 3925 * function to relocate all extents in a block group. 
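* relocation is done in stages: MOVE_DATA_EXTENTS copies the data into
* the relocation inode, then UPDATE_DATA_PTRS rewrites the file extent
* pointers; tree blocks are moved by merging reloc trees back into the
* fs trees.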
3926 */ 3927 int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) 3928 { 3929 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3930 struct reloc_control *rc; 3931 struct inode *inode; 3932 struct btrfs_path *path; 3933 int ret; 3934 int rw = 0; 3935 int err = 0; 3936 3937 rc = alloc_reloc_control(); 3938 if (!rc) 3939 return -ENOMEM; 3940 3941 rc->extent_root = extent_root; 3942 3943 rc->block_group = btrfs_lookup_block_group(fs_info, group_start); 3944 BUG_ON(!rc->block_group); 3945 3946 if (!rc->block_group->ro) { 3947 ret = btrfs_set_block_group_ro(extent_root, rc->block_group); 3948 if (ret) { 3949 err = ret; 3950 goto out; 3951 } 3952 rw = 1; 3953 } 3954 3955 path = btrfs_alloc_path(); 3956 if (!path) { 3957 err = -ENOMEM; 3958 goto out; 3959 } 3960 3961 inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group, 3962 path); 3963 btrfs_free_path(path); 3964 3965 if (!IS_ERR(inode)) 3966 ret = delete_block_group_cache(fs_info, inode, 0); 3967 else 3968 ret = PTR_ERR(inode); 3969 3970 if (ret && ret != -ENOENT) { 3971 err = ret; 3972 goto out; 3973 } 3974 3975 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); 3976 if (IS_ERR(rc->data_inode)) { 3977 err = PTR_ERR(rc->data_inode); 3978 rc->data_inode = NULL; 3979 goto out; 3980 } 3981 3982 printk(KERN_INFO "btrfs: relocating block group %llu flags %llu\n", 3983 (unsigned long long)rc->block_group->key.objectid, 3984 (unsigned long long)rc->block_group->flags); 3985 3986 btrfs_start_delalloc_inodes(fs_info->tree_root, 0); 3987 btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0); 3988 3989 while (1) { 3990 mutex_lock(&fs_info->cleaner_mutex); 3991 3992 btrfs_clean_old_snapshots(fs_info->tree_root); 3993 ret = relocate_block_group(rc); 3994 3995 mutex_unlock(&fs_info->cleaner_mutex); 3996 if (ret < 0) { 3997 err = ret; 3998 goto out; 3999 } 4000 4001 if (rc->extents_found == 0) 4002 break; 4003 4004 printk(KERN_INFO "btrfs: found %llu extents\n", 4005 (unsigned long long)rc->extents_found); 4006 4007 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { 4008 btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1); 4009 invalidate_mapping_pages(rc->data_inode->i_mapping, 4010 0, -1); 4011 rc->stage = UPDATE_DATA_PTRS; 4012 } 4013 } 4014 4015 filemap_write_and_wait_range(fs_info->btree_inode->i_mapping, 4016 rc->block_group->key.objectid, 4017 rc->block_group->key.objectid + 4018 rc->block_group->key.offset - 1); 4019 4020 WARN_ON(rc->block_group->pinned > 0); 4021 WARN_ON(rc->block_group->reserved > 0); 4022 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); 4023 out: 4024 if (err && rw) 4025 btrfs_set_block_group_rw(extent_root, rc->block_group); 4026 iput(rc->data_inode); 4027 btrfs_put_block_group(rc->block_group); 4028 kfree(rc); 4029 return err; 4030 } 4031 4032 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 4033 { 4034 struct btrfs_trans_handle *trans; 4035 int ret; 4036 4037 trans = btrfs_start_transaction(root->fs_info->tree_root, 0); 4038 BUG_ON(IS_ERR(trans)); 4039 4040 memset(&root->root_item.drop_progress, 0, 4041 sizeof(root->root_item.drop_progress)); 4042 root->root_item.drop_level = 0; 4043 btrfs_set_root_refs(&root->root_item, 0); 4044 ret = btrfs_update_root(trans, root->fs_info->tree_root, 4045 &root->root_key, &root->root_item); 4046 BUG_ON(ret); 4047 4048 ret = btrfs_end_transaction(trans, root->fs_info->tree_root); 4049 BUG_ON(ret); 4050 return 0; 4051 } 4052 4053 /* 4054 * recover relocation interrupted by system crash. 
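* reloc trees whose root refs are zero are stale and simply get dropped,
* the rest are reattached to their fs trees and merged.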
4055 * 4056 * this function resumes merging reloc trees with corresponding fs trees. 4057 * this is important for keeping the sharing of tree blocks 4058 */ 4059 int btrfs_recover_relocation(struct btrfs_root *root) 4060 { 4061 LIST_HEAD(reloc_roots); 4062 struct btrfs_key key; 4063 struct btrfs_root *fs_root; 4064 struct btrfs_root *reloc_root; 4065 struct btrfs_path *path; 4066 struct extent_buffer *leaf; 4067 struct reloc_control *rc = NULL; 4068 struct btrfs_trans_handle *trans; 4069 int ret; 4070 int err = 0; 4071 4072 path = btrfs_alloc_path(); 4073 if (!path) 4074 return -ENOMEM; 4075 4076 key.objectid = BTRFS_TREE_RELOC_OBJECTID; 4077 key.type = BTRFS_ROOT_ITEM_KEY; 4078 key.offset = (u64)-1; 4079 4080 while (1) { 4081 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, 4082 path, 0, 0); 4083 if (ret < 0) { 4084 err = ret; 4085 goto out; 4086 } 4087 if (ret > 0) { 4088 if (path->slots[0] == 0) 4089 break; 4090 path->slots[0]--; 4091 } 4092 leaf = path->nodes[0]; 4093 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4094 btrfs_release_path(root->fs_info->tree_root, path); 4095 4096 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || 4097 key.type != BTRFS_ROOT_ITEM_KEY) 4098 break; 4099 4100 reloc_root = btrfs_read_fs_root_no_radix(root, &key); 4101 if (IS_ERR(reloc_root)) { 4102 err = PTR_ERR(reloc_root); 4103 goto out; 4104 } 4105 4106 list_add(&reloc_root->root_list, &reloc_roots); 4107 4108 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 4109 fs_root = read_fs_root(root->fs_info, 4110 reloc_root->root_key.offset); 4111 if (IS_ERR(fs_root)) { 4112 ret = PTR_ERR(fs_root); 4113 if (ret != -ENOENT) { 4114 err = ret; 4115 goto out; 4116 } 4117 mark_garbage_root(reloc_root); 4118 } 4119 } 4120 4121 if (key.offset == 0) 4122 break; 4123 4124 key.offset--; 4125 } 4126 btrfs_release_path(root->fs_info->tree_root, path); 4127 4128 if (list_empty(&reloc_roots)) 4129 goto out; 4130 4131 rc = alloc_reloc_control(); 4132 if (!rc) { 4133 err = -ENOMEM; 4134 goto out; 4135 } 4136 4137 rc->extent_root = root->fs_info->extent_root; 4138 4139 set_reloc_control(rc); 4140 4141 trans = btrfs_join_transaction(rc->extent_root, 1); 4142 if (IS_ERR(trans)) { 4143 unset_reloc_control(rc); 4144 err = PTR_ERR(trans); 4145 goto out_free; 4146 } 4147 4148 rc->merge_reloc_tree = 1; 4149 4150 while (!list_empty(&reloc_roots)) { 4151 reloc_root = list_entry(reloc_roots.next, 4152 struct btrfs_root, root_list); 4153 list_del(&reloc_root->root_list); 4154 4155 if (btrfs_root_refs(&reloc_root->root_item) == 0) { 4156 list_add_tail(&reloc_root->root_list, 4157 &rc->reloc_roots); 4158 continue; 4159 } 4160 4161 fs_root = read_fs_root(root->fs_info, 4162 reloc_root->root_key.offset); 4163 BUG_ON(IS_ERR(fs_root)); 4164 4165 __add_reloc_root(reloc_root); 4166 fs_root->reloc_root = reloc_root; 4167 } 4168 4169 btrfs_commit_transaction(trans, rc->extent_root); 4170 4171 merge_reloc_roots(rc); 4172 4173 unset_reloc_control(rc); 4174 4175 trans = btrfs_join_transaction(rc->extent_root, 1); 4176 if (IS_ERR(trans)) 4177 err = PTR_ERR(trans); 4178 else 4179 btrfs_commit_transaction(trans, rc->extent_root); 4180 out_free: 4181 kfree(rc); 4182 out: 4183 while (!list_empty(&reloc_roots)) { 4184 reloc_root = list_entry(reloc_roots.next, 4185 struct btrfs_root, root_list); 4186 list_del(&reloc_root->root_list); 4187 free_extent_buffer(reloc_root->node); 4188 free_extent_buffer(reloc_root->commit_root); 4189 kfree(reloc_root); 4190 } 4191 btrfs_free_path(path); 4192 4193 if (err == 0) { 4194 /* cleanup orphan inode in 
data relocation tree */ 4195 fs_root = read_fs_root(root->fs_info, 4196 BTRFS_DATA_RELOC_TREE_OBJECTID); 4197 if (IS_ERR(fs_root)) 4198 err = PTR_ERR(fs_root); 4199 else 4200 btrfs_orphan_cleanup(fs_root); 4201 } 4202 return err; 4203 } 4204 4205 /* 4206 * helper to add ordered checksum for data relocation. 4207 * 4208 * cloning checksum properly handles the nodatasum extents. 4209 * it also saves CPU time to re-calculate the checksum. 4210 */ 4211 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) 4212 { 4213 struct btrfs_ordered_sum *sums; 4214 struct btrfs_sector_sum *sector_sum; 4215 struct btrfs_ordered_extent *ordered; 4216 struct btrfs_root *root = BTRFS_I(inode)->root; 4217 size_t offset; 4218 int ret; 4219 u64 disk_bytenr; 4220 LIST_HEAD(list); 4221 4222 ordered = btrfs_lookup_ordered_extent(inode, file_pos); 4223 BUG_ON(ordered->file_offset != file_pos || ordered->len != len); 4224 4225 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; 4226 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, 4227 disk_bytenr + len - 1, &list); 4228 4229 while (!list_empty(&list)) { 4230 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 4231 list_del_init(&sums->list); 4232 4233 sector_sum = sums->sums; 4234 sums->bytenr = ordered->start; 4235 4236 offset = 0; 4237 while (offset < sums->len) { 4238 sector_sum->bytenr += ordered->start - disk_bytenr; 4239 sector_sum++; 4240 offset += root->sectorsize; 4241 } 4242 4243 btrfs_add_ordered_sum(inode, ordered, sums); 4244 } 4245 btrfs_put_ordered_extent(ordered); 4246 return ret; 4247 } 4248 4249 void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 4250 struct btrfs_root *root, struct extent_buffer *buf, 4251 struct extent_buffer *cow) 4252 { 4253 struct reloc_control *rc; 4254 struct backref_node *node; 4255 int first_cow = 0; 4256 int level; 4257 int ret; 4258 4259 rc = root->fs_info->reloc_ctl; 4260 if (!rc) 4261 return; 4262 4263 BUG_ON(rc->stage == UPDATE_DATA_PTRS && 4264 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); 4265 4266 level = btrfs_header_level(buf); 4267 if (btrfs_header_generation(buf) <= 4268 btrfs_root_last_snapshot(&root->root_item)) 4269 first_cow = 1; 4270 4271 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID && 4272 rc->create_reloc_tree) { 4273 WARN_ON(!first_cow && level == 0); 4274 4275 node = rc->backref_cache.path[level]; 4276 BUG_ON(node->bytenr != buf->start && 4277 node->new_bytenr != buf->start); 4278 4279 drop_node_buffer(node); 4280 extent_buffer_get(cow); 4281 node->eb = cow; 4282 node->new_bytenr = cow->start; 4283 4284 if (!node->pending) { 4285 list_move_tail(&node->list, 4286 &rc->backref_cache.pending[level]); 4287 node->pending = 1; 4288 } 4289 4290 if (first_cow) 4291 __mark_block_processed(rc, node); 4292 4293 if (first_cow && level > 0) 4294 rc->nodes_relocated += buf->len; 4295 } 4296 4297 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) { 4298 ret = replace_file_extents(trans, rc, root, cow); 4299 BUG_ON(ret); 4300 } 4301 } 4302 4303 /* 4304 * called before creating snapshot. 
it calculates metadata reservation 4305 * required for relocating tree blocks in the snapshot 4306 */ 4307 void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, 4308 struct btrfs_pending_snapshot *pending, 4309 u64 *bytes_to_reserve) 4310 { 4311 struct btrfs_root *root; 4312 struct reloc_control *rc; 4313 4314 root = pending->root; 4315 if (!root->reloc_root) 4316 return; 4317 4318 rc = root->fs_info->reloc_ctl; 4319 if (!rc->merge_reloc_tree) 4320 return; 4321 4322 root = root->reloc_root; 4323 BUG_ON(btrfs_root_refs(&root->root_item) == 0); 4324 /* 4325 * relocation is in the stage of merging trees. the space 4326 * used by merging a reloc tree is twice the size of 4327 * relocated tree nodes in the worst case. half for cowing 4328 * the reloc tree, half for cowing the fs tree. the space 4329 * used by cowing the reloc tree will be freed after the 4330 * tree is dropped. if we create a snapshot, cowing the fs 4331 * tree may use more space than it frees. so we need to 4332 * reserve extra space. 4333 */ 4334 *bytes_to_reserve += rc->nodes_relocated; 4335 } 4336 4337 /* 4338 * called after the snapshot is created. migrate block reservation 4339 * and create reloc root for the newly created snapshot 4340 */ 4341 void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 4342 struct btrfs_pending_snapshot *pending) 4343 { 4344 struct btrfs_root *root = pending->root; 4345 struct btrfs_root *reloc_root; 4346 struct btrfs_root *new_root; 4347 struct reloc_control *rc; 4348 int ret; 4349 4350 if (!root->reloc_root) 4351 return; 4352 4353 rc = root->fs_info->reloc_ctl; 4354 rc->merging_rsv_size += rc->nodes_relocated; 4355 4356 if (rc->merge_reloc_tree) { 4357 ret = btrfs_block_rsv_migrate(&pending->block_rsv, 4358 rc->block_rsv, 4359 rc->nodes_relocated); 4360 BUG_ON(ret); 4361 } 4362 4363 new_root = pending->snap; 4364 reloc_root = create_reloc_root(trans, root->reloc_root, 4365 new_root->root_key.objectid); 4366 4367 __add_reloc_root(reloc_root); 4368 new_root->reloc_root = reloc_root; 4369 4370 if (rc->create_reloc_tree) { 4371 ret = clone_backref_node(trans, rc, root, reloc_root); 4372 BUG_ON(ret); 4373 } 4374 } 4375