// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
#include "misc.h"
#include "subpage.h"
#include "zoned.h"
#include "inode-item.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "relocation.h"
#include "super.h"
#include "tree-checker.h"
#include "raid-stripe-tree.h"
#include "free-space-tree.h"

/*
 * Relocation overview
 *
 * [What does relocation do]
 *
 * The objective of relocation is to relocate all extents of the target block
 * group to other block groups.
 * This is used by resize (shrink only), profile conversion, space compaction,
 * and the balance routine to spread chunks over devices.
 *
 *		Before		|		After
 * ------------------------------------------------------------------
 *  BG A: 10 data extents	| BG A: deleted
 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
 *
 * [How does relocation work]
 *
 * 1.   Mark the target block group read-only
 *      New extents won't be allocated from the target block group.
 *
 * 2.1  Record each extent in the target block group
 *      To build a proper map of extents to be relocated.
 *
 * 2.2  Build data reloc tree and reloc trees
 *      Data reloc tree will contain an inode, recording all newly relocated
 *      data extents.
 *      There will be only one data reloc tree for one data block group.
 *
 *      Reloc tree will be a special snapshot of its source tree, containing
 *      relocated tree blocks.
 *      Each tree referring to a tree block in the target block group will get
 *      its reloc tree built.
 *
 * 2.3  Swap source tree with its corresponding reloc tree
 *      Each involved tree only refers to new extents after the swap.
 *
 * 3.   Cleanup reloc trees and data reloc tree.
 *      As old extents in the target block group are still referenced by reloc
 *      trees, we need to clean them up before really freeing the target block
 *      group.
 *
 * The main complexity is in steps 2.2 and 2.3.
 *
 * The entry point of relocation is the relocate_block_group() function.
 */
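
/*
 * Rough sketch of the overall flow, for orientation only (error handling,
 * transactions and space reservation are omitted; relocate_block_group()
 * contains the real loop, and the helper names below refer to functions
 * in this file):
 *
 *	rc->stage = MOVE_DATA_EXTENTS;
 *	while (<next extent in the target block group>) {
 *		if (<extent is a tree block>)
 *			add_tree_block(...);       relocated later in batches
 *		else
 *			relocate_data_extent(...); copied via the data reloc inode
 *	}
 *	<second pass, rc->stage = UPDATE_DATA_PTRS, rewrites file extent
 *	 items to point at the relocated copies>
 *	merge_reloc_roots(rc);	swap reloc trees in, then clean them up
 */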

#define RELOCATION_RESERVED_NODES	256
/*
 * map address of tree root to tree
 */
struct mapping_node {
	union {
		/* Use rb_simple_node for search/insert */
		struct {
			struct rb_node rb_node;
			u64 bytenr;
		};

		struct rb_simple_node simple_node;
	};
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represent a tree block to process
 */
struct tree_block {
	union {
		/* Use rb_simple_node for search/insert */
		struct {
			struct rb_node rb_node;
			u64 bytenr;
		};

		struct rb_simple_node simple_node;
	};
	u64 owner;
	struct btrfs_key key;
	u8 level;
	bool key_ready;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
	u64 owning_root;
};

/* Stages of data relocation. */
enum reloc_stage {
	MOVE_DATA_EXTENTS,
	UPDATE_DATA_PTRS
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	enum reloc_stage stage;
	bool create_reloc_tree;
	bool merge_reloc_tree;
	bool found_file_extent;
};

static void mark_block_processed(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_range(node->bytenr, rc->block_group->start,
		     rc->block_group->length)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		btrfs_set_extent_bit(&rc->processed_blocks, node->bytenr,
				     node->bytenr + blocksize - 1, EXTENT_DIRTY,
				     NULL);
	}
	node->processed = 1;
}

/*
 * walk up backref nodes until we reach a node that represents a tree root
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
					list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}
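
/*
 * Illustrative example of the edges[]/index contract shared by
 * walk_up_backref() and walk_down_backref() (a summary, not new logic):
 * for a block B referenced via two paths B -> N1 -> R1 and B -> N2 -> R2,
 * walk_up_backref(B, edges, &idx) follows the first upper edge at each
 * level and returns R1 with edges[] == { B->N1, N1->R1 } and idx == 2.
 * walk_down_backref(edges, &idx) then pops edges whose lower node has no
 * further upper edges and switches to the next sibling edge (here B->N2),
 * returning that edge's upper node, so repeated up/down walks enumerate
 * every reference path from B to the tree roots.
 */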

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct btrfs_backref_node *walk_down_backref(
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_first_entry(&edge->list[LOWER], struct btrfs_backref_edge,
					list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}

static bool reloc_root_is_dead(const struct btrfs_root *root)
{
	/*
	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
	 * btrfs_update_reloc_root. We need to see the updated bit before
	 * trying to access reloc_root.
	 */
	smp_rmb();
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return true;
	return false;
}

/*
 * Check if this subvolume tree has a valid reloc tree.
 *
 * A reloc tree after the swap is considered dead, thus not considered valid.
 * This is enough for most callers, as they don't distinguish a dead reloc
 * root from no reloc root. But btrfs_should_ignore_reloc_root() below is a
 * special case.
 */
static bool have_reloc_root(const struct btrfs_root *root)
{
	if (reloc_root_is_dead(root))
		return false;
	if (!root->reloc_root)
		return false;
	return true;
}

bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return false;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return true;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return false;

	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return false;
	/*
	 * If there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the backref
	 * node for the fs tree root is useless for relocation.
	 */
	return true;
}

/*
 * find reloc tree by address of tree root
 */
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	ASSERT(rc);
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return btrfs_grab_root(root);
}

/*
 * For useless nodes, do two major cleanups:
 *
 * - Cleanup the children edges and nodes
 *   If a child node is also orphan (no parent) during cleanup, then the child
 *   node will also be cleaned up.
 *
 * - Freeing up leaves (level 0), keeps nodes detached
 *   For nodes, the node is still cached as "detached"
 *
 * Return false if @node is not in the @useless_nodes list.
 * Return true if @node is in the @useless_nodes list.
 */
static bool handle_useless_nodes(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct list_head *useless_node = &cache->useless_node;
	bool ret = false;

	while (!list_empty(useless_node)) {
		struct btrfs_backref_node *cur;

		cur = list_first_entry(useless_node, struct btrfs_backref_node,
				       list);
		list_del_init(&cur->list);

		/* Only tree root nodes can be added to @useless_nodes */
		ASSERT(list_empty(&cur->upper));

		if (cur == node)
			ret = true;

		/* Cleanup the lower edges */
		while (!list_empty(&cur->lower)) {
			struct btrfs_backref_edge *edge;
			struct btrfs_backref_node *lower;

			edge = list_first_entry(&cur->lower, struct btrfs_backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			btrfs_backref_free_edge(cache, edge);

			/* Child node is also orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
		}
		/* Mark this block processed for relocation */
		mark_block_processed(rc, cur);

		/*
		 * Backref nodes for tree leaves are deleted from the cache.
		 * Backref nodes for upper level tree blocks are left in the
		 * cache to avoid unnecessary backref lookup.
		 */
		if (cur->level > 0) {
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}

/*
 * Build a backref tree for a given tree block. The root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond to
 * roots of b-trees that reference the tree block.
 *
 * The basic idea of this function is to check backrefs of a given block to
 * find upper level blocks that reference the block, and then check backrefs
 * of these upper level blocks recursively. The recursion stops when a tree
 * root is reached or the backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct btrfs_trans_handle *trans,
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		ret = -ENOMEM;
		goto out;
	}

	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
						  node_key, cur);
		if (ret < 0)
			goto out;

		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process.
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto out;

	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_free_path(iter->path);
	kfree(iter);
	btrfs_free_path(path);
	if (ret) {
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(ret);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->commit_root->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_err(fs_info,
			  "Duplicate root found for start=%llu while inserting into relocation tree",
			  node->bytenr);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

/*
 * helper to delete the 'address of tree root -> reloc tree' mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node AUTO_KFREE(node);
	struct reloc_control *rc = fs_info->reloc_ctl;
	bool put_ref = false;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
					   root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
			RB_CLEAR_NODE(&node->rb_node);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		ASSERT(!node || (struct btrfs_root *)node->data == root);
	}

	/*
	 * We only put the reloc root here if it's on the list. There's a lot
	 * of places where the pattern is to splice the rc->reloc_roots, process
	 * the reloc roots, and then add the reloc root back onto
	 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
	 * list we don't want the reference being dropped, because the guy
	 * messing with the list is in charge of the reference.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&root->root_list)) {
		put_ref = true;
		list_del_init(&root->root_list);
	}
	spin_unlock(&fs_info->trans_lock);
	if (put_ref)
		btrfs_put_root(root);
}

/*
 * helper to update the 'address of tree root -> reloc tree' mapping
 */
static int __update_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
				   root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = root->node->start;
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
	return 0;
}
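
/*
 * Summary of the mapping_tree entry lifecycle (descriptive only, assuming
 * the usual flow; the key is the bytenr of the reloc root's commit root):
 *
 *	create_reloc_root()			-> __add_reloc_root()
 *	reloc root CoWed, commit root changes	-> __update_reloc_root()
 *						   re-keys to the new bytenr
 *	merge finished or tree dead		-> __del_reloc_root()
 *
 * find_reloc_root() uses this tree to resolve a tree root bytenr back to
 * its reloc root during backref walks.
 */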

static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item AUTO_KFREE(root_item);
	struct btrfs_key root_key;
	int ret = 0;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (btrfs_root_id(root) == objectid) {
		u64 commit_root_gen;

		/*
		 * Relocation will wait for the cleaner thread, and any
		 * half-dropped subvolume will be fully cleaned up at mount
		 * time. So here we shouldn't hit a subvolume with non-zero
		 * drop_progress.
		 *
		 * If this isn't the case, error out since it can make us
		 * attempt to drop references for extents that were already
		 * dropped before.
		 */
		if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) {
			struct btrfs_key cpu_key;

			btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress);
			btrfs_err(fs_info,
	"cannot relocate partially dropped subvolume %llu, drop progress key " BTRFS_KEY_FMT,
				  objectid, BTRFS_KEY_FMT_VALUE(&cpu_key));
			return ERR_PTR(-EUCLEAN);
		}

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) both when the relocation root is
		 * created inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			return ERR_PTR(ret);
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails (i.e. 'goto abort').
	 */

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (btrfs_root_id(root) == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		btrfs_set_root_drop_level(root_item, 0);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto abort;

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	btrfs_set_root_last_trans(reloc_root, trans->transid);
	return reloc_root;

abort:
	btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}

/*
 * create a reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 *
 * The reloc_root comes out of here with two references, one for
 * root->reloc_root, and another for being on the rc->reloc_roots list.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (!rc)
		return 0;

	/*
	 * The subvolume has a reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree.
	 */
	if (reloc_root_is_dead(root))
		return 0;

	/*
	 * This is subtle but important. We do not do
	 * record_root_in_transaction for reloc roots, instead we record their
	 * corresponding fs root, and then here we update the last trans for the
	 * reloc root. This means that we have to do this for the entire life
	 * of the reloc root, regardless of which stage of the relocation we are
	 * in.
	 */
	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		btrfs_set_root_last_trans(reloc_root, trans->transid);
		return 0;
	}

	/*
	 * We are merging reloc roots, we do not need new reloc trees. Also
	 * reloc trees never need their own reloc tree.
	 */
	if (!rc->create_reloc_tree || btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, btrfs_root_id(root));
	if (clear_rsv)
		trans->block_rsv = rsv;
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	root->reloc_root = btrfs_grab_root(reloc_root);
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!have_reloc_root(root))
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/*
	 * We are probably ok here, but __del_reloc_root() will drop its ref of
	 * the root. We have the ref for root->reloc_root, but just in case
	 * hold it while we update the reloc root.
	 */
	btrfs_grab_root(reloc_root);

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		__update_reloc_root(reloc_root);
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	btrfs_put_root(reloc_root);
	return ret;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->reloc_block_group_start;
	ret = btrfs_lookup_file_extent(NULL, root, path,
				       btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return -ENOENT;

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi))
		return -EINVAL;

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	return 0;
}
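
/*
 * Worked example with illustrative numbers (not taken from the code): if
 * the block group under relocation starts at bytenr 1G and a data extent
 * lives at bytenr 1G + 4M, its copy was written at file offset 4M of the
 * data reloc inode. get_new_location() thus looks up the file extent item
 * at offset 4M and returns that item's disk_bytenr as the new location.
 * The BUG_ON() holds because the data reloc inode is written without
 * compression, encryption or sub-extent offsets.
 */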

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_range(bytenr, rc->block_group->start,
			      rc->block_group->length))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * read_folio to complete and drop the extent cache
		 */
		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = btrfs_find_first_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(inode) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = btrfs_find_first_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(inode) == key.objectid) {
				struct extent_state *cached_state = NULL;

				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				/* Take mmap lock to serialize with reflinks. */
				if (!down_read_trylock(&inode->i_mmap_lock))
					continue;
				ret = btrfs_try_lock_extent(&inode->io_tree, key.offset,
							    end, &cached_state);
				if (!ret) {
					up_read(&inode->i_mmap_lock);
					continue;
				}

				btrfs_drop_extent_map_range(inode, key.offset, end, true);
				btrfs_unlock_extent(&inode->io_tree, key.offset, end,
						    &cached_state);
				up_read(&inode->i_mmap_lock);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ref.action = BTRFS_ADD_DELAYED_REF;
		ref.bytenr = new_bytenr;
		ref.num_bytes = num_bytes;
		ref.parent = parent;
		ref.owning_root = btrfs_root_id(root);
		ref.ref_root = btrfs_header_owner(leaf);
		btrfs_init_data_ref(&ref, key.objectid, key.offset,
				    btrfs_root_id(root), false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		ref.action = BTRFS_DROP_DELAYED_REF;
		ref.bytenr = bytenr;
		ref.num_bytes = num_bytes;
		ref.parent = parent;
		ref.owning_root = btrfs_root_id(root);
		ref.ref_root = btrfs_header_owner(leaf);
		btrfs_init_data_ref(&ref, key.objectid, key.offset,
				    btrfs_root_id(root), false);
		ret = btrfs_free_extent(trans, &ref);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}
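
/*
 * Note on the ordering above (a summary, not new logic): for every
 * rewritten file extent item we queue the ADD delayed ref for the new
 * location before the DROP for the old one, both in the same transaction,
 * so the data extent is never left unreferenced in between.
 */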

static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
					       int slot, const struct btrfs_path *path,
					       int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	ASSERT(btrfs_root_id(src) == BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(btrfs_root_id(dest) != BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			return ret;
		}
	}

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		ASSERT(level >= lowest_level);

		ret = btrfs_bin_search(parent, 0, &key, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
								path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = btrfs_read_node_slot(parent, slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb,
						      BTRFS_NESTING_COW);
				if (ret) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
					break;
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		path->lowest_level = 0;
		if (ret) {
			if (ret > 0)
				ret = -ENOENT;
			break;
		}

		/*
		 * Inform qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);

		ref.action = BTRFS_ADD_DELAYED_REF;
		ref.bytenr = old_bytenr;
		ref.num_bytes = blocksize;
		ref.parent = path->nodes[level]->start;
		ref.owning_root = btrfs_root_id(src);
		ref.ref_root = btrfs_root_id(src);
		btrfs_init_tree_ref(&ref, level - 1, 0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		ref.action = BTRFS_ADD_DELAYED_REF;
		ref.bytenr = new_bytenr;
		ref.num_bytes = blocksize;
		ref.parent = 0;
		ref.owning_root = btrfs_root_id(dest);
		ref.ref_root = btrfs_root_id(dest);
		btrfs_init_tree_ref(&ref, level - 1, 0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		/* We don't know the real owning_root, use 0. */
		ref.action = BTRFS_DROP_DELAYED_REF;
		ref.bytenr = new_bytenr;
		ref.num_bytes = blocksize;
		ref.parent = path->nodes[level]->start;
		ref.owning_root = 0;
		ref.ref_root = btrfs_root_id(src);
		btrfs_init_tree_ref(&ref, level - 1, 0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		/* We don't know the real owning_root, use 0. */
		ref.action = BTRFS_DROP_DELAYED_REF;
		ref.bytenr = old_bytenr;
		ref.num_bytes = blocksize;
		ref.parent = 0;
		ref.owning_root = 0;
		ref.ref_root = btrfs_root_id(dest);
		btrfs_init_tree_ref(&ref, level - 1, 0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}

/*
 * helper to find the next relocated block in the reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}

/*
 * walk down the reloc tree to find the relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		eb = btrfs_read_node_slot(eb, path->slots[i]);
		if (IS_ERR(eb))
			return PTR_ERR(eb);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
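
/*
 * How the two walkers cooperate (a summary, assuming the merge flow in
 * merge_reloc_root()): walk_down_reloc_tree() descends to the lowest
 * block whose pointer generation is newer than last_snapshot, i.e. a
 * block CoWed into the reloc tree after it was created, and
 * walk_up_reloc_tree() advances to the next such sibling slot. Together
 * they enumerate exactly the relocated blocks that need to be merged back
 * into the fs tree.
 */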

/*
 * invalidate the extent cache for file extents whose keys are in the range
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   const struct btrfs_key *min_key,
				   const struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		struct extent_state *cached_state = NULL;

		cond_resched();
		if (inode)
			iput(&inode->vfs_inode);

		if (objectid > max_key->objectid)
			break;

		inode = btrfs_find_first_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(inode);

		if (ino > max_key->objectid) {
			iput(&inode->vfs_inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->vfs_inode.i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for read_folio to complete */
		btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
		btrfs_drop_extent_map_range(inode, start, end, true);
		btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	int ret;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	btrfs_set_root_drop_level(reloc_root_item, 0);
	btrfs_set_root_refs(reloc_root_item, 0);
	ret = btrfs_update_reloc_root(trans, root);
	if (ret)
		return ret;

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}

	return 0;
}

static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL. Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			if (reloc_root) {
				/*
				 * btrfs_drop_snapshot drops the ref we hold for
				 * ->reloc_root. If it fails however we must
				 * drop the ref ourselves.
				 */
				ret2 = btrfs_drop_snapshot(reloc_root, false, true);
				if (ret2 < 0) {
					btrfs_put_root(reloc_root);
					if (!ret)
						ret = ret2;
				}
			}
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, false, true);
			if (ret2 < 0) {
				btrfs_put_root(root);
				if (!ret)
					ret = ret2;
			}
		}
	}
	return ret;
}

/*
 * merge the relocated tree blocks in the reloc tree with the corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int reserve_level;
	int level;
	int max_level;
	int replaced = 0;
	int ret = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		refcount_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	/*
	 * In merge_reloc_root(), we modify the upper level pointer to swap the
	 * tree blocks between the reloc tree and the subvolume tree. Thus for
	 * tree block COW, we COW at most from level 1 to root level for each
	 * tree.
	 *
	 * Thus the needed metadata size is at most root_level * nodesize,
	 * doubled since we have two trees to COW.
	 */
	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
	min_reserved = fs_info->nodesize * reserve_level * 2;
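
	/*
	 * Worked example of the math above (illustrative numbers): with a
	 * 16K nodesize and a level 3 reloc root, min_reserved is
	 * 16K * 3 * 2 = 96K, enough to CoW one path from level 1 up to the
	 * root in both trees.
	 */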
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     min_reserved,
					     BTRFS_RESERVE_FLUSH_LIMIT);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}

		/*
		 * At this point we no longer have a reloc_control, so we can't
		 * depend on btrfs_init_reloc_root to update our last_trans.
		 *
		 * But that's ok, we started the trans handle on our
		 * corresponding fs_root, which means it's been added to the
		 * dirty list. At commit time we'll still call
		 * btrfs_update_reloc_root() and update our root item
		 * appropriately.
		 */
		btrfs_set_root_last_trans(reloc_root, trans->transid);
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0)
			goto out;
		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		btrfs_set_root_drop_level(root_item, level);

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to be
	 * relocated and that block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
			      BTRFS_NESTING_COW);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
out:
	btrfs_free_path(path);

	if (ret == 0) {
		ret = insert_dirty_subvol(trans, rc, root);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return ret;
}
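
/*
 * The merge loop above at a glance (a summary of existing behaviour):
 * each iteration refills the reservation, starts a small transaction,
 * walks to the next batch of relocated blocks, swaps them into the fs
 * tree via replace_path(), records its position in drop_progress so an
 * interrupted merge can resume, then ends the transaction and throttles.
 */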

static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
			goto again;
		}
	}

	rc->merge_reloc_tree = true;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_first_entry(&rc->reloc_roots,
					      struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (IS_ERR(root)) {
			/*
			 * Even if we have an error we need this reloc root
			 * back on our list so we can clean up properly.
			 */
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
			if (!err)
				err = PTR_ERR(root);
			break;
		}

		if (unlikely(root->reloc_root != reloc_root)) {
			if (root->reloc_root) {
				btrfs_err(fs_info,
"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
					  btrfs_root_id(root),
					  btrfs_root_id(root->reloc_root),
					  root->reloc_root->root_key.type,
					  root->reloc_root->root_key.offset,
					  btrfs_root_generation(
						  &root->reloc_root->root_item),
					  btrfs_root_id(reloc_root),
					  reloc_root->root_key.type,
					  reloc_root->root_key.offset,
					  btrfs_root_generation(
						  &reloc_root->root_item));
			} else {
				btrfs_err(fs_info,
"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
					  btrfs_root_id(root),
					  btrfs_root_id(reloc_root),
					  reloc_root->root_key.type,
					  reloc_root->root_key.offset,
					  btrfs_root_generation(
						  &reloc_root->root_item));
			}
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_put_root(root);
			btrfs_abort_transaction(trans, -EUCLEAN);
			if (!err)
				err = -EUCLEAN;
			break;
		}

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		ret = btrfs_update_reloc_root(trans, root);

		/*
		 * Even if we have an error we need this reloc root back on our
		 * list so we can clean up properly.
		 */
		list_add(&reloc_root->root_list, &reloc_roots);
		btrfs_put_root(root);

		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			if (!err)
				err = ret;
			break;
		}
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		err = btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}

static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root, *tmp;

	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
		__del_reloc_root(reloc_root);
}

static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			if (WARN_ON(IS_ERR(root))) {
				/*
				 * For recovery we read the fs roots on mount,
				 * and if we didn't find the root then we marked
				 * the reloc root as a garbage root. For normal
				 * relocation obviously the root should exist in
				 * memory. However there's no reason we can't
				 * handle the error properly here just in case.
				 */
				ret = PTR_ERR(root);
				goto out;
			}
			if (WARN_ON(root->reloc_root != reloc_root)) {
				/*
				 * This can happen if on-disk metadata has some
				 * corruption, e.g. bad reloc tree key offset.
				 */
				ret = -EINVAL;
				goto out;
			}
			ret = merge_reloc_root(rc, root);
			btrfs_put_root(root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			if (!IS_ERR(root)) {
				if (root->reloc_root == reloc_root) {
					root->reloc_root = NULL;
					btrfs_put_root(reloc_root);
				}
				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
					  &root->state);
				btrfs_put_root(root);
			}

			list_del_init(&reloc_root->root_list);
			/* Don't forget to queue this reloc root for cleanup */
			list_add_tail(&reloc_root->reloc_dirty_list,
				      &rc->dirty_subvol_roots);
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		free_reloc_roots(&reloc_roots);
	}

	/*
	 * We used to have
	 *
	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
	 *
	 * here, but it's wrong. If we fail to start the transaction in
	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
	 * have actually been removed from the reloc_root_tree rb tree. This is
	 * fine because we're bailing here, and we hold a reference on the root
	 * for the list that holds it, so these roots will be cleaned up when we
	 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
	 * will be cleaned up on unmount.
	 *
	 * The remaining nodes will be cleaned up by free_reloc_control.
	 */
}

static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;
	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}

static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
	struct btrfs_root *root;
	int ret;

	if (btrfs_get_root_last_trans(reloc_root) == trans->transid)
		return 0;

	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);

	/*
	 * This should succeed, since we can't have a reloc root without having
	 * already looked up the actual root and created the reloc root for this
	 * root.
	 *
	 * However if there's some sort of corruption where we have a ref to a
	 * reloc root without a corresponding root this could return ENOENT.
	 */
	if (IS_ERR(root)) {
		DEBUG_WARN("error %ld reading root for reloc root", PTR_ERR(root));
		return PTR_ERR(root);
	}
	if (unlikely(root->reloc_root != reloc_root)) {
		DEBUG_WARN("unexpected reloc root found");
		btrfs_err(fs_info,
			  "root %llu has two reloc roots associated with it",
			  reloc_root->root_key.offset);
		btrfs_put_root(root);
		return -EUCLEAN;
	}
	ret = btrfs_record_root_in_trans(trans, root);
	btrfs_put_root(root);

	return ret;
}

static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct btrfs_backref_node *node,
				     struct btrfs_backref_edge *edges[])
{
	struct btrfs_backref_node *next;
	struct btrfs_root *root;
	int index = 0;
	int ret;

	next = walk_up_backref(node, edges, &index);
	root = next->root;

	/*
	 * If there is no root, then our references for this block are
	 * incomplete, as we should be able to walk all the way up to a block
	 * that is owned by a root.
	 *
	 * This path is only for SHAREABLE roots, so if we come upon a
	 * non-SHAREABLE root then we have backrefs that resolve improperly.
	 *
	 * Both of these cases indicate file system corruption, or a bug in the
	 * backref walking code.
1987 */ 1988 if (unlikely(!root)) { 1989 btrfs_err(trans->fs_info, 1990 "bytenr %llu doesn't have a backref path ending in a root", 1991 node->bytenr); 1992 return ERR_PTR(-EUCLEAN); 1993 } 1994 if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) { 1995 btrfs_err(trans->fs_info, 1996 "bytenr %llu has multiple refs with one ending in a non-shareable root", 1997 node->bytenr); 1998 return ERR_PTR(-EUCLEAN); 1999 } 2000 2001 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) { 2002 ret = record_reloc_root_in_trans(trans, root); 2003 if (ret) 2004 return ERR_PTR(ret); 2005 goto found; 2006 } 2007 2008 ret = btrfs_record_root_in_trans(trans, root); 2009 if (ret) 2010 return ERR_PTR(ret); 2011 root = root->reloc_root; 2012 2013 /* 2014 * We could have raced with another thread which failed, so 2015 * root->reloc_root may not be set, return ENOENT in this case. 2016 */ 2017 if (!root) 2018 return ERR_PTR(-ENOENT); 2019 2020 if (unlikely(next->new_bytenr)) { 2021 /* 2022 * We just created the reloc root, so we shouldn't have 2023 * ->new_bytenr set yet. If it is then we have multiple roots 2024 * pointing at the same bytenr which indicates corruption, or 2025 * we've made a mistake in the backref walking code. 2026 */ 2027 ASSERT(next->new_bytenr == 0); 2028 btrfs_err(trans->fs_info, 2029 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu", 2030 node->bytenr, next->bytenr); 2031 return ERR_PTR(-EUCLEAN); 2032 } 2033 2034 next->new_bytenr = root->node->start; 2035 btrfs_put_root(next->root); 2036 next->root = btrfs_grab_root(root); 2037 ASSERT(next->root); 2038 mark_block_processed(rc, next); 2039 found: 2040 next = node; 2041 /* setup backref node path for btrfs_reloc_cow_block */ 2042 while (1) { 2043 rc->backref_cache.path[next->level] = next; 2044 if (--index < 0) 2045 break; 2046 next = edges[index]->node[UPPER]; 2047 } 2048 return root; 2049 } 2050 2051 /* 2052 * Select a tree root for relocation. 2053 * 2054 * Return NULL if the block is not shareable. We should use do_relocation() in 2055 * this case. 2056 * 2057 * Return a tree root pointer if the block is shareable. 2058 * Return -ENOENT if the block is root of reloc tree. 2059 */ 2060 static noinline_for_stack 2061 struct btrfs_root *select_one_root(struct btrfs_backref_node *node) 2062 { 2063 struct btrfs_backref_node *next; 2064 struct btrfs_root *root; 2065 struct btrfs_root *fs_root = NULL; 2066 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2067 int index = 0; 2068 2069 next = node; 2070 while (1) { 2071 cond_resched(); 2072 next = walk_up_backref(next, edges, &index); 2073 root = next->root; 2074 2075 /* 2076 * This can occur if we have incomplete extent refs leading all 2077 * the way up a particular path, in this case return -EUCLEAN. 
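		 *
		 * (For contrast: a block that is itself the root of a reloc
		 * tree has no upper edges, so the walk never leaves "node",
		 * fs_root stays NULL and we fall through to the -ENOENT
		 * return below.)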
2078 	 */
2079 		if (unlikely(!root))
2080 			return ERR_PTR(-EUCLEAN);
2081
2082 		/* No other choice for non-shareable tree */
2083 		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2084 			return root;
2085
2086 		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID)
2087 			fs_root = root;
2088
2089 		if (next != node)
2090 			return NULL;
2091
2092 		next = walk_down_backref(edges, &index);
2093 		if (!next || next->level <= node->level)
2094 			break;
2095 	}
2096
2097 	if (!fs_root)
2098 		return ERR_PTR(-ENOENT);
2099 	return fs_root;
2100 }
2101
2102 static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
2103 						  struct btrfs_backref_node *node)
2104 {
2105 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2106 	struct btrfs_backref_node *next = node;
2107 	struct btrfs_backref_edge *edge;
2108 	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2109 	u64 num_bytes = 0;
2110 	int index = 0;
2111
2112 	BUG_ON(node->processed);
2113
2114 	while (next) {
2115 		cond_resched();
2116 		while (1) {
2117 			if (next->processed)
2118 				break;
2119
2120 			num_bytes += fs_info->nodesize;
2121
2122 			if (list_empty(&next->upper))
2123 				break;
2124
2125 			edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
2126 						list[LOWER]);
2127 			edges[index++] = edge;
2128 			next = edge->node[UPPER];
2129 		}
2130 		next = walk_down_backref(edges, &index);
2131 	}
2132 	return num_bytes;
2133 }
2134
2135 static int refill_metadata_space(struct btrfs_trans_handle *trans,
2136 				 struct reloc_control *rc, u64 num_bytes)
2137 {
2138 	struct btrfs_fs_info *fs_info = trans->fs_info;
2139 	int ret;
2140
2141 	trans->block_rsv = rc->block_rsv;
2142 	rc->reserved_bytes += num_bytes;
2143
2144 	/*
2145 	 * We are under a transaction here so we can only do limited flushing.
2146 	 * If we get an enospc just kick back -EAGAIN so we know to drop the
2147 	 * transaction and try to refill when we can flush all the things.
2148 	 */
2149 	ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2150 				     BTRFS_RESERVE_FLUSH_LIMIT);
2151 	if (ret) {
2152 		u64 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2153
2154 		while (tmp <= rc->reserved_bytes)
2155 			tmp <<= 1;
2156 		/*
2157 		 * Only one thread can access block_rsv at this point, so we
2158 		 * don't need to hold a lock to protect it. We expand the
2159 		 * reservation size here to allow enough space for the
2160 		 * relocation, and we will return earlier in the ENOSPC
2161 		 * case.
2162 		 */
2163 		rc->block_rsv->size = tmp + fs_info->nodesize *
2164 				      RELOCATION_RESERVED_NODES;
2165 		return -EAGAIN;
2166 	}
2167
2168 	return 0;
2169 }
2170
2171 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2172 				  struct reloc_control *rc,
2173 				  struct btrfs_backref_node *node)
2174 {
2175 	u64 num_bytes;
2176
2177 	num_bytes = calcu_metadata_size(rc, node) * 2;
2178 	return refill_metadata_space(trans, rc, num_bytes);
2179 }
2180
2181 /*
2182  * relocate a tree block, and then update pointers in upper level
2183  * blocks that reference the block to point to the new location.
2184  *
2185  * if called by link_to_upper, the block has already been relocated;
2186  * in that case this function just updates pointers.
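 *
 * The "lowest" argument tells the two callers apart: relocate_tree_block()
 * passes 1 when the block itself is being relocated, while link_to_upper()
 * passes 0 when only the parent pointers need updating.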
2187 */ 2188 static int do_relocation(struct btrfs_trans_handle *trans, 2189 struct reloc_control *rc, 2190 struct btrfs_backref_node *node, 2191 struct btrfs_key *key, 2192 struct btrfs_path *path, int lowest) 2193 { 2194 struct btrfs_backref_node *upper; 2195 struct btrfs_backref_edge *edge; 2196 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2197 struct btrfs_root *root; 2198 struct extent_buffer *eb; 2199 u32 blocksize; 2200 u64 bytenr; 2201 int slot; 2202 int ret = 0; 2203 2204 /* 2205 * If we are lowest then this is the first time we're processing this 2206 * block, and thus shouldn't have an eb associated with it yet. 2207 */ 2208 ASSERT(!lowest || !node->eb); 2209 2210 path->lowest_level = node->level + 1; 2211 rc->backref_cache.path[node->level] = node; 2212 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2213 cond_resched(); 2214 2215 upper = edge->node[UPPER]; 2216 root = select_reloc_root(trans, rc, upper, edges); 2217 if (IS_ERR(root)) { 2218 ret = PTR_ERR(root); 2219 goto next; 2220 } 2221 2222 if (upper->eb && !upper->locked) { 2223 if (!lowest) { 2224 ret = btrfs_bin_search(upper->eb, 0, key, &slot); 2225 if (ret < 0) 2226 goto next; 2227 BUG_ON(ret); 2228 bytenr = btrfs_node_blockptr(upper->eb, slot); 2229 if (node->eb->start == bytenr) 2230 goto next; 2231 } 2232 btrfs_backref_drop_node_buffer(upper); 2233 } 2234 2235 if (!upper->eb) { 2236 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2237 if (ret) { 2238 if (ret > 0) 2239 ret = -ENOENT; 2240 2241 btrfs_release_path(path); 2242 break; 2243 } 2244 2245 if (!upper->eb) { 2246 upper->eb = path->nodes[upper->level]; 2247 path->nodes[upper->level] = NULL; 2248 } else { 2249 BUG_ON(upper->eb != path->nodes[upper->level]); 2250 } 2251 2252 upper->locked = 1; 2253 path->locks[upper->level] = 0; 2254 2255 slot = path->slots[upper->level]; 2256 btrfs_release_path(path); 2257 } else { 2258 ret = btrfs_bin_search(upper->eb, 0, key, &slot); 2259 if (ret < 0) 2260 goto next; 2261 BUG_ON(ret); 2262 } 2263 2264 bytenr = btrfs_node_blockptr(upper->eb, slot); 2265 if (lowest) { 2266 if (unlikely(bytenr != node->bytenr)) { 2267 btrfs_err(root->fs_info, 2268 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", 2269 bytenr, node->bytenr, slot, 2270 upper->eb->start); 2271 ret = -EIO; 2272 goto next; 2273 } 2274 } else { 2275 if (node->eb->start == bytenr) 2276 goto next; 2277 } 2278 2279 blocksize = root->fs_info->nodesize; 2280 eb = btrfs_read_node_slot(upper->eb, slot); 2281 if (IS_ERR(eb)) { 2282 ret = PTR_ERR(eb); 2283 goto next; 2284 } 2285 btrfs_tree_lock(eb); 2286 2287 if (!node->eb) { 2288 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2289 slot, &eb, BTRFS_NESTING_COW); 2290 btrfs_tree_unlock(eb); 2291 free_extent_buffer(eb); 2292 if (ret < 0) 2293 goto next; 2294 /* 2295 * We've just COWed this block, it should have updated 2296 * the correct backref node entry. 
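			 * (btrfs_cow_block() ends up in btrfs_reloc_cow_block(),
			 * which uses the backref path filled in by
			 * select_reloc_root() to point node->eb at the new copy.)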
2297 			 */
2298 			ASSERT(node->eb == eb);
2299 		} else {
2300 			struct btrfs_ref ref = {
2301 				.action = BTRFS_ADD_DELAYED_REF,
2302 				.bytenr = node->eb->start,
2303 				.num_bytes = blocksize,
2304 				.parent = upper->eb->start,
2305 				.owning_root = btrfs_header_owner(upper->eb),
2306 				.ref_root = btrfs_header_owner(upper->eb),
2307 			};
2308
2309 			btrfs_set_node_blockptr(upper->eb, slot,
2310 						node->eb->start);
2311 			btrfs_set_node_ptr_generation(upper->eb, slot,
2312 						      trans->transid);
2313 			btrfs_mark_buffer_dirty(trans, upper->eb);
2314
2315 			btrfs_init_tree_ref(&ref, node->level,
2316 					    btrfs_root_id(root), false);
2317 			ret = btrfs_inc_extent_ref(trans, &ref);
2318 			if (!ret)
2319 				ret = btrfs_drop_subtree(trans, root, eb,
2320 							 upper->eb);
2321 			if (unlikely(ret))
2322 				btrfs_abort_transaction(trans, ret);
2323 		}
2324 next:
2325 		if (!upper->pending)
2326 			btrfs_backref_drop_node_buffer(upper);
2327 		else
2328 			btrfs_backref_unlock_node_buffer(upper);
2329 		if (ret)
2330 			break;
2331 	}
2332
2333 	if (!ret && node->pending) {
2334 		btrfs_backref_drop_node_buffer(node);
2335 		list_del_init(&node->list);
2336 		node->pending = 0;
2337 	}
2338
2339 	path->lowest_level = 0;
2340
2341 	/*
2342 	 * We should have allocated all of our space in the block rsv and thus
2343 	 * shouldn't hit -ENOSPC here.
2344 	 */
2345 	ASSERT(ret != -ENOSPC);
2346 	return ret;
2347 }
2348
2349 static int link_to_upper(struct btrfs_trans_handle *trans,
2350 			 struct reloc_control *rc,
2351 			 struct btrfs_backref_node *node,
2352 			 struct btrfs_path *path)
2353 {
2354 	struct btrfs_key key;
2355
2356 	btrfs_node_key_to_cpu(node->eb, &key, 0);
2357 	return do_relocation(trans, rc, node, &key, path, 0);
2358 }
2359
2360 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2361 				struct reloc_control *rc,
2362 				struct btrfs_path *path, int err)
2363 {
2364 	LIST_HEAD(list);
2365 	struct btrfs_backref_cache *cache = &rc->backref_cache;
2366 	struct btrfs_backref_node *node;
2367 	int level;
2368 	int ret;
2369
2370 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2371 		while (!list_empty(&cache->pending[level])) {
2372 			node = list_first_entry(&cache->pending[level],
2373 						struct btrfs_backref_node, list);
2374 			list_move_tail(&node->list, &list);
2375 			BUG_ON(!node->pending);
2376
2377 			if (!err) {
2378 				ret = link_to_upper(trans, rc, node, path);
2379 				if (ret < 0)
2380 					err = ret;
2381 			}
2382 		}
2383 		list_splice_init(&list, &cache->pending[level]);
2384 	}
2385 	return err;
2386 }
2387
2388 /*
2389  * mark a block and all blocks that directly/indirectly reference it
2390  * as processed.
2391 */ 2392 static void update_processed_blocks(struct reloc_control *rc, 2393 struct btrfs_backref_node *node) 2394 { 2395 struct btrfs_backref_node *next = node; 2396 struct btrfs_backref_edge *edge; 2397 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2398 int index = 0; 2399 2400 while (next) { 2401 cond_resched(); 2402 while (1) { 2403 if (next->processed) 2404 break; 2405 2406 mark_block_processed(rc, next); 2407 2408 if (list_empty(&next->upper)) 2409 break; 2410 2411 edge = list_first_entry(&next->upper, struct btrfs_backref_edge, 2412 list[LOWER]); 2413 edges[index++] = edge; 2414 next = edge->node[UPPER]; 2415 } 2416 next = walk_down_backref(edges, &index); 2417 } 2418 } 2419 2420 static int tree_block_processed(u64 bytenr, struct reloc_control *rc) 2421 { 2422 u32 blocksize = rc->extent_root->fs_info->nodesize; 2423 2424 if (btrfs_test_range_bit(&rc->processed_blocks, bytenr, 2425 bytenr + blocksize - 1, EXTENT_DIRTY, NULL)) 2426 return 1; 2427 return 0; 2428 } 2429 2430 static int get_tree_block_key(struct btrfs_fs_info *fs_info, 2431 struct tree_block *block) 2432 { 2433 struct btrfs_tree_parent_check check = { 2434 .level = block->level, 2435 .owner_root = block->owner, 2436 .transid = block->key.offset 2437 }; 2438 struct extent_buffer *eb; 2439 2440 eb = read_tree_block(fs_info, block->bytenr, &check); 2441 if (IS_ERR(eb)) 2442 return PTR_ERR(eb); 2443 if (unlikely(!extent_buffer_uptodate(eb))) { 2444 free_extent_buffer(eb); 2445 return -EIO; 2446 } 2447 if (block->level == 0) 2448 btrfs_item_key_to_cpu(eb, &block->key, 0); 2449 else 2450 btrfs_node_key_to_cpu(eb, &block->key, 0); 2451 free_extent_buffer(eb); 2452 block->key_ready = true; 2453 return 0; 2454 } 2455 2456 /* 2457 * helper function to relocate a tree block 2458 */ 2459 static int relocate_tree_block(struct btrfs_trans_handle *trans, 2460 struct reloc_control *rc, 2461 struct btrfs_backref_node *node, 2462 struct btrfs_key *key, 2463 struct btrfs_path *path) 2464 { 2465 struct btrfs_root *root; 2466 int ret = 0; 2467 2468 if (!node) 2469 return 0; 2470 2471 /* 2472 * If we fail here we want to drop our backref_node because we are going 2473 * to start over and regenerate the tree for it. 2474 */ 2475 ret = reserve_metadata_space(trans, rc, node); 2476 if (ret) 2477 goto out; 2478 2479 BUG_ON(node->processed); 2480 root = select_one_root(node); 2481 if (IS_ERR(root)) { 2482 ret = PTR_ERR(root); 2483 2484 /* See explanation in select_one_root for the -EUCLEAN case. */ 2485 ASSERT(ret == -ENOENT); 2486 if (ret == -ENOENT) { 2487 ret = 0; 2488 update_processed_blocks(rc, node); 2489 } 2490 goto out; 2491 } 2492 2493 if (root) { 2494 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) { 2495 /* 2496 * This block was the root block of a root, and this is 2497 * the first time we're processing the block and thus it 2498 * should not have had the ->new_bytenr modified. 2499 * 2500 * However in the case of corruption we could have 2501 * multiple refs pointing to the same block improperly, 2502 * and thus we would trip over these checks. ASSERT() 2503 * for the developer case, because it could indicate a 2504 * bug in the backref code, however error out for a 2505 * normal user in the case of corruption. 
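			 * (With assertions compiled out, the unlikely() check
			 * below still catches this state and errors out with
			 * -EUCLEAN instead of crashing.)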
2506 */ 2507 ASSERT(node->new_bytenr == 0); 2508 if (unlikely(node->new_bytenr)) { 2509 btrfs_err(root->fs_info, 2510 "bytenr %llu has improper references to it", 2511 node->bytenr); 2512 ret = -EUCLEAN; 2513 goto out; 2514 } 2515 ret = btrfs_record_root_in_trans(trans, root); 2516 if (ret) 2517 goto out; 2518 /* 2519 * Another thread could have failed, need to check if we 2520 * have reloc_root actually set. 2521 */ 2522 if (!root->reloc_root) { 2523 ret = -ENOENT; 2524 goto out; 2525 } 2526 root = root->reloc_root; 2527 node->new_bytenr = root->node->start; 2528 btrfs_put_root(node->root); 2529 node->root = btrfs_grab_root(root); 2530 ASSERT(node->root); 2531 } else { 2532 btrfs_err(root->fs_info, 2533 "bytenr %llu resolved to a non-shareable root", 2534 node->bytenr); 2535 ret = -EUCLEAN; 2536 goto out; 2537 } 2538 if (!ret) 2539 update_processed_blocks(rc, node); 2540 } else { 2541 ret = do_relocation(trans, rc, node, key, path, 1); 2542 } 2543 out: 2544 if (ret || node->level == 0) 2545 btrfs_backref_cleanup_node(&rc->backref_cache, node); 2546 return ret; 2547 } 2548 2549 static int relocate_cowonly_block(struct btrfs_trans_handle *trans, 2550 struct reloc_control *rc, struct tree_block *block, 2551 struct btrfs_path *path) 2552 { 2553 struct btrfs_fs_info *fs_info = trans->fs_info; 2554 struct btrfs_root *root; 2555 u64 num_bytes; 2556 int nr_levels; 2557 int ret; 2558 2559 root = btrfs_get_fs_root(fs_info, block->owner, true); 2560 if (IS_ERR(root)) 2561 return PTR_ERR(root); 2562 2563 nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1; 2564 2565 num_bytes = fs_info->nodesize * nr_levels; 2566 ret = refill_metadata_space(trans, rc, num_bytes); 2567 if (ret) { 2568 btrfs_put_root(root); 2569 return ret; 2570 } 2571 path->lowest_level = block->level; 2572 if (root == root->fs_info->chunk_root) 2573 btrfs_reserve_chunk_metadata(trans, false); 2574 2575 ret = btrfs_search_slot(trans, root, &block->key, path, 0, 1); 2576 path->lowest_level = 0; 2577 btrfs_release_path(path); 2578 2579 if (root == root->fs_info->chunk_root) 2580 btrfs_trans_release_chunk_metadata(trans); 2581 if (ret > 0) 2582 ret = 0; 2583 btrfs_put_root(root); 2584 2585 return ret; 2586 } 2587 2588 /* 2589 * relocate a list of blocks 2590 */ 2591 static noinline_for_stack 2592 int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2593 struct reloc_control *rc, struct rb_root *blocks) 2594 { 2595 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 2596 struct btrfs_backref_node *node; 2597 struct btrfs_path *path; 2598 struct tree_block *block; 2599 struct tree_block *next; 2600 int ret = 0; 2601 2602 path = btrfs_alloc_path(); 2603 if (!path) { 2604 ret = -ENOMEM; 2605 goto out_free_blocks; 2606 } 2607 2608 /* Kick in readahead for tree blocks with missing keys */ 2609 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2610 if (!block->key_ready) 2611 btrfs_readahead_tree_block(fs_info, block->bytenr, 2612 block->owner, 0, 2613 block->level); 2614 } 2615 2616 /* Get first keys */ 2617 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2618 if (!block->key_ready) { 2619 ret = get_tree_block_key(fs_info, block); 2620 if (ret) 2621 goto out_free_path; 2622 } 2623 } 2624 2625 /* Do tree relocation */ 2626 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) { 2627 /* 2628 * For COWonly blocks, or the data reloc tree, we only need to 2629 * COW down to the block, there's no need to generate a backref 2630 * tree. 
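		 *
		 * COW-only trees (all non-fs trees, e.g. the extent or csum
		 * tree) are never snapshotted, so there is no reloc tree
		 * involved and a plain COW walk down to the block is enough.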
2631 */ 2632 if (block->owner && 2633 (!btrfs_is_fstree(block->owner) || 2634 block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) { 2635 ret = relocate_cowonly_block(trans, rc, block, path); 2636 if (ret) 2637 break; 2638 continue; 2639 } 2640 2641 node = build_backref_tree(trans, rc, &block->key, 2642 block->level, block->bytenr); 2643 if (IS_ERR(node)) { 2644 ret = PTR_ERR(node); 2645 goto out; 2646 } 2647 2648 ret = relocate_tree_block(trans, rc, node, &block->key, 2649 path); 2650 if (ret < 0) 2651 break; 2652 } 2653 out: 2654 ret = finish_pending_nodes(trans, rc, path, ret); 2655 2656 out_free_path: 2657 btrfs_free_path(path); 2658 out_free_blocks: 2659 free_block_list(blocks); 2660 return ret; 2661 } 2662 2663 static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control *rc) 2664 { 2665 const struct file_extent_cluster *cluster = &rc->cluster; 2666 struct btrfs_inode *inode = BTRFS_I(rc->data_inode); 2667 u64 alloc_hint = 0; 2668 u64 start; 2669 u64 end; 2670 u64 offset = inode->reloc_block_group_start; 2671 u64 num_bytes; 2672 int nr; 2673 int ret = 0; 2674 u64 prealloc_start = cluster->start - offset; 2675 u64 prealloc_end = cluster->end - offset; 2676 u64 cur_offset = prealloc_start; 2677 2678 /* 2679 * For blocksize < folio size case (either bs < page size or large folios), 2680 * beyond i_size, all blocks are filled with zero. 2681 * 2682 * If the current cluster covers the above range, btrfs_do_readpage() 2683 * will skip the read, and relocate_one_folio() will later writeback 2684 * the padding zeros as new data, causing data corruption. 2685 * 2686 * Here we have to invalidate the cache covering our cluster. 2687 */ 2688 ret = filemap_invalidate_inode(&inode->vfs_inode, true, prealloc_start, 2689 prealloc_end); 2690 if (ret < 0) 2691 return ret; 2692 2693 BUG_ON(cluster->start != cluster->boundary[0]); 2694 ret = btrfs_alloc_data_chunk_ondemand(inode, 2695 prealloc_end + 1 - prealloc_start); 2696 if (ret) 2697 return ret; 2698 2699 btrfs_inode_lock(inode, 0); 2700 for (nr = 0; nr < cluster->nr; nr++) { 2701 struct extent_state *cached_state = NULL; 2702 2703 start = cluster->boundary[nr] - offset; 2704 if (nr + 1 < cluster->nr) 2705 end = cluster->boundary[nr + 1] - 1 - offset; 2706 else 2707 end = cluster->end - offset; 2708 2709 btrfs_lock_extent(&inode->io_tree, start, end, &cached_state); 2710 num_bytes = end + 1 - start; 2711 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, 2712 num_bytes, num_bytes, 2713 end + 1, &alloc_hint); 2714 cur_offset = end + 1; 2715 btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); 2716 if (ret) 2717 break; 2718 } 2719 btrfs_inode_unlock(inode, 0); 2720 2721 if (cur_offset < prealloc_end) 2722 btrfs_free_reserved_data_space_noquota(inode, 2723 prealloc_end + 1 - cur_offset); 2724 return ret; 2725 } 2726 2727 static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_control *rc) 2728 { 2729 struct btrfs_inode *inode = BTRFS_I(rc->data_inode); 2730 struct extent_map *em; 2731 struct extent_state *cached_state = NULL; 2732 u64 offset = inode->reloc_block_group_start; 2733 u64 start = rc->cluster.start - offset; 2734 u64 end = rc->cluster.end - offset; 2735 int ret = 0; 2736 2737 em = btrfs_alloc_extent_map(); 2738 if (!em) 2739 return -ENOMEM; 2740 2741 em->start = start; 2742 em->len = end + 1 - start; 2743 em->disk_bytenr = rc->cluster.start; 2744 em->disk_num_bytes = em->len; 2745 em->ram_bytes = em->len; 2746 em->flags |= EXTENT_FLAG_PINNED; 2747 2748 
btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
2749 	ret = btrfs_replace_extent_map_range(inode, em, false);
2750 	btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
2751 	btrfs_free_extent_map(em);
2752
2753 	return ret;
2754 }
2755
2756 /*
2757  * Allow error injection to test balance/relocation cancellation
2758  */
2759 noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
2760 {
2761 	return atomic_read(&fs_info->balance_cancel_req) ||
2762 		atomic_read(&fs_info->reloc_cancel_req) ||
2763 		fatal_signal_pending(current);
2764 }
2765 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2766
2767 static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
2768 				    int cluster_nr)
2769 {
2770 	/* Last extent, use cluster end directly */
2771 	if (cluster_nr >= cluster->nr - 1)
2772 		return cluster->end;
2773
2774 	/* Use next boundary start */
2775 	return cluster->boundary[cluster_nr + 1] - 1;
2776 }
2777
2778 static int relocate_one_folio(struct reloc_control *rc,
2779 			      struct file_ra_state *ra,
2780 			      int *cluster_nr, u64 *file_offset_ret)
2781 {
2782 	const struct file_extent_cluster *cluster = &rc->cluster;
2783 	struct inode *inode = rc->data_inode;
2784 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2785 	const u64 orig_file_offset = *file_offset_ret;
2786 	u64 offset = BTRFS_I(inode)->reloc_block_group_start;
2787 	const pgoff_t last_index = (cluster->end - offset) >> PAGE_SHIFT;
2788 	const pgoff_t index = orig_file_offset >> PAGE_SHIFT;
2789 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2790 	struct folio *folio;
2791 	u64 folio_start;
2792 	u64 folio_end;
2793 	u64 cur;
2794 	int ret;
2795 	const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
2796
2797 	ASSERT(index <= last_index);
2798 again:
2799 	folio = filemap_lock_folio(inode->i_mapping, index);
2800 	if (IS_ERR(folio)) {
2801
2802 		/*
2803 		 * On relocation we're doing readahead on the relocation inode,
2804 		 * but if the filesystem is backed by a RAID stripe tree we can
2805 		 * get ENOENT (e.g. due to preallocated extents not being
2806 		 * mapped in the RST) from the lookup.
2807 		 *
2808 		 * But readahead doesn't handle the error and submits invalid
2809 		 * reads to the device, causing assertion failures.
2810 		 */
2811 		if (!use_rst)
2812 			page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2813 						  index, last_index + 1 - index);
2814 		folio = __filemap_get_folio(inode->i_mapping, index,
2815 					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
2816 					    mask);
2817 		if (IS_ERR(folio))
2818 			return PTR_ERR(folio);
2819 	}
2820
2821 	if (folio_test_readahead(folio) && !use_rst)
2822 		page_cache_async_readahead(inode->i_mapping, ra, NULL,
2823 					   folio, last_index + 1 - index);
2824
2825 	if (!folio_test_uptodate(folio)) {
2826 		btrfs_read_folio(NULL, folio);
2827 		folio_lock(folio);
2828 		if (unlikely(!folio_test_uptodate(folio))) {
2829 			ret = -EIO;
2830 			goto release_folio;
2831 		}
2832 		if (folio->mapping != inode->i_mapping) {
2833 			folio_unlock(folio);
2834 			folio_put(folio);
2835 			goto again;
2836 		}
2837 	}
2838
2839 	/*
2840 	 * We could have lost folio private when we dropped the lock to read the
2841 	 * folio above, so make sure we call set_folio_extent_mapped() here to
2842 	 * have any of the subpage blocksize state we need in place.
2843 */ 2844 ret = set_folio_extent_mapped(folio); 2845 if (ret < 0) 2846 goto release_folio; 2847 2848 folio_start = folio_pos(folio); 2849 folio_end = folio_start + folio_size(folio) - 1; 2850 2851 /* 2852 * Start from the cluster, as for subpage case, the cluster can start 2853 * inside the folio. 2854 */ 2855 cur = max(folio_start, cluster->boundary[*cluster_nr] - offset); 2856 while (cur <= folio_end) { 2857 struct extent_state *cached_state = NULL; 2858 u64 extent_start = cluster->boundary[*cluster_nr] - offset; 2859 u64 extent_end = get_cluster_boundary_end(cluster, 2860 *cluster_nr) - offset; 2861 u64 clamped_start = max(folio_start, extent_start); 2862 u64 clamped_end = min(folio_end, extent_end); 2863 u32 clamped_len = clamped_end + 1 - clamped_start; 2864 2865 /* Reserve metadata for this range */ 2866 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), 2867 clamped_len, clamped_len, 2868 false); 2869 if (ret) 2870 goto release_folio; 2871 2872 /* Mark the range delalloc and dirty for later writeback */ 2873 btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, 2874 clamped_end, &cached_state); 2875 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start, 2876 clamped_end, 0, &cached_state); 2877 if (ret) { 2878 btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 2879 clamped_start, clamped_end, 2880 EXTENT_LOCKED | EXTENT_BOUNDARY, 2881 &cached_state); 2882 btrfs_delalloc_release_metadata(BTRFS_I(inode), 2883 clamped_len, true); 2884 btrfs_delalloc_release_extents(BTRFS_I(inode), 2885 clamped_len); 2886 goto release_folio; 2887 } 2888 btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len); 2889 2890 /* 2891 * Set the boundary if it's inside the folio. 2892 * Data relocation requires the destination extents to have the 2893 * same size as the source. 2894 * EXTENT_BOUNDARY bit prevents current extent from being merged 2895 * with previous extent. 2896 */ 2897 if (in_range(cluster->boundary[*cluster_nr] - offset, 2898 folio_start, folio_size(folio))) { 2899 u64 boundary_start = cluster->boundary[*cluster_nr] - 2900 offset; 2901 u64 boundary_end = boundary_start + 2902 fs_info->sectorsize - 1; 2903 2904 btrfs_set_extent_bit(&BTRFS_I(inode)->io_tree, 2905 boundary_start, boundary_end, 2906 EXTENT_BOUNDARY, NULL); 2907 } 2908 btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, 2909 &cached_state); 2910 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len); 2911 cur += clamped_len; 2912 2913 /* Crossed extent end, go to next extent */ 2914 if (cur >= extent_end) { 2915 (*cluster_nr)++; 2916 /* Just finished the last extent of the cluster, exit. 
*/ 2917 if (*cluster_nr >= cluster->nr) 2918 break; 2919 } 2920 } 2921 folio_unlock(folio); 2922 folio_put(folio); 2923 2924 balance_dirty_pages_ratelimited(inode->i_mapping); 2925 btrfs_throttle(fs_info); 2926 if (btrfs_should_cancel_balance(fs_info)) 2927 ret = -ECANCELED; 2928 *file_offset_ret = folio_end + 1; 2929 return ret; 2930 2931 release_folio: 2932 folio_unlock(folio); 2933 folio_put(folio); 2934 return ret; 2935 } 2936 2937 static int relocate_file_extent_cluster(struct reloc_control *rc) 2938 { 2939 struct inode *inode = rc->data_inode; 2940 const struct file_extent_cluster *cluster = &rc->cluster; 2941 u64 offset = BTRFS_I(inode)->reloc_block_group_start; 2942 u64 cur_file_offset = cluster->start - offset; 2943 struct file_ra_state AUTO_KFREE(ra); 2944 int cluster_nr = 0; 2945 int ret = 0; 2946 2947 if (!cluster->nr) 2948 return 0; 2949 2950 ra = kzalloc(sizeof(*ra), GFP_NOFS); 2951 if (!ra) 2952 return -ENOMEM; 2953 2954 ret = prealloc_file_extent_cluster(rc); 2955 if (ret) 2956 return ret; 2957 2958 file_ra_state_init(ra, inode->i_mapping); 2959 2960 ret = setup_relocation_extent_mapping(rc); 2961 if (ret) 2962 return ret; 2963 2964 while (cur_file_offset < cluster->end - offset) { 2965 ret = relocate_one_folio(rc, ra, &cluster_nr, &cur_file_offset); 2966 if (ret) 2967 break; 2968 } 2969 if (ret == 0) 2970 WARN_ON(cluster_nr != cluster->nr); 2971 return ret; 2972 } 2973 2974 static noinline_for_stack int relocate_data_extent(struct reloc_control *rc, 2975 const struct btrfs_key *extent_key) 2976 { 2977 struct inode *inode = rc->data_inode; 2978 struct file_extent_cluster *cluster = &rc->cluster; 2979 int ret; 2980 struct btrfs_root *root = BTRFS_I(inode)->root; 2981 2982 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) { 2983 ret = relocate_file_extent_cluster(rc); 2984 if (ret) 2985 return ret; 2986 cluster->nr = 0; 2987 } 2988 2989 /* 2990 * Under simple quotas, we set root->relocation_src_root when we find 2991 * the extent. If adjacent extents have different owners, we can't merge 2992 * them while relocating. Handle this by storing the owning root that 2993 * started a cluster and if we see an extent from a different root break 2994 * cluster formation (just like the above case of non-adjacent extents). 2995 * 2996 * Without simple quotas, relocation_src_root is always 0, so we should 2997 * never see a mismatch, and it should have no effect on relocation 2998 * clusters. 2999 */ 3000 if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) { 3001 u64 tmp = root->relocation_src_root; 3002 3003 /* 3004 * root->relocation_src_root is the state that actually affects 3005 * the preallocation we do here, so set it to the root owning 3006 * the cluster we need to relocate. 3007 */ 3008 root->relocation_src_root = cluster->owning_root; 3009 ret = relocate_file_extent_cluster(rc); 3010 if (ret) 3011 return ret; 3012 cluster->nr = 0; 3013 /* And reset it back for the current extent's owning root. 
*/ 3014 root->relocation_src_root = tmp; 3015 } 3016 3017 if (!cluster->nr) { 3018 cluster->start = extent_key->objectid; 3019 cluster->owning_root = root->relocation_src_root; 3020 } 3021 else 3022 BUG_ON(cluster->nr >= MAX_EXTENTS); 3023 cluster->end = extent_key->objectid + extent_key->offset - 1; 3024 cluster->boundary[cluster->nr] = extent_key->objectid; 3025 cluster->nr++; 3026 3027 if (cluster->nr >= MAX_EXTENTS) { 3028 ret = relocate_file_extent_cluster(rc); 3029 if (ret) 3030 return ret; 3031 cluster->nr = 0; 3032 } 3033 return 0; 3034 } 3035 3036 /* 3037 * helper to add a tree block to the list. 3038 * the major work is getting the generation and level of the block 3039 */ 3040 static int add_tree_block(struct reloc_control *rc, 3041 const struct btrfs_key *extent_key, 3042 struct btrfs_path *path, 3043 struct rb_root *blocks) 3044 { 3045 struct extent_buffer *eb; 3046 struct btrfs_extent_item *ei; 3047 struct btrfs_tree_block_info *bi; 3048 struct tree_block *block; 3049 struct rb_node *rb_node; 3050 u32 item_size; 3051 int level = -1; 3052 u64 generation; 3053 u64 owner = 0; 3054 3055 eb = path->nodes[0]; 3056 item_size = btrfs_item_size(eb, path->slots[0]); 3057 3058 if (extent_key->type == BTRFS_METADATA_ITEM_KEY || 3059 item_size >= sizeof(*ei) + sizeof(*bi)) { 3060 unsigned long ptr = 0, end; 3061 3062 ei = btrfs_item_ptr(eb, path->slots[0], 3063 struct btrfs_extent_item); 3064 end = (unsigned long)ei + item_size; 3065 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) { 3066 bi = (struct btrfs_tree_block_info *)(ei + 1); 3067 level = btrfs_tree_block_level(eb, bi); 3068 ptr = (unsigned long)(bi + 1); 3069 } else { 3070 level = (int)extent_key->offset; 3071 ptr = (unsigned long)(ei + 1); 3072 } 3073 generation = btrfs_extent_generation(eb, ei); 3074 3075 /* 3076 * We're reading random blocks without knowing their owner ahead 3077 * of time. This is ok most of the time, as all reloc roots and 3078 * fs roots have the same lock type. However normal trees do 3079 * not, and the only way to know ahead of time is to read the 3080 * inline ref offset. We know it's an fs root if 3081 * 3082 * 1. There's more than one ref. 3083 * 2. There's a SHARED_DATA_REF_KEY set. 3084 * 3. FULL_BACKREF is set on the flags. 3085 * 3086 * Otherwise it's safe to assume that the ref offset == the 3087 * owner of this block, so we can use that when calling 3088 * read_tree_block. 
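		 *
		 * (Illustrative example, not taken from a real filesystem: a
		 * node owned only by fs tree 257 carries a single inline
		 * TREE_BLOCK_REF whose offset is 257, which is what ends up in
		 * block->owner and later in the parent check passed to
		 * read_tree_block().)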
3089 */ 3090 if (btrfs_extent_refs(eb, ei) == 1 && 3091 !(btrfs_extent_flags(eb, ei) & 3092 BTRFS_BLOCK_FLAG_FULL_BACKREF) && 3093 ptr < end) { 3094 struct btrfs_extent_inline_ref *iref; 3095 int type; 3096 3097 iref = (struct btrfs_extent_inline_ref *)ptr; 3098 type = btrfs_get_extent_inline_ref_type(eb, iref, 3099 BTRFS_REF_TYPE_BLOCK); 3100 if (type == BTRFS_REF_TYPE_INVALID) 3101 return -EINVAL; 3102 if (type == BTRFS_TREE_BLOCK_REF_KEY) 3103 owner = btrfs_extent_inline_ref_offset(eb, iref); 3104 } 3105 } else { 3106 btrfs_print_leaf(eb); 3107 btrfs_err(rc->block_group->fs_info, 3108 "unrecognized tree backref at tree block %llu slot %u", 3109 eb->start, path->slots[0]); 3110 btrfs_release_path(path); 3111 return -EUCLEAN; 3112 } 3113 3114 btrfs_release_path(path); 3115 3116 BUG_ON(level == -1); 3117 3118 block = kmalloc(sizeof(*block), GFP_NOFS); 3119 if (!block) 3120 return -ENOMEM; 3121 3122 block->bytenr = extent_key->objectid; 3123 block->key.objectid = rc->extent_root->fs_info->nodesize; 3124 block->key.offset = generation; 3125 block->level = level; 3126 block->key_ready = false; 3127 block->owner = owner; 3128 3129 rb_node = rb_simple_insert(blocks, &block->simple_node); 3130 if (rb_node) 3131 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr, 3132 -EEXIST); 3133 3134 return 0; 3135 } 3136 3137 /* 3138 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY 3139 */ 3140 static int __add_tree_block(struct reloc_control *rc, 3141 u64 bytenr, u32 blocksize, 3142 struct rb_root *blocks) 3143 { 3144 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3145 BTRFS_PATH_AUTO_FREE(path); 3146 struct btrfs_key key; 3147 int ret; 3148 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 3149 3150 if (tree_block_processed(bytenr, rc)) 3151 return 0; 3152 3153 if (rb_simple_search(blocks, bytenr)) 3154 return 0; 3155 3156 path = btrfs_alloc_path(); 3157 if (!path) 3158 return -ENOMEM; 3159 again: 3160 key.objectid = bytenr; 3161 if (skinny) { 3162 key.type = BTRFS_METADATA_ITEM_KEY; 3163 key.offset = (u64)-1; 3164 } else { 3165 key.type = BTRFS_EXTENT_ITEM_KEY; 3166 key.offset = blocksize; 3167 } 3168 3169 path->search_commit_root = true; 3170 path->skip_locking = true; 3171 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0); 3172 if (ret < 0) 3173 return ret; 3174 3175 if (ret > 0 && skinny) { 3176 if (path->slots[0]) { 3177 path->slots[0]--; 3178 btrfs_item_key_to_cpu(path->nodes[0], &key, 3179 path->slots[0]); 3180 if (key.objectid == bytenr && 3181 (key.type == BTRFS_METADATA_ITEM_KEY || 3182 (key.type == BTRFS_EXTENT_ITEM_KEY && 3183 key.offset == blocksize))) 3184 ret = 0; 3185 } 3186 3187 if (ret) { 3188 skinny = false; 3189 btrfs_release_path(path); 3190 goto again; 3191 } 3192 } 3193 if (ret) { 3194 ASSERT(ret == 1); 3195 btrfs_print_leaf(path->nodes[0]); 3196 btrfs_err(fs_info, 3197 "tree block extent item (%llu) is not found in extent tree", 3198 bytenr); 3199 WARN_ON(1); 3200 return -EINVAL; 3201 } 3202 3203 return add_tree_block(rc, &key, path, blocks); 3204 } 3205 3206 static int delete_block_group_cache(struct btrfs_block_group *block_group, 3207 struct inode *inode, 3208 u64 ino) 3209 { 3210 struct btrfs_fs_info *fs_info = block_group->fs_info; 3211 struct btrfs_root *root = fs_info->tree_root; 3212 struct btrfs_trans_handle *trans; 3213 struct btrfs_inode *btrfs_inode; 3214 int ret = 0; 3215 3216 if (inode) 3217 goto truncate; 3218 3219 btrfs_inode = btrfs_iget(ino, root); 3220 if (IS_ERR(btrfs_inode)) 3221 return 
-ENOENT; 3222 inode = &btrfs_inode->vfs_inode; 3223 3224 truncate: 3225 ret = btrfs_check_trunc_cache_free_space(fs_info, 3226 &fs_info->global_block_rsv); 3227 if (ret) 3228 goto out; 3229 3230 trans = btrfs_join_transaction(root); 3231 if (IS_ERR(trans)) { 3232 ret = PTR_ERR(trans); 3233 goto out; 3234 } 3235 3236 ret = btrfs_truncate_free_space_cache(trans, block_group, inode); 3237 3238 btrfs_end_transaction(trans); 3239 btrfs_btree_balance_dirty(fs_info); 3240 out: 3241 iput(inode); 3242 return ret; 3243 } 3244 3245 /* 3246 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the 3247 * cache inode, to avoid free space cache data extent blocking data relocation. 3248 */ 3249 static int delete_v1_space_cache(struct extent_buffer *leaf, 3250 struct btrfs_block_group *block_group, 3251 u64 data_bytenr) 3252 { 3253 u64 space_cache_ino; 3254 struct btrfs_file_extent_item *ei; 3255 struct btrfs_key key; 3256 bool found = false; 3257 int i; 3258 3259 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID) 3260 return 0; 3261 3262 for (i = 0; i < btrfs_header_nritems(leaf); i++) { 3263 u8 type; 3264 3265 btrfs_item_key_to_cpu(leaf, &key, i); 3266 if (key.type != BTRFS_EXTENT_DATA_KEY) 3267 continue; 3268 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 3269 type = btrfs_file_extent_type(leaf, ei); 3270 3271 if ((type == BTRFS_FILE_EXTENT_REG || 3272 type == BTRFS_FILE_EXTENT_PREALLOC) && 3273 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) { 3274 found = true; 3275 space_cache_ino = key.objectid; 3276 break; 3277 } 3278 } 3279 if (!found) 3280 return -ENOENT; 3281 3282 return delete_block_group_cache(block_group, NULL, space_cache_ino); 3283 } 3284 3285 /* 3286 * helper to find all tree blocks that reference a given data extent 3287 */ 3288 static noinline_for_stack int add_data_references(struct reloc_control *rc, 3289 const struct btrfs_key *extent_key, 3290 struct btrfs_path *path, 3291 struct rb_root *blocks) 3292 { 3293 struct btrfs_backref_walk_ctx ctx = { 0 }; 3294 struct ulist_iterator leaf_uiter; 3295 struct ulist_node *ref_node = NULL; 3296 const u32 blocksize = rc->extent_root->fs_info->nodesize; 3297 int ret = 0; 3298 3299 btrfs_release_path(path); 3300 3301 ctx.bytenr = extent_key->objectid; 3302 ctx.skip_inode_ref_list = true; 3303 ctx.fs_info = rc->extent_root->fs_info; 3304 3305 ret = btrfs_find_all_leafs(&ctx); 3306 if (ret < 0) 3307 return ret; 3308 3309 ULIST_ITER_INIT(&leaf_uiter); 3310 while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) { 3311 struct btrfs_tree_parent_check check = { 0 }; 3312 struct extent_buffer *eb; 3313 3314 eb = read_tree_block(ctx.fs_info, ref_node->val, &check); 3315 if (IS_ERR(eb)) { 3316 ret = PTR_ERR(eb); 3317 break; 3318 } 3319 ret = delete_v1_space_cache(eb, rc->block_group, 3320 extent_key->objectid); 3321 free_extent_buffer(eb); 3322 if (ret < 0) 3323 break; 3324 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks); 3325 if (ret < 0) 3326 break; 3327 } 3328 if (ret < 0) 3329 free_block_list(blocks); 3330 ulist_free(ctx.refs); 3331 return ret; 3332 } 3333 3334 /* 3335 * helper to find next unprocessed extent 3336 */ 3337 static noinline_for_stack 3338 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, 3339 struct btrfs_key *extent_key) 3340 { 3341 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3342 struct btrfs_key key; 3343 struct extent_buffer *leaf; 3344 u64 start, end, last; 3345 int ret; 3346 3347 last = rc->block_group->start + rc->block_group->length; 
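	/*
	 * Scan commit-root extent items from rc->search_start to the end of
	 * the block group, skipping ranges already recorded as EXTENT_DIRTY
	 * in rc->processed_blocks.
	 */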
3348 while (1) { 3349 bool block_found; 3350 3351 cond_resched(); 3352 if (rc->search_start >= last) { 3353 ret = 1; 3354 break; 3355 } 3356 3357 key.objectid = rc->search_start; 3358 key.type = BTRFS_EXTENT_ITEM_KEY; 3359 key.offset = 0; 3360 3361 path->search_commit_root = true; 3362 path->skip_locking = true; 3363 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 3364 0, 0); 3365 if (ret < 0) 3366 break; 3367 next: 3368 leaf = path->nodes[0]; 3369 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3370 ret = btrfs_next_leaf(rc->extent_root, path); 3371 if (ret != 0) 3372 break; 3373 leaf = path->nodes[0]; 3374 } 3375 3376 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3377 if (key.objectid >= last) { 3378 ret = 1; 3379 break; 3380 } 3381 3382 if (key.type != BTRFS_EXTENT_ITEM_KEY && 3383 key.type != BTRFS_METADATA_ITEM_KEY) { 3384 path->slots[0]++; 3385 goto next; 3386 } 3387 3388 if (key.type == BTRFS_EXTENT_ITEM_KEY && 3389 key.objectid + key.offset <= rc->search_start) { 3390 path->slots[0]++; 3391 goto next; 3392 } 3393 3394 if (key.type == BTRFS_METADATA_ITEM_KEY && 3395 key.objectid + fs_info->nodesize <= 3396 rc->search_start) { 3397 path->slots[0]++; 3398 goto next; 3399 } 3400 3401 block_found = btrfs_find_first_extent_bit(&rc->processed_blocks, 3402 key.objectid, &start, &end, 3403 EXTENT_DIRTY, NULL); 3404 3405 if (block_found && start <= key.objectid) { 3406 btrfs_release_path(path); 3407 rc->search_start = end + 1; 3408 } else { 3409 if (key.type == BTRFS_EXTENT_ITEM_KEY) 3410 rc->search_start = key.objectid + key.offset; 3411 else 3412 rc->search_start = key.objectid + 3413 fs_info->nodesize; 3414 memcpy(extent_key, &key, sizeof(key)); 3415 return 0; 3416 } 3417 } 3418 btrfs_release_path(path); 3419 return ret; 3420 } 3421 3422 static void set_reloc_control(struct reloc_control *rc) 3423 { 3424 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3425 3426 mutex_lock(&fs_info->reloc_mutex); 3427 fs_info->reloc_ctl = rc; 3428 mutex_unlock(&fs_info->reloc_mutex); 3429 } 3430 3431 static void unset_reloc_control(struct reloc_control *rc) 3432 { 3433 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3434 3435 mutex_lock(&fs_info->reloc_mutex); 3436 fs_info->reloc_ctl = NULL; 3437 mutex_unlock(&fs_info->reloc_mutex); 3438 } 3439 3440 static noinline_for_stack 3441 int prepare_to_relocate(struct reloc_control *rc) 3442 { 3443 struct btrfs_trans_handle *trans; 3444 int ret; 3445 3446 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info, 3447 BTRFS_BLOCK_RSV_TEMP); 3448 if (!rc->block_rsv) 3449 return -ENOMEM; 3450 3451 memset(&rc->cluster, 0, sizeof(rc->cluster)); 3452 rc->search_start = rc->block_group->start; 3453 rc->extents_found = 0; 3454 rc->nodes_relocated = 0; 3455 rc->merging_rsv_size = 0; 3456 rc->reserved_bytes = 0; 3457 rc->block_rsv->size = rc->extent_root->fs_info->nodesize * 3458 RELOCATION_RESERVED_NODES; 3459 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info, 3460 rc->block_rsv, rc->block_rsv->size, 3461 BTRFS_RESERVE_FLUSH_ALL); 3462 if (ret) 3463 return ret; 3464 3465 rc->create_reloc_tree = true; 3466 set_reloc_control(rc); 3467 3468 trans = btrfs_join_transaction(rc->extent_root); 3469 if (IS_ERR(trans)) { 3470 unset_reloc_control(rc); 3471 /* 3472 * extent tree is not a ref_cow tree and has no reloc_root to 3473 * cleanup. And callers are responsible to free the above 3474 * block rsv. 
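		 *
		 * (We do have to unset the reloc control ourselves here,
		 * since the caller's error path will not do it for us when
		 * prepare_to_relocate() fails.)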
3475 */ 3476 return PTR_ERR(trans); 3477 } 3478 3479 ret = btrfs_commit_transaction(trans); 3480 if (ret) 3481 unset_reloc_control(rc); 3482 3483 return ret; 3484 } 3485 3486 static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3487 { 3488 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3489 struct rb_root blocks = RB_ROOT; 3490 struct btrfs_key key; 3491 struct btrfs_trans_handle *trans = NULL; 3492 BTRFS_PATH_AUTO_FREE(path); 3493 struct btrfs_extent_item *ei; 3494 u64 flags; 3495 int ret; 3496 int err = 0; 3497 int progress = 0; 3498 3499 path = btrfs_alloc_path(); 3500 if (!path) 3501 return -ENOMEM; 3502 path->reada = READA_FORWARD; 3503 3504 ret = prepare_to_relocate(rc); 3505 if (ret) { 3506 err = ret; 3507 goto out_free; 3508 } 3509 3510 while (1) { 3511 rc->reserved_bytes = 0; 3512 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, 3513 rc->block_rsv->size, 3514 BTRFS_RESERVE_FLUSH_ALL); 3515 if (ret) { 3516 err = ret; 3517 break; 3518 } 3519 progress++; 3520 trans = btrfs_start_transaction(rc->extent_root, 0); 3521 if (IS_ERR(trans)) { 3522 err = PTR_ERR(trans); 3523 trans = NULL; 3524 break; 3525 } 3526 restart: 3527 if (rc->backref_cache.last_trans != trans->transid) 3528 btrfs_backref_release_cache(&rc->backref_cache); 3529 rc->backref_cache.last_trans = trans->transid; 3530 3531 ret = find_next_extent(rc, path, &key); 3532 if (ret < 0) 3533 err = ret; 3534 if (ret != 0) 3535 break; 3536 3537 rc->extents_found++; 3538 3539 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3540 struct btrfs_extent_item); 3541 flags = btrfs_extent_flags(path->nodes[0], ei); 3542 3543 /* 3544 * If we are relocating a simple quota owned extent item, we 3545 * need to note the owner on the reloc data root so that when 3546 * we allocate the replacement item, we can attribute it to the 3547 * correct eventual owner (rather than the reloc data root). 
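		 *
		 * (E.g. for an extent originally owned by subvolume 257, an
		 * illustrative id: the replacement extent should be accounted
		 * to qgroup 257, not to the data reloc tree, so 257 is what we
		 * note in relocation_src_root here.)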
3548 		 */
3549 		if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3550 			struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
3551 			u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
3552 									 path->nodes[0],
3553 									 path->slots[0]);
3554
3555 			root->relocation_src_root = owning_root_id;
3556 		}
3557
3558 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3559 			ret = add_tree_block(rc, &key, path, &blocks);
3560 		} else if (rc->stage == UPDATE_DATA_PTRS &&
3561 			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
3562 			ret = add_data_references(rc, &key, path, &blocks);
3563 		} else {
3564 			btrfs_release_path(path);
3565 			ret = 0;
3566 		}
3567 		if (ret < 0) {
3568 			err = ret;
3569 			break;
3570 		}
3571
3572 		if (!RB_EMPTY_ROOT(&blocks)) {
3573 			ret = relocate_tree_blocks(trans, rc, &blocks);
3574 			if (ret < 0) {
3575 				if (ret != -EAGAIN) {
3576 					err = ret;
3577 					break;
3578 				}
3579 				rc->extents_found--;
3580 				rc->search_start = key.objectid;
3581 			}
3582 		}
3583
3584 		btrfs_end_transaction_throttle(trans);
3585 		btrfs_btree_balance_dirty(fs_info);
3586 		trans = NULL;
3587
3588 		if (rc->stage == MOVE_DATA_EXTENTS &&
3589 		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
3590 			rc->found_file_extent = true;
3591 			ret = relocate_data_extent(rc, &key);
3592 			if (ret < 0) {
3593 				err = ret;
3594 				break;
3595 			}
3596 		}
3597 		if (btrfs_should_cancel_balance(fs_info)) {
3598 			err = -ECANCELED;
3599 			break;
3600 		}
3601 	}
3602 	if (trans && progress && err == -ENOSPC) {
3603 		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3604 		if (ret == 1) {
3605 			err = 0;
3606 			progress = 0;
3607 			goto restart;
3608 		}
3609 	}
3610
3611 	btrfs_release_path(path);
3612 	btrfs_clear_extent_bit(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, NULL);
3613
3614 	if (trans) {
3615 		btrfs_end_transaction_throttle(trans);
3616 		btrfs_btree_balance_dirty(fs_info);
3617 	}
3618
3619 	if (!err && !btrfs_fs_incompat(fs_info, REMAP_TREE)) {
3620 		ret = relocate_file_extent_cluster(rc);
3621 		if (ret < 0)
3622 			err = ret;
3623 	}
3624
3625 	rc->create_reloc_tree = false;
3626 	set_reloc_control(rc);
3627
3628 	btrfs_backref_release_cache(&rc->backref_cache);
3629 	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3630
3631 	/*
3632 	 * Even when the relocation is cancelled, we should still go
3633 	 * through prepare_to_merge() and merge_reloc_roots().
3634 * 3635 * For error (including cancelled balance), prepare_to_merge() will 3636 * mark all reloc trees orphan, then queue them for cleanup in 3637 * merge_reloc_roots() 3638 */ 3639 err = prepare_to_merge(rc, err); 3640 3641 merge_reloc_roots(rc); 3642 3643 rc->merge_reloc_tree = false; 3644 unset_reloc_control(rc); 3645 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL); 3646 3647 /* get rid of pinned extents */ 3648 trans = btrfs_join_transaction(rc->extent_root); 3649 if (IS_ERR(trans)) { 3650 err = PTR_ERR(trans); 3651 goto out_free; 3652 } 3653 ret = btrfs_commit_transaction(trans); 3654 if (ret && !err) 3655 err = ret; 3656 out_free: 3657 ret = clean_dirty_subvols(rc); 3658 if (ret < 0 && !err) 3659 err = ret; 3660 btrfs_free_block_rsv(fs_info, rc->block_rsv); 3661 return err; 3662 } 3663 3664 static int __insert_orphan_inode(struct btrfs_trans_handle *trans, 3665 struct btrfs_root *root, u64 objectid) 3666 { 3667 BTRFS_PATH_AUTO_FREE(path); 3668 struct btrfs_inode_item *item; 3669 struct extent_buffer *leaf; 3670 int ret; 3671 3672 path = btrfs_alloc_path(); 3673 if (!path) 3674 return -ENOMEM; 3675 3676 ret = btrfs_insert_empty_inode(trans, root, path, objectid); 3677 if (ret) 3678 return ret; 3679 3680 leaf = path->nodes[0]; 3681 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); 3682 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); 3683 btrfs_set_inode_generation(leaf, item, 1); 3684 btrfs_set_inode_size(leaf, item, 0); 3685 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 3686 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | 3687 BTRFS_INODE_PREALLOC); 3688 return 0; 3689 } 3690 3691 static void delete_orphan_inode(struct btrfs_trans_handle *trans, 3692 struct btrfs_root *root, u64 objectid) 3693 { 3694 BTRFS_PATH_AUTO_FREE(path); 3695 struct btrfs_key key; 3696 int ret = 0; 3697 3698 path = btrfs_alloc_path(); 3699 if (!path) { 3700 ret = -ENOMEM; 3701 goto out; 3702 } 3703 3704 key.objectid = objectid; 3705 key.type = BTRFS_INODE_ITEM_KEY; 3706 key.offset = 0; 3707 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3708 if (ret) { 3709 if (ret > 0) 3710 ret = -ENOENT; 3711 goto out; 3712 } 3713 ret = btrfs_del_item(trans, root, path); 3714 out: 3715 if (ret) 3716 btrfs_abort_transaction(trans, ret); 3717 } 3718 3719 /* 3720 * helper to create inode for data relocation. 
3721 * the inode is in data relocation tree and its link count is 0 3722 */ 3723 static noinline_for_stack struct inode *create_reloc_inode( 3724 const struct btrfs_block_group *group) 3725 { 3726 struct btrfs_fs_info *fs_info = group->fs_info; 3727 struct btrfs_inode *inode = NULL; 3728 struct btrfs_trans_handle *trans; 3729 struct btrfs_root *root; 3730 u64 objectid; 3731 int ret = 0; 3732 3733 root = btrfs_grab_root(fs_info->data_reloc_root); 3734 trans = btrfs_start_transaction(root, 6); 3735 if (IS_ERR(trans)) { 3736 btrfs_put_root(root); 3737 return ERR_CAST(trans); 3738 } 3739 3740 ret = btrfs_get_free_objectid(root, &objectid); 3741 if (ret) 3742 goto out; 3743 3744 ret = __insert_orphan_inode(trans, root, objectid); 3745 if (ret) 3746 goto out; 3747 3748 inode = btrfs_iget(objectid, root); 3749 if (IS_ERR(inode)) { 3750 delete_orphan_inode(trans, root, objectid); 3751 ret = PTR_ERR(inode); 3752 inode = NULL; 3753 goto out; 3754 } 3755 inode->reloc_block_group_start = group->start; 3756 3757 ret = btrfs_orphan_add(trans, inode); 3758 out: 3759 btrfs_put_root(root); 3760 btrfs_end_transaction(trans); 3761 btrfs_btree_balance_dirty(fs_info); 3762 if (ret) { 3763 if (inode) 3764 iput(&inode->vfs_inode); 3765 return ERR_PTR(ret); 3766 } 3767 return &inode->vfs_inode; 3768 } 3769 3770 /* 3771 * Mark start of chunk relocation that is cancellable. Check if the cancellation 3772 * has been requested meanwhile and don't start in that case. 3773 * NOTE: if this returns an error, reloc_chunk_end() must not be called. 3774 * 3775 * Return: 3776 * 0 success 3777 * -EINPROGRESS operation is already in progress, that's probably a bug 3778 * -ECANCELED cancellation request was set before the operation started 3779 */ 3780 static int reloc_chunk_start(struct btrfs_fs_info *fs_info) 3781 { 3782 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) { 3783 /* This should not happen */ 3784 btrfs_err(fs_info, "reloc already running, cannot start"); 3785 return -EINPROGRESS; 3786 } 3787 3788 if (atomic_read(&fs_info->reloc_cancel_req) > 0) { 3789 btrfs_info(fs_info, "chunk relocation canceled on start"); 3790 /* On cancel, clear all requests. */ 3791 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); 3792 atomic_set(&fs_info->reloc_cancel_req, 0); 3793 return -ECANCELED; 3794 } 3795 return 0; 3796 } 3797 3798 /* 3799 * Mark end of chunk relocation that is cancellable and wake any waiters. 3800 * NOTE: call only if a previous call to reloc_chunk_start() succeeded. 
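 *
 * Typical pairing (illustrative):
 *
 *	ret = reloc_chunk_start(fs_info);
 *	if (ret < 0)
 *		return ret;	(must not call reloc_chunk_end() here)
 *	...do the relocation...
 *	reloc_chunk_end(fs_info);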
3801 */ 3802 static void reloc_chunk_end(struct btrfs_fs_info *fs_info) 3803 { 3804 ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)); 3805 /* Requested after start, clear bit first so any waiters can continue */ 3806 if (atomic_read(&fs_info->reloc_cancel_req) > 0) 3807 btrfs_info(fs_info, "chunk relocation canceled during operation"); 3808 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags); 3809 atomic_set(&fs_info->reloc_cancel_req, 0); 3810 } 3811 3812 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) 3813 { 3814 struct reloc_control *rc; 3815 3816 rc = kzalloc(sizeof(*rc), GFP_NOFS); 3817 if (!rc) 3818 return NULL; 3819 3820 INIT_LIST_HEAD(&rc->reloc_roots); 3821 INIT_LIST_HEAD(&rc->dirty_subvol_roots); 3822 btrfs_backref_init_cache(fs_info, &rc->backref_cache, true); 3823 rc->reloc_root_tree.rb_root = RB_ROOT; 3824 spin_lock_init(&rc->reloc_root_tree.lock); 3825 btrfs_extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS); 3826 return rc; 3827 } 3828 3829 static void free_reloc_control(struct reloc_control *rc) 3830 { 3831 struct mapping_node *node, *tmp; 3832 3833 free_reloc_roots(&rc->reloc_roots); 3834 rbtree_postorder_for_each_entry_safe(node, tmp, 3835 &rc->reloc_root_tree.rb_root, rb_node) 3836 kfree(node); 3837 3838 kfree(rc); 3839 } 3840 3841 /* 3842 * Print the block group being relocated 3843 */ 3844 static void describe_relocation(struct btrfs_block_group *block_group) 3845 { 3846 char buf[128] = "NONE"; 3847 3848 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf)); 3849 3850 btrfs_info(block_group->fs_info, "relocating block group %llu flags %s", 3851 block_group->start, buf); 3852 } 3853 3854 static const char *stage_to_string(enum reloc_stage stage) 3855 { 3856 if (stage == MOVE_DATA_EXTENTS) 3857 return "move data extents"; 3858 if (stage == UPDATE_DATA_PTRS) 3859 return "update data pointers"; 3860 return "unknown"; 3861 } 3862 3863 static int add_remap_tree_entries(struct btrfs_trans_handle *trans, struct btrfs_path *path, 3864 struct btrfs_key *entries, unsigned int num_entries) 3865 { 3866 int ret; 3867 struct btrfs_fs_info *fs_info = trans->fs_info; 3868 struct btrfs_item_batch batch; 3869 u32 *data_sizes; 3870 u32 max_items; 3871 3872 max_items = BTRFS_LEAF_DATA_SIZE(trans->fs_info) / sizeof(struct btrfs_item); 3873 3874 data_sizes = kzalloc(sizeof(u32) * min_t(u32, num_entries, max_items), GFP_NOFS); 3875 if (!data_sizes) 3876 return -ENOMEM; 3877 3878 while (true) { 3879 batch.keys = entries; 3880 batch.data_sizes = data_sizes; 3881 batch.total_data_size = 0; 3882 batch.nr = min_t(u32, num_entries, max_items); 3883 3884 ret = btrfs_insert_empty_items(trans, fs_info->remap_root, path, &batch); 3885 btrfs_release_path(path); 3886 3887 if (num_entries <= max_items) 3888 break; 3889 3890 num_entries -= max_items; 3891 entries += max_items; 3892 } 3893 3894 kfree(data_sizes); 3895 3896 return ret; 3897 } 3898 3899 struct space_run { 3900 u64 start; 3901 u64 end; 3902 }; 3903 3904 static void parse_bitmap(u64 block_size, const unsigned long *bitmap, 3905 unsigned long size, u64 address, struct space_run *space_runs, 3906 unsigned int *num_space_runs) 3907 { 3908 unsigned long pos, end; 3909 u64 run_start, run_length; 3910 3911 pos = find_first_bit(bitmap, size); 3912 if (pos == size) 3913 return; 3914 3915 while (true) { 3916 end = find_next_zero_bit(bitmap, size, pos); 3917 3918 run_start = address + (pos * block_size); 3919 run_length = (end - pos) * block_size; 3920 3921 if 
(*num_space_runs != 0 && 3922 space_runs[*num_space_runs - 1].end == run_start) { 3923 space_runs[*num_space_runs - 1].end += run_length; 3924 } else { 3925 space_runs[*num_space_runs].start = run_start; 3926 space_runs[*num_space_runs].end = run_start + run_length; 3927 3928 (*num_space_runs)++; 3929 } 3930 3931 if (end == size) 3932 break; 3933 3934 pos = find_next_bit(bitmap, size, end + 1); 3935 if (pos == size) 3936 break; 3937 } 3938 } 3939 3940 static void adjust_block_group_remap_bytes(struct btrfs_trans_handle *trans, 3941 struct btrfs_block_group *bg, s64 diff) 3942 { 3943 struct btrfs_fs_info *fs_info = trans->fs_info; 3944 bool bg_already_dirty = true; 3945 bool mark_unused = false; 3946 3947 spin_lock(&bg->lock); 3948 bg->remap_bytes += diff; 3949 if (bg->used == 0 && bg->remap_bytes == 0) 3950 mark_unused = true; 3951 spin_unlock(&bg->lock); 3952 3953 if (mark_unused) 3954 btrfs_mark_bg_unused(bg); 3955 3956 spin_lock(&trans->transaction->dirty_bgs_lock); 3957 if (list_empty(&bg->dirty_list)) { 3958 list_add_tail(&bg->dirty_list, &trans->transaction->dirty_bgs); 3959 bg_already_dirty = false; 3960 btrfs_get_block_group(bg); 3961 } 3962 spin_unlock(&trans->transaction->dirty_bgs_lock); 3963 3964 /* Modified block groups are accounted for in the delayed_refs_rsv. */ 3965 if (!bg_already_dirty) 3966 btrfs_inc_delayed_refs_rsv_bg_updates(fs_info); 3967 } 3968 3969 /* Private structure for I/O from copy_remapped_data(). */ 3970 struct reloc_io_private { 3971 struct completion done; 3972 refcount_t pending_refs; 3973 blk_status_t status; 3974 }; 3975 3976 static void reloc_endio(struct btrfs_bio *bbio) 3977 { 3978 struct reloc_io_private *priv = bbio->private; 3979 3980 if (bbio->bio.bi_status) 3981 WRITE_ONCE(priv->status, bbio->bio.bi_status); 3982 3983 if (refcount_dec_and_test(&priv->pending_refs)) 3984 complete(&priv->done); 3985 3986 bio_put(&bbio->bio); 3987 } 3988 3989 static int copy_remapped_data_io(struct btrfs_fs_info *fs_info, 3990 struct reloc_io_private *priv, 3991 struct page **pages, u64 addr, u64 length, 3992 blk_opf_t op) 3993 { 3994 struct btrfs_bio *bbio; 3995 int i; 3996 3997 init_completion(&priv->done); 3998 refcount_set(&priv->pending_refs, 1); 3999 priv->status = 0; 4000 4001 bbio = btrfs_bio_alloc(BIO_MAX_VECS, op, BTRFS_I(fs_info->btree_inode), 4002 addr, reloc_endio, priv); 4003 bbio->bio.bi_iter.bi_sector = (addr >> SECTOR_SHIFT); 4004 bbio->is_remap = true; 4005 4006 i = 0; 4007 do { 4008 size_t bytes = min_t(u64, length, PAGE_SIZE); 4009 4010 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { 4011 refcount_inc(&priv->pending_refs); 4012 btrfs_submit_bbio(bbio, 0); 4013 4014 bbio = btrfs_bio_alloc(BIO_MAX_VECS, op, 4015 BTRFS_I(fs_info->btree_inode), 4016 addr, reloc_endio, priv); 4017 bbio->bio.bi_iter.bi_sector = (addr >> SECTOR_SHIFT); 4018 bbio->is_remap = true; 4019 continue; 4020 } 4021 4022 i++; 4023 addr += bytes; 4024 length -= bytes; 4025 } while (length); 4026 4027 refcount_inc(&priv->pending_refs); 4028 btrfs_submit_bbio(bbio, 0); 4029 4030 if (!refcount_dec_and_test(&priv->pending_refs)) 4031 wait_for_completion_io(&priv->done); 4032 4033 return blk_status_to_errno(READ_ONCE(priv->status)); 4034 } 4035 4036 static int copy_remapped_data(struct btrfs_fs_info *fs_info, u64 old_addr, 4037 u64 new_addr, u64 length) 4038 { 4039 int ret; 4040 u64 copy_len = min_t(u64, length, SZ_1M); 4041 struct page **pages; 4042 struct reloc_io_private priv; 4043 unsigned int nr_pages = DIV_ROUND_UP(length, PAGE_SIZE); 4044 4045 pages = 
kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 4046 if (!pages) 4047 return -ENOMEM; 4048 4049 ret = btrfs_alloc_page_array(nr_pages, pages, 0); 4050 if (ret) { 4051 ret = -ENOMEM; 4052 goto end; 4053 } 4054 4055 /* Copy 1MB at a time, to avoid using too much memory. */ 4056 do { 4057 u64 to_copy = min_t(u64, length, copy_len); 4058 4059 /* Limit to one bio. */ 4060 to_copy = min_t(u64, to_copy, BIO_MAX_VECS << PAGE_SHIFT); 4061 4062 ret = copy_remapped_data_io(fs_info, &priv, pages, old_addr, 4063 to_copy, REQ_OP_READ); 4064 if (ret) 4065 goto end; 4066 4067 ret = copy_remapped_data_io(fs_info, &priv, pages, new_addr, 4068 to_copy, REQ_OP_WRITE); 4069 if (ret) 4070 goto end; 4071 4072 if (to_copy == length) 4073 break; 4074 4075 old_addr += to_copy; 4076 new_addr += to_copy; 4077 length -= to_copy; 4078 } while (true); 4079 4080 ret = 0; 4081 end: 4082 for (int i = 0; i < nr_pages; i++) { 4083 if (pages[i]) 4084 __free_page(pages[i]); 4085 } 4086 kfree(pages); 4087 4088 return ret; 4089 } 4090 4091 static int add_remap_item(struct btrfs_trans_handle *trans, 4092 struct btrfs_path *path, u64 new_addr, u64 length, 4093 u64 old_addr) 4094 { 4095 struct btrfs_fs_info *fs_info = trans->fs_info; 4096 struct btrfs_remap_item remap = { 0 }; 4097 struct btrfs_key key; 4098 struct extent_buffer *leaf; 4099 int ret; 4100 4101 key.objectid = old_addr; 4102 key.type = BTRFS_REMAP_KEY; 4103 key.offset = length; 4104 4105 ret = btrfs_insert_empty_item(trans, fs_info->remap_root, path, 4106 &key, sizeof(struct btrfs_remap_item)); 4107 if (ret) 4108 return ret; 4109 4110 leaf = path->nodes[0]; 4111 btrfs_set_stack_remap_address(&remap, new_addr); 4112 write_extent_buffer(leaf, &remap, btrfs_item_ptr_offset(leaf, path->slots[0]), 4113 sizeof(struct btrfs_remap_item)); 4114 4115 btrfs_release_path(path); 4116 4117 return 0; 4118 } 4119 4120 static int add_remap_backref_item(struct btrfs_trans_handle *trans, 4121 struct btrfs_path *path, u64 new_addr, 4122 u64 length, u64 old_addr) 4123 { 4124 struct btrfs_fs_info *fs_info = trans->fs_info; 4125 struct btrfs_remap_item remap = { 0 }; 4126 struct btrfs_key key; 4127 struct extent_buffer *leaf; 4128 int ret; 4129 4130 key.objectid = new_addr; 4131 key.type = BTRFS_REMAP_BACKREF_KEY; 4132 key.offset = length; 4133 4134 ret = btrfs_insert_empty_item(trans, fs_info->remap_root, path, &key, 4135 sizeof(struct btrfs_remap_item)); 4136 if (ret) 4137 return ret; 4138 4139 leaf = path->nodes[0]; 4140 btrfs_set_stack_remap_address(&remap, old_addr); 4141 write_extent_buffer(leaf, &remap, btrfs_item_ptr_offset(leaf, path->slots[0]), 4142 sizeof(struct btrfs_remap_item)); 4143 4144 btrfs_release_path(path); 4145 4146 return 0; 4147 } 4148 4149 static int move_existing_remap(struct btrfs_fs_info *fs_info, 4150 struct btrfs_path *path, 4151 struct btrfs_block_group *bg, u64 new_addr, 4152 u64 length, u64 old_addr) 4153 { 4154 struct btrfs_trans_handle *trans; 4155 struct extent_buffer *leaf; 4156 struct btrfs_remap_item *remap_ptr; 4157 struct btrfs_remap_item remap = { 0 }; 4158 struct btrfs_key key, ins; 4159 u64 dest_addr, dest_length, min_size; 4160 struct btrfs_block_group *dest_bg; 4161 int ret; 4162 const bool is_data = (bg->flags & BTRFS_BLOCK_GROUP_DATA); 4163 struct btrfs_space_info *sinfo = bg->space_info; 4164 bool mutex_taken = false; 4165 bool bg_needs_free_space; 4166 4167 spin_lock(&sinfo->lock); 4168 btrfs_space_info_update_bytes_may_use(sinfo, length); 4169 spin_unlock(&sinfo->lock); 4170 4171 if (is_data) 4172 min_size = fs_info->sectorsize; 4173 
else 4174 min_size = fs_info->nodesize; 4175 4176 ret = btrfs_reserve_extent(fs_info->fs_root, length, length, min_size, 4177 0, 0, &ins, is_data, false); 4178 if (unlikely(ret)) { 4179 spin_lock(&sinfo->lock); 4180 btrfs_space_info_update_bytes_may_use(sinfo, -length); 4181 spin_unlock(&sinfo->lock); 4182 return ret; 4183 } 4184 4185 dest_addr = ins.objectid; 4186 dest_length = ins.offset; 4187 4188 if (!is_data && !IS_ALIGNED(dest_length, fs_info->nodesize)) { 4189 u64 new_length = ALIGN_DOWN(dest_length, fs_info->nodesize); 4190 4191 btrfs_free_reserved_extent(fs_info, dest_addr + new_length, 4192 dest_length - new_length, 0); 4193 4194 dest_length = new_length; 4195 } 4196 4197 trans = btrfs_join_transaction(fs_info->remap_root); 4198 if (IS_ERR(trans)) { 4199 ret = PTR_ERR(trans); 4200 trans = NULL; 4201 goto end; 4202 } 4203 4204 mutex_lock(&fs_info->remap_mutex); 4205 mutex_taken = true; 4206 4207 /* Find old remap entry. */ 4208 key.objectid = old_addr; 4209 key.type = BTRFS_REMAP_KEY; 4210 key.offset = length; 4211 4212 ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, 0, 1); 4213 if (ret == 1) { 4214 /* 4215 * Not a problem if the remap entry wasn't found: that means 4216 * that another transaction has deallocated the data. 4217 * move_existing_remaps() loops until the BG contains no 4218 * remaps, so we can just return 0 in this case. 4219 */ 4220 btrfs_release_path(path); 4221 ret = 0; 4222 goto end; 4223 } else if (unlikely(ret)) { 4224 goto end; 4225 } 4226 4227 ret = copy_remapped_data(fs_info, new_addr, dest_addr, dest_length); 4228 if (unlikely(ret)) 4229 goto end; 4230 4231 /* Change data of old remap entry. */ 4232 leaf = path->nodes[0]; 4233 remap_ptr = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item); 4234 btrfs_set_remap_address(leaf, remap_ptr, dest_addr); 4235 btrfs_mark_buffer_dirty(trans, leaf); 4236 4237 if (dest_length != length) { 4238 key.offset = dest_length; 4239 btrfs_set_item_key_safe(trans, path, &key); 4240 } 4241 4242 btrfs_release_path(path); 4243 4244 if (dest_length != length) { 4245 /* Add remap item for remainder. */ 4246 ret = add_remap_item(trans, path, new_addr + dest_length, 4247 length - dest_length, old_addr + dest_length); 4248 if (unlikely(ret)) 4249 goto end; 4250 } 4251 4252 /* Change or remove old backref. */ 4253 key.objectid = new_addr; 4254 key.type = BTRFS_REMAP_BACKREF_KEY; 4255 key.offset = length; 4256 4257 ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, -1, 1); 4258 if (unlikely(ret)) { 4259 if (ret == 1) { 4260 btrfs_release_path(path); 4261 ret = -ENOENT; 4262 } 4263 goto end; 4264 } 4265 4266 leaf = path->nodes[0]; 4267 4268 if (dest_length == length) { 4269 ret = btrfs_del_item(trans, fs_info->remap_root, path); 4270 if (unlikely(ret)) { 4271 btrfs_release_path(path); 4272 goto end; 4273 } 4274 } else { 4275 key.objectid += dest_length; 4276 key.offset -= dest_length; 4277 btrfs_set_item_key_safe(trans, path, &key); 4278 btrfs_set_stack_remap_address(&remap, old_addr + dest_length); 4279 4280 write_extent_buffer(leaf, &remap, 4281 btrfs_item_ptr_offset(leaf, path->slots[0]), 4282 sizeof(struct btrfs_remap_item)); 4283 } 4284 4285 btrfs_release_path(path); 4286 4287 /* Add new backref. 
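The destination gets a BTRFS_REMAP_BACKREF_KEY item recording old_addr, mirroring the forward item keyed on old_addr, so that a later relocation of the destination block group can find this mapping again.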
*/ 4288 ret = add_remap_backref_item(trans, path, dest_addr, dest_length, old_addr); 4289 if (unlikely(ret)) 4290 goto end; 4291 4292 adjust_block_group_remap_bytes(trans, bg, -dest_length); 4293 4294 ret = btrfs_add_to_free_space_tree(trans, new_addr, dest_length); 4295 if (unlikely(ret)) 4296 goto end; 4297 4298 dest_bg = btrfs_lookup_block_group(fs_info, dest_addr); 4299 4300 adjust_block_group_remap_bytes(trans, dest_bg, dest_length); 4301 4302 mutex_lock(&dest_bg->free_space_lock); 4303 bg_needs_free_space = test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, 4304 &dest_bg->runtime_flags); 4305 mutex_unlock(&dest_bg->free_space_lock); 4306 4307 4308 if (bg_needs_free_space) { 4309 ret = btrfs_add_block_group_free_space(trans, dest_bg); 4310 if (unlikely(ret)) { 4311 btrfs_put_block_group(dest_bg); goto end; } 4312 } 4313 /* Drop our reference only once we are done using dest_bg. */ btrfs_put_block_group(dest_bg); 4314 ret = btrfs_remove_from_free_space_tree(trans, dest_addr, dest_length); 4315 if (unlikely(ret)) { 4316 btrfs_remove_from_free_space_tree(trans, new_addr, dest_length); 4317 goto end; 4318 } 4319 4320 ret = 0; 4321 4322 end: 4323 if (mutex_taken) 4324 mutex_unlock(&fs_info->remap_mutex); 4325 4326 btrfs_dec_block_group_reservations(fs_info, dest_addr); 4327 4328 if (unlikely(ret)) { 4329 btrfs_free_reserved_extent(fs_info, dest_addr, dest_length, 0); 4330 4331 if (trans) { 4332 btrfs_abort_transaction(trans, ret); 4333 btrfs_end_transaction(trans); 4334 } 4335 } else { 4336 dest_bg = btrfs_lookup_block_group(fs_info, dest_addr); 4337 btrfs_free_reserved_bytes(dest_bg, dest_length, 0); 4338 btrfs_put_block_group(dest_bg); 4339 4340 ret = btrfs_commit_transaction(trans); 4341 } 4342 4343 return ret; 4344 } 4345 4346 static int move_existing_remaps(struct btrfs_fs_info *fs_info, 4347 struct btrfs_block_group *bg, 4348 struct btrfs_path *path) 4349 { 4350 int ret; 4351 struct btrfs_key key; 4352 struct extent_buffer *leaf; 4353 struct btrfs_remap_item *remap; 4354 u64 old_addr; 4355 4356 /* Look for backrefs in remap tree. 
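Each pass moves one remapped range out of this block group; bg->remap_bytes shrinks by the amount moved, so the loop terminates once nothing in the remap tree points into this block group any more.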
*/ 4357 while (bg->remap_bytes > 0) { 4358 key.objectid = bg->start; 4359 key.type = BTRFS_REMAP_BACKREF_KEY; 4360 key.offset = 0; 4361 4362 ret = btrfs_search_slot(NULL, fs_info->remap_root, &key, path, 0, 0); 4363 if (ret < 0) 4364 return ret; 4365 4366 leaf = path->nodes[0]; 4367 4368 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 4369 ret = btrfs_next_leaf(fs_info->remap_root, path); 4370 if (ret < 0) { 4371 btrfs_release_path(path); 4372 return ret; 4373 } 4374 4375 if (ret) { 4376 btrfs_release_path(path); 4377 break; 4378 } 4379 4380 leaf = path->nodes[0]; 4381 } 4382 4383 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4384 4385 if (key.type != BTRFS_REMAP_BACKREF_KEY) { 4386 path->slots[0]++; 4387 4388 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 4389 ret = btrfs_next_leaf(fs_info->remap_root, path); 4390 if (ret < 0) { 4391 btrfs_release_path(path); 4392 return ret; 4393 } 4394 4395 if (ret) { 4396 btrfs_release_path(path); 4397 break; 4398 } 4399 4400 leaf = path->nodes[0]; 4401 } 4402 /* We advanced to a different item, so reread its key before using it below. */ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); } 4403 4404 remap = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item); 4405 old_addr = btrfs_remap_address(leaf, remap); 4406 4407 btrfs_release_path(path); 4408 4409 ret = move_existing_remap(fs_info, path, bg, key.objectid, 4410 key.offset, old_addr); 4411 if (ret) 4412 return ret; 4413 } 4414 4415 ASSERT(bg->remap_bytes == 0); 4416 4417 return 0; 4418 } 4419 4420 static int create_remap_tree_entries(struct btrfs_trans_handle *trans, 4421 struct btrfs_path *path, 4422 struct btrfs_block_group *bg) 4423 { 4424 struct btrfs_fs_info *fs_info = trans->fs_info; 4425 struct btrfs_free_space_info *fsi; 4426 struct btrfs_key key, found_key; 4427 struct extent_buffer *leaf; 4428 struct btrfs_root *space_root; 4429 u32 extent_count; 4430 struct space_run *space_runs = NULL; 4431 unsigned int num_space_runs = 0; 4432 struct btrfs_key *entries = NULL; 4433 unsigned int max_entries, num_entries; 4434 int ret; 4435 4436 mutex_lock(&bg->free_space_lock); 4437 4438 if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &bg->runtime_flags)) { 4439 mutex_unlock(&bg->free_space_lock); 4440 4441 ret = btrfs_add_block_group_free_space(trans, bg); 4442 if (ret) 4443 return ret; 4444 4445 mutex_lock(&bg->free_space_lock); 4446 } 4447 4448 fsi = btrfs_search_free_space_info(trans, bg, path, 0); 4449 if (IS_ERR(fsi)) { 4450 mutex_unlock(&bg->free_space_lock); 4451 return PTR_ERR(fsi); 4452 } 4453 4454 extent_count = btrfs_free_space_extent_count(path->nodes[0], fsi); 4455 4456 btrfs_release_path(path); 4457 4458 space_runs = kmalloc(sizeof(*space_runs) * extent_count, GFP_NOFS); 4459 if (!space_runs) { 4460 mutex_unlock(&bg->free_space_lock); 4461 return -ENOMEM; 4462 } 4463 4464 key.objectid = bg->start; 4465 key.type = 0; 4466 key.offset = 0; 4467 4468 space_root = btrfs_free_space_root(bg); 4469 4470 ret = btrfs_search_slot(trans, space_root, &key, path, 0, 0); 4471 if (ret < 0) { 4472 mutex_unlock(&bg->free_space_lock); 4473 goto out; 4474 } 4475 4476 ret = 0; 4477 4478 while (true) { 4479 leaf = path->nodes[0]; 4480 4481 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4482 4483 if (found_key.objectid >= bg->start + bg->length) 4484 break; 4485 4486 if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) { 4487 if (num_space_runs != 0 && 4488 space_runs[num_space_runs - 1].end == found_key.objectid) { 4489 space_runs[num_space_runs - 1].end = 4490 found_key.objectid + found_key.offset; 4491 } else { 4492 ASSERT(num_space_runs < extent_count); 4493 4494 space_runs[num_space_runs].start = 
found_key.objectid; 4495 space_runs[num_space_runs].end = 4496 found_key.objectid + found_key.offset; 4497 4498 num_space_runs++; 4499 } 4500 } else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) { 4501 void *bitmap; 4502 unsigned long offset; 4503 u32 data_size; 4504 4505 offset = btrfs_item_ptr_offset(leaf, path->slots[0]); 4506 data_size = btrfs_item_size(leaf, path->slots[0]); 4507 4508 if (data_size != 0) { 4509 bitmap = kmalloc(data_size, GFP_NOFS); 4510 if (!bitmap) { 4511 mutex_unlock(&bg->free_space_lock); 4512 ret = -ENOMEM; 4513 goto out; 4514 } 4515 4516 read_extent_buffer(leaf, bitmap, offset, data_size); 4517 4518 parse_bitmap(fs_info->sectorsize, bitmap, 4519 data_size * BITS_PER_BYTE, 4520 found_key.objectid, space_runs, 4521 &num_space_runs); 4522 4523 ASSERT(num_space_runs <= extent_count); 4524 4525 kfree(bitmap); 4526 } 4527 } 4528 4529 path->slots[0]++; 4530 4531 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 4532 ret = btrfs_next_leaf(space_root, path); 4533 if (ret != 0) { 4534 if (ret == 1) 4535 ret = 0; 4536 break; 4537 } 4538 leaf = path->nodes[0]; 4539 } 4540 } 4541 4542 btrfs_release_path(path); 4543 4544 mutex_unlock(&bg->free_space_lock); 4545 4546 max_entries = extent_count + 2; 4547 entries = kmalloc(sizeof(*entries) * max_entries, GFP_NOFS); 4548 if (!entries) { 4549 ret = -ENOMEM; 4550 goto out; 4551 } 4552 4553 num_entries = 0; 4554 4555 if (num_space_runs == 0) { 4556 entries[num_entries].objectid = bg->start; 4557 entries[num_entries].type = BTRFS_IDENTITY_REMAP_KEY; 4558 entries[num_entries].offset = bg->length; 4559 num_entries++; 4560 } else { 4561 if (space_runs[0].start > bg->start) { 4562 entries[num_entries].objectid = bg->start; 4563 entries[num_entries].type = BTRFS_IDENTITY_REMAP_KEY; 4564 entries[num_entries].offset = space_runs[0].start - bg->start; 4565 num_entries++; 4566 } 4567 4568 for (unsigned int i = 1; i < num_space_runs; i++) { 4569 entries[num_entries].objectid = space_runs[i - 1].end; 4570 entries[num_entries].type = BTRFS_IDENTITY_REMAP_KEY; 4571 entries[num_entries].offset = 4572 space_runs[i].start - space_runs[i - 1].end; 4573 num_entries++; 4574 } 4575 4576 if (space_runs[num_space_runs - 1].end < bg->start + bg->length) { 4577 entries[num_entries].objectid = 4578 space_runs[num_space_runs - 1].end; 4579 entries[num_entries].type = BTRFS_IDENTITY_REMAP_KEY; 4580 entries[num_entries].offset = 4581 bg->start + bg->length - space_runs[num_space_runs - 1].end; 4582 num_entries++; 4583 } 4584 4585 if (num_entries == 0) 4586 goto out; 4587 } 4588 4589 bg->identity_remap_count = num_entries; 4590 4591 ret = add_remap_tree_entries(trans, path, entries, num_entries); 4592 4593 out: 4594 kfree(entries); 4595 kfree(space_runs); 4596 4597 return ret; 4598 } 4599 4600 static int find_next_identity_remap(struct btrfs_trans_handle *trans, 4601 struct btrfs_path *path, u64 bg_end, 4602 u64 last_start, u64 *start, u64 *length) 4603 { 4604 int ret; 4605 struct btrfs_key key, found_key; 4606 struct btrfs_root *remap_root = trans->fs_info->remap_root; 4607 struct extent_buffer *leaf; 4608 4609 key.objectid = last_start; 4610 key.type = BTRFS_IDENTITY_REMAP_KEY; 4611 key.offset = 0; 4612 4613 ret = btrfs_search_slot(trans, remap_root, &key, path, 0, 0); 4614 if (ret < 0) 4615 goto out; 4616 4617 leaf = path->nodes[0]; 4618 while (true) { 4619 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 4620 ret = btrfs_next_leaf(remap_root, path); 4621 4622 if (ret != 0) { 4623 if (ret == 1) 4624 ret = -ENOENT; 4625 goto out; 4626 } 4627 4628 
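/* We moved to the next leaf, refresh the buffer pointer before reading keys from it. */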
leaf = path->nodes[0]; 4629 } 4630 4631 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4632 4633 if (found_key.objectid >= bg_end) { 4634 ret = -ENOENT; 4635 goto out; 4636 } 4637 4638 if (found_key.type == BTRFS_IDENTITY_REMAP_KEY) { 4639 *start = found_key.objectid; 4640 *length = found_key.offset; 4641 ret = 0; 4642 goto out; 4643 } 4644 4645 path->slots[0]++; 4646 } 4647 4648 out: 4649 btrfs_release_path(path); 4650 4651 return ret; 4652 } 4653 4654 static int remove_chunk_stripes(struct btrfs_trans_handle *trans, 4655 struct btrfs_chunk_map *chunk_map, 4656 struct btrfs_path *path) 4657 { 4658 struct btrfs_fs_info *fs_info = trans->fs_info; 4659 struct btrfs_key key; 4660 struct extent_buffer *leaf; 4661 struct btrfs_chunk *chunk; 4662 int ret; 4663 4664 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 4665 key.type = BTRFS_CHUNK_ITEM_KEY; 4666 key.offset = chunk_map->start; 4667 4668 btrfs_reserve_chunk_metadata(trans, false); 4669 4670 ret = btrfs_search_slot(trans, fs_info->chunk_root, &key, path, 0, 1); 4671 if (ret) { 4672 if (ret == 1) { 4673 btrfs_release_path(path); 4674 ret = -ENOENT; 4675 } 4676 btrfs_trans_release_chunk_metadata(trans); 4677 return ret; 4678 } 4679 4680 leaf = path->nodes[0]; 4681 4682 chunk = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_chunk); 4683 btrfs_set_chunk_num_stripes(leaf, chunk, 0); 4684 btrfs_set_chunk_sub_stripes(leaf, chunk, 0); 4685 4686 btrfs_truncate_item(trans, path, offsetof(struct btrfs_chunk, stripe), 1); 4687 4688 btrfs_mark_buffer_dirty(trans, leaf); 4689 4690 btrfs_release_path(path); 4691 btrfs_trans_release_chunk_metadata(trans); 4692 4693 return 0; 4694 } 4695 4696 int btrfs_last_identity_remap_gone(struct btrfs_chunk_map *chunk_map, 4697 struct btrfs_block_group *bg) 4698 { 4699 struct btrfs_fs_info *fs_info = bg->fs_info; 4700 struct btrfs_trans_handle *trans; 4701 int ret; 4702 unsigned int num_items; 4703 BTRFS_PATH_AUTO_FREE(path); 4704 4705 path = btrfs_alloc_path(); 4706 if (!path) 4707 return -ENOMEM; 4708 4709 /* 4710 * One item for each entry we're removing in the dev extents tree, and 4711 * another for each device. DUP chunks are all on one device, 4712 * everything else has one device per stripe. 
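For example, a two-stripe RAID1 chunk needs two dev extent deletions plus two device item updates (4 items), while a two-stripe DUP chunk needs two deletions plus a single device item update (3 items).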
4713 */ 4714 if (bg->flags & BTRFS_BLOCK_GROUP_DUP) 4715 num_items = chunk_map->num_stripes + 1; 4716 else 4717 num_items = 2 * chunk_map->num_stripes; 4718 4719 trans = btrfs_start_transaction_fallback_global_rsv(fs_info->tree_root, num_items); 4720 if (IS_ERR(trans)) 4721 return PTR_ERR(trans); 4722 4723 ret = btrfs_remove_dev_extents(trans, chunk_map); 4724 if (unlikely(ret)) { 4725 btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); 4726 return ret; 4727 } 4728 4729 mutex_lock(&fs_info->chunk_mutex); 4730 for (unsigned int i = 0; i < chunk_map->num_stripes; i++) { 4731 ret = btrfs_update_device(trans, chunk_map->stripes[i].dev); 4732 if (unlikely(ret)) { 4733 mutex_unlock(&fs_info->chunk_mutex); 4734 btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); 4735 return ret; 4736 } 4737 } 4738 mutex_unlock(&fs_info->chunk_mutex); 4739 4740 write_lock(&fs_info->mapping_tree_lock); 4741 btrfs_chunk_map_device_clear_bits(chunk_map, CHUNK_ALLOCATED); 4742 write_unlock(&fs_info->mapping_tree_lock); 4743 4744 btrfs_remove_bg_from_sinfo(bg); 4745 4746 spin_lock(&bg->lock); 4747 clear_bit(BLOCK_GROUP_FLAG_STRIPE_REMOVAL_PENDING, &bg->runtime_flags); 4748 spin_unlock(&bg->lock); 4749 4750 ret = remove_chunk_stripes(trans, chunk_map, path); 4751 if (unlikely(ret)) { 4752 btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); 4753 return ret; 4754 } 4755 4756 ret = btrfs_commit_transaction(trans); 4757 if (ret) 4758 return ret; 4759 4760 return 0; 4761 } 4762 4763 static void adjust_identity_remap_count(struct btrfs_trans_handle *trans, 4764 struct btrfs_block_group *bg, int delta) 4765 { 4766 struct btrfs_fs_info *fs_info = trans->fs_info; 4767 bool bg_already_dirty = true; 4768 bool mark_fully_remapped = false; 4769 4770 WARN_ON(delta < 0 && -delta > bg->identity_remap_count); 4771 4772 spin_lock(&bg->lock); 4773 4774 bg->identity_remap_count += delta; 4775 4776 if (bg->identity_remap_count == 0 && 4777 !test_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &bg->runtime_flags)) { 4778 set_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &bg->runtime_flags); 4779 mark_fully_remapped = true; 4780 } 4781 4782 spin_unlock(&bg->lock); 4783 4784 spin_lock(&trans->transaction->dirty_bgs_lock); 4785 if (list_empty(&bg->dirty_list)) { 4786 list_add_tail(&bg->dirty_list, &trans->transaction->dirty_bgs); 4787 bg_already_dirty = false; 4788 btrfs_get_block_group(bg); 4789 } 4790 spin_unlock(&trans->transaction->dirty_bgs_lock); 4791 4792 /* Modified block groups are accounted for in the delayed_refs_rsv. 
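Only bump that reservation the first time the block group is added to this transaction's dirty list.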
*/ 4793 if (!bg_already_dirty) 4794 btrfs_inc_delayed_refs_rsv_bg_updates(fs_info); 4795 4796 if (mark_fully_remapped) 4797 btrfs_mark_bg_fully_remapped(bg, trans); 4798 } 4799 4800 static int add_remap_entry(struct btrfs_trans_handle *trans, 4801 struct btrfs_path *path, 4802 struct btrfs_block_group *src_bg, u64 old_addr, 4803 u64 new_addr, u64 length) 4804 { 4805 struct btrfs_fs_info *fs_info = trans->fs_info; 4806 struct btrfs_key key, new_key; 4807 int ret; 4808 int identity_count_delta = 0; 4809 4810 key.objectid = old_addr; 4811 key.type = (u8)-1; 4812 key.offset = (u64)-1; 4813 4814 ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, -1, 1); 4815 if (ret < 0) 4816 goto end; 4817 4818 if (path->slots[0] == 0) { 4819 ret = -ENOENT; 4820 goto end; 4821 } 4822 4823 path->slots[0]--; 4824 4825 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4826 4827 if (key.type != BTRFS_IDENTITY_REMAP_KEY || 4828 key.objectid > old_addr || 4829 key.objectid + key.offset <= old_addr) { 4830 ret = -ENOENT; 4831 goto end; 4832 } 4833 4834 /* Shorten or delete identity mapping entry. */ 4835 if (key.objectid == old_addr) { 4836 ret = btrfs_del_item(trans, fs_info->remap_root, path); 4837 if (ret) 4838 goto end; 4839 4840 identity_count_delta--; 4841 } else { 4842 new_key.objectid = key.objectid; 4843 new_key.type = BTRFS_IDENTITY_REMAP_KEY; 4844 new_key.offset = old_addr - key.objectid; 4845 4846 btrfs_set_item_key_safe(trans, path, &new_key); 4847 } 4848 4849 btrfs_release_path(path); 4850 4851 /* Create new remap entry. */ 4852 ret = add_remap_item(trans, path, new_addr, length, old_addr); 4853 if (ret) 4854 goto end; 4855 4856 /* Add entry for remainder of identity mapping, if necessary. */ 4857 if (key.objectid + key.offset != old_addr + length) { 4858 new_key.objectid = old_addr + length; 4859 new_key.type = BTRFS_IDENTITY_REMAP_KEY; 4860 new_key.offset = key.objectid + key.offset - old_addr - length; 4861 4862 ret = btrfs_insert_empty_item(trans, fs_info->remap_root, 4863 path, &new_key, 0); 4864 if (ret) 4865 goto end; 4866 4867 btrfs_release_path(path); 4868 4869 identity_count_delta++; 4870 } 4871 4872 /* Add backref. 
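As a worked example (addresses purely illustrative): remapping [16M, 20M) out of an identity item covering [0, 64M) shortens that item to [0, 16M), inserts a remap item keyed (16M, BTRFS_REMAP_KEY, 4M) carrying new_addr, re-adds an identity item for [20M, 64M) (a net identity_count_delta of +1), and then records the reverse mapping here: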
*/ 4873 ret = add_remap_backref_item(trans, path, new_addr, length, old_addr); 4874 if (ret) 4875 goto end; 4876 4877 if (identity_count_delta != 0) 4878 adjust_identity_remap_count(trans, src_bg, identity_count_delta); 4879 4880 end: 4881 btrfs_release_path(path); 4882 4883 return ret; 4884 } 4885 4886 static int mark_chunk_remapped(struct btrfs_trans_handle *trans, 4887 struct btrfs_path *path, u64 start) 4888 { 4889 struct btrfs_fs_info *fs_info = trans->fs_info; 4890 struct btrfs_chunk_map *chunk_map; 4891 struct btrfs_key key; 4892 u64 type; 4893 int ret; 4894 struct extent_buffer *leaf; 4895 struct btrfs_chunk *chunk; 4896 4897 read_lock(&fs_info->mapping_tree_lock); 4898 4899 chunk_map = btrfs_find_chunk_map_nolock(fs_info, start, 1); 4900 if (!chunk_map) { 4901 read_unlock(&fs_info->mapping_tree_lock); 4902 return -ENOENT; 4903 } 4904 4905 chunk_map->type |= BTRFS_BLOCK_GROUP_REMAPPED; 4906 type = chunk_map->type; 4907 4908 read_unlock(&fs_info->mapping_tree_lock); 4909 4910 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; 4911 key.type = BTRFS_CHUNK_ITEM_KEY; 4912 key.offset = start; 4913 4914 ret = btrfs_search_slot(trans, fs_info->chunk_root, &key, path, 0, 1); 4915 if (ret == 1) { 4916 ret = -ENOENT; 4917 goto end; 4918 } else if (ret < 0) 4919 goto end; 4920 4921 leaf = path->nodes[0]; 4922 4923 chunk = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_chunk); 4924 btrfs_set_chunk_type(leaf, chunk, type); 4925 btrfs_mark_buffer_dirty(trans, leaf); 4926 4927 ret = 0; 4928 end: 4929 btrfs_free_chunk_map(chunk_map); 4930 btrfs_release_path(path); 4931 4932 return ret; 4933 } 4934 4935 static int do_remap_reloc_trans(struct btrfs_fs_info *fs_info, 4936 struct btrfs_block_group *src_bg, 4937 struct btrfs_path *path, u64 *last_start) 4938 { 4939 struct btrfs_trans_handle *trans; 4940 struct btrfs_root *extent_root; 4941 struct btrfs_key ins; 4942 struct btrfs_block_group *dest_bg = NULL; 4943 u64 start = 0, remap_length = 0; 4944 u64 length, new_addr, min_size; 4945 int ret; 4946 const bool is_data = (src_bg->flags & BTRFS_BLOCK_GROUP_DATA); 4947 bool no_more = false; 4948 bool made_reservation = false, bg_needs_free_space; 4949 struct btrfs_space_info *sinfo = src_bg->space_info; 4950 4951 extent_root = btrfs_extent_root(fs_info, src_bg->start); 4952 4953 trans = btrfs_start_transaction(extent_root, 0); 4954 if (IS_ERR(trans)) 4955 return PTR_ERR(trans); 4956 4957 mutex_lock(&fs_info->remap_mutex); 4958 4959 ret = find_next_identity_remap(trans, path, src_bg->start + src_bg->length, 4960 *last_start, &start, &remap_length); 4961 if (ret == -ENOENT) { 4962 no_more = true; 4963 goto next; 4964 } else if (ret) { 4965 mutex_unlock(&fs_info->remap_mutex); 4966 btrfs_end_transaction(trans); 4967 return ret; 4968 } 4969 4970 /* Try to reserve enough space for block. */ 4971 spin_lock(&sinfo->lock); 4972 btrfs_space_info_update_bytes_may_use(sinfo, remap_length); 4973 spin_unlock(&sinfo->lock); 4974 4975 if (is_data) 4976 min_size = fs_info->sectorsize; 4977 else 4978 min_size = fs_info->nodesize; 4979 4980 /* 4981 * We're using btrfs_reserve_extent() to allocate a contiguous 4982 * logical address range, but this will become a remap item rather than 4983 * an extent in the extent tree. 4984 * 4985 * Short allocations are fine: it means that we chop off the beginning 4986 * of the identity remap that we're processing, and will tackle the 4987 * rest of it the next time round. 
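For instance (sizes purely illustrative), a request covering a 128M identity run may come back with only 32M; we remap just that much and the remaining 96M keeps its identity item for a later pass.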
4988 */ 4989 ret = btrfs_reserve_extent(fs_info->fs_root, remap_length, remap_length, 4990 min_size, 0, 0, &ins, is_data, false); 4991 if (ret) { 4992 spin_lock(&sinfo->lock); 4993 btrfs_space_info_update_bytes_may_use(sinfo, -remap_length); 4994 spin_unlock(&sinfo->lock); 4995 4996 mutex_unlock(&fs_info->remap_mutex); 4997 btrfs_end_transaction(trans); 4998 return ret; 4999 } 5000 5001 made_reservation = true; 5002 5003 new_addr = ins.objectid; 5004 length = ins.offset; 5005 5006 if (!is_data && !IS_ALIGNED(length, fs_info->nodesize)) { 5007 u64 new_length = ALIGN_DOWN(length, fs_info->nodesize); 5008 5009 btrfs_free_reserved_extent(fs_info, new_addr + new_length, 5010 length - new_length, 0); 5011 5012 length = new_length; 5013 } 5014 5015 dest_bg = btrfs_lookup_block_group(fs_info, new_addr); 5016 5017 mutex_lock(&dest_bg->free_space_lock); 5018 bg_needs_free_space = test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, 5019 &dest_bg->runtime_flags); 5020 mutex_unlock(&dest_bg->free_space_lock); 5021 5022 if (bg_needs_free_space) { 5023 ret = btrfs_add_block_group_free_space(trans, dest_bg); 5024 if (ret) 5025 goto fail; 5026 } 5027 5028 ret = copy_remapped_data(fs_info, start, new_addr, length); 5029 if (ret) 5030 goto fail; 5031 5032 ret = btrfs_remove_from_free_space_tree(trans, new_addr, length); 5033 if (ret) 5034 goto fail; 5035 5036 ret = add_remap_entry(trans, path, src_bg, start, new_addr, length); 5037 if (ret) { 5038 btrfs_add_to_free_space_tree(trans, new_addr, length); 5039 goto fail; 5040 } 5041 5042 adjust_block_group_remap_bytes(trans, dest_bg, length); 5043 btrfs_free_reserved_bytes(dest_bg, length, 0); 5044 5045 spin_lock(&sinfo->lock); 5046 sinfo->bytes_readonly += length; 5047 spin_unlock(&sinfo->lock); 5048 5049 next: 5050 if (dest_bg) 5051 btrfs_put_block_group(dest_bg); 5052 5053 if (made_reservation) 5054 btrfs_dec_block_group_reservations(fs_info, new_addr); 5055 5056 mutex_unlock(&fs_info->remap_mutex); 5057 5058 if (src_bg->identity_remap_count == 0) { 5059 bool mark_fully_remapped = false; 5060 5061 spin_lock(&src_bg->lock); 5062 if (!test_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &src_bg->runtime_flags)) { 5063 mark_fully_remapped = true; 5064 set_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &src_bg->runtime_flags); 5065 } 5066 spin_unlock(&src_bg->lock); 5067 5068 if (mark_fully_remapped) 5069 btrfs_mark_bg_fully_remapped(src_bg, trans); 5070 } 5071 5072 ret = btrfs_end_transaction(trans); 5073 if (ret) 5074 return ret; 5075 5076 if (no_more) 5077 return 1; 5078 5079 *last_start = start; 5080 5081 return 0; 5082 5083 fail: 5084 if (dest_bg) 5085 btrfs_put_block_group(dest_bg); 5086 5087 btrfs_free_reserved_extent(fs_info, new_addr, length, 0); 5088 5089 mutex_unlock(&fs_info->remap_mutex); 5090 btrfs_end_transaction(trans); 5091 5092 return ret; 5093 } 5094 5095 static int do_remap_reloc(struct btrfs_fs_info *fs_info, struct btrfs_path *path, 5096 struct btrfs_block_group *bg) 5097 { 5098 u64 last_start = bg->start; 5099 int ret; 5100 5101 while (true) { 5102 ret = do_remap_reloc_trans(fs_info, bg, path, &last_start); 5103 if (ret) { 5104 if (ret == 1) 5105 ret = 0; 5106 break; 5107 } 5108 } 5109 5110 return ret; 5111 } 5112 5113 int btrfs_translate_remap(struct btrfs_fs_info *fs_info, u64 *logical, u64 *length) 5114 { 5115 int ret; 5116 struct btrfs_key key, found_key; 5117 struct extent_buffer *leaf; 5118 struct btrfs_remap_item *remap; 5119 BTRFS_PATH_AUTO_FREE(path); 5120 5121 path = btrfs_alloc_path(); 5122 if (!path) 5123 return -ENOMEM; 5124 5125 key.objectid = *logical; 
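/* Build the largest possible key for this address: it cannot exist, so btrfs_search_slot() leaves the path just past it, and backing up one slot below lands on the last item starting at or before *logical. */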
5126 key.type = (u8)-1; 5127 key.offset = (u64)-1; 5128 5129 ret = btrfs_search_slot(NULL, fs_info->remap_root, &key, path, 0, 0); 5130 if (ret < 0) 5131 return ret; 5132 5133 leaf = path->nodes[0]; 5134 if (path->slots[0] == 0) 5135 return -ENOENT; 5136 5137 path->slots[0]--; 5138 5139 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5140 5141 if (found_key.type != BTRFS_REMAP_KEY && 5142 found_key.type != BTRFS_IDENTITY_REMAP_KEY) { 5143 return -ENOENT; 5144 } 5145 5146 if (found_key.objectid > *logical || 5147 found_key.objectid + found_key.offset <= *logical) { 5148 return -ENOENT; 5149 } 5150 5151 if (*logical + *length > found_key.objectid + found_key.offset) 5152 *length = found_key.objectid + found_key.offset - *logical; 5153 5154 if (found_key.type == BTRFS_IDENTITY_REMAP_KEY) 5155 return 0; 5156 5157 remap = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item); 5158 *logical += btrfs_remap_address(leaf, remap) - found_key.objectid; 5159 5160 return 0; 5161 } 5162 5163 static int start_block_group_remapping(struct btrfs_fs_info *fs_info, 5164 struct btrfs_path *path, 5165 struct btrfs_block_group *bg) 5166 { 5167 struct btrfs_trans_handle *trans; 5168 bool bg_already_dirty = true; 5169 int ret, ret2; 5170 5171 ret = btrfs_cache_block_group(bg, true); 5172 if (ret) 5173 return ret; 5174 5175 trans = btrfs_start_transaction(fs_info->remap_root, 0); 5176 if (IS_ERR(trans)) 5177 return PTR_ERR(trans); 5178 5179 /* We need to run delayed refs, to make sure FST is up to date. */ 5180 ret = btrfs_run_delayed_refs(trans, U64_MAX); 5181 if (ret) { 5182 btrfs_end_transaction(trans); 5183 return ret; 5184 } 5185 5186 mutex_lock(&fs_info->remap_mutex); 5187 5188 if (bg->flags & BTRFS_BLOCK_GROUP_REMAPPED) { 5189 ret = 0; 5190 goto end; 5191 } 5192 5193 ret = create_remap_tree_entries(trans, path, bg); 5194 if (unlikely(ret)) { 5195 btrfs_abort_transaction(trans, ret); 5196 goto end; 5197 } 5198 5199 spin_lock(&bg->lock); 5200 bg->flags |= BTRFS_BLOCK_GROUP_REMAPPED; 5201 spin_unlock(&bg->lock); 5202 5203 spin_lock(&trans->transaction->dirty_bgs_lock); 5204 if (list_empty(&bg->dirty_list)) { 5205 list_add_tail(&bg->dirty_list, &trans->transaction->dirty_bgs); 5206 bg_already_dirty = false; 5207 btrfs_get_block_group(bg); 5208 } 5209 spin_unlock(&trans->transaction->dirty_bgs_lock); 5210 5211 /* Modified block groups are accounted for in the delayed_refs_rsv. */ 5212 if (!bg_already_dirty) 5213 btrfs_inc_delayed_refs_rsv_bg_updates(fs_info); 5214 5215 ret = mark_chunk_remapped(trans, path, bg->start); 5216 if (unlikely(ret)) { 5217 btrfs_abort_transaction(trans, ret); 5218 goto end; 5219 } 5220 5221 ret = btrfs_remove_block_group_free_space(trans, bg); 5222 if (unlikely(ret)) { 5223 btrfs_abort_transaction(trans, ret); 5224 goto end; 5225 } 5226 5227 btrfs_remove_free_space_cache(bg); 5228 5229 end: 5230 mutex_unlock(&fs_info->remap_mutex); 5231 5232 ret2 = btrfs_end_transaction(trans); 5233 if (!ret) 5234 ret = ret2; 5235 5236 return ret; 5237 } 5238 5239 static int do_nonremap_reloc(struct btrfs_fs_info *fs_info, bool verbose, 5240 struct reloc_control *rc) 5241 { 5242 int ret; 5243 5244 while (1) { 5245 enum reloc_stage finishes_stage; 5246 5247 mutex_lock(&fs_info->cleaner_mutex); 5248 ret = relocate_block_group(rc); 5249 mutex_unlock(&fs_info->cleaner_mutex); 5250 5251 finishes_stage = rc->stage; 5252 /* 5253 * We may have gotten ENOSPC after we already dirtied some 5254 * extents. 
If writeout happens while we're relocating a 5255 * different block group we could end up hitting the 5256 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in 5257 * btrfs_reloc_cow_block. Make sure we write everything out 5258 * properly so we don't trip over this problem, and then break 5259 * out of the loop if we hit an error. 5260 */ 5261 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { 5262 int wb_ret; 5263 5264 wb_ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 5265 0, (u64)-1); 5266 if (wb_ret && ret == 0) 5267 ret = wb_ret; 5268 invalidate_mapping_pages(rc->data_inode->i_mapping, 0, -1); 5269 rc->stage = UPDATE_DATA_PTRS; 5270 } 5271 5272 if (ret < 0) 5273 return ret; 5274 5275 if (rc->extents_found == 0) 5276 break; 5277 5278 if (verbose) 5279 btrfs_info(fs_info, "found %llu extents, stage: %s", 5280 rc->extents_found, stage_to_string(finishes_stage)); 5281 } 5282 5283 WARN_ON(rc->block_group->pinned > 0); 5284 WARN_ON(rc->block_group->reserved > 0); 5285 WARN_ON(rc->block_group->used > 0); 5286 5287 return 0; 5288 } 5289 5290 /* 5291 * function to relocate all extents in a block group. 5292 */ 5293 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start, 5294 bool verbose) 5295 { 5296 struct btrfs_block_group *bg; 5297 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start); 5298 struct reloc_control *rc; 5299 struct inode *inode; 5300 struct btrfs_path *path = NULL; 5301 int ret; 5302 bool bg_is_ro = false; 5303 5304 /* 5305 * This only gets set if we had a half-deleted snapshot on mount. We 5306 * cannot allow relocation to start while we're still trying to clean up 5307 * these pending deletions. 5308 */ 5309 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE); 5310 if (ret) 5311 return ret; 5312 5313 /* We may have been woken up by close_ctree, so bail if we're closing. */ 5314 if (btrfs_fs_closing(fs_info)) 5315 return -EINTR; 5316 5317 bg = btrfs_lookup_block_group(fs_info, group_start); 5318 if (!bg) 5319 return -ENOENT; 5320 5321 /* 5322 * Relocation of a data block group creates ordered extents. Without 5323 * sb_start_write(), we can freeze the filesystem while unfinished 5324 * ordered extents are left. Such ordered extents can cause a deadlock 5325 * e.g. when syncfs() is waiting for their completion but they can't 5326 * finish because they block when joining a transaction, due to the 5327 * fact that the freeze locks are being held in write mode. 
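Hence the assertion below: data block group relocation is expected to run with sb_start_write() already held by the caller.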
5328 */ 5329 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) 5330 ASSERT(sb_write_started(fs_info->sb)); 5331 5332 if (btrfs_pinned_by_swapfile(fs_info, bg)) { 5333 btrfs_put_block_group(bg); 5334 return -ETXTBSY; 5335 } 5336 5337 rc = alloc_reloc_control(fs_info); 5338 if (!rc) { 5339 btrfs_put_block_group(bg); 5340 return -ENOMEM; 5341 } 5342 5343 ret = reloc_chunk_start(fs_info); 5344 if (ret < 0) 5345 goto out_put_bg; 5346 5347 rc->extent_root = extent_root; 5348 rc->block_group = bg; 5349 5350 ret = btrfs_inc_block_group_ro(rc->block_group, true); 5351 if (ret) 5352 goto out; 5353 bg_is_ro = true; 5354 5355 path = btrfs_alloc_path(); 5356 if (!path) { 5357 ret = -ENOMEM; 5358 goto out; 5359 } 5360 5361 inode = lookup_free_space_inode(rc->block_group, path); 5362 btrfs_release_path(path); 5363 5364 if (!IS_ERR(inode)) 5365 ret = delete_block_group_cache(rc->block_group, inode, 0); 5366 else 5367 ret = PTR_ERR(inode); 5368 5369 if (ret && ret != -ENOENT) 5370 goto out; 5371 5372 if (!btrfs_fs_incompat(fs_info, REMAP_TREE)) { 5373 rc->data_inode = create_reloc_inode(rc->block_group); 5374 if (IS_ERR(rc->data_inode)) { 5375 ret = PTR_ERR(rc->data_inode); 5376 rc->data_inode = NULL; 5377 goto out; 5378 } 5379 } 5380 5381 if (verbose) 5382 describe_relocation(rc->block_group); 5383 5384 btrfs_wait_block_group_reservations(rc->block_group); 5385 btrfs_wait_nocow_writers(rc->block_group); 5386 btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group); 5387 5388 ret = btrfs_zone_finish(rc->block_group); 5389 WARN_ON(ret && ret != -EAGAIN); 5390 5391 if (should_relocate_using_remap_tree(bg)) { 5392 if (bg->remap_bytes != 0) { 5393 ret = move_existing_remaps(fs_info, bg, path); 5394 if (ret) 5395 goto out; 5396 } 5397 ret = start_block_group_remapping(fs_info, path, bg); 5398 if (ret) 5399 goto out; 5400 5401 ret = do_remap_reloc(fs_info, path, rc->block_group); 5402 if (ret) 5403 goto out; 5404 5405 btrfs_delete_unused_bgs(fs_info); 5406 } else { 5407 ret = do_nonremap_reloc(fs_info, verbose, rc); 5408 } 5409 5410 out: 5411 if (ret && bg_is_ro) 5412 btrfs_dec_block_group_ro(rc->block_group); 5413 if (!btrfs_fs_incompat(fs_info, REMAP_TREE)) 5414 iput(rc->data_inode); 5415 btrfs_free_path(path); 5416 reloc_chunk_end(fs_info); 5417 out_put_bg: 5418 btrfs_put_block_group(bg); 5419 free_reloc_control(rc); 5420 return ret; 5421 } 5422 5423 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 5424 { 5425 struct btrfs_fs_info *fs_info = root->fs_info; 5426 struct btrfs_trans_handle *trans; 5427 int ret, err; 5428 5429 trans = btrfs_start_transaction(fs_info->tree_root, 0); 5430 if (IS_ERR(trans)) 5431 return PTR_ERR(trans); 5432 5433 memset(&root->root_item.drop_progress, 0, 5434 sizeof(root->root_item.drop_progress)); 5435 btrfs_set_root_drop_level(&root->root_item, 0); 5436 btrfs_set_root_refs(&root->root_item, 0); 5437 ret = btrfs_update_root(trans, fs_info->tree_root, 5438 &root->root_key, &root->root_item); 5439 5440 err = btrfs_end_transaction(trans); 5441 if (err) 5442 return err; 5443 return ret; 5444 } 5445 5446 /* 5447 * recover relocation interrupted by system crash. 5448 * 5449 * this function resumes merging reloc trees with corresponding fs trees. 
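the reloc trees left behind are found via their BTRFS_TREE_RELOC_OBJECTID root items in the tree root.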
5450 * this is important for keeping the sharing of tree blocks 5451 */ 5452 int btrfs_recover_relocation(struct btrfs_fs_info *fs_info) 5453 { 5454 LIST_HEAD(reloc_roots); 5455 struct btrfs_key key; 5456 struct btrfs_root *fs_root; 5457 struct btrfs_root *reloc_root; 5458 struct btrfs_path *path; 5459 struct extent_buffer *leaf; 5460 struct reloc_control *rc = NULL; 5461 struct btrfs_trans_handle *trans; 5462 int ret2; 5463 int ret = 0; 5464 5465 path = btrfs_alloc_path(); 5466 if (!path) 5467 return -ENOMEM; 5468 path->reada = READA_BACK; 5469 5470 key.objectid = BTRFS_TREE_RELOC_OBJECTID; 5471 key.type = BTRFS_ROOT_ITEM_KEY; 5472 key.offset = (u64)-1; 5473 5474 while (1) { 5475 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, 5476 path, 0, 0); 5477 if (ret < 0) 5478 goto out; 5479 if (ret > 0) { 5480 if (path->slots[0] == 0) 5481 break; 5482 path->slots[0]--; 5483 } 5484 ret = 0; 5485 leaf = path->nodes[0]; 5486 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 5487 btrfs_release_path(path); 5488 5489 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || 5490 key.type != BTRFS_ROOT_ITEM_KEY) 5491 break; 5492 5493 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key); 5494 if (IS_ERR(reloc_root)) { 5495 ret = PTR_ERR(reloc_root); 5496 goto out; 5497 } 5498 5499 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state); 5500 list_add(&reloc_root->root_list, &reloc_roots); 5501 5502 if (btrfs_root_refs(&reloc_root->root_item) > 0) { 5503 fs_root = btrfs_get_fs_root(fs_info, 5504 reloc_root->root_key.offset, false); 5505 if (IS_ERR(fs_root)) { 5506 ret = PTR_ERR(fs_root); 5507 if (ret != -ENOENT) 5508 goto out; 5509 ret = mark_garbage_root(reloc_root); 5510 if (ret < 0) 5511 goto out; 5512 ret = 0; 5513 } else { 5514 btrfs_put_root(fs_root); 5515 } 5516 } 5517 5518 if (key.offset == 0) 5519 break; 5520 5521 key.offset--; 5522 } 5523 btrfs_release_path(path); 5524 5525 if (list_empty(&reloc_roots)) 5526 goto out; 5527 5528 rc = alloc_reloc_control(fs_info); 5529 if (!rc) { 5530 ret = -ENOMEM; 5531 goto out; 5532 } 5533 5534 ret = reloc_chunk_start(fs_info); 5535 if (ret < 0) 5536 goto out_end; 5537 5538 rc->extent_root = btrfs_extent_root(fs_info, 0); 5539 5540 set_reloc_control(rc); 5541 5542 trans = btrfs_join_transaction(rc->extent_root); 5543 if (IS_ERR(trans)) { 5544 ret = PTR_ERR(trans); 5545 goto out_unset; 5546 } 5547 5548 rc->merge_reloc_tree = true; 5549 5550 while (!list_empty(&reloc_roots)) { 5551 reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list); 5552 list_del(&reloc_root->root_list); 5553 5554 if (btrfs_root_refs(&reloc_root->root_item) == 0) { 5555 list_add_tail(&reloc_root->root_list, 5556 &rc->reloc_roots); 5557 continue; 5558 } 5559 5560 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, 5561 false); 5562 if (IS_ERR(fs_root)) { 5563 ret = PTR_ERR(fs_root); 5564 list_add_tail(&reloc_root->root_list, &reloc_roots); 5565 btrfs_end_transaction(trans); 5566 goto out_unset; 5567 } 5568 5569 ret = __add_reloc_root(reloc_root); 5570 ASSERT(ret != -EEXIST); 5571 if (ret) { 5572 list_add_tail(&reloc_root->root_list, &reloc_roots); 5573 btrfs_put_root(fs_root); 5574 btrfs_end_transaction(trans); 5575 goto out_unset; 5576 } 5577 fs_root->reloc_root = btrfs_grab_root(reloc_root); 5578 btrfs_put_root(fs_root); 5579 } 5580 5581 ret = btrfs_commit_transaction(trans); 5582 if (ret) 5583 goto out_unset; 5584 5585 merge_reloc_roots(rc); 5586 5587 unset_reloc_control(rc); 5588 5589 trans = btrfs_join_transaction(rc->extent_root); 5590 if (IS_ERR(trans)) 
{ 5591 ret = PTR_ERR(trans); 5592 goto out_clean; 5593 } 5594 ret = btrfs_commit_transaction(trans); 5595 out_clean: 5596 ret2 = clean_dirty_subvols(rc); 5597 if (ret2 < 0 && !ret) 5598 ret = ret2; 5599 out_unset: 5600 unset_reloc_control(rc); 5601 reloc_chunk_end(fs_info); 5602 out_end: 5603 free_reloc_control(rc); 5604 out: 5605 free_reloc_roots(&reloc_roots); 5606 5607 btrfs_free_path(path); 5608 5609 if (ret == 0 && !btrfs_fs_incompat(fs_info, REMAP_TREE)) { 5610 /* cleanup orphan inode in data relocation tree */ 5611 fs_root = btrfs_grab_root(fs_info->data_reloc_root); 5612 ASSERT(fs_root); 5613 ret = btrfs_orphan_cleanup(fs_root); 5614 btrfs_put_root(fs_root); 5615 } 5616 return ret; 5617 } 5618 5619 /* 5620 * helper to add ordered checksum for data relocation. 5621 * 5622 * cloning checksum properly handles the nodatasum extents. 5623 * it also saves CPU time to re-calculate the checksum. 5624 */ 5625 int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered) 5626 { 5627 struct btrfs_inode *inode = ordered->inode; 5628 struct btrfs_fs_info *fs_info = inode->root->fs_info; 5629 u64 disk_bytenr = ordered->file_offset + inode->reloc_block_group_start; 5630 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr); 5631 LIST_HEAD(list); 5632 int ret; 5633 5634 ret = btrfs_lookup_csums_list(csum_root, disk_bytenr, 5635 disk_bytenr + ordered->num_bytes - 1, 5636 &list, false); 5637 if (ret < 0) { 5638 btrfs_mark_ordered_extent_error(ordered); 5639 return ret; 5640 } 5641 5642 while (!list_empty(&list)) { 5643 struct btrfs_ordered_sum *sums = 5644 list_first_entry(&list, struct btrfs_ordered_sum, list); 5645 5646 list_del_init(&sums->list); 5647 5648 /* 5649 * We need to offset the new_bytenr based on where the csum is. 5650 * We need to do this because we will read in entire prealloc 5651 * extents but we may have written to say the middle of the 5652 * prealloc extent, so we need to make sure the csum goes with 5653 * the right disk offset. 5654 * 5655 * We can do this because the data reloc inode refers strictly 5656 * to the on disk bytes, so we don't have to worry about 5657 * disk_len vs real len like with real inodes since it's all 5658 * disk length. 5659 */ 5660 sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr; 5661 btrfs_add_ordered_sum(ordered, sums); 5662 } 5663 5664 return 0; 5665 } 5666 5667 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 5668 struct btrfs_root *root, 5669 const struct extent_buffer *buf, 5670 struct extent_buffer *cow) 5671 { 5672 struct btrfs_fs_info *fs_info = root->fs_info; 5673 struct reloc_control *rc; 5674 struct btrfs_backref_node *node; 5675 int first_cow = 0; 5676 int level; 5677 int ret = 0; 5678 5679 rc = fs_info->reloc_ctl; 5680 if (!rc) 5681 return 0; 5682 5683 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root)); 5684 5685 level = btrfs_header_level(buf); 5686 if (btrfs_header_generation(buf) <= 5687 btrfs_root_last_snapshot(&root->root_item)) 5688 first_cow = 1; 5689 5690 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID && rc->create_reloc_tree) { 5691 WARN_ON(!first_cow && level == 0); 5692 5693 node = rc->backref_cache.path[level]; 5694 5695 /* 5696 * If node->bytenr != buf->start and node->new_bytenr != 5697 * buf->start then we've got the wrong backref node for what we 5698 * expected to see here and the cache is incorrect. 
5699 */ 5700 if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) { 5701 btrfs_err(fs_info, 5702 "bytenr %llu was found but our backref cache was expecting %llu or %llu", 5703 buf->start, node->bytenr, node->new_bytenr); 5704 return -EUCLEAN; 5705 } 5706 5707 btrfs_backref_drop_node_buffer(node); 5708 refcount_inc(&cow->refs); 5709 node->eb = cow; 5710 node->new_bytenr = cow->start; 5711 5712 if (!node->pending) { 5713 list_move_tail(&node->list, 5714 &rc->backref_cache.pending[level]); 5715 node->pending = 1; 5716 } 5717 5718 if (first_cow) 5719 mark_block_processed(rc, node); 5720 5721 if (first_cow && level > 0) 5722 rc->nodes_relocated += buf->len; 5723 } 5724 5725 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) 5726 ret = replace_file_extents(trans, rc, root, cow); 5727 return ret; 5728 } 5729 5730 /* 5731 * called before creating snapshot. it calculates metadata reservation 5732 * required for relocating tree blocks in the snapshot 5733 */ 5734 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, 5735 u64 *bytes_to_reserve) 5736 { 5737 struct btrfs_root *root = pending->root; 5738 struct reloc_control *rc = root->fs_info->reloc_ctl; 5739 5740 if (!rc || !have_reloc_root(root)) 5741 return; 5742 5743 if (!rc->merge_reloc_tree) 5744 return; 5745 5746 root = root->reloc_root; 5747 BUG_ON(btrfs_root_refs(&root->root_item) == 0); 5748 /* 5749 * relocation is in the stage of merging trees. the space 5750 * used by merging a reloc tree is twice the size of 5751 * relocated tree nodes in the worst case. half for cowing 5752 * the reloc tree, half for cowing the fs tree. the space 5753 * used by cowing the reloc tree will be freed after the 5754 * tree is dropped. if we create snapshot, cowing the fs 5755 * tree may use more space than it frees. so we need 5756 * reserve extra space. 5757 */ 5758 *bytes_to_reserve += rc->nodes_relocated; 5759 } 5760 5761 /* 5762 * called after snapshot is created. migrate block reservation 5763 * and create reloc root for the newly created snapshot 5764 * 5765 * This is similar to btrfs_init_reloc_root(), we come out of here with two 5766 * references held on the reloc_root, one for root->reloc_root and one for 5767 * rc->reloc_roots. 5768 */ 5769 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 5770 struct btrfs_pending_snapshot *pending) 5771 { 5772 struct btrfs_root *root = pending->root; 5773 struct btrfs_root *reloc_root; 5774 struct btrfs_root *new_root; 5775 struct reloc_control *rc = root->fs_info->reloc_ctl; 5776 int ret; 5777 5778 if (!rc || !have_reloc_root(root)) 5779 return 0; 5780 5781 rc = root->fs_info->reloc_ctl; 5782 rc->merging_rsv_size += rc->nodes_relocated; 5783 5784 if (rc->merge_reloc_tree) { 5785 ret = btrfs_block_rsv_migrate(&pending->block_rsv, 5786 rc->block_rsv, 5787 rc->nodes_relocated, true); 5788 if (ret) 5789 return ret; 5790 } 5791 5792 new_root = pending->snap; 5793 reloc_root = create_reloc_root(trans, root->reloc_root, btrfs_root_id(new_root)); 5794 if (IS_ERR(reloc_root)) 5795 return PTR_ERR(reloc_root); 5796 5797 ret = __add_reloc_root(reloc_root); 5798 ASSERT(ret != -EEXIST); 5799 if (ret) { 5800 /* Pairs with create_reloc_root */ 5801 btrfs_put_root(reloc_root); 5802 return ret; 5803 } 5804 new_root->reloc_root = btrfs_grab_root(reloc_root); 5805 return 0; 5806 } 5807 5808 /* 5809 * Get the current bytenr for the block group which is being relocated. 5810 * 5811 * Return U64_MAX if no running relocation. 
5812 */ 5813 u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info) 5814 { 5815 u64 logical = U64_MAX; 5816 5817 lockdep_assert_held(&fs_info->reloc_mutex); 5818 5819 if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group) 5820 logical = fs_info->reloc_ctl->block_group->start; 5821 return logical; 5822 } 5823 5824 static int insert_remap_item(struct btrfs_trans_handle *trans, struct btrfs_path *path, 5825 u64 old_addr, u64 length, u64 new_addr) 5826 { 5827 int ret; 5828 struct btrfs_fs_info *fs_info = trans->fs_info; 5829 struct btrfs_key key; 5830 struct btrfs_remap_item remap = { 0 }; 5831 5832 if (old_addr == new_addr) { 5833 /* Add new identity remap item. */ 5834 key.objectid = old_addr; 5835 key.type = BTRFS_IDENTITY_REMAP_KEY; 5836 key.offset = length; 5837 5838 ret = btrfs_insert_empty_item(trans, fs_info->remap_root, path, 5839 &key, 0); 5840 if (ret) 5841 return ret; 5842 } else { 5843 /* Add new remap item. */ 5844 key.objectid = old_addr; 5845 key.type = BTRFS_REMAP_KEY; 5846 key.offset = length; 5847 5848 ret = btrfs_insert_empty_item(trans, fs_info->remap_root, 5849 path, &key, sizeof(struct btrfs_remap_item)); 5850 if (ret) 5851 return ret; 5852 5853 btrfs_set_stack_remap_address(&remap, new_addr); 5854 5855 write_extent_buffer(path->nodes[0], &remap, 5856 btrfs_item_ptr_offset(path->nodes[0], path->slots[0]), 5857 sizeof(struct btrfs_remap_item)); 5858 5859 btrfs_release_path(path); 5860 5861 /* Add new backref item. */ 5862 key.objectid = new_addr; 5863 key.type = BTRFS_REMAP_BACKREF_KEY; 5864 key.offset = length; 5865 5866 ret = btrfs_insert_empty_item(trans, fs_info->remap_root, 5867 path, &key, 5868 sizeof(struct btrfs_remap_item)); 5869 if (ret) 5870 return ret; 5871 5872 btrfs_set_stack_remap_address(&remap, old_addr); 5873 5874 write_extent_buffer(path->nodes[0], &remap, 5875 btrfs_item_ptr_offset(path->nodes[0], path->slots[0]), 5876 sizeof(struct btrfs_remap_item)); 5877 } 5878 5879 btrfs_release_path(path); 5880 5881 return 0; 5882 } 5883 5884 /* 5885 * Punch a hole in the remap item or identity remap item pointed to by path, 5886 * for the range [hole_start, hole_start + hole_length). 5887 */ 5888 static int remove_range_from_remap_tree(struct btrfs_trans_handle *trans, 5889 struct btrfs_path *path, 5890 struct btrfs_block_group *bg, 5891 u64 hole_start, u64 hole_length) 5892 { 5893 int ret; 5894 struct btrfs_fs_info *fs_info = trans->fs_info; 5895 struct extent_buffer *leaf = path->nodes[0]; 5896 struct btrfs_key key; 5897 u64 hole_end, new_addr, remap_start, remap_length, remap_end; 5898 u64 overlap_length; 5899 bool is_identity_remap; 5900 int identity_count_delta = 0; 5901 5902 hole_end = hole_start + hole_length; 5903 5904 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 5905 5906 is_identity_remap = (key.type == BTRFS_IDENTITY_REMAP_KEY); 5907 5908 remap_start = key.objectid; 5909 remap_length = key.offset; 5910 remap_end = remap_start + remap_length; 5911 5912 if (is_identity_remap) { 5913 new_addr = remap_start; 5914 } else { 5915 struct btrfs_remap_item *remap_ptr; 5916 5917 remap_ptr = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item); 5918 new_addr = btrfs_remap_address(leaf, remap_ptr); 5919 } 5920 5921 /* Delete old item. */ 5922 ret = btrfs_del_item(trans, fs_info->remap_root, path); 5923 btrfs_release_path(path); 5924 if (ret) 5925 return ret; 5926 5927 if (is_identity_remap) { 5928 identity_count_delta = -1; 5929 } else { 5930 /* Remove backref. 
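The old backref is deleted in full; insert_remap_item() below re-creates items (and backrefs) for any head or tail pieces that survive the hole.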
*/ 5931 key.objectid = new_addr; 5932 key.type = BTRFS_REMAP_BACKREF_KEY; 5933 key.offset = remap_length; 5934 5935 ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, -1, 1); 5936 if (ret) { 5937 if (ret == 1) { 5938 btrfs_release_path(path); 5939 ret = -ENOENT; 5940 } 5941 return ret; 5942 } 5943 5944 ret = btrfs_del_item(trans, fs_info->remap_root, path); 5945 5946 btrfs_release_path(path); 5947 5948 if (ret) 5949 return ret; 5950 } 5951 5952 /* If hole_start > remap_start, re-add the start of the remap item. */ 5953 if (hole_start > remap_start) { 5954 ret = insert_remap_item(trans, path, remap_start, 5955 hole_start - remap_start, new_addr); 5956 if (ret) 5957 return ret; 5958 5959 if (is_identity_remap) 5960 identity_count_delta++; 5961 } 5962 5963 /* If hole_end < remap_end, re-add the end of the remap item. */ 5964 if (hole_end < remap_end) { 5965 ret = insert_remap_item(trans, path, hole_end, 5966 remap_end - hole_end, 5967 hole_end - remap_start + new_addr); 5968 if (ret) 5969 return ret; 5970 5971 if (is_identity_remap) 5972 identity_count_delta++; 5973 } 5974 5975 if (identity_count_delta != 0) 5976 adjust_identity_remap_count(trans, bg, identity_count_delta); 5977 5978 overlap_length = min_t(u64, hole_end, remap_end) - 5979 max_t(u64, hole_start, remap_start); 5980 5981 if (!is_identity_remap) { 5982 struct btrfs_block_group *dest_bg; 5983 5984 dest_bg = btrfs_lookup_block_group(fs_info, new_addr); 5985 adjust_block_group_remap_bytes(trans, dest_bg, -overlap_length); 5986 btrfs_put_block_group(dest_bg); 5987 ret = btrfs_add_to_free_space_tree(trans, 5988 hole_start - remap_start + new_addr, 5989 overlap_length); 5990 if (ret) 5991 return ret; 5992 } 5993 5994 ret = overlap_length; 5995 5996 return ret; 5997 } 5998 5999 /* 6000 * Return 1 if remove_range_from_remap_tree() has been called successfully, 6001 * 0 if block group wasn't remapped, and a negative number on error. 
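The range may span several remap items; the loop peels off one item's overlap at a time and repeats until num_bytes is exhausted.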
6002 */ 6003 int btrfs_remove_extent_from_remap_tree(struct btrfs_trans_handle *trans, 6004 struct btrfs_path *path, 6005 u64 bytenr, u64 num_bytes) 6006 { 6007 struct btrfs_fs_info *fs_info = trans->fs_info; 6008 struct btrfs_key key, found_key; 6009 struct extent_buffer *leaf; 6010 struct btrfs_block_group *bg; 6011 int ret, length; 6012 6013 if (!(btrfs_super_incompat_flags(fs_info->super_copy) & 6014 BTRFS_FEATURE_INCOMPAT_REMAP_TREE)) 6015 return 0; 6016 6017 bg = btrfs_lookup_block_group(fs_info, bytenr); 6018 if (!bg) 6019 return 0; 6020 6021 mutex_lock(&fs_info->remap_mutex); 6022 6023 if (!(bg->flags & BTRFS_BLOCK_GROUP_REMAPPED)) { 6024 mutex_unlock(&fs_info->remap_mutex); 6025 btrfs_put_block_group(bg); 6026 return 0; 6027 } 6028 6029 do { 6030 key.objectid = bytenr; 6031 key.type = (u8)-1; 6032 key.offset = (u64)-1; 6033 6034 ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, -1, 1); 6035 if (ret < 0) 6036 goto end; 6037 6038 leaf = path->nodes[0]; 6039 if (path->slots[0] == 0) { 6040 ret = -ENOENT; 6041 goto end; 6042 } 6043 6044 path->slots[0]--; 6045 6046 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6047 6048 if (found_key.type != BTRFS_IDENTITY_REMAP_KEY && 6049 found_key.type != BTRFS_REMAP_KEY) { 6050 ret = -ENOENT; 6051 goto end; 6052 } 6053 6054 if (bytenr < found_key.objectid || 6055 bytenr >= found_key.objectid + found_key.offset) { 6056 ret = -ENOENT; 6057 goto end; 6058 } 6059 6060 length = remove_range_from_remap_tree(trans, path, bg, bytenr, num_bytes); 6061 if (length < 0) { 6062 ret = length; 6063 goto end; 6064 } 6065 6066 bytenr += length; 6067 num_bytes -= length; 6068 } while (num_bytes > 0); 6069 6070 ret = 1; 6071 6072 end: 6073 mutex_unlock(&fs_info->remap_mutex); 6074 6075 btrfs_put_block_group(bg); 6076 btrfs_release_path(path); 6077 6078 return ret; 6079 } 6080