1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2011 STRATO. All rights reserved. 4 */ 5 6 #include <linux/mm.h> 7 #include <linux/rbtree.h> 8 #include <trace/events/btrfs.h> 9 #include "ctree.h" 10 #include "disk-io.h" 11 #include "backref.h" 12 #include "ulist.h" 13 #include "transaction.h" 14 #include "delayed-ref.h" 15 #include "locking.h" 16 #include "misc.h" 17 #include "tree-mod-log.h" 18 19 /* Just an arbitrary number so we can be sure this happened */ 20 #define BACKREF_FOUND_SHARED 6 21 22 struct extent_inode_elem { 23 u64 inum; 24 u64 offset; 25 struct extent_inode_elem *next; 26 }; 27 28 static int check_extent_in_eb(const struct btrfs_key *key, 29 const struct extent_buffer *eb, 30 const struct btrfs_file_extent_item *fi, 31 u64 extent_item_pos, 32 struct extent_inode_elem **eie, 33 bool ignore_offset) 34 { 35 u64 offset = 0; 36 struct extent_inode_elem *e; 37 38 if (!ignore_offset && 39 !btrfs_file_extent_compression(eb, fi) && 40 !btrfs_file_extent_encryption(eb, fi) && 41 !btrfs_file_extent_other_encoding(eb, fi)) { 42 u64 data_offset; 43 u64 data_len; 44 45 data_offset = btrfs_file_extent_offset(eb, fi); 46 data_len = btrfs_file_extent_num_bytes(eb, fi); 47 48 if (extent_item_pos < data_offset || 49 extent_item_pos >= data_offset + data_len) 50 return 1; 51 offset = extent_item_pos - data_offset; 52 } 53 54 e = kmalloc(sizeof(*e), GFP_NOFS); 55 if (!e) 56 return -ENOMEM; 57 58 e->next = *eie; 59 e->inum = key->objectid; 60 e->offset = key->offset + offset; 61 *eie = e; 62 63 return 0; 64 } 65 66 static void free_inode_elem_list(struct extent_inode_elem *eie) 67 { 68 struct extent_inode_elem *eie_next; 69 70 for (; eie; eie = eie_next) { 71 eie_next = eie->next; 72 kfree(eie); 73 } 74 } 75 76 static int find_extent_in_eb(const struct extent_buffer *eb, 77 u64 wanted_disk_byte, u64 extent_item_pos, 78 struct extent_inode_elem **eie, 79 bool ignore_offset) 80 { 81 u64 disk_byte; 82 struct btrfs_key key; 83 struct btrfs_file_extent_item *fi; 84 int slot; 85 int nritems; 86 int extent_type; 87 int ret; 88 89 /* 90 * from the shared data ref, we only have the leaf but we need 91 * the key. thus, we must look into all items and see that we 92 * find one (some) with a reference to our extent item. 93 */ 94 nritems = btrfs_header_nritems(eb); 95 for (slot = 0; slot < nritems; ++slot) { 96 btrfs_item_key_to_cpu(eb, &key, slot); 97 if (key.type != BTRFS_EXTENT_DATA_KEY) 98 continue; 99 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 100 extent_type = btrfs_file_extent_type(eb, fi); 101 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 102 continue; 103 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */ 104 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); 105 if (disk_byte != wanted_disk_byte) 106 continue; 107 108 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset); 109 if (ret < 0) 110 return ret; 111 } 112 113 return 0; 114 } 115 116 struct preftree { 117 struct rb_root_cached root; 118 unsigned int count; 119 }; 120 121 #define PREFTREE_INIT { .root = RB_ROOT_CACHED, .count = 0 } 122 123 struct preftrees { 124 struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */ 125 struct preftree indirect; /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */ 126 struct preftree indirect_missing_keys; 127 }; 128 129 /* 130 * Checks for a shared extent during backref search. 131 * 132 * The share_count tracks prelim_refs (direct and indirect) having a 133 * ref->count >0: 134 * - incremented when a ref->count transitions to >0 135 * - decremented when a ref->count transitions to <1 136 */ 137 struct share_check { 138 u64 root_objectid; 139 u64 inum; 140 int share_count; 141 }; 142 143 static inline int extent_is_shared(struct share_check *sc) 144 { 145 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; 146 } 147 148 static struct kmem_cache *btrfs_prelim_ref_cache; 149 150 int __init btrfs_prelim_ref_init(void) 151 { 152 btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref", 153 sizeof(struct prelim_ref), 154 0, 155 SLAB_MEM_SPREAD, 156 NULL); 157 if (!btrfs_prelim_ref_cache) 158 return -ENOMEM; 159 return 0; 160 } 161 162 void __cold btrfs_prelim_ref_exit(void) 163 { 164 kmem_cache_destroy(btrfs_prelim_ref_cache); 165 } 166 167 static void free_pref(struct prelim_ref *ref) 168 { 169 kmem_cache_free(btrfs_prelim_ref_cache, ref); 170 } 171 172 /* 173 * Return 0 when both refs are for the same block (and can be merged). 174 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1 175 * indicates a 'higher' block. 176 */ 177 static int prelim_ref_compare(struct prelim_ref *ref1, 178 struct prelim_ref *ref2) 179 { 180 if (ref1->level < ref2->level) 181 return -1; 182 if (ref1->level > ref2->level) 183 return 1; 184 if (ref1->root_id < ref2->root_id) 185 return -1; 186 if (ref1->root_id > ref2->root_id) 187 return 1; 188 if (ref1->key_for_search.type < ref2->key_for_search.type) 189 return -1; 190 if (ref1->key_for_search.type > ref2->key_for_search.type) 191 return 1; 192 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) 193 return -1; 194 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) 195 return 1; 196 if (ref1->key_for_search.offset < ref2->key_for_search.offset) 197 return -1; 198 if (ref1->key_for_search.offset > ref2->key_for_search.offset) 199 return 1; 200 if (ref1->parent < ref2->parent) 201 return -1; 202 if (ref1->parent > ref2->parent) 203 return 1; 204 205 return 0; 206 } 207 208 static void update_share_count(struct share_check *sc, int oldcount, 209 int newcount) 210 { 211 if ((!sc) || (oldcount == 0 && newcount < 1)) 212 return; 213 214 if (oldcount > 0 && newcount < 1) 215 sc->share_count--; 216 else if (oldcount < 1 && newcount > 0) 217 sc->share_count++; 218 } 219 220 /* 221 * Add @newref to the @root rbtree, merging identical refs. 222 * 223 * Callers should assume that newref has been freed after calling. 224 */ 225 static void prelim_ref_insert(const struct btrfs_fs_info *fs_info, 226 struct preftree *preftree, 227 struct prelim_ref *newref, 228 struct share_check *sc) 229 { 230 struct rb_root_cached *root; 231 struct rb_node **p; 232 struct rb_node *parent = NULL; 233 struct prelim_ref *ref; 234 int result; 235 bool leftmost = true; 236 237 root = &preftree->root; 238 p = &root->rb_root.rb_node; 239 240 while (*p) { 241 parent = *p; 242 ref = rb_entry(parent, struct prelim_ref, rbnode); 243 result = prelim_ref_compare(ref, newref); 244 if (result < 0) { 245 p = &(*p)->rb_left; 246 } else if (result > 0) { 247 p = &(*p)->rb_right; 248 leftmost = false; 249 } else { 250 /* Identical refs, merge them and free @newref */ 251 struct extent_inode_elem *eie = ref->inode_list; 252 253 while (eie && eie->next) 254 eie = eie->next; 255 256 if (!eie) 257 ref->inode_list = newref->inode_list; 258 else 259 eie->next = newref->inode_list; 260 trace_btrfs_prelim_ref_merge(fs_info, ref, newref, 261 preftree->count); 262 /* 263 * A delayed ref can have newref->count < 0. 264 * The ref->count is updated to follow any 265 * BTRFS_[ADD|DROP]_DELAYED_REF actions. 266 */ 267 update_share_count(sc, ref->count, 268 ref->count + newref->count); 269 ref->count += newref->count; 270 free_pref(newref); 271 return; 272 } 273 } 274 275 update_share_count(sc, 0, newref->count); 276 preftree->count++; 277 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); 278 rb_link_node(&newref->rbnode, parent, p); 279 rb_insert_color_cached(&newref->rbnode, root, leftmost); 280 } 281 282 /* 283 * Release the entire tree. We don't care about internal consistency so 284 * just free everything and then reset the tree root. 285 */ 286 static void prelim_release(struct preftree *preftree) 287 { 288 struct prelim_ref *ref, *next_ref; 289 290 rbtree_postorder_for_each_entry_safe(ref, next_ref, 291 &preftree->root.rb_root, rbnode) 292 free_pref(ref); 293 294 preftree->root = RB_ROOT_CACHED; 295 preftree->count = 0; 296 } 297 298 /* 299 * the rules for all callers of this function are: 300 * - obtaining the parent is the goal 301 * - if you add a key, you must know that it is a correct key 302 * - if you cannot add the parent or a correct key, then we will look into the 303 * block later to set a correct key 304 * 305 * delayed refs 306 * ============ 307 * backref type | shared | indirect | shared | indirect 308 * information | tree | tree | data | data 309 * --------------------+--------+----------+--------+---------- 310 * parent logical | y | - | - | - 311 * key to resolve | - | y | y | y 312 * tree block logical | - | - | - | - 313 * root for resolving | y | y | y | y 314 * 315 * - column 1: we've the parent -> done 316 * - column 2, 3, 4: we use the key to find the parent 317 * 318 * on disk refs (inline or keyed) 319 * ============================== 320 * backref type | shared | indirect | shared | indirect 321 * information | tree | tree | data | data 322 * --------------------+--------+----------+--------+---------- 323 * parent logical | y | - | y | - 324 * key to resolve | - | - | - | y 325 * tree block logical | y | y | y | y 326 * root for resolving | - | y | y | y 327 * 328 * - column 1, 3: we've the parent -> done 329 * - column 2: we take the first key from the block to find the parent 330 * (see add_missing_keys) 331 * - column 4: we use the key to find the parent 332 * 333 * additional information that's available but not required to find the parent 334 * block might help in merging entries to gain some speed. 335 */ 336 static int add_prelim_ref(const struct btrfs_fs_info *fs_info, 337 struct preftree *preftree, u64 root_id, 338 const struct btrfs_key *key, int level, u64 parent, 339 u64 wanted_disk_byte, int count, 340 struct share_check *sc, gfp_t gfp_mask) 341 { 342 struct prelim_ref *ref; 343 344 if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID) 345 return 0; 346 347 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask); 348 if (!ref) 349 return -ENOMEM; 350 351 ref->root_id = root_id; 352 if (key) 353 ref->key_for_search = *key; 354 else 355 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); 356 357 ref->inode_list = NULL; 358 ref->level = level; 359 ref->count = count; 360 ref->parent = parent; 361 ref->wanted_disk_byte = wanted_disk_byte; 362 prelim_ref_insert(fs_info, preftree, ref, sc); 363 return extent_is_shared(sc); 364 } 365 366 /* direct refs use root == 0, key == NULL */ 367 static int add_direct_ref(const struct btrfs_fs_info *fs_info, 368 struct preftrees *preftrees, int level, u64 parent, 369 u64 wanted_disk_byte, int count, 370 struct share_check *sc, gfp_t gfp_mask) 371 { 372 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, 373 parent, wanted_disk_byte, count, sc, gfp_mask); 374 } 375 376 /* indirect refs use parent == 0 */ 377 static int add_indirect_ref(const struct btrfs_fs_info *fs_info, 378 struct preftrees *preftrees, u64 root_id, 379 const struct btrfs_key *key, int level, 380 u64 wanted_disk_byte, int count, 381 struct share_check *sc, gfp_t gfp_mask) 382 { 383 struct preftree *tree = &preftrees->indirect; 384 385 if (!key) 386 tree = &preftrees->indirect_missing_keys; 387 return add_prelim_ref(fs_info, tree, root_id, key, level, 0, 388 wanted_disk_byte, count, sc, gfp_mask); 389 } 390 391 static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr) 392 { 393 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; 394 struct rb_node *parent = NULL; 395 struct prelim_ref *ref = NULL; 396 struct prelim_ref target = {}; 397 int result; 398 399 target.parent = bytenr; 400 401 while (*p) { 402 parent = *p; 403 ref = rb_entry(parent, struct prelim_ref, rbnode); 404 result = prelim_ref_compare(ref, &target); 405 406 if (result < 0) 407 p = &(*p)->rb_left; 408 else if (result > 0) 409 p = &(*p)->rb_right; 410 else 411 return 1; 412 } 413 return 0; 414 } 415 416 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, 417 struct ulist *parents, 418 struct preftrees *preftrees, struct prelim_ref *ref, 419 int level, u64 time_seq, const u64 *extent_item_pos, 420 bool ignore_offset) 421 { 422 int ret = 0; 423 int slot; 424 struct extent_buffer *eb; 425 struct btrfs_key key; 426 struct btrfs_key *key_for_search = &ref->key_for_search; 427 struct btrfs_file_extent_item *fi; 428 struct extent_inode_elem *eie = NULL, *old = NULL; 429 u64 disk_byte; 430 u64 wanted_disk_byte = ref->wanted_disk_byte; 431 u64 count = 0; 432 u64 data_offset; 433 434 if (level != 0) { 435 eb = path->nodes[level]; 436 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); 437 if (ret < 0) 438 return ret; 439 return 0; 440 } 441 442 /* 443 * 1. We normally enter this function with the path already pointing to 444 * the first item to check. But sometimes, we may enter it with 445 * slot == nritems. 446 * 2. We are searching for normal backref but bytenr of this leaf 447 * matches shared data backref 448 * 3. The leaf owner is not equal to the root we are searching 449 * 450 * For these cases, go to the next leaf before we continue. 451 */ 452 eb = path->nodes[0]; 453 if (path->slots[0] >= btrfs_header_nritems(eb) || 454 is_shared_data_backref(preftrees, eb->start) || 455 ref->root_id != btrfs_header_owner(eb)) { 456 if (time_seq == BTRFS_SEQ_LAST) 457 ret = btrfs_next_leaf(root, path); 458 else 459 ret = btrfs_next_old_leaf(root, path, time_seq); 460 } 461 462 while (!ret && count < ref->count) { 463 eb = path->nodes[0]; 464 slot = path->slots[0]; 465 466 btrfs_item_key_to_cpu(eb, &key, slot); 467 468 if (key.objectid != key_for_search->objectid || 469 key.type != BTRFS_EXTENT_DATA_KEY) 470 break; 471 472 /* 473 * We are searching for normal backref but bytenr of this leaf 474 * matches shared data backref, OR 475 * the leaf owner is not equal to the root we are searching for 476 */ 477 if (slot == 0 && 478 (is_shared_data_backref(preftrees, eb->start) || 479 ref->root_id != btrfs_header_owner(eb))) { 480 if (time_seq == BTRFS_SEQ_LAST) 481 ret = btrfs_next_leaf(root, path); 482 else 483 ret = btrfs_next_old_leaf(root, path, time_seq); 484 continue; 485 } 486 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 487 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); 488 data_offset = btrfs_file_extent_offset(eb, fi); 489 490 if (disk_byte == wanted_disk_byte) { 491 eie = NULL; 492 old = NULL; 493 if (ref->key_for_search.offset == key.offset - data_offset) 494 count++; 495 else 496 goto next; 497 if (extent_item_pos) { 498 ret = check_extent_in_eb(&key, eb, fi, 499 *extent_item_pos, 500 &eie, ignore_offset); 501 if (ret < 0) 502 break; 503 } 504 if (ret > 0) 505 goto next; 506 ret = ulist_add_merge_ptr(parents, eb->start, 507 eie, (void **)&old, GFP_NOFS); 508 if (ret < 0) 509 break; 510 if (!ret && extent_item_pos) { 511 while (old->next) 512 old = old->next; 513 old->next = eie; 514 } 515 eie = NULL; 516 } 517 next: 518 if (time_seq == BTRFS_SEQ_LAST) 519 ret = btrfs_next_item(root, path); 520 else 521 ret = btrfs_next_old_item(root, path, time_seq); 522 } 523 524 if (ret > 0) 525 ret = 0; 526 else if (ret < 0) 527 free_inode_elem_list(eie); 528 return ret; 529 } 530 531 /* 532 * resolve an indirect backref in the form (root_id, key, level) 533 * to a logical address 534 */ 535 static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, 536 struct btrfs_path *path, u64 time_seq, 537 struct preftrees *preftrees, 538 struct prelim_ref *ref, struct ulist *parents, 539 const u64 *extent_item_pos, bool ignore_offset) 540 { 541 struct btrfs_root *root; 542 struct extent_buffer *eb; 543 int ret = 0; 544 int root_level; 545 int level = ref->level; 546 struct btrfs_key search_key = ref->key_for_search; 547 548 /* 549 * If we're search_commit_root we could possibly be holding locks on 550 * other tree nodes. This happens when qgroups does backref walks when 551 * adding new delayed refs. To deal with this we need to look in cache 552 * for the root, and if we don't find it then we need to search the 553 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage 554 * here. 555 */ 556 if (path->search_commit_root) 557 root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id); 558 else 559 root = btrfs_get_fs_root(fs_info, ref->root_id, false); 560 if (IS_ERR(root)) { 561 ret = PTR_ERR(root); 562 goto out_free; 563 } 564 565 if (!path->search_commit_root && 566 test_bit(BTRFS_ROOT_DELETING, &root->state)) { 567 ret = -ENOENT; 568 goto out; 569 } 570 571 if (btrfs_is_testing(fs_info)) { 572 ret = -ENOENT; 573 goto out; 574 } 575 576 if (path->search_commit_root) 577 root_level = btrfs_header_level(root->commit_root); 578 else if (time_seq == BTRFS_SEQ_LAST) 579 root_level = btrfs_header_level(root->node); 580 else 581 root_level = btrfs_old_root_level(root, time_seq); 582 583 if (root_level + 1 == level) 584 goto out; 585 586 /* 587 * We can often find data backrefs with an offset that is too large 588 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when 589 * subtracting a file's offset with the data offset of its 590 * corresponding extent data item. This can happen for example in the 591 * clone ioctl. 592 * 593 * So if we detect such case we set the search key's offset to zero to 594 * make sure we will find the matching file extent item at 595 * add_all_parents(), otherwise we will miss it because the offset 596 * taken form the backref is much larger then the offset of the file 597 * extent item. This can make us scan a very large number of file 598 * extent items, but at least it will not make us miss any. 599 * 600 * This is an ugly workaround for a behaviour that should have never 601 * existed, but it does and a fix for the clone ioctl would touch a lot 602 * of places, cause backwards incompatibility and would not fix the 603 * problem for extents cloned with older kernels. 604 */ 605 if (search_key.type == BTRFS_EXTENT_DATA_KEY && 606 search_key.offset >= LLONG_MAX) 607 search_key.offset = 0; 608 path->lowest_level = level; 609 if (time_seq == BTRFS_SEQ_LAST) 610 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 611 else 612 ret = btrfs_search_old_slot(root, &search_key, path, time_seq); 613 614 btrfs_debug(fs_info, 615 "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)", 616 ref->root_id, level, ref->count, ret, 617 ref->key_for_search.objectid, ref->key_for_search.type, 618 ref->key_for_search.offset); 619 if (ret < 0) 620 goto out; 621 622 eb = path->nodes[level]; 623 while (!eb) { 624 if (WARN_ON(!level)) { 625 ret = 1; 626 goto out; 627 } 628 level--; 629 eb = path->nodes[level]; 630 } 631 632 ret = add_all_parents(root, path, parents, preftrees, ref, level, 633 time_seq, extent_item_pos, ignore_offset); 634 out: 635 btrfs_put_root(root); 636 out_free: 637 path->lowest_level = 0; 638 btrfs_release_path(path); 639 return ret; 640 } 641 642 static struct extent_inode_elem * 643 unode_aux_to_inode_list(struct ulist_node *node) 644 { 645 if (!node) 646 return NULL; 647 return (struct extent_inode_elem *)(uintptr_t)node->aux; 648 } 649 650 /* 651 * We maintain three separate rbtrees: one for direct refs, one for 652 * indirect refs which have a key, and one for indirect refs which do not 653 * have a key. Each tree does merge on insertion. 654 * 655 * Once all of the references are located, we iterate over the tree of 656 * indirect refs with missing keys. An appropriate key is located and 657 * the ref is moved onto the tree for indirect refs. After all missing 658 * keys are thus located, we iterate over the indirect ref tree, resolve 659 * each reference, and then insert the resolved reference onto the 660 * direct tree (merging there too). 661 * 662 * New backrefs (i.e., for parent nodes) are added to the appropriate 663 * rbtree as they are encountered. The new backrefs are subsequently 664 * resolved as above. 665 */ 666 static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, 667 struct btrfs_path *path, u64 time_seq, 668 struct preftrees *preftrees, 669 const u64 *extent_item_pos, 670 struct share_check *sc, bool ignore_offset) 671 { 672 int err; 673 int ret = 0; 674 struct ulist *parents; 675 struct ulist_node *node; 676 struct ulist_iterator uiter; 677 struct rb_node *rnode; 678 679 parents = ulist_alloc(GFP_NOFS); 680 if (!parents) 681 return -ENOMEM; 682 683 /* 684 * We could trade memory usage for performance here by iterating 685 * the tree, allocating new refs for each insertion, and then 686 * freeing the entire indirect tree when we're done. In some test 687 * cases, the tree can grow quite large (~200k objects). 688 */ 689 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { 690 struct prelim_ref *ref; 691 692 ref = rb_entry(rnode, struct prelim_ref, rbnode); 693 if (WARN(ref->parent, 694 "BUG: direct ref found in indirect tree")) { 695 ret = -EINVAL; 696 goto out; 697 } 698 699 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); 700 preftrees->indirect.count--; 701 702 if (ref->count == 0) { 703 free_pref(ref); 704 continue; 705 } 706 707 if (sc && sc->root_objectid && 708 ref->root_id != sc->root_objectid) { 709 free_pref(ref); 710 ret = BACKREF_FOUND_SHARED; 711 goto out; 712 } 713 err = resolve_indirect_ref(fs_info, path, time_seq, preftrees, 714 ref, parents, extent_item_pos, 715 ignore_offset); 716 /* 717 * we can only tolerate ENOENT,otherwise,we should catch error 718 * and return directly. 719 */ 720 if (err == -ENOENT) { 721 prelim_ref_insert(fs_info, &preftrees->direct, ref, 722 NULL); 723 continue; 724 } else if (err) { 725 free_pref(ref); 726 ret = err; 727 goto out; 728 } 729 730 /* we put the first parent into the ref at hand */ 731 ULIST_ITER_INIT(&uiter); 732 node = ulist_next(parents, &uiter); 733 ref->parent = node ? node->val : 0; 734 ref->inode_list = unode_aux_to_inode_list(node); 735 736 /* Add a prelim_ref(s) for any other parent(s). */ 737 while ((node = ulist_next(parents, &uiter))) { 738 struct prelim_ref *new_ref; 739 740 new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache, 741 GFP_NOFS); 742 if (!new_ref) { 743 free_pref(ref); 744 ret = -ENOMEM; 745 goto out; 746 } 747 memcpy(new_ref, ref, sizeof(*ref)); 748 new_ref->parent = node->val; 749 new_ref->inode_list = unode_aux_to_inode_list(node); 750 prelim_ref_insert(fs_info, &preftrees->direct, 751 new_ref, NULL); 752 } 753 754 /* 755 * Now it's a direct ref, put it in the direct tree. We must 756 * do this last because the ref could be merged/freed here. 757 */ 758 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); 759 760 ulist_reinit(parents); 761 cond_resched(); 762 } 763 out: 764 ulist_free(parents); 765 return ret; 766 } 767 768 /* 769 * read tree blocks and add keys where required. 770 */ 771 static int add_missing_keys(struct btrfs_fs_info *fs_info, 772 struct preftrees *preftrees, bool lock) 773 { 774 struct prelim_ref *ref; 775 struct extent_buffer *eb; 776 struct preftree *tree = &preftrees->indirect_missing_keys; 777 struct rb_node *node; 778 779 while ((node = rb_first_cached(&tree->root))) { 780 ref = rb_entry(node, struct prelim_ref, rbnode); 781 rb_erase_cached(node, &tree->root); 782 783 BUG_ON(ref->parent); /* should not be a direct ref */ 784 BUG_ON(ref->key_for_search.type); 785 BUG_ON(!ref->wanted_disk_byte); 786 787 eb = read_tree_block(fs_info, ref->wanted_disk_byte, 788 ref->root_id, 0, ref->level - 1, NULL); 789 if (IS_ERR(eb)) { 790 free_pref(ref); 791 return PTR_ERR(eb); 792 } 793 if (!extent_buffer_uptodate(eb)) { 794 free_pref(ref); 795 free_extent_buffer(eb); 796 return -EIO; 797 } 798 799 if (lock) 800 btrfs_tree_read_lock(eb); 801 if (btrfs_header_level(eb) == 0) 802 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); 803 else 804 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); 805 if (lock) 806 btrfs_tree_read_unlock(eb); 807 free_extent_buffer(eb); 808 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); 809 cond_resched(); 810 } 811 return 0; 812 } 813 814 /* 815 * add all currently queued delayed refs from this head whose seq nr is 816 * smaller or equal that seq to the list 817 */ 818 static int add_delayed_refs(const struct btrfs_fs_info *fs_info, 819 struct btrfs_delayed_ref_head *head, u64 seq, 820 struct preftrees *preftrees, struct share_check *sc) 821 { 822 struct btrfs_delayed_ref_node *node; 823 struct btrfs_delayed_extent_op *extent_op = head->extent_op; 824 struct btrfs_key key; 825 struct btrfs_key tmp_op_key; 826 struct rb_node *n; 827 int count; 828 int ret = 0; 829 830 if (extent_op && extent_op->update_key) 831 btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key); 832 833 spin_lock(&head->lock); 834 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { 835 node = rb_entry(n, struct btrfs_delayed_ref_node, 836 ref_node); 837 if (node->seq > seq) 838 continue; 839 840 switch (node->action) { 841 case BTRFS_ADD_DELAYED_EXTENT: 842 case BTRFS_UPDATE_DELAYED_HEAD: 843 WARN_ON(1); 844 continue; 845 case BTRFS_ADD_DELAYED_REF: 846 count = node->ref_mod; 847 break; 848 case BTRFS_DROP_DELAYED_REF: 849 count = node->ref_mod * -1; 850 break; 851 default: 852 BUG(); 853 } 854 switch (node->type) { 855 case BTRFS_TREE_BLOCK_REF_KEY: { 856 /* NORMAL INDIRECT METADATA backref */ 857 struct btrfs_delayed_tree_ref *ref; 858 859 ref = btrfs_delayed_node_to_tree_ref(node); 860 ret = add_indirect_ref(fs_info, preftrees, ref->root, 861 &tmp_op_key, ref->level + 1, 862 node->bytenr, count, sc, 863 GFP_ATOMIC); 864 break; 865 } 866 case BTRFS_SHARED_BLOCK_REF_KEY: { 867 /* SHARED DIRECT METADATA backref */ 868 struct btrfs_delayed_tree_ref *ref; 869 870 ref = btrfs_delayed_node_to_tree_ref(node); 871 872 ret = add_direct_ref(fs_info, preftrees, ref->level + 1, 873 ref->parent, node->bytenr, count, 874 sc, GFP_ATOMIC); 875 break; 876 } 877 case BTRFS_EXTENT_DATA_REF_KEY: { 878 /* NORMAL INDIRECT DATA backref */ 879 struct btrfs_delayed_data_ref *ref; 880 ref = btrfs_delayed_node_to_data_ref(node); 881 882 key.objectid = ref->objectid; 883 key.type = BTRFS_EXTENT_DATA_KEY; 884 key.offset = ref->offset; 885 886 /* 887 * Found a inum that doesn't match our known inum, we 888 * know it's shared. 889 */ 890 if (sc && sc->inum && ref->objectid != sc->inum) { 891 ret = BACKREF_FOUND_SHARED; 892 goto out; 893 } 894 895 ret = add_indirect_ref(fs_info, preftrees, ref->root, 896 &key, 0, node->bytenr, count, sc, 897 GFP_ATOMIC); 898 break; 899 } 900 case BTRFS_SHARED_DATA_REF_KEY: { 901 /* SHARED DIRECT FULL backref */ 902 struct btrfs_delayed_data_ref *ref; 903 904 ref = btrfs_delayed_node_to_data_ref(node); 905 906 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent, 907 node->bytenr, count, sc, 908 GFP_ATOMIC); 909 break; 910 } 911 default: 912 WARN_ON(1); 913 } 914 /* 915 * We must ignore BACKREF_FOUND_SHARED until all delayed 916 * refs have been checked. 917 */ 918 if (ret && (ret != BACKREF_FOUND_SHARED)) 919 break; 920 } 921 if (!ret) 922 ret = extent_is_shared(sc); 923 out: 924 spin_unlock(&head->lock); 925 return ret; 926 } 927 928 /* 929 * add all inline backrefs for bytenr to the list 930 * 931 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED. 932 */ 933 static int add_inline_refs(const struct btrfs_fs_info *fs_info, 934 struct btrfs_path *path, u64 bytenr, 935 int *info_level, struct preftrees *preftrees, 936 struct share_check *sc) 937 { 938 int ret = 0; 939 int slot; 940 struct extent_buffer *leaf; 941 struct btrfs_key key; 942 struct btrfs_key found_key; 943 unsigned long ptr; 944 unsigned long end; 945 struct btrfs_extent_item *ei; 946 u64 flags; 947 u64 item_size; 948 949 /* 950 * enumerate all inline refs 951 */ 952 leaf = path->nodes[0]; 953 slot = path->slots[0]; 954 955 item_size = btrfs_item_size(leaf, slot); 956 BUG_ON(item_size < sizeof(*ei)); 957 958 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); 959 flags = btrfs_extent_flags(leaf, ei); 960 btrfs_item_key_to_cpu(leaf, &found_key, slot); 961 962 ptr = (unsigned long)(ei + 1); 963 end = (unsigned long)ei + item_size; 964 965 if (found_key.type == BTRFS_EXTENT_ITEM_KEY && 966 flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 967 struct btrfs_tree_block_info *info; 968 969 info = (struct btrfs_tree_block_info *)ptr; 970 *info_level = btrfs_tree_block_level(leaf, info); 971 ptr += sizeof(struct btrfs_tree_block_info); 972 BUG_ON(ptr > end); 973 } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) { 974 *info_level = found_key.offset; 975 } else { 976 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA)); 977 } 978 979 while (ptr < end) { 980 struct btrfs_extent_inline_ref *iref; 981 u64 offset; 982 int type; 983 984 iref = (struct btrfs_extent_inline_ref *)ptr; 985 type = btrfs_get_extent_inline_ref_type(leaf, iref, 986 BTRFS_REF_TYPE_ANY); 987 if (type == BTRFS_REF_TYPE_INVALID) 988 return -EUCLEAN; 989 990 offset = btrfs_extent_inline_ref_offset(leaf, iref); 991 992 switch (type) { 993 case BTRFS_SHARED_BLOCK_REF_KEY: 994 ret = add_direct_ref(fs_info, preftrees, 995 *info_level + 1, offset, 996 bytenr, 1, NULL, GFP_NOFS); 997 break; 998 case BTRFS_SHARED_DATA_REF_KEY: { 999 struct btrfs_shared_data_ref *sdref; 1000 int count; 1001 1002 sdref = (struct btrfs_shared_data_ref *)(iref + 1); 1003 count = btrfs_shared_data_ref_count(leaf, sdref); 1004 1005 ret = add_direct_ref(fs_info, preftrees, 0, offset, 1006 bytenr, count, sc, GFP_NOFS); 1007 break; 1008 } 1009 case BTRFS_TREE_BLOCK_REF_KEY: 1010 ret = add_indirect_ref(fs_info, preftrees, offset, 1011 NULL, *info_level + 1, 1012 bytenr, 1, NULL, GFP_NOFS); 1013 break; 1014 case BTRFS_EXTENT_DATA_REF_KEY: { 1015 struct btrfs_extent_data_ref *dref; 1016 int count; 1017 u64 root; 1018 1019 dref = (struct btrfs_extent_data_ref *)(&iref->offset); 1020 count = btrfs_extent_data_ref_count(leaf, dref); 1021 key.objectid = btrfs_extent_data_ref_objectid(leaf, 1022 dref); 1023 key.type = BTRFS_EXTENT_DATA_KEY; 1024 key.offset = btrfs_extent_data_ref_offset(leaf, dref); 1025 1026 if (sc && sc->inum && key.objectid != sc->inum) { 1027 ret = BACKREF_FOUND_SHARED; 1028 break; 1029 } 1030 1031 root = btrfs_extent_data_ref_root(leaf, dref); 1032 1033 ret = add_indirect_ref(fs_info, preftrees, root, 1034 &key, 0, bytenr, count, 1035 sc, GFP_NOFS); 1036 break; 1037 } 1038 default: 1039 WARN_ON(1); 1040 } 1041 if (ret) 1042 return ret; 1043 ptr += btrfs_extent_inline_ref_size(type); 1044 } 1045 1046 return 0; 1047 } 1048 1049 /* 1050 * add all non-inline backrefs for bytenr to the list 1051 * 1052 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED. 1053 */ 1054 static int add_keyed_refs(struct btrfs_root *extent_root, 1055 struct btrfs_path *path, u64 bytenr, 1056 int info_level, struct preftrees *preftrees, 1057 struct share_check *sc) 1058 { 1059 struct btrfs_fs_info *fs_info = extent_root->fs_info; 1060 int ret; 1061 int slot; 1062 struct extent_buffer *leaf; 1063 struct btrfs_key key; 1064 1065 while (1) { 1066 ret = btrfs_next_item(extent_root, path); 1067 if (ret < 0) 1068 break; 1069 if (ret) { 1070 ret = 0; 1071 break; 1072 } 1073 1074 slot = path->slots[0]; 1075 leaf = path->nodes[0]; 1076 btrfs_item_key_to_cpu(leaf, &key, slot); 1077 1078 if (key.objectid != bytenr) 1079 break; 1080 if (key.type < BTRFS_TREE_BLOCK_REF_KEY) 1081 continue; 1082 if (key.type > BTRFS_SHARED_DATA_REF_KEY) 1083 break; 1084 1085 switch (key.type) { 1086 case BTRFS_SHARED_BLOCK_REF_KEY: 1087 /* SHARED DIRECT METADATA backref */ 1088 ret = add_direct_ref(fs_info, preftrees, 1089 info_level + 1, key.offset, 1090 bytenr, 1, NULL, GFP_NOFS); 1091 break; 1092 case BTRFS_SHARED_DATA_REF_KEY: { 1093 /* SHARED DIRECT FULL backref */ 1094 struct btrfs_shared_data_ref *sdref; 1095 int count; 1096 1097 sdref = btrfs_item_ptr(leaf, slot, 1098 struct btrfs_shared_data_ref); 1099 count = btrfs_shared_data_ref_count(leaf, sdref); 1100 ret = add_direct_ref(fs_info, preftrees, 0, 1101 key.offset, bytenr, count, 1102 sc, GFP_NOFS); 1103 break; 1104 } 1105 case BTRFS_TREE_BLOCK_REF_KEY: 1106 /* NORMAL INDIRECT METADATA backref */ 1107 ret = add_indirect_ref(fs_info, preftrees, key.offset, 1108 NULL, info_level + 1, bytenr, 1109 1, NULL, GFP_NOFS); 1110 break; 1111 case BTRFS_EXTENT_DATA_REF_KEY: { 1112 /* NORMAL INDIRECT DATA backref */ 1113 struct btrfs_extent_data_ref *dref; 1114 int count; 1115 u64 root; 1116 1117 dref = btrfs_item_ptr(leaf, slot, 1118 struct btrfs_extent_data_ref); 1119 count = btrfs_extent_data_ref_count(leaf, dref); 1120 key.objectid = btrfs_extent_data_ref_objectid(leaf, 1121 dref); 1122 key.type = BTRFS_EXTENT_DATA_KEY; 1123 key.offset = btrfs_extent_data_ref_offset(leaf, dref); 1124 1125 if (sc && sc->inum && key.objectid != sc->inum) { 1126 ret = BACKREF_FOUND_SHARED; 1127 break; 1128 } 1129 1130 root = btrfs_extent_data_ref_root(leaf, dref); 1131 ret = add_indirect_ref(fs_info, preftrees, root, 1132 &key, 0, bytenr, count, 1133 sc, GFP_NOFS); 1134 break; 1135 } 1136 default: 1137 WARN_ON(1); 1138 } 1139 if (ret) 1140 return ret; 1141 1142 } 1143 1144 return ret; 1145 } 1146 1147 /* 1148 * this adds all existing backrefs (inline backrefs, backrefs and delayed 1149 * refs) for the given bytenr to the refs list, merges duplicates and resolves 1150 * indirect refs to their parent bytenr. 1151 * When roots are found, they're added to the roots list 1152 * 1153 * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs, and 1154 * behave much like trans == NULL case, the difference only lies in it will not 1155 * commit root. 1156 * The special case is for qgroup to search roots in commit_transaction(). 1157 * 1158 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a 1159 * shared extent is detected. 1160 * 1161 * Otherwise this returns 0 for success and <0 for an error. 1162 * 1163 * If ignore_offset is set to false, only extent refs whose offsets match 1164 * extent_item_pos are returned. If true, every extent ref is returned 1165 * and extent_item_pos is ignored. 1166 * 1167 * FIXME some caching might speed things up 1168 */ 1169 static int find_parent_nodes(struct btrfs_trans_handle *trans, 1170 struct btrfs_fs_info *fs_info, u64 bytenr, 1171 u64 time_seq, struct ulist *refs, 1172 struct ulist *roots, const u64 *extent_item_pos, 1173 struct share_check *sc, bool ignore_offset) 1174 { 1175 struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr); 1176 struct btrfs_key key; 1177 struct btrfs_path *path; 1178 struct btrfs_delayed_ref_root *delayed_refs = NULL; 1179 struct btrfs_delayed_ref_head *head; 1180 int info_level = 0; 1181 int ret; 1182 struct prelim_ref *ref; 1183 struct rb_node *node; 1184 struct extent_inode_elem *eie = NULL; 1185 struct preftrees preftrees = { 1186 .direct = PREFTREE_INIT, 1187 .indirect = PREFTREE_INIT, 1188 .indirect_missing_keys = PREFTREE_INIT 1189 }; 1190 1191 key.objectid = bytenr; 1192 key.offset = (u64)-1; 1193 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) 1194 key.type = BTRFS_METADATA_ITEM_KEY; 1195 else 1196 key.type = BTRFS_EXTENT_ITEM_KEY; 1197 1198 path = btrfs_alloc_path(); 1199 if (!path) 1200 return -ENOMEM; 1201 if (!trans) { 1202 path->search_commit_root = 1; 1203 path->skip_locking = 1; 1204 } 1205 1206 if (time_seq == BTRFS_SEQ_LAST) 1207 path->skip_locking = 1; 1208 1209 again: 1210 head = NULL; 1211 1212 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1213 if (ret < 0) 1214 goto out; 1215 if (ret == 0) { 1216 /* This shouldn't happen, indicates a bug or fs corruption. */ 1217 ASSERT(ret != 0); 1218 ret = -EUCLEAN; 1219 goto out; 1220 } 1221 1222 if (trans && likely(trans->type != __TRANS_DUMMY) && 1223 time_seq != BTRFS_SEQ_LAST) { 1224 /* 1225 * We have a specific time_seq we care about and trans which 1226 * means we have the path lock, we need to grab the ref head and 1227 * lock it so we have a consistent view of the refs at the given 1228 * time. 1229 */ 1230 delayed_refs = &trans->transaction->delayed_refs; 1231 spin_lock(&delayed_refs->lock); 1232 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 1233 if (head) { 1234 if (!mutex_trylock(&head->mutex)) { 1235 refcount_inc(&head->refs); 1236 spin_unlock(&delayed_refs->lock); 1237 1238 btrfs_release_path(path); 1239 1240 /* 1241 * Mutex was contended, block until it's 1242 * released and try again 1243 */ 1244 mutex_lock(&head->mutex); 1245 mutex_unlock(&head->mutex); 1246 btrfs_put_delayed_ref_head(head); 1247 goto again; 1248 } 1249 spin_unlock(&delayed_refs->lock); 1250 ret = add_delayed_refs(fs_info, head, time_seq, 1251 &preftrees, sc); 1252 mutex_unlock(&head->mutex); 1253 if (ret) 1254 goto out; 1255 } else { 1256 spin_unlock(&delayed_refs->lock); 1257 } 1258 } 1259 1260 if (path->slots[0]) { 1261 struct extent_buffer *leaf; 1262 int slot; 1263 1264 path->slots[0]--; 1265 leaf = path->nodes[0]; 1266 slot = path->slots[0]; 1267 btrfs_item_key_to_cpu(leaf, &key, slot); 1268 if (key.objectid == bytenr && 1269 (key.type == BTRFS_EXTENT_ITEM_KEY || 1270 key.type == BTRFS_METADATA_ITEM_KEY)) { 1271 ret = add_inline_refs(fs_info, path, bytenr, 1272 &info_level, &preftrees, sc); 1273 if (ret) 1274 goto out; 1275 ret = add_keyed_refs(root, path, bytenr, info_level, 1276 &preftrees, sc); 1277 if (ret) 1278 goto out; 1279 } 1280 } 1281 1282 btrfs_release_path(path); 1283 1284 ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0); 1285 if (ret) 1286 goto out; 1287 1288 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root)); 1289 1290 ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees, 1291 extent_item_pos, sc, ignore_offset); 1292 if (ret) 1293 goto out; 1294 1295 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root)); 1296 1297 /* 1298 * This walks the tree of merged and resolved refs. Tree blocks are 1299 * read in as needed. Unique entries are added to the ulist, and 1300 * the list of found roots is updated. 1301 * 1302 * We release the entire tree in one go before returning. 1303 */ 1304 node = rb_first_cached(&preftrees.direct.root); 1305 while (node) { 1306 ref = rb_entry(node, struct prelim_ref, rbnode); 1307 node = rb_next(&ref->rbnode); 1308 /* 1309 * ref->count < 0 can happen here if there are delayed 1310 * refs with a node->action of BTRFS_DROP_DELAYED_REF. 1311 * prelim_ref_insert() relies on this when merging 1312 * identical refs to keep the overall count correct. 1313 * prelim_ref_insert() will merge only those refs 1314 * which compare identically. Any refs having 1315 * e.g. different offsets would not be merged, 1316 * and would retain their original ref->count < 0. 1317 */ 1318 if (roots && ref->count && ref->root_id && ref->parent == 0) { 1319 if (sc && sc->root_objectid && 1320 ref->root_id != sc->root_objectid) { 1321 ret = BACKREF_FOUND_SHARED; 1322 goto out; 1323 } 1324 1325 /* no parent == root of tree */ 1326 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS); 1327 if (ret < 0) 1328 goto out; 1329 } 1330 if (ref->count && ref->parent) { 1331 if (extent_item_pos && !ref->inode_list && 1332 ref->level == 0) { 1333 struct extent_buffer *eb; 1334 1335 eb = read_tree_block(fs_info, ref->parent, 0, 1336 0, ref->level, NULL); 1337 if (IS_ERR(eb)) { 1338 ret = PTR_ERR(eb); 1339 goto out; 1340 } 1341 if (!extent_buffer_uptodate(eb)) { 1342 free_extent_buffer(eb); 1343 ret = -EIO; 1344 goto out; 1345 } 1346 1347 if (!path->skip_locking) 1348 btrfs_tree_read_lock(eb); 1349 ret = find_extent_in_eb(eb, bytenr, 1350 *extent_item_pos, &eie, ignore_offset); 1351 if (!path->skip_locking) 1352 btrfs_tree_read_unlock(eb); 1353 free_extent_buffer(eb); 1354 if (ret < 0) 1355 goto out; 1356 ref->inode_list = eie; 1357 } 1358 ret = ulist_add_merge_ptr(refs, ref->parent, 1359 ref->inode_list, 1360 (void **)&eie, GFP_NOFS); 1361 if (ret < 0) 1362 goto out; 1363 if (!ret && extent_item_pos) { 1364 /* 1365 * We've recorded that parent, so we must extend 1366 * its inode list here. 1367 * 1368 * However if there was corruption we may not 1369 * have found an eie, return an error in this 1370 * case. 1371 */ 1372 ASSERT(eie); 1373 if (!eie) { 1374 ret = -EUCLEAN; 1375 goto out; 1376 } 1377 while (eie->next) 1378 eie = eie->next; 1379 eie->next = ref->inode_list; 1380 } 1381 eie = NULL; 1382 } 1383 cond_resched(); 1384 } 1385 1386 out: 1387 btrfs_free_path(path); 1388 1389 prelim_release(&preftrees.direct); 1390 prelim_release(&preftrees.indirect); 1391 prelim_release(&preftrees.indirect_missing_keys); 1392 1393 if (ret < 0) 1394 free_inode_elem_list(eie); 1395 return ret; 1396 } 1397 1398 static void free_leaf_list(struct ulist *blocks) 1399 { 1400 struct ulist_node *node = NULL; 1401 struct extent_inode_elem *eie; 1402 struct ulist_iterator uiter; 1403 1404 ULIST_ITER_INIT(&uiter); 1405 while ((node = ulist_next(blocks, &uiter))) { 1406 if (!node->aux) 1407 continue; 1408 eie = unode_aux_to_inode_list(node); 1409 free_inode_elem_list(eie); 1410 node->aux = 0; 1411 } 1412 1413 ulist_free(blocks); 1414 } 1415 1416 /* 1417 * Finds all leafs with a reference to the specified combination of bytenr and 1418 * offset. key_list_head will point to a list of corresponding keys (caller must 1419 * free each list element). The leafs will be stored in the leafs ulist, which 1420 * must be freed with ulist_free. 1421 * 1422 * returns 0 on success, <0 on error 1423 */ 1424 int btrfs_find_all_leafs(struct btrfs_trans_handle *trans, 1425 struct btrfs_fs_info *fs_info, u64 bytenr, 1426 u64 time_seq, struct ulist **leafs, 1427 const u64 *extent_item_pos, bool ignore_offset) 1428 { 1429 int ret; 1430 1431 *leafs = ulist_alloc(GFP_NOFS); 1432 if (!*leafs) 1433 return -ENOMEM; 1434 1435 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq, 1436 *leafs, NULL, extent_item_pos, NULL, ignore_offset); 1437 if (ret < 0 && ret != -ENOENT) { 1438 free_leaf_list(*leafs); 1439 return ret; 1440 } 1441 1442 return 0; 1443 } 1444 1445 /* 1446 * walk all backrefs for a given extent to find all roots that reference this 1447 * extent. Walking a backref means finding all extents that reference this 1448 * extent and in turn walk the backrefs of those, too. Naturally this is a 1449 * recursive process, but here it is implemented in an iterative fashion: We 1450 * find all referencing extents for the extent in question and put them on a 1451 * list. In turn, we find all referencing extents for those, further appending 1452 * to the list. The way we iterate the list allows adding more elements after 1453 * the current while iterating. The process stops when we reach the end of the 1454 * list. Found roots are added to the roots list. 1455 * 1456 * returns 0 on success, < 0 on error. 1457 */ 1458 static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, 1459 struct btrfs_fs_info *fs_info, u64 bytenr, 1460 u64 time_seq, struct ulist **roots, 1461 bool ignore_offset) 1462 { 1463 struct ulist *tmp; 1464 struct ulist_node *node = NULL; 1465 struct ulist_iterator uiter; 1466 int ret; 1467 1468 tmp = ulist_alloc(GFP_NOFS); 1469 if (!tmp) 1470 return -ENOMEM; 1471 *roots = ulist_alloc(GFP_NOFS); 1472 if (!*roots) { 1473 ulist_free(tmp); 1474 return -ENOMEM; 1475 } 1476 1477 ULIST_ITER_INIT(&uiter); 1478 while (1) { 1479 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq, 1480 tmp, *roots, NULL, NULL, ignore_offset); 1481 if (ret < 0 && ret != -ENOENT) { 1482 ulist_free(tmp); 1483 ulist_free(*roots); 1484 *roots = NULL; 1485 return ret; 1486 } 1487 node = ulist_next(tmp, &uiter); 1488 if (!node) 1489 break; 1490 bytenr = node->val; 1491 cond_resched(); 1492 } 1493 1494 ulist_free(tmp); 1495 return 0; 1496 } 1497 1498 int btrfs_find_all_roots(struct btrfs_trans_handle *trans, 1499 struct btrfs_fs_info *fs_info, u64 bytenr, 1500 u64 time_seq, struct ulist **roots, 1501 bool skip_commit_root_sem) 1502 { 1503 int ret; 1504 1505 if (!trans && !skip_commit_root_sem) 1506 down_read(&fs_info->commit_root_sem); 1507 ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr, 1508 time_seq, roots, false); 1509 if (!trans && !skip_commit_root_sem) 1510 up_read(&fs_info->commit_root_sem); 1511 return ret; 1512 } 1513 1514 /* 1515 * The caller has joined a transaction or is holding a read lock on the 1516 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last 1517 * snapshot field changing while updating or checking the cache. 1518 */ 1519 static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache, 1520 struct btrfs_root *root, 1521 u64 bytenr, int level, bool *is_shared) 1522 { 1523 struct btrfs_backref_shared_cache_entry *entry; 1524 1525 if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL)) 1526 return false; 1527 1528 /* 1529 * Level -1 is used for the data extent, which is not reliable to cache 1530 * because its reference count can increase or decrease without us 1531 * realizing. We cache results only for extent buffers that lead from 1532 * the root node down to the leaf with the file extent item. 1533 */ 1534 ASSERT(level >= 0); 1535 1536 entry = &cache->entries[level]; 1537 1538 /* Unused cache entry or being used for some other extent buffer. */ 1539 if (entry->bytenr != bytenr) 1540 return false; 1541 1542 /* 1543 * We cached a false result, but the last snapshot generation of the 1544 * root changed, so we now have a snapshot. Don't trust the result. 1545 */ 1546 if (!entry->is_shared && 1547 entry->gen != btrfs_root_last_snapshot(&root->root_item)) 1548 return false; 1549 1550 /* 1551 * If we cached a true result and the last generation used for dropping 1552 * a root changed, we can not trust the result, because the dropped root 1553 * could be a snapshot sharing this extent buffer. 1554 */ 1555 if (entry->is_shared && 1556 entry->gen != btrfs_get_last_root_drop_gen(root->fs_info)) 1557 return false; 1558 1559 *is_shared = entry->is_shared; 1560 1561 return true; 1562 } 1563 1564 /* 1565 * The caller has joined a transaction or is holding a read lock on the 1566 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last 1567 * snapshot field changing while updating or checking the cache. 1568 */ 1569 static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache, 1570 struct btrfs_root *root, 1571 u64 bytenr, int level, bool is_shared) 1572 { 1573 struct btrfs_backref_shared_cache_entry *entry; 1574 u64 gen; 1575 1576 if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL)) 1577 return; 1578 1579 /* 1580 * Level -1 is used for the data extent, which is not reliable to cache 1581 * because its reference count can increase or decrease without us 1582 * realizing. We cache results only for extent buffers that lead from 1583 * the root node down to the leaf with the file extent item. 1584 */ 1585 ASSERT(level >= 0); 1586 1587 if (is_shared) 1588 gen = btrfs_get_last_root_drop_gen(root->fs_info); 1589 else 1590 gen = btrfs_root_last_snapshot(&root->root_item); 1591 1592 entry = &cache->entries[level]; 1593 entry->bytenr = bytenr; 1594 entry->is_shared = is_shared; 1595 entry->gen = gen; 1596 1597 /* 1598 * If we found an extent buffer is shared, set the cache result for all 1599 * extent buffers below it to true. As nodes in the path are COWed, 1600 * their sharedness is moved to their children, and if a leaf is COWed, 1601 * then the sharedness of a data extent becomes direct, the refcount of 1602 * data extent is increased in the extent item at the extent tree. 1603 */ 1604 if (is_shared) { 1605 for (int i = 0; i < level; i++) { 1606 entry = &cache->entries[i]; 1607 entry->is_shared = is_shared; 1608 entry->gen = gen; 1609 } 1610 } 1611 } 1612 1613 /* 1614 * Check if a data extent is shared or not. 1615 * 1616 * @root: The root the inode belongs to. 1617 * @inum: Number of the inode whose extent we are checking. 1618 * @bytenr: Logical bytenr of the extent we are checking. 1619 * @extent_gen: Generation of the extent (file extent item) or 0 if it is 1620 * not known. 1621 * @roots: List of roots this extent is shared among. 1622 * @tmp: Temporary list used for iteration. 1623 * @cache: A backref lookup result cache. 1624 * 1625 * btrfs_is_data_extent_shared uses the backref walking code but will short 1626 * circuit as soon as it finds a root or inode that doesn't match the 1627 * one passed in. This provides a significant performance benefit for 1628 * callers (such as fiemap) which want to know whether the extent is 1629 * shared but do not need a ref count. 1630 * 1631 * This attempts to attach to the running transaction in order to account for 1632 * delayed refs, but continues on even when no running transaction exists. 1633 * 1634 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. 1635 */ 1636 int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, 1637 u64 extent_gen, 1638 struct ulist *roots, struct ulist *tmp, 1639 struct btrfs_backref_shared_cache *cache) 1640 { 1641 struct btrfs_fs_info *fs_info = root->fs_info; 1642 struct btrfs_trans_handle *trans; 1643 struct ulist_iterator uiter; 1644 struct ulist_node *node; 1645 struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem); 1646 int ret = 0; 1647 struct share_check shared = { 1648 .root_objectid = root->root_key.objectid, 1649 .inum = inum, 1650 .share_count = 0, 1651 }; 1652 int level; 1653 1654 ulist_init(roots); 1655 ulist_init(tmp); 1656 1657 trans = btrfs_join_transaction_nostart(root); 1658 if (IS_ERR(trans)) { 1659 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { 1660 ret = PTR_ERR(trans); 1661 goto out; 1662 } 1663 trans = NULL; 1664 down_read(&fs_info->commit_root_sem); 1665 } else { 1666 btrfs_get_tree_mod_seq(fs_info, &elem); 1667 } 1668 1669 /* -1 means we are in the bytenr of the data extent. */ 1670 level = -1; 1671 ULIST_ITER_INIT(&uiter); 1672 while (1) { 1673 bool is_shared; 1674 bool cached; 1675 1676 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp, 1677 roots, NULL, &shared, false); 1678 if (ret == BACKREF_FOUND_SHARED) { 1679 /* this is the only condition under which we return 1 */ 1680 ret = 1; 1681 if (level >= 0) 1682 store_backref_shared_cache(cache, root, bytenr, 1683 level, true); 1684 break; 1685 } 1686 if (ret < 0 && ret != -ENOENT) 1687 break; 1688 ret = 0; 1689 /* 1690 * If our data extent is not shared through reflinks and it was 1691 * created in a generation after the last one used to create a 1692 * snapshot of the inode's root, then it can not be shared 1693 * indirectly through subtrees, as that can only happen with 1694 * snapshots. In this case bail out, no need to check for the 1695 * sharedness of extent buffers. 1696 */ 1697 if (level == -1 && 1698 extent_gen > btrfs_root_last_snapshot(&root->root_item)) 1699 break; 1700 1701 if (level >= 0) 1702 store_backref_shared_cache(cache, root, bytenr, 1703 level, false); 1704 node = ulist_next(tmp, &uiter); 1705 if (!node) 1706 break; 1707 bytenr = node->val; 1708 level++; 1709 cached = lookup_backref_shared_cache(cache, root, bytenr, level, 1710 &is_shared); 1711 if (cached) { 1712 ret = (is_shared ? 1 : 0); 1713 break; 1714 } 1715 shared.share_count = 0; 1716 cond_resched(); 1717 } 1718 1719 if (trans) { 1720 btrfs_put_tree_mod_seq(fs_info, &elem); 1721 btrfs_end_transaction(trans); 1722 } else { 1723 up_read(&fs_info->commit_root_sem); 1724 } 1725 out: 1726 ulist_release(roots); 1727 ulist_release(tmp); 1728 return ret; 1729 } 1730 1731 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, 1732 u64 start_off, struct btrfs_path *path, 1733 struct btrfs_inode_extref **ret_extref, 1734 u64 *found_off) 1735 { 1736 int ret, slot; 1737 struct btrfs_key key; 1738 struct btrfs_key found_key; 1739 struct btrfs_inode_extref *extref; 1740 const struct extent_buffer *leaf; 1741 unsigned long ptr; 1742 1743 key.objectid = inode_objectid; 1744 key.type = BTRFS_INODE_EXTREF_KEY; 1745 key.offset = start_off; 1746 1747 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1748 if (ret < 0) 1749 return ret; 1750 1751 while (1) { 1752 leaf = path->nodes[0]; 1753 slot = path->slots[0]; 1754 if (slot >= btrfs_header_nritems(leaf)) { 1755 /* 1756 * If the item at offset is not found, 1757 * btrfs_search_slot will point us to the slot 1758 * where it should be inserted. In our case 1759 * that will be the slot directly before the 1760 * next INODE_REF_KEY_V2 item. In the case 1761 * that we're pointing to the last slot in a 1762 * leaf, we must move one leaf over. 1763 */ 1764 ret = btrfs_next_leaf(root, path); 1765 if (ret) { 1766 if (ret >= 1) 1767 ret = -ENOENT; 1768 break; 1769 } 1770 continue; 1771 } 1772 1773 btrfs_item_key_to_cpu(leaf, &found_key, slot); 1774 1775 /* 1776 * Check that we're still looking at an extended ref key for 1777 * this particular objectid. If we have different 1778 * objectid or type then there are no more to be found 1779 * in the tree and we can exit. 1780 */ 1781 ret = -ENOENT; 1782 if (found_key.objectid != inode_objectid) 1783 break; 1784 if (found_key.type != BTRFS_INODE_EXTREF_KEY) 1785 break; 1786 1787 ret = 0; 1788 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1789 extref = (struct btrfs_inode_extref *)ptr; 1790 *ret_extref = extref; 1791 if (found_off) 1792 *found_off = found_key.offset; 1793 break; 1794 } 1795 1796 return ret; 1797 } 1798 1799 /* 1800 * this iterates to turn a name (from iref/extref) into a full filesystem path. 1801 * Elements of the path are separated by '/' and the path is guaranteed to be 1802 * 0-terminated. the path is only given within the current file system. 1803 * Therefore, it never starts with a '/'. the caller is responsible to provide 1804 * "size" bytes in "dest". the dest buffer will be filled backwards. finally, 1805 * the start point of the resulting string is returned. this pointer is within 1806 * dest, normally. 1807 * in case the path buffer would overflow, the pointer is decremented further 1808 * as if output was written to the buffer, though no more output is actually 1809 * generated. that way, the caller can determine how much space would be 1810 * required for the path to fit into the buffer. in that case, the returned 1811 * value will be smaller than dest. callers must check this! 1812 */ 1813 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, 1814 u32 name_len, unsigned long name_off, 1815 struct extent_buffer *eb_in, u64 parent, 1816 char *dest, u32 size) 1817 { 1818 int slot; 1819 u64 next_inum; 1820 int ret; 1821 s64 bytes_left = ((s64)size) - 1; 1822 struct extent_buffer *eb = eb_in; 1823 struct btrfs_key found_key; 1824 struct btrfs_inode_ref *iref; 1825 1826 if (bytes_left >= 0) 1827 dest[bytes_left] = '\0'; 1828 1829 while (1) { 1830 bytes_left -= name_len; 1831 if (bytes_left >= 0) 1832 read_extent_buffer(eb, dest + bytes_left, 1833 name_off, name_len); 1834 if (eb != eb_in) { 1835 if (!path->skip_locking) 1836 btrfs_tree_read_unlock(eb); 1837 free_extent_buffer(eb); 1838 } 1839 ret = btrfs_find_item(fs_root, path, parent, 0, 1840 BTRFS_INODE_REF_KEY, &found_key); 1841 if (ret > 0) 1842 ret = -ENOENT; 1843 if (ret) 1844 break; 1845 1846 next_inum = found_key.offset; 1847 1848 /* regular exit ahead */ 1849 if (parent == next_inum) 1850 break; 1851 1852 slot = path->slots[0]; 1853 eb = path->nodes[0]; 1854 /* make sure we can use eb after releasing the path */ 1855 if (eb != eb_in) { 1856 path->nodes[0] = NULL; 1857 path->locks[0] = 0; 1858 } 1859 btrfs_release_path(path); 1860 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); 1861 1862 name_len = btrfs_inode_ref_name_len(eb, iref); 1863 name_off = (unsigned long)(iref + 1); 1864 1865 parent = next_inum; 1866 --bytes_left; 1867 if (bytes_left >= 0) 1868 dest[bytes_left] = '/'; 1869 } 1870 1871 btrfs_release_path(path); 1872 1873 if (ret) 1874 return ERR_PTR(ret); 1875 1876 return dest + bytes_left; 1877 } 1878 1879 /* 1880 * this makes the path point to (logical EXTENT_ITEM *) 1881 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for 1882 * tree blocks and <0 on error. 1883 */ 1884 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, 1885 struct btrfs_path *path, struct btrfs_key *found_key, 1886 u64 *flags_ret) 1887 { 1888 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical); 1889 int ret; 1890 u64 flags; 1891 u64 size = 0; 1892 u32 item_size; 1893 const struct extent_buffer *eb; 1894 struct btrfs_extent_item *ei; 1895 struct btrfs_key key; 1896 1897 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) 1898 key.type = BTRFS_METADATA_ITEM_KEY; 1899 else 1900 key.type = BTRFS_EXTENT_ITEM_KEY; 1901 key.objectid = logical; 1902 key.offset = (u64)-1; 1903 1904 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 1905 if (ret < 0) 1906 return ret; 1907 1908 ret = btrfs_previous_extent_item(extent_root, path, 0); 1909 if (ret) { 1910 if (ret > 0) 1911 ret = -ENOENT; 1912 return ret; 1913 } 1914 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); 1915 if (found_key->type == BTRFS_METADATA_ITEM_KEY) 1916 size = fs_info->nodesize; 1917 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY) 1918 size = found_key->offset; 1919 1920 if (found_key->objectid > logical || 1921 found_key->objectid + size <= logical) { 1922 btrfs_debug(fs_info, 1923 "logical %llu is not within any extent", logical); 1924 return -ENOENT; 1925 } 1926 1927 eb = path->nodes[0]; 1928 item_size = btrfs_item_size(eb, path->slots[0]); 1929 BUG_ON(item_size < sizeof(*ei)); 1930 1931 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); 1932 flags = btrfs_extent_flags(eb, ei); 1933 1934 btrfs_debug(fs_info, 1935 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u", 1936 logical, logical - found_key->objectid, found_key->objectid, 1937 found_key->offset, flags, item_size); 1938 1939 WARN_ON(!flags_ret); 1940 if (flags_ret) { 1941 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) 1942 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK; 1943 else if (flags & BTRFS_EXTENT_FLAG_DATA) 1944 *flags_ret = BTRFS_EXTENT_FLAG_DATA; 1945 else 1946 BUG(); 1947 return 0; 1948 } 1949 1950 return -EIO; 1951 } 1952 1953 /* 1954 * helper function to iterate extent inline refs. ptr must point to a 0 value 1955 * for the first call and may be modified. it is used to track state. 1956 * if more refs exist, 0 is returned and the next call to 1957 * get_extent_inline_ref must pass the modified ptr parameter to get the 1958 * next ref. after the last ref was processed, 1 is returned. 1959 * returns <0 on error 1960 */ 1961 static int get_extent_inline_ref(unsigned long *ptr, 1962 const struct extent_buffer *eb, 1963 const struct btrfs_key *key, 1964 const struct btrfs_extent_item *ei, 1965 u32 item_size, 1966 struct btrfs_extent_inline_ref **out_eiref, 1967 int *out_type) 1968 { 1969 unsigned long end; 1970 u64 flags; 1971 struct btrfs_tree_block_info *info; 1972 1973 if (!*ptr) { 1974 /* first call */ 1975 flags = btrfs_extent_flags(eb, ei); 1976 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 1977 if (key->type == BTRFS_METADATA_ITEM_KEY) { 1978 /* a skinny metadata extent */ 1979 *out_eiref = 1980 (struct btrfs_extent_inline_ref *)(ei + 1); 1981 } else { 1982 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); 1983 info = (struct btrfs_tree_block_info *)(ei + 1); 1984 *out_eiref = 1985 (struct btrfs_extent_inline_ref *)(info + 1); 1986 } 1987 } else { 1988 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1); 1989 } 1990 *ptr = (unsigned long)*out_eiref; 1991 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size) 1992 return -ENOENT; 1993 } 1994 1995 end = (unsigned long)ei + item_size; 1996 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr); 1997 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref, 1998 BTRFS_REF_TYPE_ANY); 1999 if (*out_type == BTRFS_REF_TYPE_INVALID) 2000 return -EUCLEAN; 2001 2002 *ptr += btrfs_extent_inline_ref_size(*out_type); 2003 WARN_ON(*ptr > end); 2004 if (*ptr == end) 2005 return 1; /* last */ 2006 2007 return 0; 2008 } 2009 2010 /* 2011 * reads the tree block backref for an extent. tree level and root are returned 2012 * through out_level and out_root. ptr must point to a 0 value for the first 2013 * call and may be modified (see get_extent_inline_ref comment). 2014 * returns 0 if data was provided, 1 if there was no more data to provide or 2015 * <0 on error. 2016 */ 2017 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, 2018 struct btrfs_key *key, struct btrfs_extent_item *ei, 2019 u32 item_size, u64 *out_root, u8 *out_level) 2020 { 2021 int ret; 2022 int type; 2023 struct btrfs_extent_inline_ref *eiref; 2024 2025 if (*ptr == (unsigned long)-1) 2026 return 1; 2027 2028 while (1) { 2029 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size, 2030 &eiref, &type); 2031 if (ret < 0) 2032 return ret; 2033 2034 if (type == BTRFS_TREE_BLOCK_REF_KEY || 2035 type == BTRFS_SHARED_BLOCK_REF_KEY) 2036 break; 2037 2038 if (ret == 1) 2039 return 1; 2040 } 2041 2042 /* we can treat both ref types equally here */ 2043 *out_root = btrfs_extent_inline_ref_offset(eb, eiref); 2044 2045 if (key->type == BTRFS_EXTENT_ITEM_KEY) { 2046 struct btrfs_tree_block_info *info; 2047 2048 info = (struct btrfs_tree_block_info *)(ei + 1); 2049 *out_level = btrfs_tree_block_level(eb, info); 2050 } else { 2051 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); 2052 *out_level = (u8)key->offset; 2053 } 2054 2055 if (ret == 1) 2056 *ptr = (unsigned long)-1; 2057 2058 return 0; 2059 } 2060 2061 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, 2062 struct extent_inode_elem *inode_list, 2063 u64 root, u64 extent_item_objectid, 2064 iterate_extent_inodes_t *iterate, void *ctx) 2065 { 2066 struct extent_inode_elem *eie; 2067 int ret = 0; 2068 2069 for (eie = inode_list; eie; eie = eie->next) { 2070 btrfs_debug(fs_info, 2071 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu", 2072 extent_item_objectid, eie->inum, 2073 eie->offset, root); 2074 ret = iterate(eie->inum, eie->offset, root, ctx); 2075 if (ret) { 2076 btrfs_debug(fs_info, 2077 "stopping iteration for %llu due to ret=%d", 2078 extent_item_objectid, ret); 2079 break; 2080 } 2081 } 2082 2083 return ret; 2084 } 2085 2086 /* 2087 * calls iterate() for every inode that references the extent identified by 2088 * the given parameters. 2089 * when the iterator function returns a non-zero value, iteration stops. 2090 */ 2091 int iterate_extent_inodes(struct btrfs_fs_info *fs_info, 2092 u64 extent_item_objectid, u64 extent_item_pos, 2093 int search_commit_root, 2094 iterate_extent_inodes_t *iterate, void *ctx, 2095 bool ignore_offset) 2096 { 2097 int ret; 2098 struct btrfs_trans_handle *trans = NULL; 2099 struct ulist *refs = NULL; 2100 struct ulist *roots = NULL; 2101 struct ulist_node *ref_node = NULL; 2102 struct ulist_node *root_node = NULL; 2103 struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem); 2104 struct ulist_iterator ref_uiter; 2105 struct ulist_iterator root_uiter; 2106 2107 btrfs_debug(fs_info, "resolving all inodes for extent %llu", 2108 extent_item_objectid); 2109 2110 if (!search_commit_root) { 2111 trans = btrfs_attach_transaction(fs_info->tree_root); 2112 if (IS_ERR(trans)) { 2113 if (PTR_ERR(trans) != -ENOENT && 2114 PTR_ERR(trans) != -EROFS) 2115 return PTR_ERR(trans); 2116 trans = NULL; 2117 } 2118 } 2119 2120 if (trans) 2121 btrfs_get_tree_mod_seq(fs_info, &seq_elem); 2122 else 2123 down_read(&fs_info->commit_root_sem); 2124 2125 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid, 2126 seq_elem.seq, &refs, 2127 &extent_item_pos, ignore_offset); 2128 if (ret) 2129 goto out; 2130 2131 ULIST_ITER_INIT(&ref_uiter); 2132 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) { 2133 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val, 2134 seq_elem.seq, &roots, 2135 ignore_offset); 2136 if (ret) 2137 break; 2138 ULIST_ITER_INIT(&root_uiter); 2139 while (!ret && (root_node = ulist_next(roots, &root_uiter))) { 2140 btrfs_debug(fs_info, 2141 "root %llu references leaf %llu, data list %#llx", 2142 root_node->val, ref_node->val, 2143 ref_node->aux); 2144 ret = iterate_leaf_refs(fs_info, 2145 (struct extent_inode_elem *) 2146 (uintptr_t)ref_node->aux, 2147 root_node->val, 2148 extent_item_objectid, 2149 iterate, ctx); 2150 } 2151 ulist_free(roots); 2152 } 2153 2154 free_leaf_list(refs); 2155 out: 2156 if (trans) { 2157 btrfs_put_tree_mod_seq(fs_info, &seq_elem); 2158 btrfs_end_transaction(trans); 2159 } else { 2160 up_read(&fs_info->commit_root_sem); 2161 } 2162 2163 return ret; 2164 } 2165 2166 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) 2167 { 2168 struct btrfs_data_container *inodes = ctx; 2169 const size_t c = 3 * sizeof(u64); 2170 2171 if (inodes->bytes_left >= c) { 2172 inodes->bytes_left -= c; 2173 inodes->val[inodes->elem_cnt] = inum; 2174 inodes->val[inodes->elem_cnt + 1] = offset; 2175 inodes->val[inodes->elem_cnt + 2] = root; 2176 inodes->elem_cnt += 3; 2177 } else { 2178 inodes->bytes_missing += c - inodes->bytes_left; 2179 inodes->bytes_left = 0; 2180 inodes->elem_missed += 3; 2181 } 2182 2183 return 0; 2184 } 2185 2186 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, 2187 struct btrfs_path *path, 2188 void *ctx, bool ignore_offset) 2189 { 2190 int ret; 2191 u64 extent_item_pos; 2192 u64 flags = 0; 2193 struct btrfs_key found_key; 2194 int search_commit_root = path->search_commit_root; 2195 2196 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); 2197 btrfs_release_path(path); 2198 if (ret < 0) 2199 return ret; 2200 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) 2201 return -EINVAL; 2202 2203 extent_item_pos = logical - found_key.objectid; 2204 ret = iterate_extent_inodes(fs_info, found_key.objectid, 2205 extent_item_pos, search_commit_root, 2206 build_ino_list, ctx, ignore_offset); 2207 2208 return ret; 2209 } 2210 2211 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, 2212 struct extent_buffer *eb, struct inode_fs_paths *ipath); 2213 2214 static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath) 2215 { 2216 int ret = 0; 2217 int slot; 2218 u32 cur; 2219 u32 len; 2220 u32 name_len; 2221 u64 parent = 0; 2222 int found = 0; 2223 struct btrfs_root *fs_root = ipath->fs_root; 2224 struct btrfs_path *path = ipath->btrfs_path; 2225 struct extent_buffer *eb; 2226 struct btrfs_inode_ref *iref; 2227 struct btrfs_key found_key; 2228 2229 while (!ret) { 2230 ret = btrfs_find_item(fs_root, path, inum, 2231 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY, 2232 &found_key); 2233 2234 if (ret < 0) 2235 break; 2236 if (ret) { 2237 ret = found ? 0 : -ENOENT; 2238 break; 2239 } 2240 ++found; 2241 2242 parent = found_key.offset; 2243 slot = path->slots[0]; 2244 eb = btrfs_clone_extent_buffer(path->nodes[0]); 2245 if (!eb) { 2246 ret = -ENOMEM; 2247 break; 2248 } 2249 btrfs_release_path(path); 2250 2251 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); 2252 2253 for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) { 2254 name_len = btrfs_inode_ref_name_len(eb, iref); 2255 /* path must be released before calling iterate()! */ 2256 btrfs_debug(fs_root->fs_info, 2257 "following ref at offset %u for inode %llu in tree %llu", 2258 cur, found_key.objectid, 2259 fs_root->root_key.objectid); 2260 ret = inode_to_path(parent, name_len, 2261 (unsigned long)(iref + 1), eb, ipath); 2262 if (ret) 2263 break; 2264 len = sizeof(*iref) + name_len; 2265 iref = (struct btrfs_inode_ref *)((char *)iref + len); 2266 } 2267 free_extent_buffer(eb); 2268 } 2269 2270 btrfs_release_path(path); 2271 2272 return ret; 2273 } 2274 2275 static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath) 2276 { 2277 int ret; 2278 int slot; 2279 u64 offset = 0; 2280 u64 parent; 2281 int found = 0; 2282 struct btrfs_root *fs_root = ipath->fs_root; 2283 struct btrfs_path *path = ipath->btrfs_path; 2284 struct extent_buffer *eb; 2285 struct btrfs_inode_extref *extref; 2286 u32 item_size; 2287 u32 cur_offset; 2288 unsigned long ptr; 2289 2290 while (1) { 2291 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, 2292 &offset); 2293 if (ret < 0) 2294 break; 2295 if (ret) { 2296 ret = found ? 0 : -ENOENT; 2297 break; 2298 } 2299 ++found; 2300 2301 slot = path->slots[0]; 2302 eb = btrfs_clone_extent_buffer(path->nodes[0]); 2303 if (!eb) { 2304 ret = -ENOMEM; 2305 break; 2306 } 2307 btrfs_release_path(path); 2308 2309 item_size = btrfs_item_size(eb, slot); 2310 ptr = btrfs_item_ptr_offset(eb, slot); 2311 cur_offset = 0; 2312 2313 while (cur_offset < item_size) { 2314 u32 name_len; 2315 2316 extref = (struct btrfs_inode_extref *)(ptr + cur_offset); 2317 parent = btrfs_inode_extref_parent(eb, extref); 2318 name_len = btrfs_inode_extref_name_len(eb, extref); 2319 ret = inode_to_path(parent, name_len, 2320 (unsigned long)&extref->name, eb, ipath); 2321 if (ret) 2322 break; 2323 2324 cur_offset += btrfs_inode_extref_name_len(eb, extref); 2325 cur_offset += sizeof(*extref); 2326 } 2327 free_extent_buffer(eb); 2328 2329 offset++; 2330 } 2331 2332 btrfs_release_path(path); 2333 2334 return ret; 2335 } 2336 2337 /* 2338 * returns 0 if the path could be dumped (probably truncated) 2339 * returns <0 in case of an error 2340 */ 2341 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, 2342 struct extent_buffer *eb, struct inode_fs_paths *ipath) 2343 { 2344 char *fspath; 2345 char *fspath_min; 2346 int i = ipath->fspath->elem_cnt; 2347 const int s_ptr = sizeof(char *); 2348 u32 bytes_left; 2349 2350 bytes_left = ipath->fspath->bytes_left > s_ptr ? 2351 ipath->fspath->bytes_left - s_ptr : 0; 2352 2353 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; 2354 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, 2355 name_off, eb, inum, fspath_min, bytes_left); 2356 if (IS_ERR(fspath)) 2357 return PTR_ERR(fspath); 2358 2359 if (fspath > fspath_min) { 2360 ipath->fspath->val[i] = (u64)(unsigned long)fspath; 2361 ++ipath->fspath->elem_cnt; 2362 ipath->fspath->bytes_left = fspath - fspath_min; 2363 } else { 2364 ++ipath->fspath->elem_missed; 2365 ipath->fspath->bytes_missing += fspath_min - fspath; 2366 ipath->fspath->bytes_left = 0; 2367 } 2368 2369 return 0; 2370 } 2371 2372 /* 2373 * this dumps all file system paths to the inode into the ipath struct, provided 2374 * is has been created large enough. each path is zero-terminated and accessed 2375 * from ipath->fspath->val[i]. 2376 * when it returns, there are ipath->fspath->elem_cnt number of paths available 2377 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the 2378 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise, 2379 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would 2380 * have been needed to return all paths. 2381 */ 2382 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath) 2383 { 2384 int ret; 2385 int found_refs = 0; 2386 2387 ret = iterate_inode_refs(inum, ipath); 2388 if (!ret) 2389 ++found_refs; 2390 else if (ret != -ENOENT) 2391 return ret; 2392 2393 ret = iterate_inode_extrefs(inum, ipath); 2394 if (ret == -ENOENT && found_refs) 2395 return 0; 2396 2397 return ret; 2398 } 2399 2400 struct btrfs_data_container *init_data_container(u32 total_bytes) 2401 { 2402 struct btrfs_data_container *data; 2403 size_t alloc_bytes; 2404 2405 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); 2406 data = kvmalloc(alloc_bytes, GFP_KERNEL); 2407 if (!data) 2408 return ERR_PTR(-ENOMEM); 2409 2410 if (total_bytes >= sizeof(*data)) { 2411 data->bytes_left = total_bytes - sizeof(*data); 2412 data->bytes_missing = 0; 2413 } else { 2414 data->bytes_missing = sizeof(*data) - total_bytes; 2415 data->bytes_left = 0; 2416 } 2417 2418 data->elem_cnt = 0; 2419 data->elem_missed = 0; 2420 2421 return data; 2422 } 2423 2424 /* 2425 * allocates space to return multiple file system paths for an inode. 2426 * total_bytes to allocate are passed, note that space usable for actual path 2427 * information will be total_bytes - sizeof(struct inode_fs_paths). 2428 * the returned pointer must be freed with free_ipath() in the end. 2429 */ 2430 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, 2431 struct btrfs_path *path) 2432 { 2433 struct inode_fs_paths *ifp; 2434 struct btrfs_data_container *fspath; 2435 2436 fspath = init_data_container(total_bytes); 2437 if (IS_ERR(fspath)) 2438 return ERR_CAST(fspath); 2439 2440 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL); 2441 if (!ifp) { 2442 kvfree(fspath); 2443 return ERR_PTR(-ENOMEM); 2444 } 2445 2446 ifp->btrfs_path = path; 2447 ifp->fspath = fspath; 2448 ifp->fs_root = fs_root; 2449 2450 return ifp; 2451 } 2452 2453 void free_ipath(struct inode_fs_paths *ipath) 2454 { 2455 if (!ipath) 2456 return; 2457 kvfree(ipath->fspath); 2458 kfree(ipath); 2459 } 2460 2461 struct btrfs_backref_iter *btrfs_backref_iter_alloc( 2462 struct btrfs_fs_info *fs_info, gfp_t gfp_flag) 2463 { 2464 struct btrfs_backref_iter *ret; 2465 2466 ret = kzalloc(sizeof(*ret), gfp_flag); 2467 if (!ret) 2468 return NULL; 2469 2470 ret->path = btrfs_alloc_path(); 2471 if (!ret->path) { 2472 kfree(ret); 2473 return NULL; 2474 } 2475 2476 /* Current backref iterator only supports iteration in commit root */ 2477 ret->path->search_commit_root = 1; 2478 ret->path->skip_locking = 1; 2479 ret->fs_info = fs_info; 2480 2481 return ret; 2482 } 2483 2484 int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr) 2485 { 2486 struct btrfs_fs_info *fs_info = iter->fs_info; 2487 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); 2488 struct btrfs_path *path = iter->path; 2489 struct btrfs_extent_item *ei; 2490 struct btrfs_key key; 2491 int ret; 2492 2493 key.objectid = bytenr; 2494 key.type = BTRFS_METADATA_ITEM_KEY; 2495 key.offset = (u64)-1; 2496 iter->bytenr = bytenr; 2497 2498 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2499 if (ret < 0) 2500 return ret; 2501 if (ret == 0) { 2502 ret = -EUCLEAN; 2503 goto release; 2504 } 2505 if (path->slots[0] == 0) { 2506 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); 2507 ret = -EUCLEAN; 2508 goto release; 2509 } 2510 path->slots[0]--; 2511 2512 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2513 if ((key.type != BTRFS_EXTENT_ITEM_KEY && 2514 key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) { 2515 ret = -ENOENT; 2516 goto release; 2517 } 2518 memcpy(&iter->cur_key, &key, sizeof(key)); 2519 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], 2520 path->slots[0]); 2521 iter->end_ptr = (u32)(iter->item_ptr + 2522 btrfs_item_size(path->nodes[0], path->slots[0])); 2523 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 2524 struct btrfs_extent_item); 2525 2526 /* 2527 * Only support iteration on tree backref yet. 2528 * 2529 * This is an extra precaution for non skinny-metadata, where 2530 * EXTENT_ITEM is also used for tree blocks, that we can only use 2531 * extent flags to determine if it's a tree block. 2532 */ 2533 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) { 2534 ret = -ENOTSUPP; 2535 goto release; 2536 } 2537 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei)); 2538 2539 /* If there is no inline backref, go search for keyed backref */ 2540 if (iter->cur_ptr >= iter->end_ptr) { 2541 ret = btrfs_next_item(extent_root, path); 2542 2543 /* No inline nor keyed ref */ 2544 if (ret > 0) { 2545 ret = -ENOENT; 2546 goto release; 2547 } 2548 if (ret < 0) 2549 goto release; 2550 2551 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, 2552 path->slots[0]); 2553 if (iter->cur_key.objectid != bytenr || 2554 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY && 2555 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) { 2556 ret = -ENOENT; 2557 goto release; 2558 } 2559 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], 2560 path->slots[0]); 2561 iter->item_ptr = iter->cur_ptr; 2562 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size( 2563 path->nodes[0], path->slots[0])); 2564 } 2565 2566 return 0; 2567 release: 2568 btrfs_backref_iter_release(iter); 2569 return ret; 2570 } 2571 2572 /* 2573 * Go to the next backref item of current bytenr, can be either inlined or 2574 * keyed. 2575 * 2576 * Caller needs to check whether it's inline ref or not by iter->cur_key. 2577 * 2578 * Return 0 if we get next backref without problem. 2579 * Return >0 if there is no extra backref for this bytenr. 2580 * Return <0 if there is something wrong happened. 2581 */ 2582 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter) 2583 { 2584 struct extent_buffer *eb = btrfs_backref_get_eb(iter); 2585 struct btrfs_root *extent_root; 2586 struct btrfs_path *path = iter->path; 2587 struct btrfs_extent_inline_ref *iref; 2588 int ret; 2589 u32 size; 2590 2591 if (btrfs_backref_iter_is_inline_ref(iter)) { 2592 /* We're still inside the inline refs */ 2593 ASSERT(iter->cur_ptr < iter->end_ptr); 2594 2595 if (btrfs_backref_has_tree_block_info(iter)) { 2596 /* First tree block info */ 2597 size = sizeof(struct btrfs_tree_block_info); 2598 } else { 2599 /* Use inline ref type to determine the size */ 2600 int type; 2601 2602 iref = (struct btrfs_extent_inline_ref *) 2603 ((unsigned long)iter->cur_ptr); 2604 type = btrfs_extent_inline_ref_type(eb, iref); 2605 2606 size = btrfs_extent_inline_ref_size(type); 2607 } 2608 iter->cur_ptr += size; 2609 if (iter->cur_ptr < iter->end_ptr) 2610 return 0; 2611 2612 /* All inline items iterated, fall through */ 2613 } 2614 2615 /* We're at keyed items, there is no inline item, go to the next one */ 2616 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr); 2617 ret = btrfs_next_item(extent_root, iter->path); 2618 if (ret) 2619 return ret; 2620 2621 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]); 2622 if (iter->cur_key.objectid != iter->bytenr || 2623 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY && 2624 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY)) 2625 return 1; 2626 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], 2627 path->slots[0]); 2628 iter->cur_ptr = iter->item_ptr; 2629 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0], 2630 path->slots[0]); 2631 return 0; 2632 } 2633 2634 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info, 2635 struct btrfs_backref_cache *cache, int is_reloc) 2636 { 2637 int i; 2638 2639 cache->rb_root = RB_ROOT; 2640 for (i = 0; i < BTRFS_MAX_LEVEL; i++) 2641 INIT_LIST_HEAD(&cache->pending[i]); 2642 INIT_LIST_HEAD(&cache->changed); 2643 INIT_LIST_HEAD(&cache->detached); 2644 INIT_LIST_HEAD(&cache->leaves); 2645 INIT_LIST_HEAD(&cache->pending_edge); 2646 INIT_LIST_HEAD(&cache->useless_node); 2647 cache->fs_info = fs_info; 2648 cache->is_reloc = is_reloc; 2649 } 2650 2651 struct btrfs_backref_node *btrfs_backref_alloc_node( 2652 struct btrfs_backref_cache *cache, u64 bytenr, int level) 2653 { 2654 struct btrfs_backref_node *node; 2655 2656 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL); 2657 node = kzalloc(sizeof(*node), GFP_NOFS); 2658 if (!node) 2659 return node; 2660 2661 INIT_LIST_HEAD(&node->list); 2662 INIT_LIST_HEAD(&node->upper); 2663 INIT_LIST_HEAD(&node->lower); 2664 RB_CLEAR_NODE(&node->rb_node); 2665 cache->nr_nodes++; 2666 node->level = level; 2667 node->bytenr = bytenr; 2668 2669 return node; 2670 } 2671 2672 struct btrfs_backref_edge *btrfs_backref_alloc_edge( 2673 struct btrfs_backref_cache *cache) 2674 { 2675 struct btrfs_backref_edge *edge; 2676 2677 edge = kzalloc(sizeof(*edge), GFP_NOFS); 2678 if (edge) 2679 cache->nr_edges++; 2680 return edge; 2681 } 2682 2683 /* 2684 * Drop the backref node from cache, also cleaning up all its 2685 * upper edges and any uncached nodes in the path. 2686 * 2687 * This cleanup happens bottom up, thus the node should either 2688 * be the lowest node in the cache or a detached node. 2689 */ 2690 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache, 2691 struct btrfs_backref_node *node) 2692 { 2693 struct btrfs_backref_node *upper; 2694 struct btrfs_backref_edge *edge; 2695 2696 if (!node) 2697 return; 2698 2699 BUG_ON(!node->lowest && !node->detached); 2700 while (!list_empty(&node->upper)) { 2701 edge = list_entry(node->upper.next, struct btrfs_backref_edge, 2702 list[LOWER]); 2703 upper = edge->node[UPPER]; 2704 list_del(&edge->list[LOWER]); 2705 list_del(&edge->list[UPPER]); 2706 btrfs_backref_free_edge(cache, edge); 2707 2708 /* 2709 * Add the node to leaf node list if no other child block 2710 * cached. 2711 */ 2712 if (list_empty(&upper->lower)) { 2713 list_add_tail(&upper->lower, &cache->leaves); 2714 upper->lowest = 1; 2715 } 2716 } 2717 2718 btrfs_backref_drop_node(cache, node); 2719 } 2720 2721 /* 2722 * Release all nodes/edges from current cache 2723 */ 2724 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache) 2725 { 2726 struct btrfs_backref_node *node; 2727 int i; 2728 2729 while (!list_empty(&cache->detached)) { 2730 node = list_entry(cache->detached.next, 2731 struct btrfs_backref_node, list); 2732 btrfs_backref_cleanup_node(cache, node); 2733 } 2734 2735 while (!list_empty(&cache->leaves)) { 2736 node = list_entry(cache->leaves.next, 2737 struct btrfs_backref_node, lower); 2738 btrfs_backref_cleanup_node(cache, node); 2739 } 2740 2741 cache->last_trans = 0; 2742 2743 for (i = 0; i < BTRFS_MAX_LEVEL; i++) 2744 ASSERT(list_empty(&cache->pending[i])); 2745 ASSERT(list_empty(&cache->pending_edge)); 2746 ASSERT(list_empty(&cache->useless_node)); 2747 ASSERT(list_empty(&cache->changed)); 2748 ASSERT(list_empty(&cache->detached)); 2749 ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); 2750 ASSERT(!cache->nr_nodes); 2751 ASSERT(!cache->nr_edges); 2752 } 2753 2754 /* 2755 * Handle direct tree backref 2756 * 2757 * Direct tree backref means, the backref item shows its parent bytenr 2758 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined). 2759 * 2760 * @ref_key: The converted backref key. 2761 * For keyed backref, it's the item key. 2762 * For inlined backref, objectid is the bytenr, 2763 * type is btrfs_inline_ref_type, offset is 2764 * btrfs_inline_ref_offset. 2765 */ 2766 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache, 2767 struct btrfs_key *ref_key, 2768 struct btrfs_backref_node *cur) 2769 { 2770 struct btrfs_backref_edge *edge; 2771 struct btrfs_backref_node *upper; 2772 struct rb_node *rb_node; 2773 2774 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); 2775 2776 /* Only reloc root uses backref pointing to itself */ 2777 if (ref_key->objectid == ref_key->offset) { 2778 struct btrfs_root *root; 2779 2780 cur->is_reloc_root = 1; 2781 /* Only reloc backref cache cares about a specific root */ 2782 if (cache->is_reloc) { 2783 root = find_reloc_root(cache->fs_info, cur->bytenr); 2784 if (!root) 2785 return -ENOENT; 2786 cur->root = root; 2787 } else { 2788 /* 2789 * For generic purpose backref cache, reloc root node 2790 * is useless. 2791 */ 2792 list_add(&cur->list, &cache->useless_node); 2793 } 2794 return 0; 2795 } 2796 2797 edge = btrfs_backref_alloc_edge(cache); 2798 if (!edge) 2799 return -ENOMEM; 2800 2801 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); 2802 if (!rb_node) { 2803 /* Parent node not yet cached */ 2804 upper = btrfs_backref_alloc_node(cache, ref_key->offset, 2805 cur->level + 1); 2806 if (!upper) { 2807 btrfs_backref_free_edge(cache, edge); 2808 return -ENOMEM; 2809 } 2810 2811 /* 2812 * Backrefs for the upper level block isn't cached, add the 2813 * block to pending list 2814 */ 2815 list_add_tail(&edge->list[UPPER], &cache->pending_edge); 2816 } else { 2817 /* Parent node already cached */ 2818 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node); 2819 ASSERT(upper->checked); 2820 INIT_LIST_HEAD(&edge->list[UPPER]); 2821 } 2822 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); 2823 return 0; 2824 } 2825 2826 /* 2827 * Handle indirect tree backref 2828 * 2829 * Indirect tree backref means, we only know which tree the node belongs to. 2830 * We still need to do a tree search to find out the parents. This is for 2831 * TREE_BLOCK_REF backref (keyed or inlined). 2832 * 2833 * @ref_key: The same as @ref_key in handle_direct_tree_backref() 2834 * @tree_key: The first key of this tree block. 2835 * @path: A clean (released) path, to avoid allocating path every time 2836 * the function get called. 2837 */ 2838 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache, 2839 struct btrfs_path *path, 2840 struct btrfs_key *ref_key, 2841 struct btrfs_key *tree_key, 2842 struct btrfs_backref_node *cur) 2843 { 2844 struct btrfs_fs_info *fs_info = cache->fs_info; 2845 struct btrfs_backref_node *upper; 2846 struct btrfs_backref_node *lower; 2847 struct btrfs_backref_edge *edge; 2848 struct extent_buffer *eb; 2849 struct btrfs_root *root; 2850 struct rb_node *rb_node; 2851 int level; 2852 bool need_check = true; 2853 int ret; 2854 2855 root = btrfs_get_fs_root(fs_info, ref_key->offset, false); 2856 if (IS_ERR(root)) 2857 return PTR_ERR(root); 2858 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 2859 cur->cowonly = 1; 2860 2861 if (btrfs_root_level(&root->root_item) == cur->level) { 2862 /* Tree root */ 2863 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); 2864 /* 2865 * For reloc backref cache, we may ignore reloc root. But for 2866 * general purpose backref cache, we can't rely on 2867 * btrfs_should_ignore_reloc_root() as it may conflict with 2868 * current running relocation and lead to missing root. 2869 * 2870 * For general purpose backref cache, reloc root detection is 2871 * completely relying on direct backref (key->offset is parent 2872 * bytenr), thus only do such check for reloc cache. 2873 */ 2874 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { 2875 btrfs_put_root(root); 2876 list_add(&cur->list, &cache->useless_node); 2877 } else { 2878 cur->root = root; 2879 } 2880 return 0; 2881 } 2882 2883 level = cur->level + 1; 2884 2885 /* Search the tree to find parent blocks referring to the block */ 2886 path->search_commit_root = 1; 2887 path->skip_locking = 1; 2888 path->lowest_level = level; 2889 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0); 2890 path->lowest_level = 0; 2891 if (ret < 0) { 2892 btrfs_put_root(root); 2893 return ret; 2894 } 2895 if (ret > 0 && path->slots[level] > 0) 2896 path->slots[level]--; 2897 2898 eb = path->nodes[level]; 2899 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { 2900 btrfs_err(fs_info, 2901 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)", 2902 cur->bytenr, level - 1, root->root_key.objectid, 2903 tree_key->objectid, tree_key->type, tree_key->offset); 2904 btrfs_put_root(root); 2905 ret = -ENOENT; 2906 goto out; 2907 } 2908 lower = cur; 2909 2910 /* Add all nodes and edges in the path */ 2911 for (; level < BTRFS_MAX_LEVEL; level++) { 2912 if (!path->nodes[level]) { 2913 ASSERT(btrfs_root_bytenr(&root->root_item) == 2914 lower->bytenr); 2915 /* Same as previous should_ignore_reloc_root() call */ 2916 if (btrfs_should_ignore_reloc_root(root) && 2917 cache->is_reloc) { 2918 btrfs_put_root(root); 2919 list_add(&lower->list, &cache->useless_node); 2920 } else { 2921 lower->root = root; 2922 } 2923 break; 2924 } 2925 2926 edge = btrfs_backref_alloc_edge(cache); 2927 if (!edge) { 2928 btrfs_put_root(root); 2929 ret = -ENOMEM; 2930 goto out; 2931 } 2932 2933 eb = path->nodes[level]; 2934 rb_node = rb_simple_search(&cache->rb_root, eb->start); 2935 if (!rb_node) { 2936 upper = btrfs_backref_alloc_node(cache, eb->start, 2937 lower->level + 1); 2938 if (!upper) { 2939 btrfs_put_root(root); 2940 btrfs_backref_free_edge(cache, edge); 2941 ret = -ENOMEM; 2942 goto out; 2943 } 2944 upper->owner = btrfs_header_owner(eb); 2945 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 2946 upper->cowonly = 1; 2947 2948 /* 2949 * If we know the block isn't shared we can avoid 2950 * checking its backrefs. 2951 */ 2952 if (btrfs_block_can_be_shared(root, eb)) 2953 upper->checked = 0; 2954 else 2955 upper->checked = 1; 2956 2957 /* 2958 * Add the block to pending list if we need to check its 2959 * backrefs, we only do this once while walking up a 2960 * tree as we will catch anything else later on. 2961 */ 2962 if (!upper->checked && need_check) { 2963 need_check = false; 2964 list_add_tail(&edge->list[UPPER], 2965 &cache->pending_edge); 2966 } else { 2967 if (upper->checked) 2968 need_check = true; 2969 INIT_LIST_HEAD(&edge->list[UPPER]); 2970 } 2971 } else { 2972 upper = rb_entry(rb_node, struct btrfs_backref_node, 2973 rb_node); 2974 ASSERT(upper->checked); 2975 INIT_LIST_HEAD(&edge->list[UPPER]); 2976 if (!upper->owner) 2977 upper->owner = btrfs_header_owner(eb); 2978 } 2979 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); 2980 2981 if (rb_node) { 2982 btrfs_put_root(root); 2983 break; 2984 } 2985 lower = upper; 2986 upper = NULL; 2987 } 2988 out: 2989 btrfs_release_path(path); 2990 return ret; 2991 } 2992 2993 /* 2994 * Add backref node @cur into @cache. 2995 * 2996 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper 2997 * links aren't yet bi-directional. Needs to finish such links. 2998 * Use btrfs_backref_finish_upper_links() to finish such linkage. 2999 * 3000 * @path: Released path for indirect tree backref lookup 3001 * @iter: Released backref iter for extent tree search 3002 * @node_key: The first key of the tree block 3003 */ 3004 int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache, 3005 struct btrfs_path *path, 3006 struct btrfs_backref_iter *iter, 3007 struct btrfs_key *node_key, 3008 struct btrfs_backref_node *cur) 3009 { 3010 struct btrfs_fs_info *fs_info = cache->fs_info; 3011 struct btrfs_backref_edge *edge; 3012 struct btrfs_backref_node *exist; 3013 int ret; 3014 3015 ret = btrfs_backref_iter_start(iter, cur->bytenr); 3016 if (ret < 0) 3017 return ret; 3018 /* 3019 * We skip the first btrfs_tree_block_info, as we don't use the key 3020 * stored in it, but fetch it from the tree block 3021 */ 3022 if (btrfs_backref_has_tree_block_info(iter)) { 3023 ret = btrfs_backref_iter_next(iter); 3024 if (ret < 0) 3025 goto out; 3026 /* No extra backref? This means the tree block is corrupted */ 3027 if (ret > 0) { 3028 ret = -EUCLEAN; 3029 goto out; 3030 } 3031 } 3032 WARN_ON(cur->checked); 3033 if (!list_empty(&cur->upper)) { 3034 /* 3035 * The backref was added previously when processing backref of 3036 * type BTRFS_TREE_BLOCK_REF_KEY 3037 */ 3038 ASSERT(list_is_singular(&cur->upper)); 3039 edge = list_entry(cur->upper.next, struct btrfs_backref_edge, 3040 list[LOWER]); 3041 ASSERT(list_empty(&edge->list[UPPER])); 3042 exist = edge->node[UPPER]; 3043 /* 3044 * Add the upper level block to pending list if we need check 3045 * its backrefs 3046 */ 3047 if (!exist->checked) 3048 list_add_tail(&edge->list[UPPER], &cache->pending_edge); 3049 } else { 3050 exist = NULL; 3051 } 3052 3053 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) { 3054 struct extent_buffer *eb; 3055 struct btrfs_key key; 3056 int type; 3057 3058 cond_resched(); 3059 eb = btrfs_backref_get_eb(iter); 3060 3061 key.objectid = iter->bytenr; 3062 if (btrfs_backref_iter_is_inline_ref(iter)) { 3063 struct btrfs_extent_inline_ref *iref; 3064 3065 /* Update key for inline backref */ 3066 iref = (struct btrfs_extent_inline_ref *) 3067 ((unsigned long)iter->cur_ptr); 3068 type = btrfs_get_extent_inline_ref_type(eb, iref, 3069 BTRFS_REF_TYPE_BLOCK); 3070 if (type == BTRFS_REF_TYPE_INVALID) { 3071 ret = -EUCLEAN; 3072 goto out; 3073 } 3074 key.type = type; 3075 key.offset = btrfs_extent_inline_ref_offset(eb, iref); 3076 } else { 3077 key.type = iter->cur_key.type; 3078 key.offset = iter->cur_key.offset; 3079 } 3080 3081 /* 3082 * Parent node found and matches current inline ref, no need to 3083 * rebuild this node for this inline ref 3084 */ 3085 if (exist && 3086 ((key.type == BTRFS_TREE_BLOCK_REF_KEY && 3087 exist->owner == key.offset) || 3088 (key.type == BTRFS_SHARED_BLOCK_REF_KEY && 3089 exist->bytenr == key.offset))) { 3090 exist = NULL; 3091 continue; 3092 } 3093 3094 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ 3095 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { 3096 ret = handle_direct_tree_backref(cache, &key, cur); 3097 if (ret < 0) 3098 goto out; 3099 continue; 3100 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) { 3101 ret = -EINVAL; 3102 btrfs_print_v0_err(fs_info); 3103 btrfs_handle_fs_error(fs_info, ret, NULL); 3104 goto out; 3105 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { 3106 continue; 3107 } 3108 3109 /* 3110 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset 3111 * means the root objectid. We need to search the tree to get 3112 * its parent bytenr. 3113 */ 3114 ret = handle_indirect_tree_backref(cache, path, &key, node_key, 3115 cur); 3116 if (ret < 0) 3117 goto out; 3118 } 3119 ret = 0; 3120 cur->checked = 1; 3121 WARN_ON(exist); 3122 out: 3123 btrfs_backref_iter_release(iter); 3124 return ret; 3125 } 3126 3127 /* 3128 * Finish the upwards linkage created by btrfs_backref_add_tree_node() 3129 */ 3130 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, 3131 struct btrfs_backref_node *start) 3132 { 3133 struct list_head *useless_node = &cache->useless_node; 3134 struct btrfs_backref_edge *edge; 3135 struct rb_node *rb_node; 3136 LIST_HEAD(pending_edge); 3137 3138 ASSERT(start->checked); 3139 3140 /* Insert this node to cache if it's not COW-only */ 3141 if (!start->cowonly) { 3142 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, 3143 &start->rb_node); 3144 if (rb_node) 3145 btrfs_backref_panic(cache->fs_info, start->bytenr, 3146 -EEXIST); 3147 list_add_tail(&start->lower, &cache->leaves); 3148 } 3149 3150 /* 3151 * Use breadth first search to iterate all related edges. 3152 * 3153 * The starting points are all the edges of this node 3154 */ 3155 list_for_each_entry(edge, &start->upper, list[LOWER]) 3156 list_add_tail(&edge->list[UPPER], &pending_edge); 3157 3158 while (!list_empty(&pending_edge)) { 3159 struct btrfs_backref_node *upper; 3160 struct btrfs_backref_node *lower; 3161 3162 edge = list_first_entry(&pending_edge, 3163 struct btrfs_backref_edge, list[UPPER]); 3164 list_del_init(&edge->list[UPPER]); 3165 upper = edge->node[UPPER]; 3166 lower = edge->node[LOWER]; 3167 3168 /* Parent is detached, no need to keep any edges */ 3169 if (upper->detached) { 3170 list_del(&edge->list[LOWER]); 3171 btrfs_backref_free_edge(cache, edge); 3172 3173 /* Lower node is orphan, queue for cleanup */ 3174 if (list_empty(&lower->upper)) 3175 list_add(&lower->list, useless_node); 3176 continue; 3177 } 3178 3179 /* 3180 * All new nodes added in current build_backref_tree() haven't 3181 * been linked to the cache rb tree. 3182 * So if we have upper->rb_node populated, this means a cache 3183 * hit. We only need to link the edge, as @upper and all its 3184 * parents have already been linked. 3185 */ 3186 if (!RB_EMPTY_NODE(&upper->rb_node)) { 3187 if (upper->lowest) { 3188 list_del_init(&upper->lower); 3189 upper->lowest = 0; 3190 } 3191 3192 list_add_tail(&edge->list[UPPER], &upper->lower); 3193 continue; 3194 } 3195 3196 /* Sanity check, we shouldn't have any unchecked nodes */ 3197 if (!upper->checked) { 3198 ASSERT(0); 3199 return -EUCLEAN; 3200 } 3201 3202 /* Sanity check, COW-only node has non-COW-only parent */ 3203 if (start->cowonly != upper->cowonly) { 3204 ASSERT(0); 3205 return -EUCLEAN; 3206 } 3207 3208 /* Only cache non-COW-only (subvolume trees) tree blocks */ 3209 if (!upper->cowonly) { 3210 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, 3211 &upper->rb_node); 3212 if (rb_node) { 3213 btrfs_backref_panic(cache->fs_info, 3214 upper->bytenr, -EEXIST); 3215 return -EUCLEAN; 3216 } 3217 } 3218 3219 list_add_tail(&edge->list[UPPER], &upper->lower); 3220 3221 /* 3222 * Also queue all the parent edges of this uncached node 3223 * to finish the upper linkage 3224 */ 3225 list_for_each_entry(edge, &upper->upper, list[LOWER]) 3226 list_add_tail(&edge->list[UPPER], &pending_edge); 3227 } 3228 return 0; 3229 } 3230 3231 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache, 3232 struct btrfs_backref_node *node) 3233 { 3234 struct btrfs_backref_node *lower; 3235 struct btrfs_backref_node *upper; 3236 struct btrfs_backref_edge *edge; 3237 3238 while (!list_empty(&cache->useless_node)) { 3239 lower = list_first_entry(&cache->useless_node, 3240 struct btrfs_backref_node, list); 3241 list_del_init(&lower->list); 3242 } 3243 while (!list_empty(&cache->pending_edge)) { 3244 edge = list_first_entry(&cache->pending_edge, 3245 struct btrfs_backref_edge, list[UPPER]); 3246 list_del(&edge->list[UPPER]); 3247 list_del(&edge->list[LOWER]); 3248 lower = edge->node[LOWER]; 3249 upper = edge->node[UPPER]; 3250 btrfs_backref_free_edge(cache, edge); 3251 3252 /* 3253 * Lower is no longer linked to any upper backref nodes and 3254 * isn't in the cache, we can free it ourselves. 3255 */ 3256 if (list_empty(&lower->upper) && 3257 RB_EMPTY_NODE(&lower->rb_node)) 3258 list_add(&lower->list, &cache->useless_node); 3259 3260 if (!RB_EMPTY_NODE(&upper->rb_node)) 3261 continue; 3262 3263 /* Add this guy's upper edges to the list to process */ 3264 list_for_each_entry(edge, &upper->upper, list[LOWER]) 3265 list_add_tail(&edge->list[UPPER], 3266 &cache->pending_edge); 3267 if (list_empty(&upper->upper)) 3268 list_add(&upper->list, &cache->useless_node); 3269 } 3270 3271 while (!list_empty(&cache->useless_node)) { 3272 lower = list_first_entry(&cache->useless_node, 3273 struct btrfs_backref_node, list); 3274 list_del_init(&lower->list); 3275 if (lower == node) 3276 node = NULL; 3277 btrfs_backref_drop_node(cache, lower); 3278 } 3279 3280 btrfs_backref_cleanup_node(cache, node); 3281 ASSERT(list_empty(&cache->useless_node) && 3282 list_empty(&cache->pending_edge)); 3283 } 3284