/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/*
 * this structure records all refs encountered on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key;
	int level;
	int count;
	u64 parent;
	u64 wanted_disk_byte;
};

static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level, u64 parent,
			    u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key = *key;
	else
		memset(&ref->key, 0, sizeof(ref->key));

	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}
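
/*
 * For illustration, the two flavors of prelim refs as they are created by
 * the __add_*_refs() helpers further down: a shared backref is "direct", it
 * already names the parent block, so only ->parent is set,
 *
 *	__add_prelim_ref(prefs, 0, NULL, 0, parent_bytenr, bytenr, count);
 *
 * while a tree/data backref is "indirect", it names (root_id, key, level)
 * and leaves ->parent 0 until __resolve_indirect_refs() fills it in:
 *
 *	__add_prelim_ref(prefs, root_id, &key, 0, 0, bytenr, count);
 */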

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct extent_buffer *eb, int level,
			   u64 wanted_objectid, u64 wanted_disk_byte)
{
	int ret;
	int slot;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_byte;

add_parent:
	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
	if (ret < 0)
		return ret;

	if (level != 0)
		return 0;

	/*
	 * if the current leaf is full of EXTENT_DATA items, we must
	 * check the next one if that holds a reference as well.
	 * ref->count cannot be used to skip this check.
	 * repeat this until we don't find any additional EXTENT_DATA items.
	 */
	while (1) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		eb = path->nodes[0];
		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
			btrfs_item_key_to_cpu(eb, &key, slot);
			if (key.objectid != wanted_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY)
				return 0;
			fi = btrfs_item_ptr(eb, slot,
					    struct btrfs_file_extent_item);
			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
			if (disk_byte == wanted_disk_byte)
				goto add_parent;
		}
	}

	return 0;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  int search_commit_root,
				  struct __prelim_ref *ref,
				  struct ulist *parents)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct btrfs_key key = {0};
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	rcu_read_lock();
	root_level = btrfs_header_level(root->node);
	rcu_read_unlock();

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key.objectid, ref->key.type,
		 (unsigned long long)ref->key.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	if (!eb) {
		WARN_ON(1);
		ret = 1;
		goto out;
	}

	if (level == 0) {
		if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
	}

	/* the last two parameters will only be used for level == 0 */
	ret = add_all_parents(root, path, parents, eb, level, key.objectid,
			      ref->wanted_disk_byte);
out:
	btrfs_free_path(path);
	return ret;
}
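
/*
 * To make the resolution step concrete (numbers made up for illustration):
 * for an indirect data ref (root 257, key (261 EXTENT_DATA 0), level 0),
 * __resolve_indirect_ref() walks the tree of root 257 down to level 0 for
 * that key. Every leaf holding a matching EXTENT_DATA item that points at
 * wanted_disk_byte is a parent, and add_all_parents() collects each such
 * eb->start into the parents ulist. For level > 0, the single node found at
 * that level is the parent.
 */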

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   int search_commit_root,
				   struct list_head *head)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, search_commit_root,
					     ref, parents);
		if (err) {
			if (ret == 0)
				ret = err;
			continue;
		}

		/* we put the first parent into the ref at hand */
		node = ulist_next(parents, NULL);
		ref->parent = node ? node->val : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, node))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				break;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 * mode = 2: merge identical parents
 */
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		if (mode == 1 && ref1->key.type == 0)
			continue;
		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (memcmp(&ref1->key, &ref2->key,
					   sizeof(ref1->key)) ||
				    ref1->level != ref2->level ||
				    ref1->root_id != ref2->root_id)
					continue;
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}

	}
	return 0;
}
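
/*
 * A made-up example of the two merge modes above: given the prelim refs
 *
 *	A: (root 5, key K, level 0, count 1, parent 0)
 *	B: (root 5, key K, level 0, count 2, parent 0)
 *	C: (root 7, key K, level 0, count 1, parent 0)
 *
 * mode 1 folds B into A (same root/key/level), leaving A with count 3 and C
 * untouched. After indirect refs have been resolved, mode 2 folds whatever
 * entries ended up with the same ->parent bytenr, so the final list holds
 * one entry per parent block with an accumulated count.
 */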

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct btrfs_key *info_key,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(info_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}
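
/*
 * The sgn factor turns each queued ref into a signed count adjustment. For
 * illustration: if the same extent has a queued ADD with ref_mod 2 and a
 * queued DROP with ref_mod 1 for the same (root, key), the prelim refs enter
 * the list with counts +2 and -1, and __merge_refs() later nets them out to
 * a single entry with count 1 (numbers are made up).
 */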

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     struct btrfs_key *info_key, int *info_level,
			     struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0] - 1;

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;
		struct btrfs_disk_key disk_key;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		btrfs_tree_block_key(leaf, info, &disk_key);
		btrfs_disk_key_to_cpu(info_key, &disk_key);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       *info_level + 1, offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, info_key,
					       *info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
					       count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    struct btrfs_key *info_key, int info_level,
			    struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       info_level + 1, key.offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, info_key,
					       info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}
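
/*
 * Rough sketch of the resolution pipeline implemented by find_parent_nodes()
 * below (phases, not literal code):
 *
 *	1. collect prelim refs: inline refs from the EXTENT_ITEM, keyed refs
 *	   that follow it in the extent tree, and pending delayed refs
 *	2. __merge_refs(mode 1): fold duplicates by (root, key, level)
 *	3. __resolve_indirect_refs(): turn (root, key, level) into parent
 *	   bytenrs
 *	4. __merge_refs(mode 2): fold duplicates by parent
 *	5. every surviving ref with a parent goes into the refs ulist; every
 *	   ref without one names a tree root and goes into the roots ulist
 */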

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 seq, struct ulist *refs, struct ulist *roots)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_key info_key = { 0 };
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, seq, &info_key,
						 &prefs_delayed);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		leaf = path->nodes[0];
		slot = path->slots[0] - 1;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_key, &info_level,
						&prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       &info_key, info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	/*
	 * when adding the delayed refs above, the info_key might not have
	 * been known yet. Go over the list and replace the missing keys
	 */
	list_for_each_entry(ref, &prefs_delayed, list) {
		if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
			memcpy(&ref->key, &info_key, sizeof(ref->key));
	}
	list_splice_init(&prefs_delayed, &prefs);

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		if (ref->count < 0)
			WARN_ON(1);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		if (ref->count && ref->parent) {
			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		kfree(ref);
	}

out:
	if (head)
		mutex_unlock(&head->mutex);
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr
 * and offset. The leafs are stored in the newly allocated *leafs ulist,
 * which must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 num_bytes, u64 seq, struct ulist **leafs)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		ulist_free(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, seq,
					tmp, *roots);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, node);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}


static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
		    struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
			    &key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			  struct btrfs_path *path,
			  struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
			    found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			  struct btrfs_inode_ref *iref,
			  struct extent_buffer *eb_in, u64 parent,
			  char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   (unsigned long)(iref + 1), len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
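
/*
 * A made-up example of the backwards fill: resolving "a/b/c" into a 16 byte
 * dest writes the terminating '\0' at dest[15], then "c" at dest[14], '/' at
 * dest[13], "b" at dest[12], '/' at dest[11] and "a" at dest[10], and
 * returns dest + 10. With size 4, the same walk keeps decrementing
 * bytes_left below zero without writing and returns dest - 2: together with
 * the 4 bytes provided, the caller can tell the full path needs 6 bytes.
 */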

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
				  0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr,
				   struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
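
/*
 * Typical shape of a caller of the iterator above (a sketch;
 * tree_backref_for_extent() below is the real in-file user):
 *
 *	unsigned long ptr = 0;
 *	struct btrfs_extent_inline_ref *eiref;
 *	int type, ret;
 *
 *	do {
 *		ret = __get_extent_inline_ref(&ptr, eb, ei, item_size,
 *					      &eiref, &type);
 *		if (ret < 0)
 *			break;			// error
 *		// ... inspect eiref and type here ...
 *	} while (ret == 0);	// ret == 1 means the last ref was handled
 */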

/*
 * reads the tree block backref for an extent. tree level and root are
 * returned through out_level and out_root. ptr must point to a 0 value for
 * the first call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
			     u64 orig_extent_item_objectid,
			     u64 extent_item_pos, u64 root,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb;
	int slot;
	int nritems;
	int ret = 0;
	int extent_type;
	u64 data_offset;
	u64 data_len;

	eb = read_tree_block(fs_info->tree_root, logical,
			     fs_info->tree_root->leafsize, 0);
	if (!eb)
		return -EIO;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != orig_extent_item_objectid)
			continue;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			continue;

		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", orig_extent_item_objectid,
			 key.objectid, key.offset, root);
		ret = iterate(key.objectid,
			      key.offset + (extent_item_pos - data_offset),
			      root, ctx);
		if (ret) {
			pr_debug("stopping iteration because ret=%d\n", ret);
			break;
		}
	}

	free_extent_buffer(eb);

	return ret;
}
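
/*
 * Worked example for the file offset computation above (made-up numbers): a
 * file extent item at key (inum EXTENT_DATA 1048576) references our extent
 * with data_offset 4096, i.e. the file skips the first 4096 bytes of the
 * on-disk extent. For extent_item_pos 8192, the iterate callback is invoked
 * with file offset 1048576 + (8192 - 4096) = 1052672.
 */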

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct btrfs_trans_handle *trans;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list seq_elem = {};
	struct btrfs_delayed_ref_root *delayed_refs = NULL;

	pr_debug("resolving all inodes for extent %llu\n",
		 extent_item_objectid);

	if (search_commit_root) {
		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
	} else {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
		spin_unlock(&delayed_refs->lock);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   extent_item_pos, seq_elem.seq,
				   &refs);

	if (ret)
		goto out;

	while (!ret && (ref_node = ulist_next(refs, ref_node))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
					   seq_elem.seq, &roots);
		if (ret)
			break;
		while (!ret && (root_node = ulist_next(roots, root_node))) {
			pr_debug("root %llu references leaf %llu\n",
				 root_node->val, ref_node->val);
			ret = iterate_leaf_refs(fs_info, ref_node->val,
						extent_item_objectid,
						extent_item_pos,
						root_node->val,
						iterate, ctx);
		}
	}

	ulist_free(refs);
	ulist_free(roots);
out:
	if (!search_commit_root) {
		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path,
				  &found_key);
	btrfs_release_path(path);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx);

	return ret;
}
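
/*
 * Shape of a typical caller, sketched with hypothetical names (my_inode_cb
 * and my_ctx are illustrations, not symbols from this file):
 *
 *	static int my_inode_cb(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		// called once per (inode, file offset, root) that uses the
 *		// extent; a non-zero return value stops the iteration
 *		return 0;
 *	}
 *
 *	ret = iterate_extent_inodes(fs_info, extent_item_objectid,
 *				    extent_item_pos, 1, my_inode_cb, my_ctx);
 *
 * passing 1 for search_commit_root avoids joining a transaction and
 * resolves against the last committed state instead.
 */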

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path,
			 iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		path->leave_spinning = 1;
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root,
				     path, &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu "
				 "in tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
			ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
			      inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		pr_debug("path resolved: %s\n", fspath);
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		pr_debug("missed path, not enough space. missing bytes: %lu, "
			 "constructed so far: %s\n",
			 (unsigned long)(fspath_min - fspath), fspath_min);
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
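
/*
 * For illustration, the layout inode_to_path() maintains inside the fspath
 * data container: pointer slots grow from the front of ->val, the strings
 * produced by iref_to_path() are packed in backwards from the end, and
 * bytes_left tracks the gap between the two:
 *
 *	val: [ptr 0][ptr 1][free slot][ ... gap ... ][string 1][string 0]
 *
 * val[0] points at "string 0", val[1] at "string 1", and so on. When the
 * gap is exhausted, elem_missed/bytes_missing record what did not fit.
 */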

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths
 * available in ipath->fspath->val[]. when the allocated space wasn't
 * sufficient, the number of missed paths is recorded in
 * ipath->fspath->elem_missed, otherwise it's zero.
 * ipath->fspath->bytes_missing holds the number of bytes that would have
 * been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kfree(ipath->fspath);
	kfree(ipath);
}
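
/*
 * Lifecycle of the ipath interface, sketched with a made-up buffer size
 * (the ino-to-path ioctl is an in-tree user of this interface):
 *
 *	struct inode_fs_paths *ipath;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *	ret = paths_from_inode(inum, ipath);
 *	// on success, ipath->fspath->val[0 .. elem_cnt-1] hold the paths;
 *	// elem_missed/bytes_missing tell whether 4096 bytes were enough
 *	free_ipath(ipath);
 */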