/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key;
	int level;
	int count;
	u64 parent;
	u64 wanted_disk_byte;
};

static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level, u64 parent,
			    u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key = *key;
	else
		memset(&ref->key, 0, sizeof(ref->key));

	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct extent_buffer *eb, int level,
			   u64 wanted_objectid, u64 wanted_disk_byte)
{
	int ret;
	int slot;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_byte;

add_parent:
	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
	if (ret < 0)
		return ret;

	if (level != 0)
		return 0;

	/*
	 * if the current leaf is full of EXTENT_DATA items, we must
	 * check whether the next leaf holds a reference as well.
	 * ref->count cannot be used to skip this check.
	 * repeat this until we don't find any additional EXTENT_DATA items.
	 */
	while (1) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		eb = path->nodes[0];
		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
			btrfs_item_key_to_cpu(eb, &key, slot);
			if (key.objectid != wanted_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY)
				return 0;
			fi = btrfs_item_ptr(eb, slot,
					    struct btrfs_file_extent_item);
			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
			if (disk_byte == wanted_disk_byte)
				goto add_parent;
		}
	}

	return 0;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  int search_commit_root,
				  struct __prelim_ref *ref,
				  struct ulist *parents)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct btrfs_key key = {0};
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	rcu_read_lock();
	root_level = btrfs_header_level(root->node);
	rcu_read_unlock();

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key.objectid, ref->key.type,
		 (unsigned long long)ref->key.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	if (!eb) {
		WARN_ON(1);
		ret = 1;
		goto out;
	}

	if (level == 0) {
		if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
	}

	/* the last two parameters will only be used for level == 0 */
	ret = add_all_parents(root, path, parents, eb, level, key.objectid,
			      ref->wanted_disk_byte);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   int search_commit_root,
				   struct list_head *head)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, search_commit_root,
					     ref, parents);
		if (err) {
			if (ret == 0)
				ret = err;
			continue;
		}

		/* we put the first parent into the ref at hand */
		node = ulist_next(parents, NULL);
		ref->parent = node ? node->val : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, node))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				break;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 * mode = 2: merge identical parents
 */
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		if (mode == 1 && ref1->key.type == 0)
			continue;
		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (memcmp(&ref1->key, &ref2->key,
					   sizeof(ref1->key)) ||
				    ref1->level != ref2->level ||
				    ref1->root_id != ref2->root_id)
					continue;
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}

	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct btrfs_key *info_key,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(info_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     struct btrfs_key *info_key, int *info_level,
			     struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0] - 1;

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;
		struct btrfs_disk_key disk_key;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		btrfs_tree_block_key(leaf, info, &disk_key);
		btrfs_disk_key_to_cpu(info_key, &disk_key);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       *info_level + 1, offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, info_key,
					       *info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
					       count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    struct btrfs_key *info_key, int info_level,
			    struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       info_level + 1, key.offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, info_key,
					       info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}
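
/*
 * Illustrative sketch of the extent tree layout the two helpers above walk
 * (simplified; which ref types appear depends on the extent): an EXTENT_ITEM
 * carries backrefs inline in its item data, and refs that no longer fit
 * inline become separate keyed items that follow with the same objectid,
 * which is why __add_keyed_refs() can simply walk forward with
 * btrfs_next_item():
 *
 *	(bytenr EXTENT_ITEM num_bytes)	<- inline refs in the item data
 *	(bytenr EXTENT_DATA_REF hash)	<- keyed backref items follow
 *	(bytenr SHARED_DATA_REF parent)
 */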

/*
 * this adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates and
 * resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 seq, struct ulist *refs, struct ulist *roots)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_key info_key = { 0 };
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
		/*
		 * check if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, seq, &info_key,
						 &prefs_delayed);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		leaf = path->nodes[0];
		slot = path->slots[0] - 1;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_key, &info_level,
						&prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       &info_key, info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	/*
	 * when adding the delayed refs above, the info_key might not have
	 * been known yet. Go over the list and replace the missing keys
	 */
	list_for_each_entry(ref, &prefs_delayed, list) {
		if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
			memcpy(&ref->key, &info_key, sizeof(ref->key));
	}
	list_splice_init(&prefs_delayed, &prefs);

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		if (ref->count < 0)
			WARN_ON(1);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		if (ref->count && ref->parent) {
			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		kfree(ref);
	}

out:
	if (head)
		mutex_unlock(&head->mutex);
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 num_bytes, u64 seq, struct ulist **leafs)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		ulist_free(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: we
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, seq,
					tmp, *roots);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, node);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}
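
/*
 * Usage sketch for btrfs_find_all_roots() (a hypothetical caller, error
 * handling trimmed; trans may also be BTRFS_BACKREF_SEARCH_COMMIT_ROOT, and
 * seq would come from btrfs_get_delayed_seq() as in iterate_extent_inodes()
 * below):
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_node *n = NULL;
 *
 *	ret = btrfs_find_all_roots(trans, fs_info, bytenr, 0, seq, &roots);
 *	if (!ret) {
 *		while ((n = ulist_next(roots, n)))
 *			pr_debug("extent %llu referenced from root %llu\n",
 *				 (unsigned long long)bytenr,
 *				 (unsigned long long)n->val);
 *		ulist_free(roots);
 *	}
 */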

static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
		    struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
			    &key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			  struct btrfs_path *path,
			  struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
			    found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			  struct btrfs_inode_ref *iref,
			  struct extent_buffer *eb_in, u64 parent,
			  char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   (unsigned long)(iref + 1), len);
		if (eb != eb_in)
			free_extent_buffer(eb);
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in)
			atomic_inc(&eb->refs);
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
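
/*
 * Example of the backwards fill (illustrative values only): resolving the
 * path "a/b/c" into a dest buffer of size 16 leaves
 *
 *	dest: [ ?  ?  ?  ?  ?  ?  ?  ?  ?  ?  a  /  b  /  c \0 ]
 *	                                      ^-- returned pointer (dest + 10)
 *
 * so the string is read from the returned pointer, not from dest. had size
 * been too small, the returned pointer would lie before dest, and the
 * required buffer size could be computed as dest + size - returned.
 */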

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
				  0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
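
/*
 * Call pattern for __get_extent_inline_ref() (a minimal sketch; eb, ei and
 * item_size are assumed to come from a prior extent_from_logical() plus
 * btrfs_item_ptr() lookup, as done by callers like tree_backref_for_extent()
 * below):
 *
 *	unsigned long ptr = 0;	// must be 0 for the first call
 *	struct btrfs_extent_inline_ref *eiref;
 *	int type;
 *	int ret;
 *
 *	do {
 *		ret = __get_extent_inline_ref(&ptr, eb, ei, item_size,
 *					      &eiref, &type);
 *		if (ret < 0)
 *			break;	// e.g. -ENOENT if there are no inline refs
 *		// eiref and type are valid here, also when ret == 1
 *	} while (ret == 0);	// ret == 1 means the last ref was returned
 */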

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
			     u64 orig_extent_item_objectid,
			     u64 extent_item_pos, u64 root,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb;
	int slot;
	int nritems;
	int ret = 0;
	int extent_type;
	u64 data_offset;
	u64 data_len;

	eb = read_tree_block(fs_info->tree_root, logical,
			     fs_info->tree_root->leafsize, 0);
	if (!eb)
		return -EIO;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != orig_extent_item_objectid)
			continue;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			continue;

		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", orig_extent_item_objectid,
			 key.objectid, key.offset, root);
		ret = iterate(key.objectid,
			      key.offset + (extent_item_pos - data_offset),
			      root, ctx);
		if (ret) {
			pr_debug("stopping iteration because ret=%d\n", ret);
			break;
		}
	}

	free_extent_buffer(eb);

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct btrfs_trans_handle *trans;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	/* zero-init: seq is only meaningful when we joined a transaction */
	struct seq_list seq_elem = {};
	struct btrfs_delayed_ref_root *delayed_refs = NULL;

	pr_debug("resolving all inodes for extent %llu\n",
		 extent_item_objectid);

	if (search_commit_root) {
		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
	} else {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
		spin_unlock(&delayed_refs->lock);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   extent_item_pos, seq_elem.seq,
				   &refs);

	if (ret)
		goto out;

	while (!ret && (ref_node = ulist_next(refs, ref_node))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
					   seq_elem.seq, &roots);
		if (ret)
			break;
		while (!ret && (root_node = ulist_next(roots, root_node))) {
			pr_debug("root %llu references leaf %llu\n",
				 root_node->val, ref_node->val);
			ret = iterate_leaf_refs(fs_info, ref_node->val,
						extent_item_objectid,
						extent_item_pos,
						root_node->val,
						iterate, ctx);
		}
	}

	ulist_free(refs);
	ulist_free(roots);
out:
	if (!search_commit_root) {
		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path,
				  &found_key);
	btrfs_release_path(path);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx);

	return ret;
}
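
/*
 * A hypothetical iterate_extent_inodes_t callback, for illustration only (no
 * such callback exists in this file; scrub and the logical-to-ino ioctl
 * provide the real ones). It logs every (inode, offset, root) triple that
 * maps to the extent and continues the iteration by returning 0:
 *
 *	static int log_extent_user(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		pr_debug("extent maps to inode %llu, file offset %llu in "
 *			 "root %llu\n", inum, offset, root);
 *		return 0;	// non-zero would stop the iteration
 *	}
 *
 *	ret = iterate_inodes_from_logical(logical, fs_info, path,
 *					  log_extent_user, NULL);
 */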

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path,
			 iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	/* a non-zero value from iterate() ends the iteration */
	while (!ret) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
			ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
			      inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		pr_debug("path resolved: %s\n", fspath);
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		pr_debug("missed path, not enough space. missing bytes: %lu, "
			 "constructed so far: %s\n",
			 (unsigned long)(fspath_min - fspath), fspath_min);
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	kfree(ipath->fspath);
	kfree(ipath);
}
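
/*
 * Putting the ipath pieces together (a minimal sketch of a hypothetical
 * caller, mirroring what the BTRFS_IOC_INO_PATHS ioctl does; error handling
 * trimmed):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int i, ret;
 *
 *	ipath = init_ipath(4096, fs_root, path);	// 4096 bytes for paths
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 *		pr_debug("path: %s\n",
 *			 (char *)(unsigned long)ipath->fspath->val[i]);
 *	// ipath->fspath->elem_missed tells how many paths didn't fit
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 */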