Lines matching "+full:data +full:-path" in fs/btrfs/backref.c

1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
14 #include "delayed-ref.h"
17 #include "tree-mod-log.h"
20 #include "extent-tree.h"
22 #include "tree-checker.h"
42 u64 offset = key->offset; in check_extent_in_eb()
48 if (!ctx->ignore_extent_item_pos && in check_extent_in_eb()
56 if (ctx->extent_item_pos < data_offset || in check_extent_in_eb()
57 ctx->extent_item_pos >= data_offset + data_len) in check_extent_in_eb()
59 offset += ctx->extent_item_pos - data_offset; in check_extent_in_eb()
62 if (!ctx->indirect_ref_iterator || !ctx->cache_lookup) in check_extent_in_eb()
65 cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids, in check_extent_in_eb()
73 ret = ctx->indirect_ref_iterator(key->objectid, offset, in check_extent_in_eb()
75 ctx->user_ctx); in check_extent_in_eb()
83 return -ENOMEM; in check_extent_in_eb()
85 e->next = *eie; in check_extent_in_eb()
86 e->inum = key->objectid; in check_extent_in_eb()
87 e->offset = offset; in check_extent_in_eb()
88 e->num_bytes = data_len; in check_extent_in_eb()
99 eie_next = eie->next; in free_inode_elem_list()
117 * from the shared data ref, we only have the leaf but we need in find_extent_in_eb()
132 if (disk_byte != ctx->bytenr) in find_extent_in_eb()
151 struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
160 * ref->count >0:
161 * - incremented when a ref->count transitions to >0
162 * - decremented when a ref->count transitions to <1
178 * The number of times we found our inode refers to the data extent we
180 * items we could find for our inode that point to our target data
193 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; in extent_is_shared()
203 return -ENOMEM; in btrfs_prelim_ref_init()
219 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
225 if (ref1->level < ref2->level) in prelim_ref_compare()
226 return -1; in prelim_ref_compare()
227 if (ref1->level > ref2->level) in prelim_ref_compare()
229 if (ref1->root_id < ref2->root_id) in prelim_ref_compare()
230 return -1; in prelim_ref_compare()
231 if (ref1->root_id > ref2->root_id) in prelim_ref_compare()
233 if (ref1->key_for_search.type < ref2->key_for_search.type) in prelim_ref_compare()
234 return -1; in prelim_ref_compare()
235 if (ref1->key_for_search.type > ref2->key_for_search.type) in prelim_ref_compare()
237 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) in prelim_ref_compare()
238 return -1; in prelim_ref_compare()
239 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) in prelim_ref_compare()
241 if (ref1->key_for_search.offset < ref2->key_for_search.offset) in prelim_ref_compare()
242 return -1; in prelim_ref_compare()
243 if (ref1->key_for_search.offset > ref2->key_for_search.offset) in prelim_ref_compare()
245 if (ref1->parent < ref2->parent) in prelim_ref_compare()
246 return -1; in prelim_ref_compare()
247 if (ref1->parent > ref2->parent) in prelim_ref_compare()
275 sc->share_count--; in update_share_count()
277 sc->share_count++; in update_share_count()
279 if (newref->root_id == btrfs_root_id(sc->root) && in update_share_count()
280 newref->wanted_disk_byte == sc->data_bytenr && in update_share_count()
281 newref->key_for_search.objectid == sc->inum) in update_share_count()
282 sc->self_ref_count += newref->count; in update_share_count()
298 root = &preftree->root; in prelim_ref_insert()
299 exist = rb_find_add_cached(&newref->rbnode, root, prelim_ref_rb_add_cmp); in prelim_ref_insert()
303 struct extent_inode_elem *eie = ref->inode_list; in prelim_ref_insert()
305 while (eie && eie->next) in prelim_ref_insert()
306 eie = eie->next; in prelim_ref_insert()
309 ref->inode_list = newref->inode_list; in prelim_ref_insert()
311 eie->next = newref->inode_list; in prelim_ref_insert()
313 preftree->count); in prelim_ref_insert()
315 * A delayed ref can have newref->count < 0. in prelim_ref_insert()
316 * The ref->count is updated to follow any in prelim_ref_insert()
319 update_share_count(sc, ref->count, in prelim_ref_insert()
320 ref->count + newref->count, newref); in prelim_ref_insert()
321 ref->count += newref->count; in prelim_ref_insert()
326 update_share_count(sc, 0, newref->count, newref); in prelim_ref_insert()
327 preftree->count++; in prelim_ref_insert()
328 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); in prelim_ref_insert()
340 &preftree->root.rb_root, rbnode) { in prelim_release()
341 free_inode_elem_list(ref->inode_list); in prelim_release()
345 preftree->root = RB_ROOT_CACHED; in prelim_release()
346 preftree->count = 0; in prelim_release()
351 * - obtaining the parent is the goal
352 * - if you add a key, you must know that it is a correct key
353 * - if you cannot add the parent or a correct key, then we will look into the
359 * information | tree | tree | data | data
360 * --------------------+--------+----------+--------+----------
361 * parent logical | y | - | - | -
362 * key to resolve | - | y | y | y
363 * tree block logical | - | - | - | -
366 * - column 1: we've the parent -> done
367 * - column 2, 3, 4: we use the key to find the parent
372 * information | tree | tree | data | data
373 * --------------------+--------+----------+--------+----------
374 * parent logical | y | - | y | -
375 * key to resolve | - | - | - | y
377 * root for resolving | - | y | y | y
379 * - column 1, 3: we've the parent -> done
380 * - column 2: we take the first key from the block to find the parent
382 * - column 4: we use the key to find the parent
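
As a minimal illustration of the columns above (a sketch, not part of the file; fs_info, preftrees, key and the remaining values are placeholders, with the argument order taken from the add_direct_ref()/add_indirect_ref() calls further down):

        /* Column 1: the parent block's logical address is known -> direct ref. */
        ret = add_direct_ref(fs_info, preftrees, level, parent,
                             wanted_disk_byte, count, sc, GFP_NOFS);

        /*
         * Columns 2-4: only a key plus a root to resolve it in is known ->
         * indirect ref, turned into parents later by resolve_indirect_refs().
         */
        ret = add_indirect_ref(fs_info, preftrees, root_id, &key, level,
                               wanted_disk_byte, count, sc, GFP_NOFS);
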
400 return -ENOMEM; in add_prelim_ref()
402 ref->root_id = root_id; in add_prelim_ref()
404 ref->key_for_search = *key; in add_prelim_ref()
406 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); in add_prelim_ref()
408 ref->inode_list = NULL; in add_prelim_ref()
409 ref->level = level; in add_prelim_ref()
410 ref->count = count; in add_prelim_ref()
411 ref->parent = parent; in add_prelim_ref()
412 ref->wanted_disk_byte = wanted_disk_byte; in add_prelim_ref()
423 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, in add_direct_ref()
434 struct preftree *tree = &preftrees->indirect; in add_indirect_ref()
437 tree = &preftrees->indirect_missing_keys; in add_indirect_ref()
444 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; in is_shared_data_backref()
458 p = &(*p)->rb_left; in is_shared_data_backref()
460 p = &(*p)->rb_right; in is_shared_data_backref()
468 struct btrfs_root *root, struct btrfs_path *path, in add_all_parents() argument
477 struct btrfs_key *key_for_search = &ref->key_for_search; in add_all_parents()
481 u64 wanted_disk_byte = ref->wanted_disk_byte; in add_all_parents()
487 eb = path->nodes[level]; in add_all_parents()
488 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); in add_all_parents()
495 * 1. We normally enter this function with the path already pointing to in add_all_parents()
499 * matches shared data backref in add_all_parents()
504 eb = path->nodes[0]; in add_all_parents()
505 if (path->slots[0] >= btrfs_header_nritems(eb) || in add_all_parents()
506 is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
507 ref->root_id != btrfs_header_owner(eb)) { in add_all_parents()
508 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
509 ret = btrfs_next_leaf(root, path); in add_all_parents()
511 ret = btrfs_next_old_leaf(root, path, ctx->time_seq); in add_all_parents()
514 while (!ret && count < ref->count) { in add_all_parents()
515 eb = path->nodes[0]; in add_all_parents()
516 slot = path->slots[0]; in add_all_parents()
520 if (key.objectid != key_for_search->objectid || in add_all_parents()
526 * matches shared data backref, OR in add_all_parents()
530 (is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
531 ref->root_id != btrfs_header_owner(eb))) { in add_all_parents()
532 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
533 ret = btrfs_next_leaf(root, path); in add_all_parents()
535 ret = btrfs_next_old_leaf(root, path, ctx->time_seq); in add_all_parents()
548 if (ref->key_for_search.offset == key.offset - data_offset) in add_all_parents()
552 if (!ctx->skip_inode_ref_list) { in add_all_parents()
560 ret = ulist_add_merge_ptr(parents, eb->start, in add_all_parents()
564 if (!ret && !ctx->skip_inode_ref_list) { in add_all_parents()
565 while (old->next) in add_all_parents()
566 old = old->next; in add_all_parents()
567 old->next = eie; in add_all_parents()
572 if (ctx->time_seq == BTRFS_SEQ_LAST) in add_all_parents()
573 ret = btrfs_next_item(root, path); in add_all_parents()
575 ret = btrfs_next_old_item(root, path, ctx->time_seq); in add_all_parents()
591 struct btrfs_path *path, in resolve_indirect_ref() argument
599 int level = ref->level; in resolve_indirect_ref()
600 struct btrfs_key search_key = ref->key_for_search; in resolve_indirect_ref()
610 if (path->search_commit_root) in resolve_indirect_ref()
611 root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id); in resolve_indirect_ref()
613 root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false); in resolve_indirect_ref()
619 if (!path->search_commit_root && in resolve_indirect_ref()
620 test_bit(BTRFS_ROOT_DELETING, &root->state)) { in resolve_indirect_ref()
621 ret = -ENOENT; in resolve_indirect_ref()
625 if (btrfs_is_testing(ctx->fs_info)) { in resolve_indirect_ref()
626 ret = -ENOENT; in resolve_indirect_ref()
630 if (path->search_commit_root) in resolve_indirect_ref()
631 root_level = btrfs_header_level(root->commit_root); in resolve_indirect_ref()
632 else if (ctx->time_seq == BTRFS_SEQ_LAST) in resolve_indirect_ref()
633 root_level = btrfs_header_level(root->node); in resolve_indirect_ref()
635 root_level = btrfs_old_root_level(root, ctx->time_seq); in resolve_indirect_ref()
641 * We can often find data backrefs with an offset that is too large in resolve_indirect_ref()
643 * subtracting a file's offset with the data offset of its in resolve_indirect_ref()
644 * corresponding extent data item. This can happen for example in the in resolve_indirect_ref()
662 path->lowest_level = level; in resolve_indirect_ref()
663 if (ctx->time_seq == BTRFS_SEQ_LAST) in resolve_indirect_ref()
664 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); in resolve_indirect_ref()
666 ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq); in resolve_indirect_ref()
668 btrfs_debug(ctx->fs_info, in resolve_indirect_ref()
670 ref->root_id, level, ref->count, ret, in resolve_indirect_ref()
671 ref->key_for_search.objectid, ref->key_for_search.type, in resolve_indirect_ref()
672 ref->key_for_search.offset); in resolve_indirect_ref()
676 eb = path->nodes[level]; in resolve_indirect_ref()
682 level--; in resolve_indirect_ref()
683 eb = path->nodes[level]; in resolve_indirect_ref()
686 ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level); in resolve_indirect_ref()
690 path->lowest_level = 0; in resolve_indirect_ref()
691 btrfs_release_path(path); in resolve_indirect_ref()
700 return (struct extent_inode_elem *)(uintptr_t)node->aux; in unode_aux_to_inode_list()
732 struct btrfs_path *path, in resolve_indirect_refs() argument
744 return -ENOMEM; in resolve_indirect_refs()
752 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { in resolve_indirect_refs()
757 if (WARN(ref->parent, in resolve_indirect_refs()
759 ret = -EINVAL; in resolve_indirect_refs()
763 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
764 preftrees->indirect.count--; in resolve_indirect_refs()
766 if (ref->count == 0) { in resolve_indirect_refs()
771 if (sc && ref->root_id != btrfs_root_id(sc->root)) { in resolve_indirect_refs()
776 ret2 = resolve_indirect_ref(ctx, path, preftrees, ref, parents); in resolve_indirect_refs()
781 if (ret2 == -ENOENT) { in resolve_indirect_refs()
782 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, in resolve_indirect_refs()
794 ref->parent = node ? node->val : 0; in resolve_indirect_refs()
795 ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
805 ret = -ENOMEM; in resolve_indirect_refs()
809 new_ref->parent = node->val; in resolve_indirect_refs()
810 new_ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
811 prelim_ref_insert(ctx->fs_info, &preftrees->direct, in resolve_indirect_refs()
819 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL); in resolve_indirect_refs()
841 struct preftree *tree = &preftrees->indirect_missing_keys; in add_missing_keys()
844 while ((node = rb_first_cached(&tree->root))) { in add_missing_keys()
848 rb_erase_cached(node, &tree->root); in add_missing_keys()
850 BUG_ON(ref->parent); /* should not be a direct ref */ in add_missing_keys()
851 BUG_ON(ref->key_for_search.type); in add_missing_keys()
852 BUG_ON(!ref->wanted_disk_byte); in add_missing_keys()
854 check.level = ref->level - 1; in add_missing_keys()
855 check.owner_root = ref->root_id; in add_missing_keys()
857 eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check); in add_missing_keys()
865 return -EIO; in add_missing_keys()
871 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
873 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
877 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); in add_missing_keys()
897 spin_lock(&head->lock); in add_delayed_refs()
898 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { in add_delayed_refs()
901 if (node->seq > seq) in add_delayed_refs()
904 switch (node->action) { in add_delayed_refs()
910 count = node->ref_mod; in add_delayed_refs()
913 count = node->ref_mod * -1; in add_delayed_refs()
918 switch (node->type) { in add_delayed_refs()
925 if (head->extent_op && head->extent_op->update_key) { in add_delayed_refs()
926 btrfs_disk_key_to_cpu(&key, &head->extent_op->key); in add_delayed_refs()
930 ret = add_indirect_ref(fs_info, preftrees, node->ref_root, in add_delayed_refs()
931 key_ptr, level + 1, node->bytenr, in add_delayed_refs()
944 node->parent, node->bytenr, count, in add_delayed_refs()
949 /* NORMAL INDIRECT DATA backref */ in add_delayed_refs()
970 sc->have_delayed_delete_refs = true; in add_delayed_refs()
972 ret = add_indirect_ref(fs_info, preftrees, node->ref_root, in add_delayed_refs()
973 &key, 0, node->bytenr, count, sc, in add_delayed_refs()
979 ret = add_direct_ref(fs_info, preftrees, 0, node->parent, in add_delayed_refs()
980 node->bytenr, count, sc, in add_delayed_refs()
997 spin_unlock(&head->lock); in add_delayed_refs()
1007 struct btrfs_path *path, in add_inline_refs() argument
1025 leaf = path->nodes[0]; in add_inline_refs()
1026 slot = path->slots[0]; in add_inline_refs()
1031 if (ctx->check_extent_item) { in add_inline_refs()
1032 ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx); in add_inline_refs()
1066 return -EUCLEAN; in add_inline_refs()
1072 ret = add_direct_ref(ctx->fs_info, preftrees, in add_inline_refs()
1074 ctx->bytenr, 1, NULL, GFP_NOFS); in add_inline_refs()
1083 ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset, in add_inline_refs()
1084 ctx->bytenr, count, sc, GFP_NOFS); in add_inline_refs()
1088 ret = add_indirect_ref(ctx->fs_info, preftrees, offset, in add_inline_refs()
1090 ctx->bytenr, 1, NULL, GFP_NOFS); in add_inline_refs()
1097 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in add_inline_refs()
1104 if (sc && key.objectid != sc->inum && in add_inline_refs()
1105 !sc->have_delayed_delete_refs) { in add_inline_refs()
1112 if (!ctx->skip_data_ref || in add_inline_refs()
1113 !ctx->skip_data_ref(root, key.objectid, key.offset, in add_inline_refs()
1114 ctx->user_ctx)) in add_inline_refs()
1115 ret = add_indirect_ref(ctx->fs_info, preftrees, in add_inline_refs()
1116 root, &key, 0, ctx->bytenr, in add_inline_refs()
1121 ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA)); in add_inline_refs()
1135 * add all non-inline backrefs for bytenr to the list
1141 struct btrfs_path *path, in add_keyed_refs() argument
1145 struct btrfs_fs_info *fs_info = extent_root->fs_info; in add_keyed_refs()
1152 ret = btrfs_next_item(extent_root, path); in add_keyed_refs()
1160 slot = path->slots[0]; in add_keyed_refs()
1161 leaf = path->nodes[0]; in add_keyed_refs()
1164 if (key.objectid != ctx->bytenr) in add_keyed_refs()
1176 ctx->bytenr, 1, NULL, GFP_NOFS); in add_keyed_refs()
1187 key.offset, ctx->bytenr, count, in add_keyed_refs()
1194 NULL, info_level + 1, ctx->bytenr, in add_keyed_refs()
1198 /* NORMAL INDIRECT DATA backref */ in add_keyed_refs()
1211 if (sc && key.objectid != sc->inum && in add_keyed_refs()
1212 !sc->have_delayed_delete_refs) { in add_keyed_refs()
1219 if (!ctx->skip_data_ref || in add_keyed_refs()
1220 !ctx->skip_data_ref(root, key.objectid, key.offset, in add_keyed_refs()
1221 ctx->user_ctx)) in add_keyed_refs()
1223 &key, 0, ctx->bytenr, in add_keyed_refs()
1240 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1247 const struct btrfs_fs_info *fs_info = root->fs_info; in lookup_backref_shared_cache()
1250 if (!current->journal_info) in lookup_backref_shared_cache()
1251 lockdep_assert_held(&fs_info->commit_root_sem); in lookup_backref_shared_cache()
1253 if (!ctx->use_path_cache) in lookup_backref_shared_cache()
1260 * Level -1 is used for the data extent, which is not reliable to cache in lookup_backref_shared_cache()
1267 entry = &ctx->path_cache_entries[level]; in lookup_backref_shared_cache()
1270 if (entry->bytenr != bytenr) in lookup_backref_shared_cache()
1277 if (!entry->is_shared && in lookup_backref_shared_cache()
1278 entry->gen != btrfs_root_last_snapshot(&root->root_item)) in lookup_backref_shared_cache()
1286 if (entry->is_shared && in lookup_backref_shared_cache()
1287 entry->gen != btrfs_get_last_root_drop_gen(fs_info)) in lookup_backref_shared_cache()
1290 *is_shared = entry->is_shared; in lookup_backref_shared_cache()
1300 ctx->path_cache_entries[i].is_shared = true; in lookup_backref_shared_cache()
1301 ctx->path_cache_entries[i].gen = entry->gen; in lookup_backref_shared_cache()
1310 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1317 const struct btrfs_fs_info *fs_info = root->fs_info; in store_backref_shared_cache()
1321 if (!current->journal_info) in store_backref_shared_cache()
1322 lockdep_assert_held(&fs_info->commit_root_sem); in store_backref_shared_cache()
1324 if (!ctx->use_path_cache) in store_backref_shared_cache()
1331 * Level -1 is used for the data extent, which is not reliable to cache in store_backref_shared_cache()
1341 gen = btrfs_root_last_snapshot(&root->root_item); in store_backref_shared_cache()
1343 entry = &ctx->path_cache_entries[level]; in store_backref_shared_cache()
1344 entry->bytenr = bytenr; in store_backref_shared_cache()
1345 entry->is_shared = is_shared; in store_backref_shared_cache()
1346 entry->gen = gen; in store_backref_shared_cache()
1350 * extent buffers below it to true. As nodes in the path are COWed, in store_backref_shared_cache()
1352 * then the sharedness of a data extent becomes direct, the refcount of in store_backref_shared_cache()
1353 * data extent is increased in the extent item at the extent tree. in store_backref_shared_cache()
1357 entry = &ctx->path_cache_entries[i]; in store_backref_shared_cache()
1358 entry->is_shared = is_shared; in store_backref_shared_cache()
1359 entry->gen = gen; in store_backref_shared_cache()
1381 struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr); in find_parent_nodes()
1383 struct btrfs_path *path; in find_parent_nodes() local
1399 ASSERT(ctx->roots == NULL); in find_parent_nodes()
1401 key.objectid = ctx->bytenr; in find_parent_nodes()
1402 if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA)) in find_parent_nodes()
1406 key.offset = (u64)-1; in find_parent_nodes()
1408 path = btrfs_alloc_path(); in find_parent_nodes()
1409 if (!path) in find_parent_nodes()
1410 return -ENOMEM; in find_parent_nodes()
1411 if (!ctx->trans) { in find_parent_nodes()
1412 path->search_commit_root = 1; in find_parent_nodes()
1413 path->skip_locking = 1; in find_parent_nodes()
1416 if (ctx->time_seq == BTRFS_SEQ_LAST) in find_parent_nodes()
1417 path->skip_locking = 1; in find_parent_nodes()
1422 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); in find_parent_nodes()
1427 * Key with offset -1 found, there would have to exist an extent in find_parent_nodes()
1430 ret = -EUCLEAN; in find_parent_nodes()
1434 if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) && in find_parent_nodes()
1435 ctx->time_seq != BTRFS_SEQ_LAST) { in find_parent_nodes()
1438 * means we have the path lock, we need to grab the ref head and in find_parent_nodes()
1442 delayed_refs = &ctx->trans->transaction->delayed_refs; in find_parent_nodes()
1443 spin_lock(&delayed_refs->lock); in find_parent_nodes()
1444 head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs, in find_parent_nodes()
1445 ctx->bytenr); in find_parent_nodes()
1447 if (!mutex_trylock(&head->mutex)) { in find_parent_nodes()
1448 refcount_inc(&head->refs); in find_parent_nodes()
1449 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1451 btrfs_release_path(path); in find_parent_nodes()
1457 mutex_lock(&head->mutex); in find_parent_nodes()
1458 mutex_unlock(&head->mutex); in find_parent_nodes()
1462 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1463 ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq, in find_parent_nodes()
1465 mutex_unlock(&head->mutex); in find_parent_nodes()
1469 spin_unlock(&delayed_refs->lock); in find_parent_nodes()
1473 if (path->slots[0]) { in find_parent_nodes()
1477 path->slots[0]--; in find_parent_nodes()
1478 leaf = path->nodes[0]; in find_parent_nodes()
1479 slot = path->slots[0]; in find_parent_nodes()
1481 if (key.objectid == ctx->bytenr && in find_parent_nodes()
1484 ret = add_inline_refs(ctx, path, &info_level, in find_parent_nodes()
1488 ret = add_keyed_refs(ctx, root, path, info_level, in find_parent_nodes()
1505 * item pointing to it in case we are dealing with a data extent. in find_parent_nodes()
1510 * If we are here for a data extent and we have a share_check structure in find_parent_nodes()
1511 * it means the data extent is not directly shared (does not have in find_parent_nodes()
1512 * multiple reference items), so we have to check if a path in the fs in find_parent_nodes()
1514 * extent item pointing to the data extent) is shared, that is, if any in find_parent_nodes()
1515 * of the extent buffers in the path is referenced by other trees. in find_parent_nodes()
1517 if (sc && ctx->bytenr == sc->data_bytenr) { in find_parent_nodes()
1519 * If our data extent is from a generation more recent than the in find_parent_nodes()
1523 * determining the extent buffers for the path from the fs tree in find_parent_nodes()
1525 * points to the data extent. in find_parent_nodes()
1527 if (sc->data_extent_gen > in find_parent_nodes()
1528 btrfs_root_last_snapshot(&sc->root->root_item)) { in find_parent_nodes()
1534 * If we are only determining if a data extent is shared or not in find_parent_nodes()
1537 * indirect references for a data extent, since the fs tree path in find_parent_nodes()
1538 * is the same (same leaf, so same path). We skip as long as the in find_parent_nodes()
1540 * one file extent item pointing to the data extent, because in in find_parent_nodes()
1544 if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr && in find_parent_nodes()
1545 sc->self_ref_count == 1) { in find_parent_nodes()
1549 cached = lookup_backref_shared_cache(sc->ctx, sc->root, in find_parent_nodes()
1550 sc->ctx->curr_leaf_bytenr, in find_parent_nodes()
1562 btrfs_release_path(path); in find_parent_nodes()
1564 ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0); in find_parent_nodes()
1570 ret = resolve_indirect_refs(ctx, path, &preftrees, sc); in find_parent_nodes()
1586 node = rb_next(&ref->rbnode); in find_parent_nodes()
1588 * ref->count < 0 can happen here if there are delayed in find_parent_nodes()
1589 * refs with a node->action of BTRFS_DROP_DELAYED_REF. in find_parent_nodes()
1595 * and would retain their original ref->count < 0. in find_parent_nodes()
1597 if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) { in find_parent_nodes()
1599 ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS); in find_parent_nodes()
1603 if (ref->count && ref->parent) { in find_parent_nodes()
1604 if (!ctx->skip_inode_ref_list && !ref->inode_list && in find_parent_nodes()
1605 ref->level == 0) { in find_parent_nodes()
1609 check.level = ref->level; in find_parent_nodes()
1611 eb = read_tree_block(ctx->fs_info, ref->parent, in find_parent_nodes()
1619 ret = -EIO; in find_parent_nodes()
1623 if (!path->skip_locking) in find_parent_nodes()
1626 if (!path->skip_locking) in find_parent_nodes()
1632 ref->inode_list = eie; in find_parent_nodes()
1640 ret = ulist_add_merge_ptr(ctx->refs, ref->parent, in find_parent_nodes()
1641 ref->inode_list, in find_parent_nodes()
1645 if (!ret && !ctx->skip_inode_ref_list) { in find_parent_nodes()
1656 ret = -EUCLEAN; in find_parent_nodes()
1659 while (eie->next) in find_parent_nodes()
1660 eie = eie->next; in find_parent_nodes()
1661 eie->next = ref->inode_list; in find_parent_nodes()
1668 * use-after-free when our caller uses it or double in find_parent_nodes()
1671 ref->inode_list = NULL; in find_parent_nodes()
1677 btrfs_free_path(path); in find_parent_nodes()
1690 * @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
1691 * added to the ulist at @ctx->refs, and that ulist is allocated by this
1693 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
1696 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1702 ASSERT(ctx->refs == NULL); in btrfs_find_all_leafs()
1704 ctx->refs = ulist_alloc(GFP_NOFS); in btrfs_find_all_leafs()
1705 if (!ctx->refs) in btrfs_find_all_leafs()
1706 return -ENOMEM; in btrfs_find_all_leafs()
1710 (ret < 0 && ret != -ENOENT)) { in btrfs_find_all_leafs()
1711 free_leaf_list(ctx->refs); in btrfs_find_all_leafs()
1712 ctx->refs = NULL; in btrfs_find_all_leafs()
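
A minimal sketch of driving btrfs_find_all_leafs() per the contract above (fs_info, extent_bytenr and offset_in_extent are placeholder assumptions; error handling abbreviated):

        struct btrfs_backref_walk_ctx walk_ctx = { 0 };
        int ret;

        walk_ctx.fs_info = fs_info;
        walk_ctx.bytenr = extent_bytenr;             /* logical of the extent item */
        walk_ctx.extent_item_pos = offset_in_extent;
        /* walk_ctx.trans left NULL: search the commit roots, no tree locking */

        ret = btrfs_find_all_leafs(&walk_ctx);
        if (ret == 0) {
                /* walk_ctx.refs now holds one ulist node per referencing leaf */
                free_leaf_list(walk_ctx.refs);  /* ulist_free() if ignore_extent_item_pos was set */
        }
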
1730 * Found roots are added to @ctx->roots, which is allocated by this function if
1733 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1740 const u64 orig_bytenr = ctx->bytenr; in btrfs_find_all_roots_safe()
1741 const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list; in btrfs_find_all_roots_safe()
1746 ASSERT(ctx->refs == NULL); in btrfs_find_all_roots_safe()
1748 ctx->refs = ulist_alloc(GFP_NOFS); in btrfs_find_all_roots_safe()
1749 if (!ctx->refs) in btrfs_find_all_roots_safe()
1750 return -ENOMEM; in btrfs_find_all_roots_safe()
1752 if (!ctx->roots) { in btrfs_find_all_roots_safe()
1753 ctx->roots = ulist_alloc(GFP_NOFS); in btrfs_find_all_roots_safe()
1754 if (!ctx->roots) { in btrfs_find_all_roots_safe()
1755 ulist_free(ctx->refs); in btrfs_find_all_roots_safe()
1756 ctx->refs = NULL; in btrfs_find_all_roots_safe()
1757 return -ENOMEM; in btrfs_find_all_roots_safe()
1762 ctx->skip_inode_ref_list = true; in btrfs_find_all_roots_safe()
1769 if (ret < 0 && ret != -ENOENT) { in btrfs_find_all_roots_safe()
1771 ulist_free(ctx->roots); in btrfs_find_all_roots_safe()
1772 ctx->roots = NULL; in btrfs_find_all_roots_safe()
1777 node = ulist_next(ctx->refs, &uiter); in btrfs_find_all_roots_safe()
1780 ctx->bytenr = node->val; in btrfs_find_all_roots_safe()
1784 ulist_free(ctx->refs); in btrfs_find_all_roots_safe()
1785 ctx->refs = NULL; in btrfs_find_all_roots_safe()
1786 ctx->bytenr = orig_bytenr; in btrfs_find_all_roots_safe()
1787 ctx->skip_inode_ref_list = orig_skip_inode_ref_list; in btrfs_find_all_roots_safe()
1797 if (!ctx->trans && !skip_commit_root_sem) in btrfs_find_all_roots()
1798 down_read(&ctx->fs_info->commit_root_sem); in btrfs_find_all_roots()
1800 if (!ctx->trans && !skip_commit_root_sem) in btrfs_find_all_roots()
1801 up_read(&ctx->fs_info->commit_root_sem); in btrfs_find_all_roots()
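
And a sketch of the root-resolving variant, roughly the way qgroup accounting consumes it (placeholder identifiers, abbreviated error handling):

        struct btrfs_backref_walk_ctx walk_ctx = { 0 };
        struct ulist_iterator uiter;
        struct ulist_node *node;
        int ret;

        walk_ctx.fs_info = fs_info;
        walk_ctx.bytenr = bytenr;

        ret = btrfs_find_all_roots(&walk_ctx, false);
        if (ret == 0) {
                ULIST_ITER_INIT(&uiter);
                while ((node = ulist_next(walk_ctx.roots, &uiter)))
                        ;       /* node->val: id of a root referencing bytenr */
                ulist_free(walk_ctx.roots);
        }
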
1813 ulist_init(&ctx->refs); in btrfs_alloc_backref_share_check_ctx()
1823 ulist_release(&ctx->refs); in btrfs_free_backref_share_ctx()
1828 * Check if a data extent is shared or not.
1852 struct btrfs_root *root = inode->root; in btrfs_is_data_extent_shared()
1853 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_is_data_extent_shared()
1874 if (ctx->prev_extents_cache[i].bytenr == bytenr) in btrfs_is_data_extent_shared()
1875 return ctx->prev_extents_cache[i].is_shared; in btrfs_is_data_extent_shared()
1878 ulist_init(&ctx->refs); in btrfs_is_data_extent_shared()
1882 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { in btrfs_is_data_extent_shared()
1887 down_read(&fs_info->commit_root_sem); in btrfs_is_data_extent_shared()
1893 ctx->use_path_cache = true; in btrfs_is_data_extent_shared()
1897 * If it is, then we have a data extent that is shared due to a shared in btrfs_is_data_extent_shared()
1898 * subtree (caused by snapshotting) and we don't need to check for data in btrfs_is_data_extent_shared()
1900 * to determine if the data extent is shared through reflinks. in btrfs_is_data_extent_shared()
1903 ctx->curr_leaf_bytenr, 0, in btrfs_is_data_extent_shared()
1913 walk_ctx.refs = &ctx->refs; in btrfs_is_data_extent_shared()
1915 /* -1 means we are in the bytenr of the data extent. */ in btrfs_is_data_extent_shared()
1916 level = -1; in btrfs_is_data_extent_shared()
1919 const unsigned long prev_ref_count = ctx->refs.nnodes; in btrfs_is_data_extent_shared()
1932 if (ret < 0 && ret != -ENOENT) in btrfs_is_data_extent_shared()
1938 * the ctx->refs ulist, in which case we have to check multiple in btrfs_is_data_extent_shared()
1940 * use the path cache which is made for a single path. Multiple in btrfs_is_data_extent_shared()
1943 * 1) level -1, the data extent: If our data extent was not in btrfs_is_data_extent_shared()
1947 * extent items that point to the data extent - this happens in btrfs_is_data_extent_shared()
1954 * cache only works for a single path (by far the most common in btrfs_is_data_extent_shared()
1963 * (direct ref) and a non-shared tree block ref (indirect in btrfs_is_data_extent_shared()
1966 if ((ctx->refs.nnodes - prev_ref_count) > 1) in btrfs_is_data_extent_shared()
1967 ctx->use_path_cache = false; in btrfs_is_data_extent_shared()
1972 node = ulist_next(&ctx->refs, &uiter); in btrfs_is_data_extent_shared()
1975 bytenr = node->val; in btrfs_is_data_extent_shared()
1976 if (ctx->use_path_cache) { in btrfs_is_data_extent_shared()
1994 * If the path cache is disabled, then it means at some tree level we in btrfs_is_data_extent_shared()
1996 * multiple leaves with file extent items pointing to the same data in btrfs_is_data_extent_shared()
2000 if (!ctx->use_path_cache) { in btrfs_is_data_extent_shared()
2003 level--; in btrfs_is_data_extent_shared()
2005 bytenr = ctx->path_cache_entries[level].bytenr; in btrfs_is_data_extent_shared()
2006 ctx->use_path_cache = true; in btrfs_is_data_extent_shared()
2012 ctx->path_cache_entries[i].bytenr = 0; in btrfs_is_data_extent_shared()
2016 * Cache the sharedness result for the data extent if we know our inode in btrfs_is_data_extent_shared()
2017 * has more than 1 file extent item that refers to the data extent. in btrfs_is_data_extent_shared()
2020 int slot = ctx->prev_extents_cache_slot; in btrfs_is_data_extent_shared()
2022 ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr; in btrfs_is_data_extent_shared()
2023 ctx->prev_extents_cache[slot].is_shared = (ret == 1); in btrfs_is_data_extent_shared()
2026 ctx->prev_extents_cache_slot = slot; in btrfs_is_data_extent_shared()
2034 up_read(&fs_info->commit_root_sem); in btrfs_is_data_extent_shared()
2037 ulist_release(&ctx->refs); in btrfs_is_data_extent_shared()
2038 ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr; in btrfs_is_data_extent_shared()
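
A sketch of the call sequence for the sharedness check above, in the fiemap style (inode, disk_bytenr and extent_gen are placeholders; the ctx is meant to be reused across extents of one inode so the caches above stay warm):

        struct btrfs_backref_share_check_ctx *shared_ctx;
        int ret;

        shared_ctx = btrfs_alloc_backref_share_check_ctx();
        if (!shared_ctx)
                return -ENOMEM;

        ret = btrfs_is_data_extent_shared(inode, disk_bytenr, extent_gen,
                                          shared_ctx);
        /* ret == 1: shared, ret == 0: exclusive, ret < 0: error */

        btrfs_free_backref_share_ctx(shared_ctx);
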
2044 u64 start_off, struct btrfs_path *path, in btrfs_find_one_extref() argument
2059 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); in btrfs_find_one_extref()
2064 leaf = path->nodes[0]; in btrfs_find_one_extref()
2065 slot = path->slots[0]; in btrfs_find_one_extref()
2076 ret = btrfs_next_leaf(root, path); in btrfs_find_one_extref()
2079 ret = -ENOENT; in btrfs_find_one_extref()
2093 ret = -ENOENT; in btrfs_find_one_extref()
2100 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_find_one_extref()
2112 * this iterates to turn a name (from iref/extref) into a full filesystem path.
2113 * Elements of the path are separated by '/' and the path is guaranteed to be
2114 * 0-terminated. the path is only given within the current file system.
2119 * in case the path buffer would overflow, the pointer is decremented further
2122 * required for the path to fit into the buffer. in that case, the returned
2125 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, in btrfs_ref_to_path() argument
2133 s64 bytes_left = ((s64)size) - 1; in btrfs_ref_to_path()
2142 bytes_left -= name_len; in btrfs_ref_to_path()
2147 if (!path->skip_locking) in btrfs_ref_to_path()
2151 ret = btrfs_find_item(fs_root, path, parent, 0, in btrfs_ref_to_path()
2154 ret = -ENOENT; in btrfs_ref_to_path()
2164 slot = path->slots[0]; in btrfs_ref_to_path()
2165 eb = path->nodes[0]; in btrfs_ref_to_path()
2166 /* make sure we can use eb after releasing the path */ in btrfs_ref_to_path()
2168 path->nodes[0] = NULL; in btrfs_ref_to_path()
2169 path->locks[0] = 0; in btrfs_ref_to_path()
2171 btrfs_release_path(path); in btrfs_ref_to_path()
2178 --bytes_left; in btrfs_ref_to_path()
2183 btrfs_release_path(path); in btrfs_ref_to_path()
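
A sketch of consuming btrfs_ref_to_path() under the contract above (parameter names after fs_root/path are assumptions modelled on its caller inode_to_path(); buffer sizing is arbitrary). The name is composed from the end of the buffer, so the returned pointer, not the buffer start, is the string:

        char *buf;
        char *name;

        buf = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        name = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb,
                                 parent_ino, buf, PATH_MAX);
        if (IS_ERR(name)) {
                ret = PTR_ERR(name);
        } else if (name > buf) {
                pr_debug("resolved: %s\n", name);
        } else {
                /* overflow: buf - name more bytes would have been required */
        }
        kfree(buf);
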
2192 * this makes the path point to (logical EXTENT_ITEM *)
2193 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
2197 struct btrfs_path *path, struct btrfs_key *found_key, in extent_from_logical() argument
2213 key.offset = (u64)-1; in extent_from_logical()
2215 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); in extent_from_logical()
2220 * Key with offset -1 found, there would have to exist an extent in extent_from_logical()
2223 return -EUCLEAN; in extent_from_logical()
2226 ret = btrfs_previous_extent_item(extent_root, path, 0); in extent_from_logical()
2229 ret = -ENOENT; in extent_from_logical()
2232 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); in extent_from_logical()
2233 if (found_key->type == BTRFS_METADATA_ITEM_KEY) in extent_from_logical()
2234 size = fs_info->nodesize; in extent_from_logical()
2235 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY) in extent_from_logical()
2236 size = found_key->offset; in extent_from_logical()
2238 if (found_key->objectid > logical || in extent_from_logical()
2239 found_key->objectid + size <= logical) { in extent_from_logical()
2242 return -ENOENT; in extent_from_logical()
2245 eb = path->nodes[0]; in extent_from_logical()
2247 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); in extent_from_logical()
2252 logical, logical - found_key->objectid, found_key->objectid, in extent_from_logical()
2253 found_key->offset, flags, btrfs_item_size(eb, path->slots[0])); in extent_from_logical()
2266 return -EIO; in extent_from_logical()
2293 if (key->type == BTRFS_METADATA_ITEM_KEY) { in get_extent_inline_ref()
2298 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); in get_extent_inline_ref()
2308 return -ENOENT; in get_extent_inline_ref()
2316 return -EUCLEAN; in get_extent_inline_ref()
2330 * returns 0 if data was provided, 1 if there was no more data to provide or
2341 if (*ptr == (unsigned long)-1) in tree_backref_for_extent()
2361 if (key->type == BTRFS_EXTENT_ITEM_KEY) { in tree_backref_for_extent()
2367 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); in tree_backref_for_extent()
2368 *out_level = (u8)key->offset; in tree_backref_for_extent()
2372 *ptr = (unsigned long)-1; in tree_backref_for_extent()
2385 for (eie = inode_list; eie; eie = eie->next) { in iterate_leaf_refs()
2388 extent_item_objectid, eie->inum, in iterate_leaf_refs()
2389 eie->offset, root); in iterate_leaf_refs()
2390 ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx); in iterate_leaf_refs()
2405 * when the iterator function returns a non-zero value, iteration stops.
2417 btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu", in iterate_extent_inodes()
2418 ctx->bytenr); in iterate_extent_inodes()
2420 ASSERT(ctx->trans == NULL); in iterate_extent_inodes()
2421 ASSERT(ctx->roots == NULL); in iterate_extent_inodes()
2426 trans = btrfs_attach_transaction(ctx->fs_info->tree_root); in iterate_extent_inodes()
2428 if (PTR_ERR(trans) != -ENOENT && in iterate_extent_inodes()
2429 PTR_ERR(trans) != -EROFS) in iterate_extent_inodes()
2433 ctx->trans = trans; in iterate_extent_inodes()
2436 if (ctx->trans) { in iterate_extent_inodes()
2437 btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem); in iterate_extent_inodes()
2438 ctx->time_seq = seq_elem.seq; in iterate_extent_inodes()
2440 down_read(&ctx->fs_info->commit_root_sem); in iterate_extent_inodes()
2446 refs = ctx->refs; in iterate_extent_inodes()
2447 ctx->refs = NULL; in iterate_extent_inodes()
2451 const u64 leaf_bytenr = ref_node->val; in iterate_extent_inodes()
2456 inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux; in iterate_extent_inodes()
2458 if (ctx->cache_lookup) { in iterate_extent_inodes()
2463 cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx, in iterate_extent_inodes()
2467 ret = iterate_leaf_refs(ctx->fs_info, in iterate_extent_inodes()
2480 if (!ctx->roots) { in iterate_extent_inodes()
2481 ctx->roots = ulist_alloc(GFP_NOFS); in iterate_extent_inodes()
2482 if (!ctx->roots) { in iterate_extent_inodes()
2483 ret = -ENOMEM; in iterate_extent_inodes()
2488 ctx->bytenr = leaf_bytenr; in iterate_extent_inodes()
2493 if (ctx->cache_store) in iterate_extent_inodes()
2494 ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx); in iterate_extent_inodes()
2497 while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) { in iterate_extent_inodes()
2498 btrfs_debug(ctx->fs_info, in iterate_extent_inodes()
2499 "root %llu references leaf %llu, data list %#llx", in iterate_extent_inodes()
2500 root_node->val, ref_node->val, in iterate_extent_inodes()
2501 ref_node->aux); in iterate_extent_inodes()
2502 ret = iterate_leaf_refs(ctx->fs_info, inode_list, in iterate_extent_inodes()
2503 root_node->val, ctx->bytenr, in iterate_extent_inodes()
2506 ulist_reinit(ctx->roots); in iterate_extent_inodes()
2511 if (ctx->trans) { in iterate_extent_inodes()
2512 btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem); in iterate_extent_inodes()
2513 btrfs_end_transaction(ctx->trans); in iterate_extent_inodes()
2514 ctx->trans = NULL; in iterate_extent_inodes()
2516 up_read(&ctx->fs_info->commit_root_sem); in iterate_extent_inodes()
2519 ulist_free(ctx->roots); in iterate_extent_inodes()
2520 ctx->roots = NULL; in iterate_extent_inodes()
2533 if (inodes->bytes_left >= c) { in build_ino_list()
2534 inodes->bytes_left -= c; in build_ino_list()
2535 inodes->val[inodes->elem_cnt] = inum; in build_ino_list()
2536 inodes->val[inodes->elem_cnt + 1] = offset; in build_ino_list()
2537 inodes->val[inodes->elem_cnt + 2] = root; in build_ino_list()
2538 inodes->elem_cnt += 3; in build_ino_list()
2540 inodes->bytes_missing += c - inodes->bytes_left; in build_ino_list()
2541 inodes->bytes_left = 0; in build_ino_list()
2542 inodes->elem_missed += 3; in build_ino_list()
2555 struct btrfs_path *path; in iterate_inodes_from_logical() local
2557 path = btrfs_alloc_path(); in iterate_inodes_from_logical()
2558 if (!path) in iterate_inodes_from_logical()
2559 return -ENOMEM; in iterate_inodes_from_logical()
2561 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); in iterate_inodes_from_logical()
2562 btrfs_free_path(path); in iterate_inodes_from_logical()
2566 return -EINVAL; in iterate_inodes_from_logical()
2572 walk_ctx.extent_item_pos = logical - found_key.objectid; in iterate_inodes_from_logical()
2590 struct btrfs_root *fs_root = ipath->fs_root; in iterate_inode_refs()
2591 struct btrfs_path *path = ipath->btrfs_path; in iterate_inode_refs() local
2597 ret = btrfs_find_item(fs_root, path, inum, in iterate_inode_refs()
2604 ret = found ? 0 : -ENOENT; in iterate_inode_refs()
2610 slot = path->slots[0]; in iterate_inode_refs()
2611 eb = btrfs_clone_extent_buffer(path->nodes[0]); in iterate_inode_refs()
2613 ret = -ENOMEM; in iterate_inode_refs()
2616 btrfs_release_path(path); in iterate_inode_refs()
2622 /* path must be released before calling iterate()! */ in iterate_inode_refs()
2623 btrfs_debug(fs_root->fs_info, in iterate_inode_refs()
2637 btrfs_release_path(path); in iterate_inode_refs()
2649 struct btrfs_root *fs_root = ipath->fs_root; in iterate_inode_extrefs()
2650 struct btrfs_path *path = ipath->btrfs_path; in iterate_inode_extrefs() local
2658 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, in iterate_inode_extrefs()
2663 ret = found ? 0 : -ENOENT; in iterate_inode_extrefs()
2668 slot = path->slots[0]; in iterate_inode_extrefs()
2669 eb = btrfs_clone_extent_buffer(path->nodes[0]); in iterate_inode_extrefs()
2671 ret = -ENOMEM; in iterate_inode_extrefs()
2674 btrfs_release_path(path); in iterate_inode_extrefs()
2687 (unsigned long)&extref->name, eb, ipath); in iterate_inode_extrefs()
2699 btrfs_release_path(path); in iterate_inode_extrefs()
2705 * returns 0 if the path could be dumped (probably truncated)
2713 int i = ipath->fspath->elem_cnt; in inode_to_path()
2717 bytes_left = ipath->fspath->bytes_left > s_ptr ? in inode_to_path()
2718 ipath->fspath->bytes_left - s_ptr : 0; in inode_to_path()
2720 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; in inode_to_path()
2721 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, in inode_to_path()
2727 ipath->fspath->val[i] = (u64)(unsigned long)fspath; in inode_to_path()
2728 ++ipath->fspath->elem_cnt; in inode_to_path()
2729 ipath->fspath->bytes_left = fspath - fspath_min; in inode_to_path()
2731 ++ipath->fspath->elem_missed; in inode_to_path()
2732 ipath->fspath->bytes_missing += fspath_min - fspath; in inode_to_path()
2733 ipath->fspath->bytes_left = 0; in inode_to_path()
2741 * it has been created large enough. each path is zero-terminated and accessed
2742 * from ipath->fspath->val[i].
2743 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2744 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2745 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2746 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2757 else if (ret != -ENOENT) in paths_from_inode()
2761 if (ret == -ENOENT && found_refs) in paths_from_inode()
2769 struct btrfs_data_container *data; in init_data_container() local
2772 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); in init_data_container()
2773 data = kvzalloc(alloc_bytes, GFP_KERNEL); in init_data_container()
2774 if (!data) in init_data_container()
2775 return ERR_PTR(-ENOMEM); in init_data_container()
2777 if (total_bytes >= sizeof(*data)) in init_data_container()
2778 data->bytes_left = total_bytes - sizeof(*data); in init_data_container()
2780 data->bytes_missing = sizeof(*data) - total_bytes; in init_data_container()
2782 return data; in init_data_container()
2787 * total_bytes to allocate are passed, note that space usable for actual path
2788 * information will be total_bytes - sizeof(struct inode_fs_paths).
2792 struct btrfs_path *path) in init_ipath() argument
2804 return ERR_PTR(-ENOMEM); in init_ipath()
2807 ifp->btrfs_path = path; in init_ipath()
2808 ifp->fspath = fspath; in init_ipath()
2809 ifp->fs_root = fs_root; in init_ipath()
2818 kvfree(ipath->fspath); in free_ipath()
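
A sketch of the complete inode-to-paths flow these helpers form, as the INO_PATHS/LOGICAL_INO ioctls use it (fs_root and inum are placeholders; 4096 is an arbitrary container size):

        struct btrfs_path *path;
        struct inode_fs_paths *ipath;
        int ret;
        int i;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ipath = init_ipath(4096, fs_root, path);
        if (IS_ERR(ipath)) {
                btrfs_free_path(path);
                return PTR_ERR(ipath);
        }

        ret = paths_from_inode(inum, ipath);
        for (i = 0; ret == 0 && i < ipath->fspath->elem_cnt; i++)
                pr_debug("path %d: %s\n", i,
                         (char *)(unsigned long)ipath->fspath->val[i]);

        free_ipath(ipath);      /* the btrfs_path is the caller's to free */
        btrfs_free_path(path);
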
2830 ret->path = btrfs_alloc_path(); in btrfs_backref_iter_alloc()
2831 if (!ret->path) { in btrfs_backref_iter_alloc()
2837 ret->path->search_commit_root = 1; in btrfs_backref_iter_alloc()
2838 ret->path->skip_locking = 1; in btrfs_backref_iter_alloc()
2839 ret->fs_info = fs_info; in btrfs_backref_iter_alloc()
2846 iter->bytenr = 0; in btrfs_backref_iter_release()
2847 iter->item_ptr = 0; in btrfs_backref_iter_release()
2848 iter->cur_ptr = 0; in btrfs_backref_iter_release()
2849 iter->end_ptr = 0; in btrfs_backref_iter_release()
2850 btrfs_release_path(iter->path); in btrfs_backref_iter_release()
2851 memset(&iter->cur_key, 0, sizeof(iter->cur_key)); in btrfs_backref_iter_release()
2856 struct btrfs_fs_info *fs_info = iter->fs_info; in btrfs_backref_iter_start()
2858 struct btrfs_path *path = iter->path; in btrfs_backref_iter_start() local
2865 key.offset = (u64)-1; in btrfs_backref_iter_start()
2866 iter->bytenr = bytenr; in btrfs_backref_iter_start()
2868 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); in btrfs_backref_iter_start()
2873 * Key with offset -1 found, there would have to exist an extent in btrfs_backref_iter_start()
2876 ret = -EUCLEAN; in btrfs_backref_iter_start()
2879 if (unlikely(path->slots[0] == 0)) { in btrfs_backref_iter_start()
2881 ret = -EUCLEAN; in btrfs_backref_iter_start()
2884 path->slots[0]--; in btrfs_backref_iter_start()
2886 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in btrfs_backref_iter_start()
2889 ret = -ENOENT; in btrfs_backref_iter_start()
2892 memcpy(&iter->cur_key, &key, sizeof(key)); in btrfs_backref_iter_start()
2893 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_start()
2894 path->slots[0]); in btrfs_backref_iter_start()
2895 iter->end_ptr = (u32)(iter->item_ptr + in btrfs_backref_iter_start()
2896 btrfs_item_size(path->nodes[0], path->slots[0])); in btrfs_backref_iter_start()
2897 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], in btrfs_backref_iter_start()
2903 * This is an extra precaution for non skinny-metadata, where in btrfs_backref_iter_start()
2907 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) { in btrfs_backref_iter_start()
2908 ret = -ENOTSUPP; in btrfs_backref_iter_start()
2911 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei)); in btrfs_backref_iter_start()
2914 if (iter->cur_ptr >= iter->end_ptr) { in btrfs_backref_iter_start()
2915 ret = btrfs_next_item(extent_root, path); in btrfs_backref_iter_start()
2919 ret = -ENOENT; in btrfs_backref_iter_start()
2925 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, in btrfs_backref_iter_start()
2926 path->slots[0]); in btrfs_backref_iter_start()
2927 if (iter->cur_key.objectid != bytenr || in btrfs_backref_iter_start()
2928 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY && in btrfs_backref_iter_start()
2929 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) { in btrfs_backref_iter_start()
2930 ret = -ENOENT; in btrfs_backref_iter_start()
2933 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_start()
2934 path->slots[0]); in btrfs_backref_iter_start()
2935 iter->item_ptr = iter->cur_ptr; in btrfs_backref_iter_start()
2936 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size( in btrfs_backref_iter_start()
2937 path->nodes[0], path->slots[0])); in btrfs_backref_iter_start()
2948 if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY || in btrfs_backref_iter_is_inline_ref()
2949 iter->cur_key.type == BTRFS_METADATA_ITEM_KEY) in btrfs_backref_iter_is_inline_ref()
2958 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2966 struct extent_buffer *eb = iter->path->nodes[0]; in btrfs_backref_iter_next()
2968 struct btrfs_path *path = iter->path; in btrfs_backref_iter_next() local
2975 ASSERT(iter->cur_ptr < iter->end_ptr); in btrfs_backref_iter_next()
2985 ((unsigned long)iter->cur_ptr); in btrfs_backref_iter_next()
2990 iter->cur_ptr += size; in btrfs_backref_iter_next()
2991 if (iter->cur_ptr < iter->end_ptr) in btrfs_backref_iter_next()
2998 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr); in btrfs_backref_iter_next()
2999 ret = btrfs_next_item(extent_root, iter->path); in btrfs_backref_iter_next()
3003 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]); in btrfs_backref_iter_next()
3004 if (iter->cur_key.objectid != iter->bytenr || in btrfs_backref_iter_next()
3005 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY && in btrfs_backref_iter_next()
3006 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY)) in btrfs_backref_iter_next()
3008 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], in btrfs_backref_iter_next()
3009 path->slots[0]); in btrfs_backref_iter_next()
3010 iter->cur_ptr = iter->item_ptr; in btrfs_backref_iter_next()
3011 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0], in btrfs_backref_iter_next()
3012 path->slots[0]); in btrfs_backref_iter_next()
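
A sketch of one full pass over the tree-block backrefs of a bytenr with this iterator (fs_info and bytenr are placeholders; return conventions as in the functions above: 0 to continue, > 0 when the refs are exhausted, < 0 on error):

        struct btrfs_backref_iter *iter;
        int ret;

        iter = btrfs_backref_iter_alloc(fs_info);
        if (!iter)
                return -ENOMEM;

        for (ret = btrfs_backref_iter_start(iter, bytenr);
             ret == 0;
             ret = btrfs_backref_iter_next(iter)) {
                /*
                 * Inspect iter->cur_key here; for inline refs (see
                 * btrfs_backref_iter_is_inline_ref() above) the payload
                 * sits at iter->cur_ptr inside iter->path->nodes[0].
                 */
        }
        if (ret > 0)
                ret = 0;        /* walked past the last backref: success */

        btrfs_backref_iter_release(iter);
        btrfs_backref_iter_free(iter);
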
3021 cache->rb_root = RB_ROOT; in btrfs_backref_init_cache()
3023 INIT_LIST_HEAD(&cache->pending[i]); in btrfs_backref_init_cache()
3024 INIT_LIST_HEAD(&cache->pending_edge); in btrfs_backref_init_cache()
3025 INIT_LIST_HEAD(&cache->useless_node); in btrfs_backref_init_cache()
3026 cache->fs_info = fs_info; in btrfs_backref_init_cache()
3027 cache->is_reloc = is_reloc; in btrfs_backref_init_cache()
3040 INIT_LIST_HEAD(&node->list); in btrfs_backref_alloc_node()
3041 INIT_LIST_HEAD(&node->upper); in btrfs_backref_alloc_node()
3042 INIT_LIST_HEAD(&node->lower); in btrfs_backref_alloc_node()
3043 RB_CLEAR_NODE(&node->rb_node); in btrfs_backref_alloc_node()
3044 cache->nr_nodes++; in btrfs_backref_alloc_node()
3045 node->level = level; in btrfs_backref_alloc_node()
3046 node->bytenr = bytenr; in btrfs_backref_alloc_node()
3055 ASSERT(list_empty(&node->list)); in btrfs_backref_free_node()
3056 ASSERT(list_empty(&node->lower)); in btrfs_backref_free_node()
3057 ASSERT(node->eb == NULL); in btrfs_backref_free_node()
3058 cache->nr_nodes--; in btrfs_backref_free_node()
3059 btrfs_put_root(node->root); in btrfs_backref_free_node()
3071 cache->nr_edges++; in btrfs_backref_alloc_edge()
3079 cache->nr_edges--; in btrfs_backref_free_edge()
3086 if (node->locked) { in btrfs_backref_unlock_node_buffer()
3087 btrfs_tree_unlock(node->eb); in btrfs_backref_unlock_node_buffer()
3088 node->locked = 0; in btrfs_backref_unlock_node_buffer()
3094 if (node->eb) { in btrfs_backref_drop_node_buffer()
3096 free_extent_buffer(node->eb); in btrfs_backref_drop_node_buffer()
3097 node->eb = NULL; in btrfs_backref_drop_node_buffer()
3111 ASSERT(list_empty(&node->upper)); in btrfs_backref_drop_node()
3114 list_del_init(&node->list); in btrfs_backref_drop_node()
3115 list_del_init(&node->lower); in btrfs_backref_drop_node()
3116 if (!RB_EMPTY_NODE(&node->rb_node)) in btrfs_backref_drop_node()
3117 rb_erase(&node->rb_node, &tree->rb_root); in btrfs_backref_drop_node()
3123 * upper edges and any uncached nodes in the path.
3136 while (!list_empty(&node->upper)) { in btrfs_backref_cleanup_node()
3137 edge = list_first_entry(&node->upper, struct btrfs_backref_edge, in btrfs_backref_cleanup_node()
3139 list_del(&edge->list[LOWER]); in btrfs_backref_cleanup_node()
3140 list_del(&edge->list[UPPER]); in btrfs_backref_cleanup_node()
3154 while ((node = rb_entry_safe(rb_first(&cache->rb_root), in btrfs_backref_release_cache()
3158 ASSERT(list_empty(&cache->pending_edge)); in btrfs_backref_release_cache()
3159 ASSERT(list_empty(&cache->useless_node)); in btrfs_backref_release_cache()
3160 ASSERT(!cache->nr_nodes); in btrfs_backref_release_cache()
3161 ASSERT(!cache->nr_edges); in btrfs_backref_release_cache()
3168 ASSERT(upper && lower && upper->level == lower->level + 1); in btrfs_backref_link_edge()
3169 edge->node[LOWER] = lower; in btrfs_backref_link_edge()
3170 edge->node[UPPER] = upper; in btrfs_backref_link_edge()
3171 list_add_tail(&edge->list[LOWER], &lower->upper); in btrfs_backref_link_edge()
3193 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); in handle_direct_tree_backref()
3196 if (ref_key->objectid == ref_key->offset) { in handle_direct_tree_backref()
3199 cur->is_reloc_root = 1; in handle_direct_tree_backref()
3201 if (cache->is_reloc) { in handle_direct_tree_backref()
3202 root = find_reloc_root(cache->fs_info, cur->bytenr); in handle_direct_tree_backref()
3204 return -ENOENT; in handle_direct_tree_backref()
3205 cur->root = root; in handle_direct_tree_backref()
3211 list_add(&cur->list, &cache->useless_node); in handle_direct_tree_backref()
3218 return -ENOMEM; in handle_direct_tree_backref()
3220 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); in handle_direct_tree_backref()
3223 upper = btrfs_backref_alloc_node(cache, ref_key->offset, in handle_direct_tree_backref()
3224 cur->level + 1); in handle_direct_tree_backref()
3227 return -ENOMEM; in handle_direct_tree_backref()
3234 list_add_tail(&edge->list[UPPER], &cache->pending_edge); in handle_direct_tree_backref()
3238 ASSERT(upper->checked); in handle_direct_tree_backref()
3239 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_direct_tree_backref()
3255 * @path: A clean (released) path, to avoid allocating path every time
3260 struct btrfs_path *path, in handle_indirect_tree_backref() argument
3265 struct btrfs_fs_info *fs_info = cache->fs_info; in handle_indirect_tree_backref()
3276 root = btrfs_get_fs_root(fs_info, ref_key->offset, false); in handle_indirect_tree_backref()
3280 /* We shouldn't be using backref cache for non-shareable roots. */ in handle_indirect_tree_backref()
3281 if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) { in handle_indirect_tree_backref()
3283 return -EUCLEAN; in handle_indirect_tree_backref()
3286 if (btrfs_root_level(&root->root_item) == cur->level) { in handle_indirect_tree_backref()
3288 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); in handle_indirect_tree_backref()
3296 * completely relying on direct backref (key->offset is parent in handle_indirect_tree_backref()
3299 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { in handle_indirect_tree_backref()
3301 list_add(&cur->list, &cache->useless_node); in handle_indirect_tree_backref()
3303 cur->root = root; in handle_indirect_tree_backref()
3308 level = cur->level + 1; in handle_indirect_tree_backref()
3311 path->search_commit_root = 1; in handle_indirect_tree_backref()
3312 path->skip_locking = 1; in handle_indirect_tree_backref()
3313 path->lowest_level = level; in handle_indirect_tree_backref()
3314 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0); in handle_indirect_tree_backref()
3315 path->lowest_level = 0; in handle_indirect_tree_backref()
3320 if (ret > 0 && path->slots[level] > 0) in handle_indirect_tree_backref()
3321 path->slots[level]--; in handle_indirect_tree_backref()
3323 eb = path->nodes[level]; in handle_indirect_tree_backref()
3324 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { in handle_indirect_tree_backref()
3327 cur->bytenr, level - 1, btrfs_root_id(root), in handle_indirect_tree_backref()
3328 tree_key->objectid, tree_key->type, tree_key->offset); in handle_indirect_tree_backref()
3330 ret = -ENOENT; in handle_indirect_tree_backref()
3335 /* Add all nodes and edges in the path */ in handle_indirect_tree_backref()
3337 if (!path->nodes[level]) { in handle_indirect_tree_backref()
3338 ASSERT(btrfs_root_bytenr(&root->root_item) == in handle_indirect_tree_backref()
3339 lower->bytenr); in handle_indirect_tree_backref()
3342 cache->is_reloc) { in handle_indirect_tree_backref()
3344 list_add(&lower->list, &cache->useless_node); in handle_indirect_tree_backref()
3346 lower->root = root; in handle_indirect_tree_backref()
3354 ret = -ENOMEM; in handle_indirect_tree_backref()
3358 eb = path->nodes[level]; in handle_indirect_tree_backref()
3359 rb_node = rb_simple_search(&cache->rb_root, eb->start); in handle_indirect_tree_backref()
3361 upper = btrfs_backref_alloc_node(cache, eb->start, in handle_indirect_tree_backref()
3362 lower->level + 1); in handle_indirect_tree_backref()
3366 ret = -ENOMEM; in handle_indirect_tree_backref()
3369 upper->owner = btrfs_header_owner(eb); in handle_indirect_tree_backref()
3372 if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) { in handle_indirect_tree_backref()
3376 ret = -EUCLEAN; in handle_indirect_tree_backref()
3385 upper->checked = 0; in handle_indirect_tree_backref()
3387 upper->checked = 1; in handle_indirect_tree_backref()
3394 if (!upper->checked && need_check) { in handle_indirect_tree_backref()
3396 list_add_tail(&edge->list[UPPER], in handle_indirect_tree_backref()
3397 &cache->pending_edge); in handle_indirect_tree_backref()
3399 if (upper->checked) in handle_indirect_tree_backref()
3401 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_indirect_tree_backref()
3406 ASSERT(upper->checked); in handle_indirect_tree_backref()
3407 INIT_LIST_HEAD(&edge->list[UPPER]); in handle_indirect_tree_backref()
3408 if (!upper->owner) in handle_indirect_tree_backref()
3409 upper->owner = btrfs_header_owner(eb); in handle_indirect_tree_backref()
3421 btrfs_release_path(path); in handle_indirect_tree_backref()
3429 * links aren't yet bi-directional. Needs to finish such links.
3433 * @path: Released path for indirect tree backref lookup
3439 struct btrfs_path *path, in btrfs_backref_add_tree_node() argument
3448 ret = btrfs_backref_iter_start(iter, cur->bytenr); in btrfs_backref_add_tree_node()
3461 ret = -EUCLEAN; in btrfs_backref_add_tree_node()
3465 WARN_ON(cur->checked); in btrfs_backref_add_tree_node()
3466 if (!list_empty(&cur->upper)) { in btrfs_backref_add_tree_node()
3471 ASSERT(list_is_singular(&cur->upper)); in btrfs_backref_add_tree_node()
3472 edge = list_first_entry(&cur->upper, struct btrfs_backref_edge, in btrfs_backref_add_tree_node()
3474 ASSERT(list_empty(&edge->list[UPPER])); in btrfs_backref_add_tree_node()
3475 exist = edge->node[UPPER]; in btrfs_backref_add_tree_node()
3480 if (!exist->checked) in btrfs_backref_add_tree_node()
3481 list_add_tail(&edge->list[UPPER], &cache->pending_edge); in btrfs_backref_add_tree_node()
3492 eb = iter->path->nodes[0]; in btrfs_backref_add_tree_node()
3494 key.objectid = iter->bytenr; in btrfs_backref_add_tree_node()
3500 ((unsigned long)iter->cur_ptr); in btrfs_backref_add_tree_node()
3504 ret = -EUCLEAN; in btrfs_backref_add_tree_node()
3510 key.type = iter->cur_key.type; in btrfs_backref_add_tree_node()
3511 key.offset = iter->cur_key.offset; in btrfs_backref_add_tree_node()
3520 exist->owner == key.offset) || in btrfs_backref_add_tree_node()
3522 exist->bytenr == key.offset))) { in btrfs_backref_add_tree_node()
3538 ret = handle_indirect_tree_backref(trans, cache, path, in btrfs_backref_add_tree_node()
3544 * Unrecognized tree backref items (if it can pass tree-checker) in btrfs_backref_add_tree_node()
3549 cur->checked = 1; in btrfs_backref_add_tree_node()
3562 struct list_head *useless_node = &cache->useless_node; in btrfs_backref_finish_upper_links()
3567 ASSERT(start->checked); in btrfs_backref_finish_upper_links()
3569 rb_node = rb_simple_insert(&cache->rb_root, &start->simple_node); in btrfs_backref_finish_upper_links()
3571 btrfs_backref_panic(cache->fs_info, start->bytenr, -EEXIST); in btrfs_backref_finish_upper_links()
3578 list_for_each_entry(edge, &start->upper, list[LOWER]) in btrfs_backref_finish_upper_links()
3579 list_add_tail(&edge->list[UPPER], &pending_edge); in btrfs_backref_finish_upper_links()
3587 list_del_init(&edge->list[UPPER]); in btrfs_backref_finish_upper_links()
3588 upper = edge->node[UPPER]; in btrfs_backref_finish_upper_links()
3589 lower = edge->node[LOWER]; in btrfs_backref_finish_upper_links()
3592 if (upper->detached) { in btrfs_backref_finish_upper_links()
3593 list_del(&edge->list[LOWER]); in btrfs_backref_finish_upper_links()
3597 if (list_empty(&lower->upper)) in btrfs_backref_finish_upper_links()
3598 list_add(&lower->list, useless_node); in btrfs_backref_finish_upper_links()
3605 * So if we have upper->rb_node populated, this means a cache in btrfs_backref_finish_upper_links()
3609 if (!RB_EMPTY_NODE(&upper->rb_node)) { in btrfs_backref_finish_upper_links()
3610 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_finish_upper_links()
3615 if (unlikely(!upper->checked)) { in btrfs_backref_finish_upper_links()
3617 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3620 rb_node = rb_simple_insert(&cache->rb_root, &upper->simple_node); in btrfs_backref_finish_upper_links()
3622 btrfs_backref_panic(cache->fs_info, upper->bytenr, -EEXIST); in btrfs_backref_finish_upper_links()
3623 return -EUCLEAN; in btrfs_backref_finish_upper_links()
3626 list_add_tail(&edge->list[UPPER], &upper->lower); in btrfs_backref_finish_upper_links()
3632 list_for_each_entry(edge, &upper->upper, list[LOWER]) in btrfs_backref_finish_upper_links()
3633 list_add_tail(&edge->list[UPPER], &pending_edge); in btrfs_backref_finish_upper_links()
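
A hedged sketch of how the cache pieces above fit together, modelled on relocation's build_backref_tree() (identifiers are placeholders, and the loop real users run over cache.pending_edge is collapsed into a single call):

        struct btrfs_backref_cache cache;
        struct btrfs_backref_node *node;
        int ret;

        btrfs_backref_init_cache(fs_info, &cache, is_reloc);

        node = btrfs_backref_alloc_node(&cache, bytenr, level);
        if (!node)
                return -ENOMEM;

        /* Stage 1: collect backrefs into not-yet-bidirectional edges. */
        ret = btrfs_backref_add_tree_node(trans, &cache, path, iter,
                                          node_key, node);

        /* Stage 2: link the edges both ways and insert nodes into the rb tree. */
        if (ret == 0)
                ret = btrfs_backref_finish_upper_links(&cache, node);

        if (ret < 0)
                btrfs_backref_error_cleanup(&cache, node);

        btrfs_backref_release_cache(&cache);
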
3645 while (!list_empty(&cache->useless_node)) { in btrfs_backref_error_cleanup()
3646 lower = list_first_entry(&cache->useless_node, in btrfs_backref_error_cleanup()
3648 list_del_init(&lower->list); in btrfs_backref_error_cleanup()
3650 while (!list_empty(&cache->pending_edge)) { in btrfs_backref_error_cleanup()
3651 edge = list_first_entry(&cache->pending_edge, in btrfs_backref_error_cleanup()
3653 list_del(&edge->list[UPPER]); in btrfs_backref_error_cleanup()
3654 list_del(&edge->list[LOWER]); in btrfs_backref_error_cleanup()
3655 lower = edge->node[LOWER]; in btrfs_backref_error_cleanup()
3656 upper = edge->node[UPPER]; in btrfs_backref_error_cleanup()
3663 if (list_empty(&lower->upper) && in btrfs_backref_error_cleanup()
3664 RB_EMPTY_NODE(&lower->rb_node)) in btrfs_backref_error_cleanup()
3665 list_add(&lower->list, &cache->useless_node); in btrfs_backref_error_cleanup()
3667 if (!RB_EMPTY_NODE(&upper->rb_node)) in btrfs_backref_error_cleanup()
3671 list_for_each_entry(edge, &upper->upper, list[LOWER]) in btrfs_backref_error_cleanup()
3672 list_add_tail(&edge->list[UPPER], in btrfs_backref_error_cleanup()
3673 &cache->pending_edge); in btrfs_backref_error_cleanup()
3674 if (list_empty(&upper->upper)) in btrfs_backref_error_cleanup()
3675 list_add(&upper->list, &cache->useless_node); in btrfs_backref_error_cleanup()
3678 while (!list_empty(&cache->useless_node)) { in btrfs_backref_error_cleanup()
3679 lower = list_first_entry(&cache->useless_node, in btrfs_backref_error_cleanup()
3681 list_del_init(&lower->list); in btrfs_backref_error_cleanup()
3688 ASSERT(list_empty(&cache->useless_node) && in btrfs_backref_error_cleanup()
3689 list_empty(&cache->pending_edge)); in btrfs_backref_error_cleanup()