Lines Matching defs:node

50 const struct btrfs_delayed_ref_node *node,
60 const struct btrfs_delayed_ref_node *node,
90 * the head node for delayed ref is used to store the sum of all the
92 * node may also store the extent flags to set. This way you can check
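The comment fragments at source lines 90-92 describe how the delayed ref head for an extent keeps the running sum of all queued modifications, and may also carry extent flags to set. A minimal sketch of that bookkeeping, using toy types rather than the kernel's btrfs_delayed_ref_head (which lives in fs/btrfs/delayed-ref.h), might look like this:

#include <linux/types.h>

/* Toy types only; the real structs live in fs/btrfs/delayed-ref.h. */
enum toy_action { TOY_ADD_DELAYED_REF, TOY_DROP_DELAYED_REF };

struct toy_ref_node {
	enum toy_action action;
	int ref_mod;		/* magnitude; action supplies the sign */
};

struct toy_ref_head {
	int total_ref_mod;	/* net effect of every queued node */
	u64 extent_flags;	/* optional "flags to set" update */
};

/* Fold one queued node into the head's running total. */
static void toy_account_ref(struct toy_ref_head *head,
			    const struct toy_ref_node *node)
{
	if (node->action == TOY_ADD_DELAYED_REF)
		head->total_ref_mod += node->ref_mod;
	else
		head->total_ref_mod -= node->ref_mod;
}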
501 const struct btrfs_delayed_ref_node *node,
507 u64 owner = btrfs_delayed_ref_owner(node);
508 u64 offset = btrfs_delayed_ref_offset(node);
514 if (node->parent) {
516 key.offset = node->parent;
520 key.offset = hash_extent_data_ref(node->ref_root, owner, offset);
529 if (node->parent) {
534 btrfs_set_shared_data_ref_count(leaf, ref, node->ref_mod);
537 num_refs += node->ref_mod;
545 if (match_extent_data_ref(leaf, ref, node->ref_root,
560 btrfs_set_extent_data_ref_root(leaf, ref, node->ref_root);
563 btrfs_set_extent_data_ref_count(leaf, ref, node->ref_mod);
566 num_refs += node->ref_mod;
688 const struct btrfs_delayed_ref_node *node,
696 if (node->parent) {
698 key.offset = node->parent;
701 key.offset = node->ref_root;
1476 * @node: The delayed ref node used to get the bytenr/length for
1484 const struct btrfs_delayed_ref_node *node,
1491 u64 bytenr = node->bytenr;
1492 u64 num_bytes = node->num_bytes;
1493 u64 owner = btrfs_delayed_ref_owner(node);
1494 u64 offset = btrfs_delayed_ref_offset(node);
1496 int refs_to_add = node->ref_mod;
1505 node->parent, node->ref_root, owner,
1527 ret = insert_tree_block_ref(trans, path, node, bytenr);
1531 ret = insert_extent_data_ref(trans, path, node, bytenr);
1558 const struct btrfs_delayed_ref_node *node,
1566 trace_run_delayed_data_ref(trans->fs_info, node);
1568 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1569 parent = node->parent;
1571 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1575 .num_bytes = node->num_bytes,
1580 u64 owner = btrfs_delayed_ref_owner(node);
1581 u64 offset = btrfs_delayed_ref_offset(node);
1586 key.objectid = node->bytenr;
1588 key.offset = node->num_bytes;
1590 ret = alloc_reserved_file_extent(trans, parent, node->ref_root,
1592 node->ref_mod,
1597 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1598 ret = __btrfs_inc_extent_ref(trans, node, extent_op);
1599 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1600 ret = __btrfs_free_extent(trans, href, node, extent_op);
1713 const struct btrfs_delayed_ref_node *node,
1722 trace_run_delayed_tree_ref(trans->fs_info, node);
1724 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1725 parent = node->parent;
1726 ref_root = node->ref_root;
1728 if (unlikely(node->ref_mod != 1)) {
1731 node->bytenr, node->ref_mod, node->action, ref_root,
1735 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1744 ret = alloc_reserved_tree_block(trans, node, extent_op);
1747 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1748 ret = __btrfs_inc_extent_ref(trans, node, extent_op);
1749 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1750 ret = __btrfs_free_extent(trans, href, node, extent_op);
1760 const struct btrfs_delayed_ref_node *node,
1768 btrfs_pin_extent(trans, node->bytenr, node->num_bytes);
1774 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1775 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1776 ret = run_delayed_tree_ref(trans, href, node, extent_op,
1778 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1779 node->type == BTRFS_SHARED_DATA_REF_KEY)
1780 ret = run_delayed_data_ref(trans, href, node, extent_op,
1782 else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY)
1787 btrfs_pin_extent(trans, node->bytenr, node->num_bytes);
1791 node->bytenr, node->num_bytes, node->type,
1792 node->action, node->ref_mod, ret);
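Taken together, the matches from source lines 1760-1792 outline a dispatch purely on node->type. A condensed sketch of that shape follows; it is not compilable on its own outside extent-tree.c, the trailing insert_reserved argument is an assumption inferred from the callees at lines 1558-1571 and 1713-1735, and the owner-ref branch body is not visible in the matched lines:

static int sketch_run_one_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_delayed_ref_head *href,
				      const struct btrfs_delayed_ref_node *node,
				      struct btrfs_delayed_extent_op *extent_op,
				      bool insert_reserved)
{
	int ret = 0;

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, href, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, href, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY)
		;	/* body not visible in the matched lines */

	return ret;
}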
2039 * Or we can get node references of the same type that weren't
2213 struct rb_node *node;
2262 for (node = rb_first_cached(&head->ref_tree); node;
2263 node = rb_next(node)) {
2267 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
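The matches at source lines 2213 and 2262-2267 come from the standard rbtree walk over a head's queued ref nodes. Reassembled as a sketch (the loop body here is only a placeholder comment):

#include <linux/rbtree.h>
/* struct btrfs_delayed_ref_head / _node come from fs/btrfs/delayed-ref.h */

static void sketch_walk_head_refs(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;

	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		/* ... inspect ref->action, ref->ref_mod, ref->type ... */
	}
}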
3023 * Drop one or more refs of @node.
3042 * node->bytenr = 13631488
3043 * node->num_bytes = 1048576
3067 * node->bytenr = 13631488
3068 * node->num_bytes = 1048576
3083 const struct btrfs_delayed_ref_node *node,
3098 int refs_to_drop = node->ref_mod;
3101 u64 bytenr = node->bytenr;
3102 u64 num_bytes = node->num_bytes;
3103 u64 owner_objectid = btrfs_delayed_ref_owner(node);
3104 u64 owner_offset = btrfs_delayed_ref_offset(node);
3120 node->bytenr, refs_to_drop);
3130 node->parent, node->ref_root, owner_objectid,
3231 bytenr, node->parent, node->ref_root, owner_objectid,
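The comment fragments around source lines 3023-3068 sketch a worked example (a 1 MiB extent at bytenr 13631488) for the drop path, where refs_to_drop comes straight from node->ref_mod (line 3098). The arithmetic behind a drop, shown with assumed values (existing_refs and the drop count below are illustrative, not taken from the source), is simply:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t bytenr = 13631488;	/* example extent from the comment */
	uint64_t num_bytes = 1048576;	/* 1 MiB */
	uint64_t existing_refs = 2;	/* assumed current backref count */
	int refs_to_drop = 1;		/* would be node->ref_mod */
	uint64_t remaining = existing_refs - refs_to_drop;

	if (remaining == 0)
		printf("extent %" PRIu64 "+%" PRIu64 ": last ref dropped, extent item is removed\n",
		       bytenr, num_bytes);
	else
		printf("extent %" PRIu64 "+%" PRIu64 ": %" PRIu64 " ref(s) remain, only the count is lowered\n",
		       bytenr, num_bytes, remaining);
	return 0;
}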
3483 * operations for this node. If we re-allocate this node we
3484 * could replay operations on this node that happened when it
3492 * node or root points to this extent buffer, so if after this
3494 * existing log of operations on this node that we have to
4868 const struct btrfs_delayed_ref_node *node,
4883 int level = btrfs_delayed_ref_owner(node);
4886 extent_key.objectid = node->bytenr;
4892 extent_key.offset = node->num_bytes;
4926 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
4929 btrfs_set_extent_inline_ref_offset(leaf, iref, node->parent);
4933 btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
4938 return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
5253 * simply drop our reference to it from our current parent node) and there are
5255 * blocks from the current parent node then we have to do the FULL_BACKREF dance
5261 * Decide if we need to walk down into this node to adjust the references.
5270 * This is meant to be called when we're evaluating if a node we point to at
5272 * reference to it. We return true if we should walk into the node, false if we
5329 /* All other cases we need to wander into the node. */
5393 /* If we don't need to visit this node don't reada. */
5743 /* If we don't have to walk into this node skip it. */
5749 * We have to walk down into this node, and if we're currently at the
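The comment fragments from source lines 5253-5749 describe deciding whether the snapshot-drop walk needs to descend into a child node at all, or can simply drop its reference from the parent. A rough paraphrase of that decision, deliberately simplified (the real kernel predicate has more conditions than the matched lines show), is:

#include <linux/types.h>

/*
 * Deliberately simplified: a shared child we can simply un-reference is
 * skipped; "all other cases we need to wander into the node".
 */
static bool sketch_should_walk_into(u64 child_refs, bool dropping_reference)
{
	if (dropping_reference && child_refs > 1)
		return false;	/* just drop our ref; the block stays alive */
	return true;
}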
5897 if (eb == root->node) {
5931 * DROP_REFERENCE and our refcount is > 1 then we've entered a shared node and
5933 * FULL_BACKREF on this node if it's not already set, and then do the
5941 * without visiting the node. For UPDATE_BACKREF we will skip any children that
5980 * current node, and if we're at the end of that node then we call
5981 * walk_up_proc() on our current node which will do one of a few things based on
5993 * current node and walk up to the next node to walk down the next slot.
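The comments from source lines 5931-5993 describe the walker used while dropping a tree: descend one child slot at a time, and once the current node's slots are exhausted, process the node and pop back to the parent's next slot. A generic, self-contained sketch of that shape, with toy types rather than the kernel's walk_control/btrfs_path:

#include <stddef.h>

#define TOY_MAX_LEVEL 8

struct toy_node {
	int nritems;			/* number of child slots in use */
	struct toy_node *children[16];	/* only meaningful for level > 0 */
};

struct toy_walk {
	int level;			/* current level, 0 = leaf */
	int slot[TOY_MAX_LEVEL];	/* next child slot per level */
	struct toy_node *nodes[TOY_MAX_LEVEL];
};

/* Placeholder for the "walk up" processing of a fully visited node. */
static void toy_process(struct toy_node *node)
{
	(void)node;
}

/* Post-order walk: down one slot at a time, up when a node is exhausted. */
static void toy_walk_tree(struct toy_walk *w, int root_level)
{
	while (w->level <= root_level && w->nodes[w->level]) {
		struct toy_node *cur = w->nodes[w->level];

		if (w->level > 0 && w->slot[w->level] < cur->nritems) {
			/* "walk down": visit the next child of this node */
			struct toy_node *child =
				cur->children[w->slot[w->level]++];

			w->level--;
			w->nodes[w->level] = child;
			w->slot[w->level] = 0;
		} else {
			/* "walk up": done with this node, drop it and
			 * continue with the parent's next slot */
			toy_process(cur);
			w->nodes[w->level] = NULL;
			w->level++;
		}
	}
}

The kernel version additionally decides before descending whether a child needs to be visited at all (see the comments around line 5261 above) and locks/releases each extent buffer as it goes; none of that is modeled here.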
6093 * dropped as we unlock the root node and parent nodes as we walk down
6102 level = btrfs_header_level(root->node);
6130 level = btrfs_header_level(root->node);
6310 * drop subtree rooted at tree block 'node'.
6312 * NOTE: this function will unlock and release tree block 'node'
6317 struct extent_buffer *node,
6343 btrfs_assert_tree_write_locked(node);
6344 level = btrfs_header_level(node);
6345 path->nodes[level] = node;