Lines Matching full:refs
218 * Return 0 when both refs are for the same block (and can be merged).
286 * Add @newref to the @root rbtree, merging identical refs.
302 /* Identical refs, merge them and free @newref */
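
The matches at 218, 286 and 302 describe an insert-or-merge walk over an rbtree: refs that compare equal are folded into the existing node instead of being inserted twice. A minimal sketch of that pattern, with an illustrative ref type and comparator standing in for the file's real definitions:

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative stand-in for the file's preliminary-ref type. */
struct sketch_ref {
        struct rb_node rbnode;
        u64 root_id;
        u64 parent;
        int count;
};

/* Return 0 when both refs are for the same block (and can be merged). */
static int sketch_ref_compare(const struct sketch_ref *a,
                              const struct sketch_ref *b)
{
        if (a->root_id != b->root_id)
                return a->root_id < b->root_id ? -1 : 1;
        if (a->parent != b->parent)
                return a->parent < b->parent ? -1 : 1;
        return 0;
}

/* Add @newref to the tree at @root, merging identical refs. */
static void sketch_ref_insert(struct rb_root *root, struct sketch_ref *newref)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct sketch_ref *ref;
        int result;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct sketch_ref, rbnode);
                result = sketch_ref_compare(newref, ref);
                if (result < 0) {
                        p = &(*p)->rb_left;
                } else if (result > 0) {
                        p = &(*p)->rb_right;
                } else {
                        /* Identical refs: merge counts, free @newref. */
                        ref->count += newref->count;
                        kfree(newref);
                        return;
                }
        }
        rb_link_node(&newref->rbnode, parent, p);
        rb_insert_color(&newref->rbnode, root);
}
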
356 * delayed refs
369 * on disk refs (inline or keyed)
417 /* direct refs use root == 0, key == NULL */
427 /* indirect refs use parent == 0 */
605 * adding new delayed refs. To deal with this we need to look in cache
715 * We maintain three separate rbtrees: one for direct refs, one for
716 * indirect refs which have a key, and one for indirect refs which do not
720 * indirect refs with missing keys. An appropriate key is located and
721 * the ref is moved onto the tree for indirect refs. After all missing
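
The matches at 715-721 describe the resolver's working set of three rbtrees, combined with the encoding noted at 417 and 427 (direct refs: root == 0, key == NULL; indirect refs: parent == 0). One plausible shape for those containers, inferred from the comments rather than copied from the file:

#include <linux/rbtree.h>

/* Field and type names below are assumptions drawn from the comments. */
struct preftree_sketch {
        struct rb_root root;
        unsigned int count;
};

struct preftrees_sketch {
        /* direct refs: parent block already known (root == 0, key == NULL) */
        struct preftree_sketch direct;
        /* indirect refs with a key (parent == 0), resolved to parents later */
        struct preftree_sketch indirect;
        /* indirect refs still missing a key; once an appropriate key is
         * located, the ref moves to @indirect */
        struct preftree_sketch indirect_missing_keys;
};

Keeping the missing-key refs in their own tree lets all the key lookups happen in one pass before resolution starts, which is what the comment at 720-721 describes.
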
747 * the tree, allocating new refs for each insertion, and then
825 * We may have inode lists attached to refs in the parents ulist, so we
826 * must free them before freeing the ulist and its refs.
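
The matches at 825-826 state a teardown-order rule: inode lists hang off the ulist nodes' aux pointers, so they must be walked and freed before the ulist that anchors them. A sketch of that teardown built on btrfs's ulist helpers (fs/btrfs/ulist.h); free_inode_elem_list() stands in for the file's list-freeing helper:

static void free_leaf_list_sketch(struct ulist *ulist)
{
        struct ulist_node *node;
        struct ulist_iterator uiter;

        ULIST_ITER_INIT(&uiter);
        /* Free the inode list attached to each ref first ... */
        while ((node = ulist_next(ulist, &uiter)))
                free_inode_elem_list((struct extent_inode_elem *)
                                     (uintptr_t)node->aux);

        /* ... and only then the ulist and its refs. */
        ulist_free(ulist);
}
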
883 * add all currently queued delayed refs from this head whose seq nr is
988 * refs have been checked.
1022 * enumerate all inline refs
1365 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1366 * indirect refs to their parent bytenr.
1438 * lock it so we have a consistent view of the refs at the given
1447 refcount_inc(&head->refs);
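
The matches at 1438 and 1447 belong to a contended-lock dance around a delayed-ref head: pin the head with a refcount before dropping the spinlock, sleep on the mutex, then retry so the refs are seen in a consistent state. In outline (a mid-function fragment; the retry label and surrounding lookup are assumed):

if (!mutex_trylock(&head->mutex)) {
        /* Pin the head so it cannot vanish once the lock drops. */
        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        /* Mutex was contended: block until it is released, then
         * retry to get a consistent view of the refs. */
        mutex_lock(&head->mutex);
        mutex_unlock(&head->mutex);
        btrfs_put_delayed_ref_head(head);
        goto again;
}
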
1576 * This walks the tree of merged and resolved refs. Tree blocks are
1588 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1590 * identical refs to keep the overall count correct.
1591 * prelim_ref_insert() will merge only those refs
1592 * which compare identically. Any refs having
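
The matches at 1576-1592 explain how pending drops are reconciled: a BTRFS_DROP_DELAYED_REF is queued as a ref with a negative count, so that when prelim_ref_insert() merges it with an identically-comparing ADD ref the overall count stays correct. The sign selection in miniature (the wrapper name is invented; the action constants and ref_mod field are btrfs's own):

/* Map a delayed ref's action to the signed count that is merged
 * into the preliminary ref tree. */
static int delayed_ref_count_sketch(const struct btrfs_delayed_ref_node *node)
{
        switch (node->action) {
        case BTRFS_ADD_DELAYED_REF:
                return node->ref_mod;   /* adds contribute positively */
        case BTRFS_DROP_DELAYED_REF:
                return -node->ref_mod;  /* drops cancel identical adds */
        default:
                return 0;               /* the real code handles more actions */
        }
}
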
1639 ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
1665 * this ref to the ref we added to the 'refs' ulist.
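
The matches at 1639 and 1665 pair ulist_add_merge_ptr() with inode-list stitching: when the same parent bytenr is added a second time, the call hands back the existing aux value, and the new ref's inode list is chained onto it. Roughly, as a mid-function fragment (the extent_inode_elem payload is the one used nearby; error handling is elided):

struct extent_inode_elem *eie = NULL;
int ret;

ret = ulist_add_merge_ptr(ctx->refs, ref->parent, ref->inode_list,
                          (void **)&eie, GFP_NOFS);
if (!ret && eie) {
        /* Parent already recorded: append this ref's inode list to
         * the list hanging off the existing ulist node. */
        while (eie->next)
                eie = eie->next;
        eie->next = ref->inode_list;
}
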
1690 * added to the ulist at @ctx->refs, and that ulist is allocated by this
1695 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1701 ASSERT(ctx->refs == NULL);
1703 ctx->refs = ulist_alloc(GFP_NOFS);
1704 if (!ctx->refs)
1710 free_leaf_list(ctx->refs);
1711 ctx->refs = NULL;
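
The matches at 1690-1711 document the ctx->refs ownership contract on the leaf-collection path: NULL on entry, allocated by the callee, and torn down again on error so the caller never sees a half-built list. The shape this implies (find_parent_nodes() and the wrapper name are assumptions beyond the quoted lines):

static int find_all_leafs_sketch(struct btrfs_backref_walk_ctx *ctx)
{
        int ret;

        ASSERT(ctx->refs == NULL);

        ctx->refs = ulist_alloc(GFP_NOFS);
        if (!ctx->refs)
                return -ENOMEM;

        ret = find_parent_nodes(ctx, NULL);
        if (ret < 0 && ret != -ENOENT) {
                /* Leaf nodes may carry inode lists in aux, so a
                 * plain ulist_free() would leak them. */
                free_leaf_list(ctx->refs);
                ctx->refs = NULL;
                return ret;
        }
        /* Success: the caller now owns ctx->refs. */
        return 0;
}
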
1732 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1745 ASSERT(ctx->refs == NULL);
1747 ctx->refs = ulist_alloc(GFP_NOFS);
1748 if (!ctx->refs)
1754 ulist_free(ctx->refs);
1755 ctx->refs = NULL;
1776 node = ulist_next(ctx->refs, &uiter);
1783 ulist_free(ctx->refs);
1784 ctx->refs = NULL;
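
The matches at 1732-1784 show the root-collection variant of the same contract: here ctx->refs is only a temporary work list, each ref the walk produces is fed back in as the next starting bytenr, and teardown is a plain ulist_free() because these nodes carry no aux payload. A compressed sketch under those assumptions:

static int find_all_roots_sketch(struct btrfs_backref_walk_ctx *ctx)
{
        struct ulist_iterator uiter;
        struct ulist_node *node;
        int ret = 0;

        ASSERT(ctx->refs == NULL);

        ctx->refs = ulist_alloc(GFP_NOFS);
        if (!ctx->refs)
                return -ENOMEM;

        ULIST_ITER_INIT(&uiter);
        while (1) {
                ret = find_parent_nodes(ctx, NULL);
                if (ret < 0 && ret != -ENOENT)
                        break;
                ret = 0;
                /* Restart the walk from each ref just collected. */
                node = ulist_next(ctx->refs, &uiter);
                if (!node)
                        break;
                ctx->bytenr = node->val;
        }

        /* Temporary list, no aux payloads: plain free is enough. */
        ulist_free(ctx->refs);
        ctx->refs = NULL;
        return ret;
}
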
1812 ulist_init(&ctx->refs);
1822 ulist_release(&ctx->refs);
1842 * delayed refs, but continues on even when no running transaction exists.
1877 ulist_init(&ctx->refs);
1912 walk_ctx.refs = &ctx->refs;
1918 const unsigned long prev_ref_count = ctx->refs.nnodes;
1937 * the ctx->refs ulist, in which case we have to check multiple
1965 if ((ctx->refs.nnodes - prev_ref_count) > 1)
1971 node = ulist_next(&ctx->refs, &uiter);
2036 ulist_release(&ctx->refs);
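
The matches at 1842-2036 come from a sharedness check built on the ulist's node count: nnodes is recorded before a walk (1918) and compared after it (1965); more than one newly added ref means at least two paths reach the block. The test in isolation (the helper name is invented):

/* Sharedness from the node-count delta of one walk; @refs
 * accumulates across walks, so only the delta is meaningful. */
static bool walk_added_multiple_refs(const struct ulist *refs,
                                     unsigned long prev_ref_count)
{
        /* More than one new ref means at least two paths reach this
         * block, so the extent cannot be exclusively owned. */
        return (refs->nnodes - prev_ref_count) > 1;
}
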
2269 * helper function to iterate extent inline refs. ptr must point to a 0 value
2271 * if more refs exist, 0 is returned and the next call to
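
The matches at 2269-2271 document a cursor-style iterator: the caller hands in a state word initialised to 0; a return of 0 means another inline ref was produced and the modified state must be passed back unchanged on the next call. A sketch of the calling convention the comment implies (the helper's exact signature and terminating return code are assumptions):

static int for_each_inline_ref_sketch(struct extent_buffer *eb,
                                      struct btrfs_key *key,
                                      struct btrfs_extent_item *ei,
                                      u32 item_size)
{
        struct btrfs_extent_inline_ref *eiref;
        unsigned long ptr = 0;  /* must be 0 for the first call */
        int type;
        int ret;

        while (1) {
                ret = get_extent_inline_ref(&ptr, eb, key, ei,
                                            item_size, &eiref, &type);
                if (ret < 0)
                        return ret;     /* malformed item */
                if (ret == 1)
                        break;          /* no more refs */
                /* ret == 0: another inline ref in @eiref/@type; pass
                 * the modified @ptr back unchanged next time. */
        }
        return 0;
}
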
2411 struct ulist *refs;
2445 refs = ctx->refs;
2446 ctx->refs = NULL;
2449 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2508 free_leaf_list(refs);
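
The matches at 2411-2508 show an ownership handoff: the function steals ctx->refs into a local, clears the ctx field so later walks can reuse it, and then owes the stolen list a free_leaf_list() because its leaf nodes may still carry inode lists. In miniature (the per-ref body is elided):

static int iterate_refs_sketch(struct btrfs_backref_walk_ctx *ctx)
{
        struct ulist_iterator ref_uiter;
        struct ulist_node *ref_node;
        struct ulist *refs;
        int ret = 0;

        /* Steal the list; later walks may reuse ctx->refs. */
        refs = ctx->refs;
        ctx->refs = NULL;

        ULIST_ITER_INIT(&ref_uiter);
        while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
                /* ref_node->val is a leaf bytenr; ref_node->aux may
                 * carry an inode list to report on. */
        }

        /* Aux payloads again mean free_leaf_list(), not ulist_free(). */
        free_leaf_list(refs);
        return ret;
}
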
2965 /* We're still inside the inline refs */