Lines Matching full:existing in fs/btrfs/delayed-ref.c
579 * Return true if the ref was merged into an existing one (and therefore can be
623 /* remove existing tail if its ref_mod is zero */ in insert_delayed_ref()
632 * existing and update must have the same bytenr
635 struct btrfs_delayed_ref_head *existing, in update_existing_head_ref() argument
643 BUG_ON(existing->is_data != update->is_data); in update_existing_head_ref()
645 spin_lock(&existing->lock); in update_existing_head_ref()
652 if (!existing->owning_root) in update_existing_head_ref()
653 existing->owning_root = update->owning_root; in update_existing_head_ref()
659 * with an existing head ref without in update_existing_head_ref()
663 existing->must_insert_reserved = update->must_insert_reserved; in update_existing_head_ref()
664 existing->owning_root = update->owning_root; in update_existing_head_ref()
670 existing->num_bytes = update->num_bytes; in update_existing_head_ref()
675 if (!existing->extent_op) { in update_existing_head_ref()
676 existing->extent_op = update->extent_op; in update_existing_head_ref()
679 memcpy(&existing->extent_op->key, in update_existing_head_ref()
682 existing->extent_op->update_key = true; in update_existing_head_ref()
685 existing->extent_op->flags_to_set |= in update_existing_head_ref()
687 existing->extent_op->update_flags = true; in update_existing_head_ref()
697 old_ref_mod = existing->total_ref_mod; in update_existing_head_ref()
698 existing->ref_mod += update->ref_mod; in update_existing_head_ref()
699 existing->total_ref_mod += update->ref_mod; in update_existing_head_ref()
707 if (existing->is_data) { in update_existing_head_ref()
710 existing->num_bytes); in update_existing_head_ref()
712 if (existing->total_ref_mod >= 0 && old_ref_mod < 0) { in update_existing_head_ref()
713 delayed_refs->pending_csums -= existing->num_bytes; in update_existing_head_ref()
716 if (existing->total_ref_mod < 0 && old_ref_mod >= 0) { in update_existing_head_ref()
717 delayed_refs->pending_csums += existing->num_bytes; in update_existing_head_ref()
722 spin_unlock(&existing->lock); in update_existing_head_ref()
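The matches above are mostly from update_existing_head_ref(), which folds a duplicate head ref for the same bytenr into the one already queued: it copies owning_root and must_insert_reserved over when they are missing, merges any extent_op, accumulates ref_mod and total_ref_mod, and for data extents adjusts delayed_refs->pending_csums when total_ref_mod changes sign. The userspace model below is a minimal sketch of just that ref-count and csum accounting; struct head_ref, struct ref_root and merge_head_ref() are simplified stand-ins invented here for illustration, not the btrfs definitions.

/*
 * Userspace model of the ref_mod / pending_csums merge above. The structs
 * and merge_head_ref() are simplified stand-ins, not btrfs definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct head_ref {
	long long ref_mod;		/* net mod still to apply for this head */
	long long total_ref_mod;	/* running total; negative means net free */
	unsigned long long num_bytes;
	bool is_data;
};

struct ref_root {
	unsigned long long pending_csums;	/* data bytes whose csums await deletion */
};

/* Fold @update into @existing the way the matched lines above do. */
static void merge_head_ref(struct ref_root *delayed_refs,
			   struct head_ref *existing,
			   const struct head_ref *update)
{
	long long old_ref_mod = existing->total_ref_mod;

	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/* Adjust pending csum bytes only when total_ref_mod changes sign. */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
}

int main(void)
{
	struct ref_root root = { .pending_csums = 4096 };	/* a 4K net free was queued */
	struct head_ref existing = { .ref_mod = -1, .total_ref_mod = -1,
				     .num_bytes = 4096, .is_data = true };
	struct head_ref update = { .ref_mod = 2, .total_ref_mod = 2,
				   .num_bytes = 4096, .is_data = true };

	merge_head_ref(&root, &existing, &update);
	/* Prints ref_mod=1 pending_csums=0: the pending free was cancelled out. */
	printf("ref_mod=%lld pending_csums=%llu\n",
	       existing.ref_mod, root.pending_csums);
	return 0;
}

The sign-flip checks mirror the matched lines 712 and 716: csum deletion bytes stay accounted in pending_csums only while the head is still a net free.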
814 struct btrfs_delayed_ref_head *existing; in add_delayed_ref_head() local
854 existing = xa_load(&delayed_refs->head_refs, index); in add_delayed_ref_head()
855 if (existing) { in add_delayed_ref_head()
856 update_existing_head_ref(trans, existing, head_ref); in add_delayed_ref_head()
858 * we've updated the existing ref, free the newly in add_delayed_ref_head()
862 head_ref = existing; in add_delayed_ref_head()
864 existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC); in add_delayed_ref_head()
865 if (xa_is_err(existing)) { in add_delayed_ref_head()
867 ASSERT(xa_err(existing) != -ENOMEM); in add_delayed_ref_head()
868 return ERR_PTR(xa_err(existing)); in add_delayed_ref_head()
869 } else if (WARN_ON(existing)) { in add_delayed_ref_head()
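The remaining matches are from add_delayed_ref_head(): it first looks up the head for this index with xa_load(); on a hit it merges via update_existing_head_ref(), frees the newly allocated head and reuses the existing one, otherwise it publishes the new head with xa_store() and checks the result with xa_is_err()/xa_err(). Below is a minimal userspace sketch of that lookup-or-insert shape only; the fixed-size table and the slot_load()/slot_store()/add_head_ref() helpers are made-up stand-ins, not the kernel xarray API, and the store-error path is omitted.

/*
 * Minimal lookup-or-insert sketch. Indexes must stay below NR_SLOTS here;
 * the real code uses an xarray keyed by an index derived from the extent
 * bytenr and also handles store errors.
 */
#include <stdio.h>
#include <stdlib.h>

struct head_ref {
	unsigned long index;
	long long ref_mod;
};

#define NR_SLOTS 64
static struct head_ref *head_refs[NR_SLOTS];

static struct head_ref *slot_load(unsigned long index)
{
	return index < NR_SLOTS ? head_refs[index] : NULL;
}

static void slot_store(unsigned long index, struct head_ref *head)
{
	if (index < NR_SLOTS)
		head_refs[index] = head;
}

/* Return the head ref that ends up installed for head_ref->index. */
static struct head_ref *add_head_ref(struct head_ref *head_ref)
{
	struct head_ref *existing = slot_load(head_ref->index);

	if (existing) {
		/* Merge the update, then drop the now-redundant allocation. */
		existing->ref_mod += head_ref->ref_mod;
		free(head_ref);
		return existing;
	}

	slot_store(head_ref->index, head_ref);
	return head_ref;
}

int main(void)
{
	struct head_ref *a = calloc(1, sizeof(*a));
	struct head_ref *b = calloc(1, sizeof(*b));
	struct head_ref *installed;

	a->index = 7;
	a->ref_mod = 1;
	b->index = 7;
	b->ref_mod = -1;

	add_head_ref(a);
	installed = add_head_ref(b);	/* merges into a, frees b */
	printf("index=%lu ref_mod=%lld\n", installed->index, installed->ref_mod);
	free(installed);
	return 0;
}

The point of the pattern, as the matched lines 854-868 show, is that only one head ref per extent lives in the structure at a time: concurrent adders either install theirs or merge into whoever got there first.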