/* fs/btrfs/delayed-ref.c: lines matching +full:ref2 +full:- */

// SPDX-License-Identifier: GPL-2.0

#include "delayed-ref.h"
#include "extent-tree.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"

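/*
 * Returns true when the delayed refs reserve needs more space than is
 * currently reserved (even counting what is left in the global block
 * reserve), i.e. the caller should throttle and run delayed refs before
 * queueing more of them.
 */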
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        bool ret = false;
        u64 reserved;

        spin_lock(&global_rsv->lock);
        reserved = global_rsv->reserved;
        spin_unlock(&global_rsv->lock);

        spin_lock(&delayed_refs_rsv->lock);
        reserved += delayed_refs_rsv->reserved;
        if (delayed_refs_rsv->size >= reserved)
                ret = true;
        spin_unlock(&delayed_refs_rsv->lock);
        return ret;
}

/* in btrfs_delayed_refs_rsv_release(): */
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        /* ... */

/*
 * Adjust the size of the delayed refs block reserve.
 *
 * This is to be called anytime we may have adjusted trans->delayed_ref_updates
 * or trans->delayed_ref_csum_deletions: it calculates the additional size and
 * adds it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
        u64 num_bytes;
        u64 reserved_bytes;

        num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
        num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
                                                       trans->delayed_ref_csum_deletions);
        if (num_bytes == 0)
                return;

        /*
         * Take as much as possible from the transaction's local reserve first.
         * We want to minimize using the global block reserve for cases we can
         * account for in advance, to avoid exhausting it and reaching -ENOSPC
         * during a transaction commit.
         */
        spin_lock(&local_rsv->lock);
        reserved_bytes = min(num_bytes, local_rsv->reserved);
        local_rsv->reserved -= reserved_bytes;
        local_rsv->full = (local_rsv->reserved >= local_rsv->size);
        spin_unlock(&local_rsv->lock);

        spin_lock(&delayed_rsv->lock);
        delayed_rsv->size += num_bytes;
        delayed_rsv->reserved += reserved_bytes;
        delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
        spin_unlock(&delayed_rsv->lock);
        trans->delayed_ref_updates = 0;
        trans->delayed_ref_csum_deletions = 0;
}

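/*
 * Grow the delayed refs reserve by the cost of inserting one block group
 * item, to be used when a new block group is created.
 */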
void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;

        spin_lock(&delayed_rsv->lock);
        delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
        delayed_rsv->full = false;
        spin_unlock(&delayed_rsv->lock);
}

/* in btrfs_dec_delayed_refs_rsv_bg_inserts(): */
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
        /* ... */

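/*
 * Grow the reserve by the cost of updating one existing block group item.
 * Unlike the insertion case above, this uses btrfs_calc_metadata_size()
 * since no new item has to be inserted into the tree.
 */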
void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;

        spin_lock(&delayed_rsv->lock);
        delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
        delayed_rsv->full = false;
        spin_unlock(&delayed_rsv->lock);
}

/* in btrfs_dec_delayed_refs_rsv_bg_updates(): */
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
        /* ... */

/*
 * Refill the delayed refs block reserve, up to one item's worth of space;
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
                                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_space_info *space_info = block_rsv->space_info;
        u64 num_bytes = 0;
        u64 to_free = 0;
        int ret = -ENOSPC;

        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved < block_rsv->size) {
                num_bytes = block_rsv->size - block_rsv->reserved;
                num_bytes = min(num_bytes, btrfs_calc_delayed_ref_bytes(fs_info, 1));
        }
        spin_unlock(&block_rsv->lock);
        if (num_bytes == 0)
                return 0;

        ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
        if (ret)
                return ret;

        /* Recheck under the lock: someone may have refilled the reserve meanwhile. */
        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved < block_rsv->size) {
                u64 needed = block_rsv->size - block_rsv->reserved;

                if (num_bytes >= needed) {
                        block_rsv->reserved += needed;
                        block_rsv->full = true;
                        to_free = num_bytes - needed;
                } else {
                        block_rsv->reserved += num_bytes;
                }
        } else {
                to_free = num_bytes;
        }
        spin_unlock(&block_rsv->lock);
        /* ... return any excess (to_free) to the space_info ... */
        return 0;
}

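/*
 * Delayed ref nodes hanging off a head are kept in an rbtree. The
 * comparators below define the sort order: by ref type, then by parent
 * bytenr or root id (depending on whether the ref is shared), then, for
 * data refs, by objectid and offset, and finally by sequence number.
 */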
static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
                          struct btrfs_delayed_ref_node *ref2)
{
        if (ref1->data_ref.objectid < ref2->data_ref.objectid)
                return -1;
        if (ref1->data_ref.objectid > ref2->data_ref.objectid)
                return 1;
        if (ref1->data_ref.offset < ref2->data_ref.offset)
                return -1;
        if (ref1->data_ref.offset > ref2->data_ref.offset)
                return 1;
        return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
                     struct btrfs_delayed_ref_node *ref2,
                     bool check_seq)
{
        int ret = 0;

        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        } else {
                if (ref1->ref_root < ref2->ref_root)
                        return -1;
                if (ref1->ref_root > ref2->ref_root)
                        return 1;
                if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
                        ret = comp_data_refs(ref1, ref2);
        }
        if (ret)
                return ret;
        if (check_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        return 0;
}

/* in tree_insert(): */
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *node = &ins->ref_node;

        while (*p) {
                /* ... comp = comp_refs(ins, entry, true); an equal entry
                 * already exists and is returned to the caller ... */
                if (comp < 0) {
                        p = &(*p)->rb_left;
                } else if (comp > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }
        /* ... rb_link_node() and rb_insert_color_cached() for @node ... */

/* in find_first_ref_head(): */
        unsigned long from = 0;

        lockdep_assert_held(&dr->lock);
        return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);

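/*
 * Take head->mutex without deadlocking against delayed_refs->lock: if the
 * trylock fails, pin the head with a refcount, drop the spinlock, sleep on
 * the mutex, then retake the spinlock and check whether the head is still
 * tracked (it may have been run and removed in the meantime).
 */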
static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                                   struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return true;

        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->tracked) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return false;
        }
        btrfs_put_delayed_ref_head(head);
        return true;
}

/* in drop_delayed_ref(): */
        lockdep_assert_held(&head->lock);
        rb_erase_cached(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        /* ... */

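/*
 * Opposite delayed refs against the same extent cancel out, so before a head
 * is run we merge what we can. Refs at or above the lowest tree-mod-log
 * sequence still in use must be kept as-is.
 */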
/* in merge_ref(): try to merge @ref with the refs that follow it */
        struct rb_node *node = rb_next(&ref->ref_node);

        /* ... for each following ref @next that compares equal to @ref ... */
        if (seq && next->seq >= seq)
                break;
        /* ... */
        if (ref->action == next->action) {
                mod = next->ref_mod;
        } else {
                if (ref->ref_mod < next->ref_mod) {
                        /* ... swap @ref and @next so the dominant action wins ... */
                }
                mod = -next->ref_mod;
        }
        /* ... drop @next and fold its count into @ref ... */
        ref->ref_mod += mod;
        if (ref->ref_mod == 0) {
                /* ... everything cancelled out, drop @ref as well ... */
        } else {
                /* Can't have multiples of the same ref on a tree block. */
                WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                        ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
        }

/* in btrfs_merge_delayed_refs(): */
        lockdep_assert_held(&head->lock);

        if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return;
        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;
        /* ... seq = btrfs_tree_mod_log_lowest_seq(fs_info); ... */
        for (node = rb_first_cached(&head->ref_tree); node; node = rb_next(node)) {
                /* ... */
                if (seq && ref->seq >= seq)
                        continue;
                /* ... merge_ref(), restarting the scan if anything was merged ... */
        }

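/*
 * Ref heads live in an xarray indexed by bytenr >> sectorsize_bits.
 * btrfs_select_ref_head() resumes scanning at run_delayed_start, where the
 * previous selection left off, and wraps around to index 0 once, so that
 * every head eventually gets its turn.
 */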
/* in btrfs_select_ref_head(): */
        spin_lock(&delayed_refs->lock);
again:
        start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
        xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
                if (!head->processing) {
                        found_head = true;
                        break;
                }
        }
        if (!found_head) {
                if (delayed_refs->run_delayed_start == 0) {
                        spin_unlock(&delayed_refs->lock);
                        return NULL;
                }
                delayed_refs->run_delayed_start = 0;
                goto again;
        }

        head->processing = true;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr + head->num_bytes;

        locked = btrfs_delayed_ref_lock(delayed_refs, head);
        spin_unlock(&delayed_refs->lock);
        /*
         * We may have dropped the spinlock to take head->mutex; if someone
         * freed the head in the meantime the caller must retry.
         */
        if (!locked)
                return ERR_PTR(-EAGAIN);
        return head;

void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                             struct btrfs_delayed_ref_head *head)
{
        spin_lock(&delayed_refs->lock);
        head->processing = false;
        delayed_refs->num_heads_ready++;
        spin_unlock(&delayed_refs->lock);
        btrfs_delayed_ref_unlock(head);
}

void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
                           struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);

        lockdep_assert_held(&delayed_refs->lock);
        lockdep_assert_held(&head->lock);

        xa_erase(&delayed_refs->head_refs, index);
        head->tracked = false;
        delayed_refs->num_heads--;
        if (!head->processing)
                delayed_refs->num_heads_ready--;
}

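/*
 * Insert a ref into its head's rbtree, or merge it with an existing ref for
 * the same (type, root/parent, objectid/offset): same-action refs sum their
 * counts, opposite actions cancel, and a ref whose count reaches zero is
 * dropped on the spot.
 */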
static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_delayed_ref_head *href,
                               struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
        struct btrfs_delayed_ref_node *exist;
        int mod;

        spin_lock(&href->lock);
        exist = tree_insert(&href->ref_tree, ref);
        if (!exist) {
                if (ref->action == BTRFS_ADD_DELAYED_REF)
                        list_add_tail(&ref->add_list, &href->ref_add_list);
                spin_unlock(&href->lock);
                trans->delayed_ref_updates++;
                return false;
        }

        /* Now we are sure we can merge. */
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change the action. */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del_init(&exist->add_list);
                        }
                } else {
                        mod = -ref->ref_mod;
                }
        }
        exist->ref_mod += mod;

        /* Remove the existing ref if its ref_mod hits zero. */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans->fs_info, root, href, exist);
        spin_unlock(&href->lock);
        return true;
}

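/*
 * Two operations on the same extent may race to create its head ref; the
 * loser folds its update into the existing head: ref mods are summed, extent
 * op flags are merged, and pending csum deletion accounting is adjusted
 * whenever total_ref_mod changes sign.
 */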
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
                                              struct btrfs_delayed_ref_head *existing,
                                              struct btrfs_delayed_ref_head *update)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int old_ref_mod;

        BUG_ON(existing->is_data != update->is_data);

        spin_lock(&existing->lock);

        /*
         * When freeing an extent we may not know the owning root when the
         * head is first created; a later ref may know it, so take it then.
         */
        if (!existing->owning_root)
                existing->owning_root = update->owning_root;

        if (update->must_insert_reserved) {
                /*
                 * If the extent was freed and then reallocated before the
                 * delayed refs were processed, the existing head may lack the
                 * must_insert_reserved flag. Set it again here, and update
                 * num_bytes so the accounting is done correctly.
                 */
                existing->must_insert_reserved = update->must_insert_reserved;
                existing->owning_root = update->owning_root;
                existing->num_bytes = update->num_bytes;
        }

        if (update->extent_op) {
                if (!existing->extent_op) {
                        existing->extent_op = update->extent_op;
                } else {
                        if (update->extent_op->update_key) {
                                memcpy(&existing->extent_op->key,
                                       &update->extent_op->key,
                                       sizeof(update->extent_op->key));
                                existing->extent_op->update_key = true;
                        }
                        if (update->extent_op->update_flags) {
                                existing->extent_op->flags_to_set |=
                                        update->extent_op->flags_to_set;
                                existing->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(update->extent_op);
                }
        }

        /*
         * Update the reference mod on the head to reflect this new operation.
         * We only need the lock because the head could be processed right now;
         * for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing->total_ref_mod += update->ref_mod;

        /*
         * If total_ref_mod changed sign, adjust the pending csum deletion
         * accounting that was set up in add_delayed_ref_head().
         */
        if (existing->is_data) {
                u64 csum_leaves =
                        btrfs_csum_bytes_to_leaves(fs_info,
                                                   existing->num_bytes);

                if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
                        delayed_refs->pending_csums -= existing->num_bytes;
                        btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
                }
                if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
                        delayed_refs->pending_csums += existing->num_bytes;
                        trans->delayed_ref_csum_deletions += csum_leaves;
                }
        }

        spin_unlock(&existing->lock);
}

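/*
 * A head's ref_mod/total_ref_mod start at -1, 0 or +1 depending on whether
 * it was created by a drop, a head-only update, or an add; a fresh extent
 * allocation additionally sets must_insert_reserved so the extent item gets
 * inserted when the head is run.
 */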
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
                                  struct btrfs_qgroup_extent_record *qrecord,
                                  struct btrfs_ref *generic_ref, u64 reserved)
{
        int count_mod = 1;
        bool must_insert_reserved = false;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);

        switch (generic_ref->action) {
        case BTRFS_UPDATE_DELAYED_HEAD:
                count_mod = 0;
                break;
        case BTRFS_DROP_DELAYED_REF:
                /*
                 * The head node stores the sum of all the mods, so dropping a
                 * ref should drop the sum in the head node by one.
                 */
                count_mod = -1;
                break;
        case BTRFS_ADD_DELAYED_EXTENT:
                /*
                 * BTRFS_ADD_DELAYED_EXTENT means the extent was freshly
                 * allocated and the extent item still has to be inserted.
                 * ref->must_insert_reserved is the flag used to record that.
                 */
                must_insert_reserved = true;
                break;
        }

        refcount_set(&head_ref->refs, 1);
        head_ref->bytenr = generic_ref->bytenr;
        head_ref->num_bytes = generic_ref->num_bytes;
        head_ref->ref_mod = count_mod;
        head_ref->reserved_bytes = reserved;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->owning_root = generic_ref->owning_root;
        head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
        head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
        head_ref->ref_tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        head_ref->tracked = false;
        head_ref->processing = false;
        head_ref->total_ref_mod = count_mod;
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        /* For non-metadata refs set an impossible level to help debugging. */
        if (generic_ref->type == BTRFS_REF_METADATA)
                head_ref->level = generic_ref->tree_ref.level;
        else
                head_ref->level = U8_MAX;

        if (qrecord) {
                if (generic_ref->ref_root && reserved) {
                        qrecord->data_rsv = reserved;
                        qrecord->data_rsv_refroot = generic_ref->ref_root;
                }
                qrecord->num_bytes = generic_ref->num_bytes;
                qrecord->old_roots = NULL;
        }
}

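/*
 * Attach a newly initialized head to the transaction, or fold it into an
 * existing head for the same bytenr. Slots in the head_refs and dirty_extents
 * xarrays were reserved by the caller, so the stores done below under the
 * spinlock cannot fail with -ENOMEM.
 */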
/* in add_delayed_ref_head(): */
        struct btrfs_fs_info *fs_info = trans->fs_info;
        const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);

        delayed_refs = &trans->transaction->delayed_refs;
        lockdep_assert_held(&delayed_refs->lock);

#if BITS_PER_LONG == 32
        if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
                if (qrecord)
                        xa_release(&delayed_refs->dirty_extents, index);
                /* ... rate-limited error about the 32-bit xarray index limit ... */
                return ERR_PTR(-EOVERFLOW);
        }
#endif

        if (qrecord) {
                int ret;

                ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs,
                                                       qrecord, head_ref->bytenr);
                if (ret) {
                        /* Clean up if insertion failed or the item already existed. */
                        xa_release(&delayed_refs->dirty_extents, index);
                        /* ... */
                }
        }

        existing = xa_load(&delayed_refs->head_refs, index);
        if (existing) {
                update_existing_head_ref(trans, existing, head_ref);
                /* ... free the new head_ref and keep using @existing ... */
        } else {
                existing = xa_store(&delayed_refs->head_refs, index, head_ref,
                                    GFP_ATOMIC);
                if (xa_is_err(existing)) {
                        /* Memory was preallocated by the caller. */
                        ASSERT(xa_err(existing) != -ENOMEM);
                        return ERR_PTR(xa_err(existing));
                } else if (WARN_ON(existing)) {
                        /*
                         * Shouldn't happen, we just did a lookup under
                         * delayed_refs->lock.
                         */
                        return ERR_PTR(-EEXIST);
                }
                head_ref->tracked = true;
                /*
                 * Reserve the bytes needed to delete csums when adding the ref
                 * head, not per individual drop ref: csum items are deleted
                 * only after the last drop ref for the extent has run.
                 */
                if (head_ref->is_data && head_ref->ref_mod < 0) {
                        delayed_refs->pending_csums += head_ref->num_bytes;
                        trans->delayed_ref_csum_deletions +=
                                btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
                }
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
        }
        /* ... */

/*
 * Initialize the structure which represents a modification to an extent.
 * The ref root can be either one of the well-known metadata trees or the
 * subvolume id which references this extent.
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
                                    struct btrfs_delayed_ref_node *ref,
                                    struct btrfs_ref *generic_ref)
{
        int action = generic_ref->action;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(generic_ref->ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        refcount_set(&ref->refs, 1);
        ref->bytenr = generic_ref->bytenr;
        ref->num_bytes = generic_ref->num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->seq = seq;
        ref->type = btrfs_ref_type(generic_ref);
        ref->ref_root = generic_ref->ref_root;
        ref->parent = generic_ref->parent;
        RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);

        if (generic_ref->type == BTRFS_REF_DATA)
                ref->data_ref = generic_ref->data_ref;
        else
                ref->tree_ref = generic_ref->tree_ref;
}

void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
                         bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
        /* If @mod_root is not set, use @ref_root as a fallback. */
        generic_ref->real_root = mod_root ?: generic_ref->ref_root;
#endif
        generic_ref->tree_ref.level = level;
        generic_ref->type = BTRFS_REF_METADATA;
        if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
                             (!mod_root || is_fstree(mod_root))))
                generic_ref->skip_qgroup = true;
        else
                generic_ref->skip_qgroup = false;
}

void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
                         u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
        /* If @mod_root is not set, use @ref_root as a fallback. */
        generic_ref->real_root = mod_root ?: generic_ref->ref_root;
#endif
        generic_ref->data_ref.objectid = ino;
        generic_ref->data_ref.offset = offset;
        generic_ref->type = BTRFS_REF_DATA;
        if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
                             (!mod_root || is_fstree(mod_root))))
                generic_ref->skip_qgroup = true;
        else
                generic_ref->skip_qgroup = false;
}

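/*
 * Common path for queueing a delayed ref: allocate the ref node and head,
 * reserve the xarray slots up front so that the insertions done while holding
 * delayed_refs->lock cannot fail with -ENOMEM, then add the head and node.
 */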
/* in add_delayed_ref(): */
        struct btrfs_fs_info *fs_info = trans->fs_info;
        const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
        int action = generic_ref->action;

        node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
        if (!node)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                ret = -ENOMEM;
                goto free_node;
        }

        delayed_refs = &trans->transaction->delayed_refs;

        if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        ret = -ENOMEM;
                        goto free_head_ref;
                }
                if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
                        ret = -ENOMEM;
                        goto free_record;
                }
                qrecord_reserved = true;
        }

        ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
        if (ret) {
                if (qrecord_reserved)
                        xa_release(&delayed_refs->dirty_extents, index);
                goto free_record;
        }

        /* ... init_delayed_ref_common() and init_delayed_ref_head() ... */
        head_ref->extent_op = extent_op;

        spin_lock(&delayed_refs->lock);
        /* Insert both the head and the new ref without dropping the spinlock. */
        new_head_ref = add_delayed_ref_head(trans, head_ref, record, action,
                                            &qrecord_inserted);
        if (IS_ERR(new_head_ref)) {
                xa_release(&delayed_refs->head_refs, index);
                spin_unlock(&delayed_refs->lock);
                ret = PTR_ERR(new_head_ref);
                goto free_record;
        }
        head_ref = new_head_ref;

        merged = insert_delayed_ref(trans, head_ref, node);
        spin_unlock(&delayed_refs->lock);

        /* Update the delayed_refs_rsv with any changes we may have made. */
        btrfs_update_delayed_refs_rsv(trans);

        if (generic_ref->type == BTRFS_REF_DATA)
                trace_add_delayed_data_ref(trans->fs_info, node);
        else
                trace_add_delayed_tree_ref(trans->fs_info, node);
        if (merged)
                btrfs_put_delayed_ref(node);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
        return 0;
        /* ... error unwinding: free_record, free_head_ref, free_node ... */

/* in btrfs_add_delayed_tree_ref(): */
        ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
        /* ... */
        return add_delayed_ref(trans, generic_ref, extent_op, 0);

/* in btrfs_add_delayed_data_ref(): */
        ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
        /* ... */
        return add_delayed_ref(trans, generic_ref, NULL, reserved);

/* in btrfs_add_delayed_extent_op(): queue a head-only update (no ref node) */
        const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        /* ... init the head with action BTRFS_UPDATE_DELAYED_HEAD ... */
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;

        ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
        if (ret) {
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                return ret;
        }

        spin_lock(&delayed_refs->lock);
        head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
                                            BTRFS_UPDATE_DELAYED_HEAD, NULL);
        if (IS_ERR(head_ref_ret)) {
                xa_release(&delayed_refs->head_refs, index);
                spin_unlock(&delayed_refs->lock);
                return PTR_ERR(head_ref_ret);
        }
        spin_unlock(&delayed_refs->lock);
        /* ... */

void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
        if (refcount_dec_and_test(&ref->refs)) {
                WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
                kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
        }
}

/* in btrfs_find_delayed_ref_head(): */
        const unsigned long index = (bytenr >> fs_info->sectorsize_bits);

        lockdep_assert_held(&delayed_refs->lock);
        return xa_load(&delayed_refs->head_refs, index);

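/*
 * find_comp() mirrors the comp_refs() ordering for tree block refs, so
 * btrfs_find_delayed_tree_ref() below can binary-search a head's rbtree for
 * a ref with the given root or parent.
 */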
static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
{
        int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;

        if (type < entry->type)
                return -1;
        if (type > entry->type)
                return 1;

        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (root < entry->ref_root)
                        return -1;
                if (root > entry->ref_root)
                        return 1;
        } else {
                if (parent < entry->parent)
                        return -1;
                if (parent > entry->parent)
                        return 1;
        }
        return 0;
}

/* in btrfs_find_delayed_tree_ref(): */
        lockdep_assert_held(&head->mutex);

        spin_lock(&head->lock);
        node = head->ref_tree.rb_root.rb_node;
        while (node) {
                /* ... entry = rb_entry(node, ...); ret = find_comp(entry, root, parent); ... */
                if (ret < 0) {
                        node = node->rb_left;
                } else if (ret > 0) {
                        node = node->rb_right;
                } else {
                        /* Only ADD actions count, a drop means the ref is gone. */
                        if (entry->action == BTRFS_ADD_DELAYED_REF)
                                found = true;
                        break;
                }
        }
        spin_unlock(&head->lock);

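/*
 * Called on transaction abort: every queued delayed ref is dropped. Extents
 * that were allocated but whose extent item was never inserted
 * (must_insert_reserved) have their space pinned and the block group's
 * reserved byte accounting unwound by hand.
 */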
void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_ref_head *head;
        struct rb_node *n;

        spin_lock(&delayed_refs->lock);
        while ((head = find_first_ref_head(delayed_refs)) != NULL) {
                bool pin_bytes = false;

                if (!btrfs_delayed_ref_lock(delayed_refs, head))
                        continue;

                spin_lock(&head->lock);
                while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
                        /* ... drop_delayed_ref() on each queued ref ... */
                }
                if (head->must_insert_reserved)
                        pin_bytes = true;
                btrfs_free_delayed_extent_op(head->extent_op);
                btrfs_delete_ref_head(fs_info, delayed_refs, head);
                spin_unlock(&head->lock);
                spin_unlock(&delayed_refs->lock);
                mutex_unlock(&head->mutex);

                if (pin_bytes) {
                        struct btrfs_block_group *bg;

                        bg = btrfs_lookup_block_group(fs_info, head->bytenr);
                        if (WARN_ON_ONCE(bg == NULL)) {
                                /* ... nothing to unwind, just log head->bytenr ... */
                        } else {
                                spin_lock(&bg->space_info->lock);
                                spin_lock(&bg->lock);
                                bg->pinned += head->num_bytes;
                                btrfs_space_info_update_bytes_pinned(fs_info,
                                                bg->space_info,
                                                head->num_bytes);
                                bg->reserved -= head->num_bytes;
                                bg->space_info->bytes_reserved -= head->num_bytes;
                                spin_unlock(&bg->lock);
                                spin_unlock(&bg->space_info->lock);
                                btrfs_put_block_group(bg);
                        }

                        btrfs_error_unpin_extent_range(fs_info, head->bytenr,
                                        head->bytenr + head->num_bytes - 1);
                }
                btrfs_put_delayed_ref_head(head);
                cond_resched();
                spin_lock(&delayed_refs->lock);
        }
        spin_unlock(&delayed_refs->lock);
}

/* in btrfs_delayed_ref_init(): when creating any of the slab caches fails */
        return -ENOMEM;