Lines matching "+full:re +full:-" in the Linux kernel's fs/btrfs/ref-verify.c

1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
12 #include "delayed-ref.h"
13 #include "ref-verify.h"
63 * free it until we unmount the file system in order to make sure re-allocations
83 if (entry->bytenr < *bytenr) in block_entry_bytenr_key_cmp()
85 else if (entry->bytenr > *bytenr) in block_entry_bytenr_key_cmp()
86 return -1; in block_entry_bytenr_key_cmp()
95 return block_entry_bytenr_key_cmp(&new_entry->bytenr, existing); in block_entry_bytenr_cmp()
103 node = rb_find_add(&be->node, root, block_entry_bytenr_cmp); in insert_block_entry()
120 if (entry->root_objectid < *objectid) in root_entry_root_objectid_key_cmp()
122 else if (entry->root_objectid > *objectid) in root_entry_root_objectid_key_cmp()
123 return -1; in root_entry_root_objectid_key_cmp()
132 return root_entry_root_objectid_key_cmp(&new_entry->root_objectid, existing); in root_entry_root_objectid_cmp()
136 struct root_entry *re) in insert_root_entry() argument
140 node = rb_find_add(&re->node, root, root_entry_root_objectid_cmp); in insert_root_entry()
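
The comparators above follow the kernel's rb_find()/rb_find_add() convention: a key form for lookups and a node form, which just delegates to the key form, for insertion; rb_find_add() returns the already-linked node when the comparator reports a duplicate. Below is a minimal sketch of that pairing, not the ref-verify code itself; struct demo_entry and its single u64 key are stand-ins for the real block_entry/root_entry layouts, and demo_lookup() assumes the lookup_*_entry() helpers (not shown in this listing) are rb_find() wrappers.

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_entry {
        u64 key;
        struct rb_node node;
};

/* key form, as used by rb_find(): <0 means key sorts left, >0 sorts right */
static int demo_key_cmp(const void *key, const struct rb_node *node)
{
        const struct demo_entry *entry = rb_entry(node, struct demo_entry, node);
        const u64 *k = key;

        if (entry->key < *k)
                return 1;
        if (entry->key > *k)
                return -1;
        return 0;
}

/* node form, as used by rb_find_add(): just delegate to the key form */
static int demo_cmp(struct rb_node *new, const struct rb_node *existing)
{
        struct demo_entry *ne = rb_entry(new, struct demo_entry, node);

        return demo_key_cmp(&ne->key, existing);
}

/* Returns NULL when 'de' was linked, or the clashing entry already present. */
static struct demo_entry *demo_insert(struct rb_root *root, struct demo_entry *de)
{
        struct rb_node *dup = rb_find_add(&de->node, root, demo_cmp);

        return dup ? rb_entry(dup, struct demo_entry, node) : NULL;
}

static struct demo_entry *demo_lookup(struct rb_root *root, u64 key)
{
        struct rb_node *n = rb_find(&key, root, demo_key_cmp);

        return n ? rb_entry(n, struct demo_entry, node) : NULL;
}
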
146 if (ref1->root_objectid < ref2->root_objectid) in comp_refs()
147 return -1; in comp_refs()
148 if (ref1->root_objectid > ref2->root_objectid) in comp_refs()
150 if (ref1->parent < ref2->parent) in comp_refs()
151 return -1; in comp_refs()
152 if (ref1->parent > ref2->parent) in comp_refs()
154 if (ref1->owner < ref2->owner) in comp_refs()
155 return -1; in comp_refs()
156 if (ref1->owner > ref2->owner) in comp_refs()
158 if (ref1->offset < ref2->offset) in comp_refs()
159 return -1; in comp_refs()
160 if (ref1->offset > ref2->offset) in comp_refs()
178 node = rb_find_add(&ref->node, root, ref_entry_cmp); in insert_ref_entry()
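
comp_refs() gives each ref_entry a lexicographic order over (root_objectid, parent, owner, offset), so two logically identical references collide in insert_ref_entry() rather than being stored twice. A compact sketch of the same compare-field-by-field idiom follows; demo_ref and its fields are assumptions, not the real ref_entry definition.

#include <linux/types.h>

struct demo_ref {
        u64 root_objectid;
        u64 parent;
        u64 owner;
        u64 offset;
};

/* Only the first differing field decides; equal refs compare as 0. */
static int demo_comp_refs(const struct demo_ref *a, const struct demo_ref *b)
{
        if (a->root_objectid != b->root_objectid)
                return a->root_objectid < b->root_objectid ? -1 : 1;
        if (a->parent != b->parent)
                return a->parent < b->parent ? -1 : 1;
        if (a->owner != b->owner)
                return a->owner < b->owner ? -1 : 1;
        if (a->offset != b->offset)
                return a->offset < b->offset ? -1 : 1;
        return 0;       /* same logical reference */
}

With this comparator plugged into the rb_find_add() wrapper sketched earlier, insertion and duplicate detection come for free.
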
193 ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2); in __save_stack_trace()
199 if (ra->trace_len == 0) { in __print_stack_trace()
200 btrfs_err(fs_info, " ref-verify: no stacktrace"); in __print_stack_trace()
203 stack_trace_print(ra->trace, ra->trace_len, 2); in __print_stack_trace()
213 btrfs_err(fs_info, " ref-verify: no stacktrace support"); in __print_stack_trace()
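
__save_stack_trace()/__print_stack_trace() record where each reference modification came from so a later mismatch can be traced back to its caller; without CONFIG_STACKTRACE they fall back to the "no stacktrace support" message above. A hedged sketch of that save/replay pair, where DEMO_TRACE_DEPTH is an assumed stand-in for the MAX_TRACE constant not shown in this listing:

#include <linux/printk.h>
#include <linux/stacktrace.h>

#define DEMO_TRACE_DEPTH 16     /* assumption; stands in for MAX_TRACE */

struct demo_action {
        unsigned long trace[DEMO_TRACE_DEPTH];
        unsigned int trace_len;
};

static void demo_save_trace(struct demo_action *a)
{
        /* skip 2 frames so the trace starts at the interesting caller */
        a->trace_len = stack_trace_save(a->trace, DEMO_TRACE_DEPTH, 2);
}

static void demo_print_trace(const struct demo_action *a)
{
        if (!a->trace_len) {
                pr_warn("no stacktrace recorded\n");
                return;
        }
        /* replay the saved entries, indented by two spaces */
        stack_trace_print(a->trace, a->trace_len, 2);
}
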
219 struct root_entry *re; in free_block_entry() local
224 while ((n = rb_first(&be->roots))) { in free_block_entry()
225 re = rb_entry(n, struct root_entry, node); in free_block_entry()
226 rb_erase(&re->node, &be->roots); in free_block_entry()
227 kfree(re); in free_block_entry()
230 while ((n = rb_first(&be->refs))) { in free_block_entry()
232 rb_erase(&ref->node, &be->refs); in free_block_entry()
236 while (!list_empty(&be->actions)) { in free_block_entry()
237 ra = list_first_entry(&be->actions, struct ref_action, in free_block_entry()
239 list_del(&ra->list); in free_block_entry()
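
free_block_entry() tears a block_entry down by repeatedly popping rb_first() from each rbtree and list_first_entry() from the action list, always deleting the element it just fetched, which sidesteps iterator invalidation while freeing. A minimal sketch of that teardown idiom with simplified stand-in types:

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct demo_rb_item {
        struct rb_node node;
};

struct demo_list_item {
        struct list_head list;
};

static void demo_teardown(struct rb_root *tree, struct list_head *actions)
{
        struct rb_node *n;

        /* pop-and-free until the rbtree is empty */
        while ((n = rb_first(tree))) {
                struct demo_rb_item *it = rb_entry(n, struct demo_rb_item, node);

                rb_erase(&it->node, tree);
                kfree(it);
        }

        /* same idea for the list: always take the first entry */
        while (!list_empty(actions)) {
                struct demo_list_item *li =
                        list_first_entry(actions, struct demo_list_item, list);

                list_del(&li->list);
                kfree(li);
        }
}
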
250 struct root_entry *re = NULL; in add_block_entry() local
252 re = kzalloc(sizeof(struct root_entry), GFP_NOFS); in add_block_entry()
254 if (!be || !re) { in add_block_entry()
255 kfree(re); in add_block_entry()
257 return ERR_PTR(-ENOMEM); in add_block_entry()
259 be->bytenr = bytenr; in add_block_entry()
260 be->len = len; in add_block_entry()
262 re->root_objectid = root_objectid; in add_block_entry()
263 re->num_refs = 0; in add_block_entry()
265 spin_lock(&fs_info->ref_verify_lock); in add_block_entry()
266 exist = insert_block_entry(&fs_info->block_tree, be); in add_block_entry()
271 exist_re = insert_root_entry(&exist->roots, re); in add_block_entry()
273 kfree(re); in add_block_entry()
275 kfree(re); in add_block_entry()
281 be->num_refs = 0; in add_block_entry()
282 be->metadata = 0; in add_block_entry()
283 be->from_disk = 0; in add_block_entry()
284 be->roots = RB_ROOT; in add_block_entry()
285 be->refs = RB_ROOT; in add_block_entry()
286 INIT_LIST_HEAD(&be->actions); in add_block_entry()
288 insert_root_entry(&be->roots, re); in add_block_entry()
290 kfree(re); in add_block_entry()
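
add_block_entry() allocates the new block_entry and root_entry with GFP_NOFS before taking ref_verify_lock, inserts under the spinlock, and frees its freshly built copy when an entry already exists. The sketch below shows that allocate-outside-the-lock shape, reusing demo_insert() and struct demo_entry from the earlier sketch; unlike the real helper, which returns with ref_verify_lock still held for its caller to drop, this simplified version unlocks before returning.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static struct demo_entry *demo_add(struct rb_root *tree, spinlock_t *lock, u64 key)
{
        struct demo_entry *de, *exist;

        /* allocate outside the lock; GFP_NOFS avoids recursing into fs reclaim */
        de = kzalloc(sizeof(*de), GFP_NOFS);
        if (!de)
                return ERR_PTR(-ENOMEM);
        de->key = key;

        spin_lock(lock);
        exist = demo_insert(tree, de);  /* rb_find_add() wrapper from above */
        if (exist) {
                /* an entry for this key already exists: drop ours, use theirs */
                kfree(de);
                de = exist;
        }
        spin_unlock(lock);

        return de;
}
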
298 struct root_entry *re; in add_tree_block() local
303 return -ENOMEM; in add_tree_block()
306 ref->root_objectid = 0; in add_tree_block()
308 ref->root_objectid = ref_root; in add_tree_block()
309 ref->parent = parent; in add_tree_block()
310 ref->owner = level; in add_tree_block()
311 ref->offset = 0; in add_tree_block()
312 ref->num_refs = 1; in add_tree_block()
314 be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root); in add_tree_block()
319 be->num_refs++; in add_tree_block()
320 be->from_disk = 1; in add_tree_block()
321 be->metadata = 1; in add_tree_block()
325 re = lookup_root_entry(&be->roots, ref_root); in add_tree_block()
326 ASSERT(re); in add_tree_block()
327 re->num_refs++; in add_tree_block()
329 exist = insert_ref_entry(&be->refs, ref); in add_tree_block()
331 exist->num_refs++; in add_tree_block()
334 spin_unlock(&fs_info->ref_verify_lock); in add_tree_block()
348 return -ENOMEM; in add_shared_data_ref()
354 be->num_refs += num_refs; in add_shared_data_ref()
356 ref->parent = parent; in add_shared_data_ref()
357 ref->num_refs = num_refs; in add_shared_data_ref()
358 if (insert_ref_entry(&be->refs, ref)) { in add_shared_data_ref()
359 spin_unlock(&fs_info->ref_verify_lock); in add_shared_data_ref()
362 return -EINVAL; in add_shared_data_ref()
364 spin_unlock(&fs_info->ref_verify_lock); in add_shared_data_ref()
375 struct root_entry *re; in add_extent_data_ref() local
383 return -ENOMEM; in add_extent_data_ref()
389 be->num_refs += num_refs; in add_extent_data_ref()
391 ref->parent = 0; in add_extent_data_ref()
392 ref->owner = owner; in add_extent_data_ref()
393 ref->root_objectid = ref_root; in add_extent_data_ref()
394 ref->offset = offset; in add_extent_data_ref()
395 ref->num_refs = num_refs; in add_extent_data_ref()
396 if (insert_ref_entry(&be->refs, ref)) { in add_extent_data_ref()
397 spin_unlock(&fs_info->ref_verify_lock); in add_extent_data_ref()
400 return -EINVAL; in add_extent_data_ref()
403 re = lookup_root_entry(&be->roots, ref_root); in add_extent_data_ref()
404 if (!re) { in add_extent_data_ref()
405 spin_unlock(&fs_info->ref_verify_lock); in add_extent_data_ref()
407 return -EINVAL; in add_extent_data_ref()
409 re->num_refs += num_refs; in add_extent_data_ref()
410 spin_unlock(&fs_info->ref_verify_lock); in add_extent_data_ref()
422 struct extent_buffer *leaf = path->nodes[0]; in process_extent_item()
432 if ((key->type == BTRFS_EXTENT_ITEM_KEY) && in process_extent_item()
440 if (key->type == BTRFS_METADATA_ITEM_KEY) in process_extent_item()
441 *tree_block_level = key->offset; in process_extent_item()
453 ret = add_tree_block(fs_info, offset, 0, key->objectid, in process_extent_item()
457 ret = add_tree_block(fs_info, 0, offset, key->objectid, in process_extent_item()
461 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in process_extent_item()
463 key->objectid, key->offset); in process_extent_item()
469 key->objectid, key->offset); in process_extent_item()
475 ret = -EINVAL; in process_extent_item()
480 ret = -EINVAL; in process_extent_item()
494 struct btrfs_fs_info *fs_info = root->fs_info; in process_leaf()
495 struct extent_buffer *leaf = path->nodes[0]; in process_leaf()
554 eb = btrfs_read_node_slot(path->nodes[level], in walk_down_tree()
555 path->slots[level]); in walk_down_tree()
559 path->nodes[level-1] = eb; in walk_down_tree()
560 path->slots[level-1] = 0; in walk_down_tree()
561 path->locks[level-1] = BTRFS_READ_LOCK; in walk_down_tree()
568 level--; in walk_down_tree()
579 if (!path->nodes[l]) in walk_up_tree()
582 path->slots[l]++; in walk_up_tree()
583 if (path->slots[l] < in walk_up_tree()
584 btrfs_header_nritems(path->nodes[l])) { in walk_up_tree()
589 btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]); in walk_up_tree()
590 free_extent_buffer(path->nodes[l]); in walk_up_tree()
591 path->nodes[l] = NULL; in walk_up_tree()
592 path->slots[l] = 0; in walk_up_tree()
593 path->locks[l] = 0; in walk_up_tree()
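
walk_down_tree() and walk_up_tree() traverse the extent tree iteratively through a btrfs_path: descending fills nodes[]/slots[] (taking a read lock) one level at a time, and walking back up advances the slot at the lowest level that still has items while releasing everything below it. The following is a lock-free sketch of that explicit-path traversal over a hypothetical n-ary demo_node tree; the real code additionally manages extent_buffer references and per-level locks.

#include <linux/stddef.h>

#define DEMO_MAX_LEVEL 8        /* assumption; btrfs uses BTRFS_MAX_LEVEL */

struct demo_node {
        int level;                      /* 0 == leaf */
        int nritems;
        struct demo_node **children;    /* valid only when level > 0 */
};

struct demo_path {
        struct demo_node *nodes[DEMO_MAX_LEVEL];
        int slots[DEMO_MAX_LEVEL];
};

/* Descend along the current slots until we sit on a leaf. */
static void demo_walk_down(struct demo_path *p, int *level)
{
        while (*level > 0) {
                struct demo_node *child =
                        p->nodes[*level]->children[p->slots[*level]];

                p->nodes[*level - 1] = child;
                p->slots[*level - 1] = 0;
                (*level)--;
        }
}

/* Pop levels until one still has an unvisited slot; return 1 when done. */
static int demo_walk_up(struct demo_path *p, int *level)
{
        int l;

        for (l = 0; l < DEMO_MAX_LEVEL; l++) {
                if (!p->nodes[l])
                        continue;
                if (l) {        /* leaves are consumed whole, only bump node slots */
                        p->slots[l]++;
                        if (p->slots[l] < p->nodes[l]->nritems) {
                                *level = l;
                                return 0;
                        }
                }
                p->nodes[l] = NULL;
                p->slots[l] = 0;
        }
        return 1;
}

/*
 * Driver shape, roughly what btrfs_build_ref_tree() does:
 *
 *      while (level >= 0) {
 *              demo_walk_down(&path, &level);
 *              // ... process the leaf in path.nodes[0] ...
 *              if (demo_walk_up(&path, &level))
 *                      break;
 *      }
 */
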
604 ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent, in dump_ref_action()
605 ra->ref.owner, ra->ref.offset, ra->ref.num_refs); in dump_ref_action()
617 struct root_entry *re; in dump_block_entry() local
623 be->bytenr, be->len, be->num_refs, be->metadata, in dump_block_entry()
624 be->from_disk); in dump_block_entry()
626 for (n = rb_first(&be->refs); n; n = rb_next(n)) { in dump_block_entry()
630 ref->root_objectid, ref->parent, ref->owner, in dump_block_entry()
631 ref->offset, ref->num_refs); in dump_block_entry()
634 for (n = rb_first(&be->roots); n; n = rb_next(n)) { in dump_block_entry()
635 re = rb_entry(n, struct root_entry, node); in dump_block_entry()
637 re->root_objectid, re->num_refs); in dump_block_entry()
640 list_for_each_entry(ra, &be->actions, list) in dump_block_entry()
658 struct root_entry *re = NULL; in btrfs_ref_tree_mod() local
659 int action = generic_ref->action; in btrfs_ref_tree_mod()
662 u64 bytenr = generic_ref->bytenr; in btrfs_ref_tree_mod()
663 u64 num_bytes = generic_ref->num_bytes; in btrfs_ref_tree_mod()
664 u64 parent = generic_ref->parent; in btrfs_ref_tree_mod()
672 if (generic_ref->type == BTRFS_REF_METADATA) { in btrfs_ref_tree_mod()
674 ref_root = generic_ref->ref_root; in btrfs_ref_tree_mod()
675 owner = generic_ref->tree_ref.level; in btrfs_ref_tree_mod()
677 ref_root = generic_ref->ref_root; in btrfs_ref_tree_mod()
678 owner = generic_ref->data_ref.objectid; in btrfs_ref_tree_mod()
679 offset = generic_ref->data_ref.offset; in btrfs_ref_tree_mod()
688 ret = -ENOMEM; in btrfs_ref_tree_mod()
692 ref->parent = parent; in btrfs_ref_tree_mod()
693 ref->owner = owner; in btrfs_ref_tree_mod()
694 ref->root_objectid = ref_root; in btrfs_ref_tree_mod()
695 ref->offset = offset; in btrfs_ref_tree_mod()
696 ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1; in btrfs_ref_tree_mod()
698 memcpy(&ra->ref, ref, sizeof(struct ref_entry)); in btrfs_ref_tree_mod()
703 * on-disk refs we pre-loaded. in btrfs_ref_tree_mod()
705 ra->ref.owner = owner; in btrfs_ref_tree_mod()
706 ra->ref.offset = offset; in btrfs_ref_tree_mod()
707 ra->ref.root_objectid = ref_root; in btrfs_ref_tree_mod()
710 INIT_LIST_HEAD(&ra->list); in btrfs_ref_tree_mod()
711 ra->action = action; in btrfs_ref_tree_mod()
712 ra->root = generic_ref->real_root; in btrfs_ref_tree_mod()
718 ret = -EINVAL; in btrfs_ref_tree_mod()
732 be->num_refs++; in btrfs_ref_tree_mod()
734 be->metadata = 1; in btrfs_ref_tree_mod()
736 if (be->num_refs != 1) { in btrfs_ref_tree_mod()
738 "re-allocated a block that still has references to it!"); in btrfs_ref_tree_mod()
746 while (!list_empty(&be->actions)) { in btrfs_ref_tree_mod()
749 tmp = list_first_entry(&be->actions, struct ref_action, in btrfs_ref_tree_mod()
751 list_del(&tmp->list); in btrfs_ref_tree_mod()
758 re = kmalloc(sizeof(struct root_entry), GFP_NOFS); in btrfs_ref_tree_mod()
759 if (!re) { in btrfs_ref_tree_mod()
762 ret = -ENOMEM; in btrfs_ref_tree_mod()
768 * re->num_refs. in btrfs_ref_tree_mod()
770 ref_root = generic_ref->real_root; in btrfs_ref_tree_mod()
771 re->root_objectid = generic_ref->real_root; in btrfs_ref_tree_mod()
772 re->num_refs = 0; in btrfs_ref_tree_mod()
775 spin_lock(&fs_info->ref_verify_lock); in btrfs_ref_tree_mod()
776 be = lookup_block_entry(&fs_info->block_tree, bytenr); in btrfs_ref_tree_mod()
784 kfree(re); in btrfs_ref_tree_mod()
786 } else if (be->num_refs == 0) { in btrfs_ref_tree_mod()
794 kfree(re); in btrfs_ref_tree_mod()
799 tmp = insert_root_entry(&be->roots, re); in btrfs_ref_tree_mod()
801 kfree(re); in btrfs_ref_tree_mod()
802 re = tmp; in btrfs_ref_tree_mod()
807 exist = insert_ref_entry(&be->refs, ref); in btrfs_ref_tree_mod()
810 if (exist->num_refs == 0) { in btrfs_ref_tree_mod()
819 exist->num_refs--; in btrfs_ref_tree_mod()
820 if (exist->num_refs == 0) { in btrfs_ref_tree_mod()
821 rb_erase(&exist->node, &be->refs); in btrfs_ref_tree_mod()
824 } else if (!be->metadata) { in btrfs_ref_tree_mod()
825 exist->num_refs++; in btrfs_ref_tree_mod()
842 rb_erase(&ref->node, &be->refs); in btrfs_ref_tree_mod()
849 if (!parent && !re) { in btrfs_ref_tree_mod()
850 re = lookup_root_entry(&be->roots, ref_root); in btrfs_ref_tree_mod()
851 if (!re) { in btrfs_ref_tree_mod()
853 * This shouldn't happen because we will add our re in btrfs_ref_tree_mod()
859 generic_ref->real_root, be->bytenr); in btrfs_ref_tree_mod()
867 if (re) in btrfs_ref_tree_mod()
868 re->num_refs--; in btrfs_ref_tree_mod()
869 be->num_refs--; in btrfs_ref_tree_mod()
871 be->num_refs++; in btrfs_ref_tree_mod()
872 if (re) in btrfs_ref_tree_mod()
873 re->num_refs++; in btrfs_ref_tree_mod()
875 list_add_tail(&ra->list, &be->actions); in btrfs_ref_tree_mod()
878 spin_unlock(&fs_info->ref_verify_lock); in btrfs_ref_tree_mod()
882 btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); in btrfs_ref_tree_mod()
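
The core of btrfs_ref_tree_mod() is the bookkeeping around insert_ref_entry(): an add either links a new ref_entry or bumps the existing count (only legal for data extents, since a tree block carries at most one ref per root), while a drop decrements it, erases the entry at zero, and is itself an inconsistency when no matching ref was ever recorded; on any mismatch the block entry and ref action are dumped and REF_VERIFY is cleared so the checker switches itself off. A simplified sketch of just the counting step, operating on whatever the insert reported as already present and glossing over the metadata/data distinction and the action-list recording:

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ref_entry {
        u64 num_refs;
        struct rb_node node;
};

/*
 * 'exist' is what the rb_find_add()-based insert reported: NULL when the new
 * ref entry went straight into the tree, otherwise the clashing entry.
 */
static int demo_account_ref(struct rb_root *refs, struct demo_ref_entry *exist,
                            bool drop)
{
        /* a drop for a ref that was never recorded is an inconsistency */
        if (!exist)
                return drop ? -EINVAL : 0;

        if (!drop) {
                exist->num_refs++;
                return 0;
        }
        if (exist->num_refs == 0)
                return -EINVAL;         /* more drops than adds seen */
        if (--exist->num_refs == 0) {
                /* last recorded reference of this kind is gone */
                rb_erase(&exist->node, refs);
                kfree(exist);
        }
        return 0;
}
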
896 spin_lock(&fs_info->ref_verify_lock); in btrfs_free_ref_cache()
897 while ((n = rb_first(&fs_info->block_tree))) { in btrfs_free_ref_cache()
899 rb_erase(&be->node, &fs_info->block_tree); in btrfs_free_ref_cache()
901 cond_resched_lock(&fs_info->ref_verify_lock); in btrfs_free_ref_cache()
903 spin_unlock(&fs_info->ref_verify_lock); in btrfs_free_ref_cache()
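
btrfs_free_ref_cache() drains the entire block tree at unmount while holding ref_verify_lock, but calls cond_resched_lock() after each entry so the spinlock is briefly dropped whenever a reschedule is due, keeping a large cache from stalling the CPU. A minimal sketch of that drain loop, with the earlier struct demo_entry standing in for block_entry and a plain kfree() in place of free_block_entry():

#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static void demo_free_cache(struct rb_root *tree, spinlock_t *lock)
{
        struct rb_node *n;

        spin_lock(lock);
        while ((n = rb_first(tree))) {
                struct demo_entry *de = rb_entry(n, struct demo_entry, node);

                rb_erase(&de->node, tree);
                kfree(de);
                /* drops and retakes 'lock' if a reschedule is pending */
                cond_resched_lock(lock);
        }
        spin_unlock(lock);
}
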
915 spin_lock(&fs_info->ref_verify_lock); in btrfs_free_ref_tree_range()
916 n = fs_info->block_tree.rb_node; in btrfs_free_ref_tree_range()
919 if (entry->bytenr < start) { in btrfs_free_ref_tree_range()
920 n = n->rb_right; in btrfs_free_ref_tree_range()
921 } else if (entry->bytenr > start) { in btrfs_free_ref_tree_range()
922 n = n->rb_left; in btrfs_free_ref_tree_range()
929 (entry->bytenr < start && be->bytenr > start) || in btrfs_free_ref_tree_range()
930 (entry->bytenr < start && entry->bytenr > be->bytenr)) in btrfs_free_ref_tree_range()
939 spin_unlock(&fs_info->ref_verify_lock); in btrfs_free_ref_tree_range()
943 n = &be->node; in btrfs_free_ref_tree_range()
947 if (be->bytenr < start && be->bytenr + be->len > start) { in btrfs_free_ref_tree_range()
954 if (be->bytenr < start) in btrfs_free_ref_tree_range()
956 if (be->bytenr >= start + len) in btrfs_free_ref_tree_range()
958 if (be->bytenr + be->len > start + len) { in btrfs_free_ref_tree_range()
964 rb_erase(&be->node, &fs_info->block_tree); in btrfs_free_ref_tree_range()
967 spin_unlock(&fs_info->ref_verify_lock); in btrfs_free_ref_tree_range()
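
btrfs_free_ref_tree_range() discards every cached entry overlapping [start, start + len), for example when a block group goes away: it binary-searches down the rbtree for the entry closest to start, preferring the highest entry at or below it, then walks forward with rb_next(), erasing and freeing as it goes. A sketch of that descent with the same preference rule; demo_range_entry keeps only a bytenr, whereas the real code also consults be->len for partial overlaps.

#include <linux/rbtree.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_range_entry {
        u64 bytenr;
        struct rb_node node;
};

static struct demo_range_entry *demo_find_closest(struct rb_root *tree, u64 start)
{
        struct rb_node *n = tree->rb_node;
        struct demo_range_entry *best = NULL;

        while (n) {
                struct demo_range_entry *e =
                        rb_entry(n, struct demo_range_entry, node);

                if (e->bytenr < start)
                        n = n->rb_right;
                else if (e->bytenr > start)
                        n = n->rb_left;
                else
                        return e;       /* exact hit on 'start' */

                /* prefer the highest entry below start over anything above it */
                if (!best ||
                    (e->bytenr < start && best->bytenr > start) ||
                    (e->bytenr < start && e->bytenr > best->bytenr))
                        best = e;
        }
        return best;
}

From the entry found here, the caller advances with rb_next() and stops once an entry's bytenr reaches start + len.
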
985 return -ENOMEM; in btrfs_build_ref_tree()
990 path->nodes[level] = eb; in btrfs_build_ref_tree()
991 path->slots[level] = 0; in btrfs_build_ref_tree()
992 path->locks[level] = BTRFS_READ_LOCK; in btrfs_build_ref_tree()
1015 btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); in btrfs_build_ref_tree()