Lines Matching +full:no +full:- +full:insert +full:- +full:detect

1 // SPDX-License-Identifier: GPL-2.0
8 #include "extent-io-tree.h"
15 return !RB_EMPTY_NODE(&state->rb_node); in extent_state_in_tree()
27 list_add(&state->leak_list, &states); in btrfs_leak_debug_add_state()
36 list_del(&state->leak_list); in btrfs_leak_debug_del_state()
48 state->start, state->end, state->state, in btrfs_extent_state_leak_debug_check()
50 refcount_read(&state->refs)); in btrfs_extent_state_leak_debug_check()
51 list_del(&state->leak_list); in btrfs_extent_state_leak_debug_check()
63 const struct btrfs_inode *inode = tree->inode; in __btrfs_debug_check_extent_io_range()
66 if (tree->owner != IO_TREE_INODE_IO) in __btrfs_debug_check_extent_io_range()
69 isize = i_size_read(&inode->vfs_inode); in __btrfs_debug_check_extent_io_range()
70 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { in __btrfs_debug_check_extent_io_range()
71 btrfs_debug_rl(inode->root->fs_info, in __btrfs_debug_check_extent_io_range()
83 /* Read-only access to the inode. */
86 if (tree->owner == IO_TREE_INODE_IO) in btrfs_extent_io_tree_to_inode()
87 return tree->inode; in btrfs_extent_io_tree_to_inode()
91 /* For read-only access to fs_info. */
94 if (tree->owner == IO_TREE_INODE_IO) in btrfs_extent_io_tree_to_fs_info()
95 return tree->inode->root->fs_info; in btrfs_extent_io_tree_to_fs_info()
96 return tree->fs_info; in btrfs_extent_io_tree_to_fs_info()
102 tree->state = RB_ROOT; in btrfs_extent_io_tree_init()
103 spin_lock_init(&tree->lock); in btrfs_extent_io_tree_init()
104 tree->fs_info = fs_info; in btrfs_extent_io_tree_init()
105 tree->owner = owner; in btrfs_extent_io_tree_init()
110 * tree. This should be called once we are sure no other task can access the
111 * tree anymore, so no tree updates happen after we empty the tree and there
121 spin_lock(&tree->lock); in btrfs_extent_io_tree_release()
122 root = tree->state; in btrfs_extent_io_tree_release()
123 tree->state = RB_ROOT; in btrfs_extent_io_tree_release()
126 RB_CLEAR_NODE(&state->rb_node); in btrfs_extent_io_tree_release()
127 ASSERT(!(state->state & EXTENT_LOCK_BITS)); in btrfs_extent_io_tree_release()
129 * No need for a memory barrier here, as we are holding the tree in btrfs_extent_io_tree_release()
133 ASSERT(!waitqueue_active(&state->wq)); in btrfs_extent_io_tree_release()
135 cond_resched_lock(&tree->lock); in btrfs_extent_io_tree_release()
138 * Should still be empty even after a reschedule, no other task should in btrfs_extent_io_tree_release()
141 ASSERT(RB_EMPTY_ROOT(&tree->state)); in btrfs_extent_io_tree_release()
142 spin_unlock(&tree->lock); in btrfs_extent_io_tree_release()
157 state->state = 0; in alloc_extent_state()
158 RB_CLEAR_NODE(&state->rb_node); in alloc_extent_state()
160 refcount_set(&state->refs, 1); in alloc_extent_state()
161 init_waitqueue_head(&state->wq); in alloc_extent_state()
178 if (refcount_dec_and_test(&state->refs)) { in btrfs_free_extent_state()
194 if (set && (state->state & bits) == bits) in add_extent_changeset()
196 if (!set && (state->state & bits) == 0) in add_extent_changeset()
198 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
199 ret = ulist_add(&changeset->range_changed, state->start, state->end, in add_extent_changeset()
206 struct rb_node *next = rb_next(&state->rb_node); in next_state()
213 struct rb_node *next = rb_prev(&state->rb_node); in prev_state()
231 * If no such entry exists, return the first entry that starts and ends after
242 struct rb_root *root = &tree->state; in tree_search_for_insert()
243 struct rb_node **node = &root->rb_node; in tree_search_for_insert()
251 if (offset < entry->start) in tree_search_for_insert()
252 node = &(*node)->rb_left; in tree_search_for_insert()
253 else if (offset > entry->end) in tree_search_for_insert()
254 node = &(*node)->rb_right; in tree_search_for_insert()
269 while (entry && offset > entry->end) in tree_search_for_insert()
283 * Return a pointer to the entry that contains @offset byte address. If no
292 struct rb_root *root = &tree->state; in tree_search_prev_next()
293 struct rb_node **node = &root->rb_node; in tree_search_prev_next()
303 if (offset < entry->start) in tree_search_prev_next()
304 node = &(*node)->rb_left; in tree_search_prev_next()
305 else if (offset > entry->end) in tree_search_prev_next()
306 node = &(*node)->rb_right; in tree_search_prev_next()
312 while (entry && offset > entry->end) in tree_search_prev_next()
317 while (entry && offset < entry->start) in tree_search_prev_next()
325 * Inexact rb-tree search, return the next entry if @offset is not found
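The search helpers above (tree_search_for_insert, tree_search_prev_next and the "inexact" tree_search) all answer the same question over non-overlapping [start, end] ranges: return the entry containing @offset, or the next entry after it when nothing contains it. A minimal standalone sketch of that "containing or next" lookup, done over a sorted array rather than the rbtree the kernel code walks (struct and function names below are illustrative, not taken from the listing):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a non-overlapping, sorted extent range. */
struct range {
	uint64_t start;
	uint64_t end;	/* inclusive */
};

/*
 * Return the entry containing @offset, or the first entry that starts after
 * @offset when no entry contains it; NULL once we run past the last entry.
 */
static const struct range *range_search(const struct range *ranges,
					size_t nr, uint64_t offset)
{
	for (size_t i = 0; i < nr; i++) {
		if (offset <= ranges[i].end)
			return &ranges[i];	/* contains @offset or starts after it */
	}
	return NULL;
}

int main(void)
{
	const struct range ranges[] = { { 0, 4095 }, { 8192, 12287 } };
	const struct range *r = range_search(ranges, 2, 5000);

	if (r)	/* offset 5000 falls in the hole, so the next range is returned */
		printf("[%llu, %llu]\n", (unsigned long long)r->start,
		       (unsigned long long)r->end);
	return 0;
}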
339 opname, state->start, state->end); in extent_io_tree_panic()
347 if (prev && prev->end == state->start - 1 && prev->state == state->state) { in merge_prev_state()
348 if (tree->owner == IO_TREE_INODE_IO) in merge_prev_state()
349 btrfs_merge_delalloc_extent(tree->inode, state, prev); in merge_prev_state()
350 state->start = prev->start; in merge_prev_state()
351 rb_erase(&prev->rb_node, &tree->state); in merge_prev_state()
352 RB_CLEAR_NODE(&prev->rb_node); in merge_prev_state()
362 if (next && next->start == state->end + 1 && next->state == state->state) { in merge_next_state()
363 if (tree->owner == IO_TREE_INODE_IO) in merge_next_state()
364 btrfs_merge_delalloc_extent(tree->inode, state, next); in merge_next_state()
365 state->end = next->end; in merge_next_state()
366 rb_erase(&next->rb_node, &tree->state); in merge_next_state()
367 RB_CLEAR_NODE(&next->rb_node); in merge_next_state()
383 if (state->state & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY)) in merge_state()
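merge_prev_state() and merge_next_state() above absorb a neighbour only when it is exactly adjacent (prev->end == state->start - 1, or next->start == state->end + 1) and carries an identical bit set, and merge_state() refuses to merge anything holding EXTENT_LOCK_BITS or EXTENT_BOUNDARY. A small standalone sketch of the adjacency-plus-equal-bits test for the "previous" side (the struct and helper below are illustrative, not the kernel types):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for an extent_state record. */
struct state {
	uint64_t start;
	uint64_t end;	/* inclusive */
	uint32_t bits;
};

/*
 * Absorb @prev into @state when the two ranges touch with no gap and carry
 * exactly the same bits, mirroring the condition shown in merge_prev_state().
 * Returns true if the merge happened (the caller would then drop @prev).
 */
static bool try_merge_prev(struct state *state, const struct state *prev)
{
	if (prev && prev->end == state->start - 1 && prev->bits == state->bits) {
		state->start = prev->start;
		return true;
	}
	return false;
}

int main(void)
{
	struct state prev = { 0, 4095, 0x1 };
	struct state cur = { 4096, 8191, 0x1 };

	/* Adjacent ([0,4095] + [4096,8191]) with equal bits: cur becomes [0, 8191]. */
	try_merge_prev(&cur, &prev);
	return 0;
}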
397 if (tree->owner == IO_TREE_INODE_IO) in set_state_bits()
398 btrfs_set_delalloc_extent(tree->inode, state, bits); in set_state_bits()
402 state->state |= bits_to_set; in set_state_bits()
406 * Insert an extent_state struct into the tree. 'bits' are set on the
427 const u64 start = state->start - 1; in insert_state()
428 const u64 end = state->end + 1; in insert_state()
433 node = &tree->state.rb_node; in insert_state()
440 if (state->end < entry->start) { in insert_state()
441 if (try_merge && end == entry->start && in insert_state()
442 state->state == entry->state) { in insert_state()
443 if (tree->owner == IO_TREE_INODE_IO) in insert_state()
444 btrfs_merge_delalloc_extent(tree->inode, in insert_state()
446 entry->start = state->start; in insert_state()
448 state->state = 0; in insert_state()
451 node = &(*node)->rb_left; in insert_state()
452 } else if (state->end > entry->end) { in insert_state()
453 if (try_merge && entry->end == start && in insert_state()
454 state->state == entry->state) { in insert_state()
455 if (tree->owner == IO_TREE_INODE_IO) in insert_state()
456 btrfs_merge_delalloc_extent(tree->inode, in insert_state()
458 entry->end = state->end; in insert_state()
460 state->state = 0; in insert_state()
463 node = &(*node)->rb_right; in insert_state()
465 return ERR_PTR(-EEXIST); in insert_state()
469 rb_link_node(&state->rb_node, parent, node); in insert_state()
470 rb_insert_color(&state->rb_node, &tree->state); in insert_state()
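While descending the rbtree, insert_state() either merges the new state into an exactly adjacent entry with identical bits, keeps descending left or right, or fails with -EEXIST when the new range overlaps an existing entry. A standalone sketch of an analogous accept-or-EEXIST decision over a sorted array, without the opportunistic merging (names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;	/* inclusive */
};

/*
 * Return the index where [start, end] would be inserted into a sorted,
 * non-overlapping array, or -1 when it overlaps an existing entry (the
 * case where the tree code returns -EEXIST).
 */
static long find_slot(const struct range *r, size_t nr, uint64_t start, uint64_t end)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (end < r[i].start)
			break;		/* goes entirely before entry i */
		if (start > r[i].end)
			continue;	/* goes entirely after entry i */
		return -1;		/* overlap */
	}
	return (long)i;
}

int main(void)
{
	const struct range r[] = { { 0, 4095 }, { 8192, 12287 } };

	printf("%ld\n", find_slot(r, 2, 4096, 8191));	/* 1: fits in the hole */
	printf("%ld\n", find_slot(r, 2, 4000, 5000));	/* -1: overlaps [0, 4095] */
	return 0;
}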
476 * Insert state into @tree at the location given by @node and @parent. in insert_state_fast()
484 rb_link_node(&state->rb_node, parent, node); in insert_state_fast()
485 rb_insert_color(&state->rb_node, &tree->state); in insert_state_fast()
495 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
497 * prealloc: [orig->start, split - 1]
498 * orig: [ split, orig->end ]
509 if (tree->owner == IO_TREE_INODE_IO) in split_state()
510 btrfs_split_delalloc_extent(tree->inode, orig, split); in split_state()
512 prealloc->start = orig->start; in split_state()
513 prealloc->end = split - 1; in split_state()
514 prealloc->state = orig->state; in split_state()
515 orig->start = split; in split_state()
517 parent = &orig->rb_node; in split_state()
525 if (prealloc->end < entry->start) { in split_state()
526 node = &(*node)->rb_left; in split_state()
527 } else if (prealloc->end > entry->end) { in split_state()
528 node = &(*node)->rb_right; in split_state()
531 return -EEXIST; in split_state()
535 rb_link_node(&prealloc->rb_node, parent, node); in split_state()
536 rb_insert_color(&prealloc->rb_node, &tree->state); in split_state()
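The comment above split_state() documents the resulting layout: prealloc takes over [orig->start, split - 1] and orig keeps [split, orig->end], both with the original bits, after which prealloc is linked back into the tree next to orig. A standalone sketch of just that range arithmetic (struct and function names are illustrative):

#include <assert.h>
#include <stdint.h>

struct range {
	uint64_t start;
	uint64_t end;	/* inclusive */
	uint32_t bits;
};

/*
 * Split @orig at @split: @prealloc becomes [orig->start, split - 1] and
 * @orig is trimmed to [split, orig->end], both keeping the same bits,
 * matching the layout documented for split_state() above.
 */
static void split_range(struct range *orig, struct range *prealloc, uint64_t split)
{
	assert(split > orig->start && split <= orig->end);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->bits = orig->bits;
	orig->start = split;
}

int main(void)
{
	struct range orig = { 0, 16383, 0x1 };
	struct range left;

	split_range(&orig, &left, 4096);	/* left = [0, 4095], orig = [4096, 16383] */
	return 0;
}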
547 if (state->end < end) in next_search_state()
557 * If no bits are set on the state struct after clearing things, the
569 if (tree->owner == IO_TREE_INODE_IO) in clear_state_bit()
570 btrfs_clear_delalloc_extent(tree->inode, state, bits); in clear_state_bit()
574 state->state &= ~bits_to_clear; in clear_state_bit()
576 wake_up(&state->wq); in clear_state_bit()
577 if (state->state == 0) { in clear_state_bit()
580 rb_erase(&state->rb_node, &tree->state); in clear_state_bit()
581 RB_CLEAR_NODE(&state->rb_node); in clear_state_bit()
594 * Detect if extent bits request NOWAIT semantics and set the gfp mask accordingly,
600 *bits &= EXTENT_NOWAIT - 1; in set_gfp_mask_from_bits()
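set_gfp_mask_from_bits() removes the EXTENT_NOWAIT request from the caller's bit mask before the bits reach the tree: *bits &= EXTENT_NOWAIT - 1 keeps every bit below EXTENT_NOWAIT, which relies on EXTENT_NOWAIT being the highest-valued bit in the mask (an assumption here; the listing only shows the masking line). A minimal standalone illustration of the idiom, with a made-up flag value:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical value just for the illustration. */
#define EX_NOWAIT	(1U << 15)	/* assumed to be the highest control bit */

int main(void)
{
	uint32_t bits = 0x3 | EX_NOWAIT;	/* two state bits plus the NOWAIT request */
	int nowait = !!(bits & EX_NOWAIT);	/* remember the caller's request ... */

	bits &= EX_NOWAIT - 1;			/* ... then keep only the bits below it */

	printf("nowait=%d bits=0x%x\n", nowait, bits);	/* nowait=1 bits=0x3 */
	return 0;
}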
628 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); in btrfs_clear_extent_bit_changeset()
642 * up not needing the pre-allocated extent state at all, which in btrfs_clear_extent_bit_changeset()
650 spin_lock(&tree->lock); in btrfs_clear_extent_bit_changeset()
660 cached->start <= start && cached->end > start) { in btrfs_clear_extent_bit_changeset()
662 refcount_dec(&cached->refs); in btrfs_clear_extent_bit_changeset()
675 if (state->start > end) in btrfs_clear_extent_bit_changeset()
677 WARN_ON(state->end < start); in btrfs_clear_extent_bit_changeset()
678 last_end = state->end; in btrfs_clear_extent_bit_changeset()
681 if (!(state->state & bits)) { in btrfs_clear_extent_bit_changeset()
687 * | ---- desired range ---- | in btrfs_clear_extent_bit_changeset()
689 * | ------------- state -------------- | in btrfs_clear_extent_bit_changeset()
701 if (state->start < start) { in btrfs_clear_extent_bit_changeset()
711 if (state->end <= end) { in btrfs_clear_extent_bit_changeset()
721 * in non-atomic mode and start the search again. in btrfs_clear_extent_bit_changeset()
725 * | ---- desired range ---- | in btrfs_clear_extent_bit_changeset()
729 if (state->start <= end && state->end > end) { in btrfs_clear_extent_bit_changeset()
741 wake_up(&state->wq); in btrfs_clear_extent_bit_changeset()
758 spin_unlock(&tree->lock); in btrfs_clear_extent_bit_changeset()
764 spin_unlock(&tree->lock); in btrfs_clear_extent_bit_changeset()
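Putting the two partial-overlap diagrams above together with split_state(): if a single state covers [0, 16383] and the caller clears bits on [4096, 8191], the state is first split at 4096 so that [0, 4095] keeps its bits untouched, the remainder is split again at 8192, and only the middle piece [4096, 8191] has its bits dropped (and is freed if nothing remains set); [8192, 16383] is left as it was.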
783 spin_lock(&tree->lock); in wait_extent_bit()
792 state->start <= start && start < state->end) in wait_extent_bit()
804 if (state->start > end) in wait_extent_bit()
807 if (state->state & bits) { in wait_extent_bit()
810 start = state->start; in wait_extent_bit()
811 refcount_inc(&state->refs); in wait_extent_bit()
812 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); in wait_extent_bit()
813 spin_unlock(&tree->lock); in wait_extent_bit()
815 spin_lock(&tree->lock); in wait_extent_bit()
816 finish_wait(&state->wq, &wait); in wait_extent_bit()
820 start = state->end + 1; in wait_extent_bit()
825 if (!cond_resched_lock(&tree->lock)) { in wait_extent_bit()
831 /* This state is no longer useful, clear it and free it up. */ in wait_extent_bit()
837 spin_unlock(&tree->lock); in wait_extent_bit()
845 if (!flags || (state->state & flags)) { in cache_state_if_flags()
847 refcount_inc(&state->refs); in cache_state_if_flags()
860 * tree->lock must be held. NULL will be returned if nothing was found after
874 if (state->state & bits) in find_first_extent_bit_state()
896 spin_lock(&tree->lock); in btrfs_find_first_extent_bit()
899 if (state->end == start - 1 && extent_state_in_tree(state)) { in btrfs_find_first_extent_bit()
901 if (state->state & bits) in btrfs_find_first_extent_bit()
925 *start_ret = state->start; in btrfs_find_first_extent_bit()
926 *end_ret = state->end; in btrfs_find_first_extent_bit()
930 spin_unlock(&tree->lock); in btrfs_find_first_extent_bit()
945 * will drop the tree->lock, so use this helper if you want to find the actual
947 * then walk down the tree until we find a non-contiguous area. The area
951 * @start_ret and @end_ret are updated, or false if no range was found.
961 spin_lock(&tree->lock); in btrfs_find_contiguous_extent_bit()
964 *start_ret = state->start; in btrfs_find_contiguous_extent_bit()
965 *end_ret = state->end; in btrfs_find_contiguous_extent_bit()
967 if (state->start > (*end_ret + 1)) in btrfs_find_contiguous_extent_bit()
969 *end_ret = state->end; in btrfs_find_contiguous_extent_bit()
973 spin_unlock(&tree->lock); in btrfs_find_contiguous_extent_bit()
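btrfs_find_contiguous_extent_bit() starts from the first state matching @bits and keeps extending *end_ret while the following state begins exactly at *end_ret + 1; the state->start > (*end_ret + 1) check above breaks out at the first gap. A standalone sketch of that forward walk over sorted, non-overlapping ranges (names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;	/* inclusive */
};

/*
 * Given sorted, non-overlapping ranges and the index of the first match,
 * grow [*start_ret, *end_ret] while the following ranges are exactly
 * adjacent, mirroring the walk in btrfs_find_contiguous_extent_bit().
 */
static void find_contiguous(const struct range *r, size_t nr, size_t first,
			    uint64_t *start_ret, uint64_t *end_ret)
{
	*start_ret = r[first].start;
	*end_ret = r[first].end;

	for (size_t i = first + 1; i < nr; i++) {
		if (r[i].start > *end_ret + 1)
			break;			/* first gap ends the contiguous area */
		*end_ret = r[i].end;
	}
}

int main(void)
{
	const struct range r[] = { { 0, 4095 }, { 4096, 8191 }, { 12288, 16383 } };
	uint64_t s, e;

	find_contiguous(r, 3, 0, &s, &e);	/* yields [0, 8191]; 12288 sits past the gap */
	printf("[%llu, %llu]\n", (unsigned long long)s, (unsigned long long)e);
	return 0;
}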
992 spin_lock(&tree->lock); in btrfs_find_delalloc_range()
1000 *end = (u64)-1; in btrfs_find_delalloc_range()
1005 if (found && (state->start != cur_start || in btrfs_find_delalloc_range()
1006 (state->state & EXTENT_BOUNDARY))) { in btrfs_find_delalloc_range()
1009 if (!(state->state & EXTENT_DELALLOC)) { in btrfs_find_delalloc_range()
1011 *end = state->end; in btrfs_find_delalloc_range()
1015 *start = state->start; in btrfs_find_delalloc_range()
1017 refcount_inc(&state->refs); in btrfs_find_delalloc_range()
1020 *end = state->end; in btrfs_find_delalloc_range()
1021 cur_start = state->end + 1; in btrfs_find_delalloc_range()
1022 total_bytes += state->end - state->start + 1; in btrfs_find_delalloc_range()
1028 spin_unlock(&tree->lock); in btrfs_find_delalloc_range()
1037 * If any of the exclusive bits are set, this will fail with -EEXIST if some
1064 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); in set_extent_bit()
1074 * up not needing the pre-allocated extent state at all, which in set_extent_bit()
1085 spin_lock(&tree->lock); in set_extent_bit()
1088 if (state->start <= start && state->end > start && in set_extent_bit()
1101 prealloc->start = start; in set_extent_bit()
1102 prealloc->end = end; in set_extent_bit()
1109 last_start = state->start; in set_extent_bit()
1110 last_end = state->end; in set_extent_bit()
1113 * | ---- desired range ---- | in set_extent_bit()
1118 if (state->start == start && state->end <= end) { in set_extent_bit()
1119 if (state->state & exclusive_bits) { in set_extent_bit()
1120 *failed_start = state->start; in set_extent_bit()
1122 ret = -EEXIST; in set_extent_bit()
1133 if (state && state->start == start && !need_resched()) in set_extent_bit()
1139 * | ---- desired range ---- | in set_extent_bit()
1142 * | ------------- state -------------- | in set_extent_bit()
1153 if (state->start < start) { in set_extent_bit()
1154 if (state->state & exclusive_bits) { in set_extent_bit()
1157 ret = -EEXIST; in set_extent_bit()
1165 if ((state->state & bits) == bits) { in set_extent_bit()
1166 start = state->end + 1; in set_extent_bit()
1181 if (state->end <= end) { in set_extent_bit()
1189 if (state && state->start == start && !need_resched()) in set_extent_bit()
1195 * | ---- desired range ---- | in set_extent_bit()
1198 * There's a hole, we need to insert something in it and ignore the in set_extent_bit()
1201 if (state->start > start) { in set_extent_bit()
1212 prealloc->start = start; in set_extent_bit()
1214 prealloc->end = end; in set_extent_bit()
1216 prealloc->end = last_start - 1; in set_extent_bit()
1221 extent_io_tree_panic(tree, prealloc, "insert", ret); in set_extent_bit()
1228 start = inserted_state->end + 1; in set_extent_bit()
1241 * we will end up here and try to allocate a prealloc state and insert. in set_extent_bit()
1248 * | ---- desired range ---- | in set_extent_bit()
1253 if (state->start <= end && state->end > end) { in set_extent_bit()
1254 if (state->state & exclusive_bits) { in set_extent_bit()
1257 ret = -EEXIST; in set_extent_bit()
1281 spin_unlock(&tree->lock); in set_extent_bit()
1287 spin_unlock(&tree->lock); in set_extent_bit()
1332 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, in btrfs_convert_extent_bit()
1340 * that matches exactly the target range, in which case no in btrfs_convert_extent_bit()
1346 return -ENOMEM; in btrfs_convert_extent_bit()
1349 spin_lock(&tree->lock); in btrfs_convert_extent_bit()
1352 if (state->start <= start && state->end > start && in btrfs_convert_extent_bit()
1365 ret = -ENOMEM; in btrfs_convert_extent_bit()
1368 prealloc->start = start; in btrfs_convert_extent_bit()
1369 prealloc->end = end; in btrfs_convert_extent_bit()
1376 last_start = state->start; in btrfs_convert_extent_bit()
1377 last_end = state->end; in btrfs_convert_extent_bit()
1380 * | ---- desired range ---- | in btrfs_convert_extent_bit()
1385 if (state->start == start && state->end <= end) { in btrfs_convert_extent_bit()
1392 if (state && state->start == start && !need_resched()) in btrfs_convert_extent_bit()
1398 * | ---- desired range ---- | in btrfs_convert_extent_bit()
1401 * | ------------- state -------------- | in btrfs_convert_extent_bit()
1412 if (state->start < start) { in btrfs_convert_extent_bit()
1415 ret = -ENOMEM; in btrfs_convert_extent_bit()
1424 if (state->end <= end) { in btrfs_convert_extent_bit()
1431 if (state && state->start == start && !need_resched()) in btrfs_convert_extent_bit()
1437 * | ---- desired range ---- | in btrfs_convert_extent_bit()
1440 * There's a hole, we need to insert something in it and ignore the in btrfs_convert_extent_bit()
1443 if (state->start > start) { in btrfs_convert_extent_bit()
1448 ret = -ENOMEM; in btrfs_convert_extent_bit()
1456 prealloc->start = start; in btrfs_convert_extent_bit()
1458 prealloc->end = end; in btrfs_convert_extent_bit()
1460 prealloc->end = last_start - 1; in btrfs_convert_extent_bit()
1465 extent_io_tree_panic(tree, prealloc, "insert", ret); in btrfs_convert_extent_bit()
1471 start = inserted_state->end + 1; in btrfs_convert_extent_bit()
1484 * we will end up here and try to allocate a prealloc state and insert. in btrfs_convert_extent_bit()
1491 * | ---- desired range ---- | in btrfs_convert_extent_bit()
1496 if (state->start <= end && state->end > end) { in btrfs_convert_extent_bit()
1499 ret = -ENOMEM; in btrfs_convert_extent_bit()
1520 spin_unlock(&tree->lock); in btrfs_convert_extent_bit()
1526 spin_unlock(&tree->lock); in btrfs_convert_extent_bit()
1543 * set, it's possible that @end_ret contains -1; this happens in case the range
1553 spin_lock(&tree->lock); in btrfs_find_first_clear_extent_bit()
1564 *end_ret = -1; in btrfs_find_first_clear_extent_bit()
1571 *start_ret = prev->end + 1; in btrfs_find_first_clear_extent_bit()
1572 *end_ret = -1; in btrfs_find_first_clear_extent_bit()
1582 if (in_range(start, state->start, state->end - state->start + 1)) { in btrfs_find_first_clear_extent_bit()
1583 if (state->state & bits) { in btrfs_find_first_clear_extent_bit()
1585 * |--range with bits sets--| in btrfs_find_first_clear_extent_bit()
1589 start = state->end + 1; in btrfs_find_first_clear_extent_bit()
1596 * |--range with bits cleared----| in btrfs_find_first_clear_extent_bit()
1600 *start_ret = state->start; in btrfs_find_first_clear_extent_bit()
1605 * |---prev range---|---hole/unset---|---node range---| in btrfs_find_first_clear_extent_bit()
1611 * |---hole/unset--||--first node--| in btrfs_find_first_clear_extent_bit()
1616 *start_ret = prev->end + 1; in btrfs_find_first_clear_extent_bit()
1628 if (state->end >= start && !(state->state & bits)) { in btrfs_find_first_clear_extent_bit()
1629 *end_ret = state->end; in btrfs_find_first_clear_extent_bit()
1631 *end_ret = state->start - 1; in btrfs_find_first_clear_extent_bit()
1637 spin_unlock(&tree->lock); in btrfs_find_first_clear_extent_bit()
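btrfs_find_first_clear_extent_bit() reports, through @start_ret and @end_ret, the first stretch at or after @start where @bits are not set; when nothing follows that stretch in the tree, @end_ret ends up as -1 (i.e. U64_MAX), as noted above. A simplified standalone sketch of the hole-finding walk, treating every stored range as having the bits set (the real code also has to skip tree entries whose bits happen to be clear; names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;	/* inclusive; every entry here counts as "bits set" */
};

/*
 * Find the first hole at or after @from between sorted, non-overlapping
 * flagged ranges; *end_ret becomes (uint64_t)-1 when nothing follows the
 * hole, matching the -1 convention described above.
 */
static void find_first_clear(const struct range *r, size_t nr, uint64_t from,
			     uint64_t *start_ret, uint64_t *end_ret)
{
	uint64_t start = from;

	for (size_t i = 0; i < nr; i++) {
		if (start >= r[i].start && start <= r[i].end) {
			start = r[i].end + 1;	/* @from sits inside a flagged range */
			continue;
		}
		if (r[i].start > start) {	/* first flagged range after the hole */
			*start_ret = start;
			*end_ret = r[i].start - 1;
			return;
		}
	}
	*start_ret = start;
	*end_ret = (uint64_t)-1;		/* hole extends past the last range */
}

int main(void)
{
	const struct range r[] = { { 4096, 8191 } };
	uint64_t s, e;

	find_first_clear(r, 1, 0, &s, &e);	/* [0, 4095] */
	find_first_clear(r, 1, 5000, &s, &e);	/* [8192, (u64)-1] */
	printf("[%llu, %llu]\n", (unsigned long long)s, (unsigned long long)e);
	return 0;
}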
1680 spin_lock(&tree->lock); in btrfs_count_range_bits()
1690 if (cached->start <= cur_start && cur_start <= cached->end) { in btrfs_count_range_bits()
1692 } else if (cached->start > cur_start) { in btrfs_count_range_bits()
1698 * are looking for, and if so, use it - this is a common case in btrfs_count_range_bits()
1700 * no previous state record, we can start from our cached state. in btrfs_count_range_bits()
1705 else if (prev->start <= cur_start && cur_start <= prev->end) in btrfs_count_range_bits()
1718 if (state->start > search_end) in btrfs_count_range_bits()
1720 if (contig && found && state->start > last + 1) in btrfs_count_range_bits()
1722 if (state->end >= cur_start && (state->state & bits) == bits) { in btrfs_count_range_bits()
1723 total_bytes += min(search_end, state->end) + 1 - in btrfs_count_range_bits()
1724 max(cur_start, state->start); in btrfs_count_range_bits()
1728 *start = max(cur_start, state->start); in btrfs_count_range_bits()
1731 last = state->end; in btrfs_count_range_bits()
1742 refcount_inc(&state->refs); in btrfs_count_range_bits()
1745 spin_unlock(&tree->lock); in btrfs_count_range_bits()
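The byte accounting in btrfs_count_range_bits() is the usual inclusive-range intersection: for each matching state it adds min(search_end, state->end) + 1 - max(cur_start, state->start), the number of bytes the state shares with the search window. A tiny standalone version of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Bytes shared by two inclusive ranges, or 0 if they do not intersect. */
static uint64_t overlap_bytes(uint64_t a_start, uint64_t a_end,
			      uint64_t b_start, uint64_t b_end)
{
	uint64_t lo = a_start > b_start ? a_start : b_start;	/* max of starts */
	uint64_t hi = a_end < b_end ? a_end : b_end;		/* min of ends */

	return hi >= lo ? hi + 1 - lo : 0;
}

int main(void)
{
	/* State [4096, 16383] clipped to the search window [0, 8191]: 4096 bytes. */
	printf("%llu\n", (unsigned long long)overlap_bytes(0, 8191, 4096, 16383));
	return 0;
}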
1760 spin_lock(&tree->lock); in btrfs_test_range_bit_exists()
1763 if (state->start > end) in btrfs_test_range_bit_exists()
1766 if (state->state & bit) { in btrfs_test_range_bit_exists()
1771 if (state->end >= end) in btrfs_test_range_bit_exists()
1775 spin_unlock(&tree->lock); in btrfs_test_range_bit_exists()
1793 spin_lock(&tree->lock); in btrfs_get_range_bits()
1795 if (state && state->start < end) { in btrfs_get_range_bits()
1797 refcount_inc(&state->refs); in btrfs_get_range_bits()
1800 if (state->start > end) in btrfs_get_range_bits()
1803 *bits |= state->state; in btrfs_get_range_bits()
1805 if (state->end >= end) in btrfs_get_range_bits()
1810 spin_unlock(&tree->lock); in btrfs_get_range_bits()
1825 spin_lock(&tree->lock); in btrfs_test_range_bit()
1826 if (cached && extent_state_in_tree(cached) && cached->start <= start && in btrfs_test_range_bit()
1827 cached->end > start) in btrfs_test_range_bit()
1832 if (state->start > start) { in btrfs_test_range_bit()
1837 if ((state->state & bit) == 0) { in btrfs_test_range_bit()
1842 if (state->end >= end) in btrfs_test_range_bit()
1846 start = state->end + 1; in btrfs_test_range_bit()
1853 spin_unlock(&tree->lock); in btrfs_test_range_bit()
1864 * fail with -EEXIST or changeset will record the whole range. in btrfs_set_record_extent_bits()
1890 if (ret == -EEXIST) { in btrfs_try_lock_extent_bits()
1892 btrfs_clear_extent_bit(tree, start, failed_start - 1, in btrfs_try_lock_extent_bits()
1900 * Either insert or lock the state struct between start and end; use mask to tell
1912 while (ret == -EEXIST) { in btrfs_lock_extent_bits()
1914 btrfs_clear_extent_bit(tree, start, failed_start - 1, in btrfs_lock_extent_bits()
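Both locking helpers above recover from set_extent_bit() returning -EEXIST the same way: the bits that did get set on [start, failed_start - 1] are cleared again, after which btrfs_try_lock_extent_bits() gives up while btrfs_lock_extent_bits() loops (the while (ret == -EEXIST) above) and presumably waits for the conflicting range before retrying. A standalone toy model of that roll-back-and-retry pattern; everything below is illustrative, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 16

static bool locked[NR_PAGES];	/* toy stand-in for per-range lock bits */

/*
 * Try to lock [start, end]; on conflict report the first already-locked
 * index through @failed_start and roll back the partial lock, loosely
 * mirroring how the kernel callers clear [start, failed_start - 1] after
 * an -EEXIST.
 */
static bool try_lock_range(unsigned start, unsigned end, unsigned *failed_start)
{
	for (unsigned i = start; i <= end; i++) {
		if (locked[i]) {
			*failed_start = i;
			for (unsigned j = start; j < i; j++)	/* undo [start, i - 1] */
				locked[j] = false;
			return false;
		}
		locked[i] = true;
	}
	return true;
}

static void lock_range(unsigned start, unsigned end)
{
	unsigned failed_start = 0;

	while (!try_lock_range(start, end, &failed_start)) {
		/*
		 * A real implementation would sleep here until the holder of
		 * @failed_start releases it; the toy version just pretends the
		 * release happened.
		 */
		locked[failed_start] = false;
	}
}

int main(void)
{
	locked[5] = true;		/* someone else holds index 5 */
	lock_range(2, 8);		/* retries once index 5 is "released" */
	printf("locked [2,8]\n");
	return 0;
}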
1926 * This is meant to be used in a context where we know no other tasks can
1934 spin_lock(&tree->lock); in btrfs_next_extent_state()
1938 refcount_inc(&next->refs); in btrfs_next_extent_state()
1939 spin_unlock(&tree->lock); in btrfs_next_extent_state()
1956 return -ENOMEM; in btrfs_extent_state_init_cachep()