Lines Matching +full:iref +full:- +full:level
1 // SPDX-License-Identifier: GPL-2.0
20 #include "extent-tree.h"
22 #include "disk-io.h"
23 #include "print-tree.h"
27 #include "free-space-cache.h"
28 #include "free-space-tree.h"
30 #include "ref-verify.h"
31 #include "space-info.h"
32 #include "block-rsv.h"
35 #include "dev-replace.h"
38 #include "root-tree.h"
39 #include "file-item.h"
41 #include "tree-checker.h"
42 #include "raid-stripe-tree.h"
61 static int find_next_key(struct btrfs_path *path, int level,
66 return (cache->flags & bits) == bits; in block_group_bits()
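
Editor's note: the "== bits" comparison above makes block_group_bits() a "contains all" test rather than "contains any". A minimal illustration (the BTRFS_BLOCK_GROUP_* flag names are the real bits; the locals are hypothetical):

	u64 flags = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1;

	/* true: every requested bit (DATA) is present in flags */
	bool a = (flags & BTRFS_BLOCK_GROUP_DATA) == BTRFS_BLOCK_GROUP_DATA;

	/* false: RAID10 is not set, even though DATA is */
	u64 want = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID10;
	bool b = (flags & want) == want;
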
79 return -ENOMEM; in btrfs_lookup_data_extent()
118 offset = fs_info->nodesize; in btrfs_lookup_extent_info()
124 return -ENOMEM; in btrfs_lookup_extent_info()
140 if (path->slots[0]) { in btrfs_lookup_extent_info()
141 path->slots[0]--; in btrfs_lookup_extent_info()
142 btrfs_item_key_to_cpu(path->nodes[0], &key, in btrfs_lookup_extent_info()
143 path->slots[0]); in btrfs_lookup_extent_info()
146 key.offset == fs_info->nodesize) in btrfs_lookup_extent_info()
152 struct extent_buffer *leaf = path->nodes[0]; in btrfs_lookup_extent_info()
154 const u32 item_size = btrfs_item_size(leaf, path->slots[0]); in btrfs_lookup_extent_info()
157 ret = -EUCLEAN; in btrfs_lookup_extent_info()
165 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in btrfs_lookup_extent_info()
168 ret = -EUCLEAN; in btrfs_lookup_extent_info()
176 owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]); in btrfs_lookup_extent_info()
183 delayed_refs = &trans->transaction->delayed_refs; in btrfs_lookup_extent_info()
184 spin_lock(&delayed_refs->lock); in btrfs_lookup_extent_info()
187 if (!mutex_trylock(&head->mutex)) { in btrfs_lookup_extent_info()
188 refcount_inc(&head->refs); in btrfs_lookup_extent_info()
189 spin_unlock(&delayed_refs->lock); in btrfs_lookup_extent_info()
197 mutex_lock(&head->mutex); in btrfs_lookup_extent_info()
198 mutex_unlock(&head->mutex); in btrfs_lookup_extent_info()
202 spin_lock(&head->lock); in btrfs_lookup_extent_info()
203 if (head->extent_op && head->extent_op->update_flags) in btrfs_lookup_extent_info()
204 extent_flags |= head->extent_op->flags_to_set; in btrfs_lookup_extent_info()
206 num_refs += head->ref_mod; in btrfs_lookup_extent_info()
207 spin_unlock(&head->lock); in btrfs_lookup_extent_info()
208 mutex_unlock(&head->mutex); in btrfs_lookup_extent_info()
210 spin_unlock(&delayed_refs->lock); in btrfs_lookup_extent_info()
239 * for pointers in non-shared tree blocks. For a given pointer in a block,
242 * b-tree searching. The full back refs are for pointers in tree blocks not
269 * block, increase lower level extents' reference counts. The original
274 * the new block, increase lower level extents' reference count.
285 * - multiple snapshots, subvolumes, or different generations in one subvol
286 * - different files inside a single subvolume
287 * - different offsets inside a file (bookend extents in file.c)
291 * - objectid of the subvolume root
292 * - objectid of the file holding the reference
293 * - original offset in the file
294 * - how many bookend extents
301 * - number of pointers in the tree leaf
318 * - Different subvolumes
326 * level of the tree block is required. This information is stored in
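
Editor's note: for reference, the on-disk structures the fields above map to, as declared in include/uapi/linux/btrfs_tree.h (reproduced from memory of the kernel headers; treat as a close paraphrase rather than the authoritative definitions):

	struct btrfs_extent_data_ref {      /* implicit back ref for file data */
		__le64 root;                /* objectid of the subvolume root */
		__le64 objectid;            /* objectid of the file (inode) */
		__le64 offset;              /* original offset in the file */
		__le32 count;               /* how many bookend extents */
	} __attribute__((__packed__));

	struct btrfs_shared_data_ref {      /* full back ref for shared data */
		__le32 count;
	} __attribute__((__packed__));

	struct btrfs_extent_inline_ref {    /* header of one inline back ref */
		__u8 type;                  /* a BTRFS_*_REF_KEY value */
		__le64 offset;              /* meaning depends on type */
	} __attribute__((__packed__));
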
336 struct btrfs_extent_inline_ref *iref, in btrfs_get_extent_inline_ref_type() argument
339 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_get_extent_inline_ref_type()
340 int type = btrfs_extent_inline_ref_type(eb, iref); in btrfs_get_extent_inline_ref_type()
341 u64 offset = btrfs_extent_inline_ref_offset(eb, iref); in btrfs_get_extent_inline_ref_type()
361 if (offset && IS_ALIGNED(offset, fs_info->sectorsize)) in btrfs_get_extent_inline_ref_type()
374 IS_ALIGNED(offset, fs_info->sectorsize)) in btrfs_get_extent_inline_ref_type()
386 "eb %llu iref 0x%lx invalid extent inline ref type %d", in btrfs_get_extent_inline_ref_type()
387 eb->start, (unsigned long)iref, type); in btrfs_get_extent_inline_ref_type()
433 struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); in lookup_extent_data_ref()
452 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in lookup_extent_data_ref()
458 return -ENOENT; in lookup_extent_data_ref()
462 ret = -ENOENT; in lookup_extent_data_ref()
463 leaf = path->nodes[0]; in lookup_extent_data_ref()
466 if (path->slots[0] >= nritems) { in lookup_extent_data_ref()
470 return -ENOENT; in lookup_extent_data_ref()
474 leaf = path->nodes[0]; in lookup_extent_data_ref()
479 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in lookup_extent_data_ref()
484 ref = btrfs_item_ptr(leaf, path->slots[0], in lookup_extent_data_ref()
496 path->slots[0]++; in lookup_extent_data_ref()
507 struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); in insert_extent_data_ref()
517 if (node->parent) { in insert_extent_data_ref()
519 key.offset = node->parent; in insert_extent_data_ref()
523 key.offset = hash_extent_data_ref(node->ref_root, owner, offset); in insert_extent_data_ref()
528 if (ret && ret != -EEXIST) in insert_extent_data_ref()
531 leaf = path->nodes[0]; in insert_extent_data_ref()
532 if (node->parent) { in insert_extent_data_ref()
534 ref = btrfs_item_ptr(leaf, path->slots[0], in insert_extent_data_ref()
537 btrfs_set_shared_data_ref_count(leaf, ref, node->ref_mod); in insert_extent_data_ref()
540 num_refs += node->ref_mod; in insert_extent_data_ref()
545 while (ret == -EEXIST) { in insert_extent_data_ref()
546 ref = btrfs_item_ptr(leaf, path->slots[0], in insert_extent_data_ref()
548 if (match_extent_data_ref(leaf, ref, node->ref_root, in insert_extent_data_ref()
555 if (ret && ret != -EEXIST) in insert_extent_data_ref()
558 leaf = path->nodes[0]; in insert_extent_data_ref()
560 ref = btrfs_item_ptr(leaf, path->slots[0], in insert_extent_data_ref()
563 btrfs_set_extent_data_ref_root(leaf, ref, node->ref_root); in insert_extent_data_ref()
566 btrfs_set_extent_data_ref_count(leaf, ref, node->ref_mod); in insert_extent_data_ref()
569 num_refs += node->ref_mod; in insert_extent_data_ref()
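
Editor's note: hash_extent_data_ref(), used above to compute the key offset for an implicit data ref, folds the three identifying fields into a 64-bit value with crc32c. A sketch of the implementation found earlier in extent-tree.c (recalled from the kernel source; verify the exact bit layout before relying on it):

	static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
	{
		u32 high_crc = ~(u32)0;
		u32 low_crc = ~(u32)0;
		__le64 lenum;

		lenum = cpu_to_le64(root_objectid);
		high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
		lenum = cpu_to_le64(owner);
		low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
		lenum = cpu_to_le64(offset);
		low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

		return ((u64)high_crc << 31) ^ (u64)low_crc;
	}
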
592 leaf = path->nodes[0]; in remove_extent_data_ref()
593 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in remove_extent_data_ref()
596 ref1 = btrfs_item_ptr(leaf, path->slots[0], in remove_extent_data_ref()
600 ref2 = btrfs_item_ptr(leaf, path->slots[0], in remove_extent_data_ref()
604 btrfs_err(trans->fs_info, in remove_extent_data_ref()
607 btrfs_abort_transaction(trans, -EUCLEAN); in remove_extent_data_ref()
608 return -EUCLEAN; in remove_extent_data_ref()
612 num_refs -= refs_to_drop; in remove_extent_data_ref()
627 struct btrfs_extent_inline_ref *iref) in extent_data_ref_count() argument
636 leaf = path->nodes[0]; in extent_data_ref_count()
637 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in extent_data_ref_count()
639 if (iref) { in extent_data_ref_count()
644 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); in extent_data_ref_count()
647 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset); in extent_data_ref_count()
650 ref2 = (struct btrfs_shared_data_ref *)(iref + 1); in extent_data_ref_count()
654 ref1 = btrfs_item_ptr(leaf, path->slots[0], in extent_data_ref_count()
658 ref2 = btrfs_item_ptr(leaf, path->slots[0], in extent_data_ref_count()
672 struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); in lookup_tree_block_ref()
685 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in lookup_tree_block_ref()
687 ret = -ENOENT; in lookup_tree_block_ref()
696 struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); in insert_tree_block_ref()
701 if (node->parent) { in insert_tree_block_ref()
703 key.offset = node->parent; in insert_tree_block_ref()
706 key.offset = node->ref_root; in insert_tree_block_ref()
731 static int find_next_key(struct btrfs_path *path, int level, in find_next_key() argument
735 for (; level < BTRFS_MAX_LEVEL; level++) { in find_next_key()
736 if (!path->nodes[level]) in find_next_key()
738 if (path->slots[level] + 1 >= in find_next_key()
739 btrfs_header_nritems(path->nodes[level])) in find_next_key()
741 if (level == 0) in find_next_key()
742 btrfs_item_key_to_cpu(path->nodes[level], key, in find_next_key()
743 path->slots[level] + 1); in find_next_key()
745 btrfs_node_key_to_cpu(path->nodes[level], key, in find_next_key()
746 path->slots[level] + 1); in find_next_key()
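
Editor's note: find_next_key() walks up the path until it finds a node with a slot to the right of the current position, so the caller learns the first key that follows the one the path points at. A hedged usage sketch (the 0-on-success / 1-on-exhaustion return convention is an assumption based on typical callers in this file):

	struct btrfs_key next_key;

	/* level 0: start from the leaf the path currently points at */
	if (find_next_key(path, 0, &next_key) == 0) {
		/* next_key holds the first key after path->slots[0] */
	} else {
		/* no further keys: path is at the last item in the tree */
	}
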
757 * should be inserted, and -ENOENT is returned.
760 * points to the extent item, and -EAGAIN is returned.
773 struct btrfs_fs_info *fs_info = trans->fs_info; in lookup_inline_extent_backref()
778 struct btrfs_extent_inline_ref *iref; in lookup_inline_extent_backref() local
797 path->search_for_extension = 1; in lookup_inline_extent_backref()
799 extra_size = -1; in lookup_inline_extent_backref()
802 * Owner is our level, so we can just add one to get the level for the in lookup_inline_extent_backref()
821 if (path->slots[0]) { in lookup_inline_extent_backref()
822 path->slots[0]--; in lookup_inline_extent_backref()
823 btrfs_item_key_to_cpu(path->nodes[0], &key, in lookup_inline_extent_backref()
824 path->slots[0]); in lookup_inline_extent_backref()
840 ret = -ENOENT; in lookup_inline_extent_backref()
843 btrfs_print_leaf(path->nodes[0]); in lookup_inline_extent_backref()
848 ret = -EUCLEAN; in lookup_inline_extent_backref()
852 leaf = path->nodes[0]; in lookup_inline_extent_backref()
853 item_size = btrfs_item_size(leaf, path->slots[0]); in lookup_inline_extent_backref()
855 ret = -EUCLEAN; in lookup_inline_extent_backref()
863 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in lookup_inline_extent_backref()
879 ret = -ENOENT; in lookup_inline_extent_backref()
881 iref = (struct btrfs_extent_inline_ref *)ptr; in lookup_inline_extent_backref()
882 type = btrfs_get_extent_inline_ref_type(leaf, iref, needed); in lookup_inline_extent_backref()
889 ret = -EUCLEAN; in lookup_inline_extent_backref()
902 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in lookup_inline_extent_backref()
913 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref); in lookup_inline_extent_backref()
934 ret = -EUCLEAN; in lookup_inline_extent_backref()
935 btrfs_print_leaf(path->nodes[0]); in lookup_inline_extent_backref()
938 path->slots[0], root_objectid, owner, offset, parent); in lookup_inline_extent_backref()
942 if (ret == -ENOENT && insert) { in lookup_inline_extent_backref()
945 ret = -EAGAIN; in lookup_inline_extent_backref()
949 if (path->slots[0] + 1 < btrfs_header_nritems(path->nodes[0])) { in lookup_inline_extent_backref()
952 btrfs_item_key_to_cpu(path->nodes[0], &tmp_key, path->slots[0] + 1); in lookup_inline_extent_backref()
955 ret = -EAGAIN; in lookup_inline_extent_backref()
961 if (!path->keep_locks) { in lookup_inline_extent_backref()
963 path->keep_locks = 1; in lookup_inline_extent_backref()
976 ret = -EAGAIN; in lookup_inline_extent_backref()
983 if (path->keep_locks) { in lookup_inline_extent_backref()
984 path->keep_locks = 0; in lookup_inline_extent_backref()
988 path->search_for_extension = 0; in lookup_inline_extent_backref()
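
Editor's note: the three-way return contract documented above (0 = found, -ENOENT = insert point found, -EAGAIN = extent item cannot be extended inline) is what insert_inline_extent_backref(), shown further down in this listing, dispatches on. A condensed caller sketch; the trailing arguments follow the call sites visible below and may differ slightly by kernel version:

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/* ref already exists inline: adjust its count in place */
		ret = update_inline_extent_backref(trans, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		/* room left in the extent item: append a new inline ref */
		setup_inline_extent_backref(trans, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	/*
	 * -EAGAIN propagates to __btrfs_inc_extent_ref(), which updates the
	 * extent item refcount and inserts a separate keyed backref item.
	 */
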
998 struct btrfs_extent_inline_ref *iref, in setup_inline_extent_backref() argument
1012 leaf = path->nodes[0]; in setup_inline_extent_backref()
1013 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in setup_inline_extent_backref()
1014 item_offset = (unsigned long)iref - (unsigned long)ei; in setup_inline_extent_backref()
1021 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in setup_inline_extent_backref()
1029 end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]); in setup_inline_extent_backref()
1030 if (ptr < end - size) in setup_inline_extent_backref()
1032 end - size - ptr); in setup_inline_extent_backref()
1034 iref = (struct btrfs_extent_inline_ref *)ptr; in setup_inline_extent_backref()
1035 btrfs_set_extent_inline_ref_type(leaf, iref, type); in setup_inline_extent_backref()
1038 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in setup_inline_extent_backref()
1045 sref = (struct btrfs_shared_data_ref *)(iref + 1); in setup_inline_extent_backref()
1047 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); in setup_inline_extent_backref()
1049 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); in setup_inline_extent_backref()
1051 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); in setup_inline_extent_backref()
1067 if (ret != -ENOENT) in lookup_extent_backref()
1089 struct btrfs_extent_inline_ref *iref, in update_inline_extent_backref() argument
1093 struct extent_buffer *leaf = path->nodes[0]; in update_inline_extent_backref()
1094 struct btrfs_fs_info *fs_info = leaf->fs_info; in update_inline_extent_backref()
1105 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in update_inline_extent_backref()
1111 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in update_inline_extent_backref()
1113 extent_size = fs_info->nodesize; in update_inline_extent_backref()
1118 "invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu", in update_inline_extent_backref()
1120 return -EUCLEAN; in update_inline_extent_backref()
1127 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY); in update_inline_extent_backref()
1133 return -EUCLEAN; in update_inline_extent_backref()
1136 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in update_inline_extent_backref()
1139 sref = (struct btrfs_shared_data_ref *)(iref + 1); in update_inline_extent_backref()
1151 if (unlikely(refs_to_mod != -1)) { in update_inline_extent_backref()
1154 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in update_inline_extent_backref()
1158 "invalid refs_to_mod for tree block %llu, has %d expect -1", in update_inline_extent_backref()
1160 return -EUCLEAN; in update_inline_extent_backref()
1164 if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) { in update_inline_extent_backref()
1168 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in update_inline_extent_backref()
1170 extent_size = fs_info->nodesize; in update_inline_extent_backref()
1175 "invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu", in update_inline_extent_backref()
1176 (unsigned long)iref, key.objectid, extent_size, in update_inline_extent_backref()
1178 return -EUCLEAN; in update_inline_extent_backref()
1189 item_size = btrfs_item_size(leaf, path->slots[0]); in update_inline_extent_backref()
1190 ptr = (unsigned long)iref; in update_inline_extent_backref()
1194 end - ptr - size); in update_inline_extent_backref()
1195 item_size -= size; in update_inline_extent_backref()
1210 struct btrfs_extent_inline_ref *iref; in insert_inline_extent_backref() local
1213 ret = lookup_inline_extent_backref(trans, path, &iref, bytenr, in insert_inline_extent_backref()
1222 btrfs_print_leaf(path->nodes[0]); in insert_inline_extent_backref()
1223 btrfs_crit(trans->fs_info, in insert_inline_extent_backref()
1225 bytenr, num_bytes, root_objectid, path->slots[0]); in insert_inline_extent_backref()
1226 return -EUCLEAN; in insert_inline_extent_backref()
1228 ret = update_inline_extent_backref(trans, path, iref, in insert_inline_extent_backref()
1230 } else if (ret == -ENOENT) { in insert_inline_extent_backref()
1231 setup_inline_extent_backref(trans, path, iref, parent, in insert_inline_extent_backref()
1242 struct btrfs_extent_inline_ref *iref, in remove_extent_backref() argument
1248 if (iref) in remove_extent_backref()
1249 ret = update_inline_extent_backref(trans, path, iref, in remove_extent_backref()
1250 -refs_to_drop, NULL); in remove_extent_backref()
1267 len -= aligned_start - start; in btrfs_issue_discard()
1284 u64 size = sb_start - start; in btrfs_issue_discard()
1296 start += sb_end - start; in btrfs_issue_discard()
1301 bytes_left = end - start; in btrfs_issue_discard()
1311 else if (ret != -EOPNOTSUPP) in btrfs_issue_discard()
1320 bytes_left = end - start; in btrfs_issue_discard()
1331 if (ret != -EOPNOTSUPP) in btrfs_issue_discard()
1337 bytes_left -= bytes_to_discard; in btrfs_issue_discard()
1341 ret = -ERESTARTSYS; in btrfs_issue_discard()
1351 struct btrfs_device *dev = stripe->dev; in do_discard_extent()
1352 struct btrfs_fs_info *fs_info = dev->fs_info; in do_discard_extent()
1353 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in do_discard_extent()
1354 u64 phys = stripe->physical; in do_discard_extent()
1355 u64 len = stripe->length; in do_discard_extent()
1368 dev != dev_replace->srcdev) in do_discard_extent()
1374 ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len, in do_discard_extent()
1377 } else if (bdev_max_discard_sectors(stripe->dev->bdev)) { in do_discard_extent()
1378 ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded); in do_discard_extent()
1407 num_bytes = end - cur; in btrfs_discard_extent()
1411 if (ret == -EOPNOTSUPP) in btrfs_discard_extent()
1420 if (!stripe->dev->bdev) { in btrfs_discard_extent()
1426 &stripe->dev->dev_state)) in btrfs_discard_extent()
1435 if (ret != -EOPNOTSUPP) in btrfs_discard_extent()
1453 /* Can return -ENOMEM */
1457 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_inc_extent_ref()
1460 ASSERT(generic_ref->type != BTRFS_REF_NOT_SET && in btrfs_inc_extent_ref()
1461 generic_ref->action); in btrfs_inc_extent_ref()
1462 BUG_ON(generic_ref->type == BTRFS_REF_METADATA && in btrfs_inc_extent_ref()
1463 generic_ref->ref_root == BTRFS_TREE_LOG_OBJECTID); in btrfs_inc_extent_ref()
1465 if (generic_ref->type == BTRFS_REF_METADATA) in btrfs_inc_extent_ref()
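
Editor's note: btrfs_inc_extent_ref() takes a pre-filled struct btrfs_ref. A hedged sketch of a metadata caller; the .bytenr/.num_bytes initializer style is taken from btrfs_free_tree_block() later in this listing, while the remaining field names and the btrfs_init_tree_ref() argument list are assumptions for this kernel version:

	struct btrfs_ref ref = {
		.action    = BTRFS_ADD_DELAYED_REF,
		.bytenr    = eb->start,
		.num_bytes = eb->len,
		.parent    = 0,                   /* 0 = implicit backref */
		.ref_root  = btrfs_root_id(root), /* must not be TREE_LOG */
	};

	/* type-specific setup; trailing arguments vary by kernel version */
	btrfs_init_tree_ref(&ref, btrfs_header_level(eb),
			    btrfs_root_id(root), false);

	ret = btrfs_inc_extent_ref(trans, &ref);
	if (ret)
		btrfs_abort_transaction(trans, ret);
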
1498 u64 bytenr = node->bytenr; in __btrfs_inc_extent_ref()
1499 u64 num_bytes = node->num_bytes; in __btrfs_inc_extent_ref()
1503 int refs_to_add = node->ref_mod; in __btrfs_inc_extent_ref()
1508 return -ENOMEM; in __btrfs_inc_extent_ref()
1512 node->parent, node->ref_root, owner, in __btrfs_inc_extent_ref()
1514 if ((ret < 0 && ret != -EAGAIN) || !ret) in __btrfs_inc_extent_ref()
1518 * Ok we had -EAGAIN which means we didn't have space to insert and in __btrfs_inc_extent_ref()
1522 leaf = path->nodes[0]; in __btrfs_inc_extent_ref()
1523 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in __btrfs_inc_extent_ref()
1524 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in __btrfs_inc_extent_ref()
1549 u64 root = href->owning_root; in free_head_ref_squota_rsv()
1556 !href->is_data || !is_fstree(root)) in free_head_ref_squota_rsv()
1559 btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes, in free_head_ref_squota_rsv()
1573 trace_run_delayed_data_ref(trans->fs_info, node); in run_delayed_data_ref()
1575 if (node->type == BTRFS_SHARED_DATA_REF_KEY) in run_delayed_data_ref()
1576 parent = node->parent; in run_delayed_data_ref()
1578 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { in run_delayed_data_ref()
1581 .root = href->owning_root, in run_delayed_data_ref()
1582 .num_bytes = node->num_bytes, in run_delayed_data_ref()
1585 .generation = trans->transid, in run_delayed_data_ref()
1591 flags |= extent_op->flags_to_set; in run_delayed_data_ref()
1593 key.objectid = node->bytenr; in run_delayed_data_ref()
1595 key.offset = node->num_bytes; in run_delayed_data_ref()
1597 ret = alloc_reserved_file_extent(trans, parent, node->ref_root, in run_delayed_data_ref()
1599 node->ref_mod, in run_delayed_data_ref()
1600 href->owning_root); in run_delayed_data_ref()
1601 free_head_ref_squota_rsv(trans->fs_info, href); in run_delayed_data_ref()
1603 ret = btrfs_record_squota_delta(trans->fs_info, &delta); in run_delayed_data_ref()
1604 } else if (node->action == BTRFS_ADD_DELAYED_REF) { in run_delayed_data_ref()
1606 } else if (node->action == BTRFS_DROP_DELAYED_REF) { in run_delayed_data_ref()
1619 if (extent_op->update_flags) { in __run_delayed_extent_op()
1620 flags |= extent_op->flags_to_set; in __run_delayed_extent_op()
1624 if (extent_op->update_key) { in __run_delayed_extent_op()
1628 btrfs_set_tree_block_key(leaf, bi, &extent_op->key); in __run_delayed_extent_op()
1636 struct btrfs_fs_info *fs_info = trans->fs_info; in run_delayed_extent_op()
1654 return -ENOMEM; in run_delayed_extent_op()
1656 key.objectid = head->bytenr; in run_delayed_extent_op()
1660 key.offset = head->level; in run_delayed_extent_op()
1663 key.offset = head->num_bytes; in run_delayed_extent_op()
1673 if (path->slots[0] > 0) { in run_delayed_extent_op()
1674 path->slots[0]--; in run_delayed_extent_op()
1675 btrfs_item_key_to_cpu(path->nodes[0], &key, in run_delayed_extent_op()
1676 path->slots[0]); in run_delayed_extent_op()
1677 if (key.objectid == head->bytenr && in run_delayed_extent_op()
1679 key.offset == head->num_bytes) in run_delayed_extent_op()
1686 key.objectid = head->bytenr; in run_delayed_extent_op()
1687 key.offset = head->num_bytes; in run_delayed_extent_op()
1692 ret = -EUCLEAN; in run_delayed_extent_op()
1694 "missing extent item for extent %llu num_bytes %llu level %d", in run_delayed_extent_op()
1695 head->bytenr, head->num_bytes, head->level); in run_delayed_extent_op()
1700 leaf = path->nodes[0]; in run_delayed_extent_op()
1701 item_size = btrfs_item_size(leaf, path->slots[0]); in run_delayed_extent_op()
1704 ret = -EUCLEAN; in run_delayed_extent_op()
1712 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in run_delayed_extent_op()
1728 struct btrfs_fs_info *fs_info = trans->fs_info; in run_delayed_tree_ref()
1732 trace_run_delayed_tree_ref(trans->fs_info, node); in run_delayed_tree_ref()
1734 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) in run_delayed_tree_ref()
1735 parent = node->parent; in run_delayed_tree_ref()
1736 ref_root = node->ref_root; in run_delayed_tree_ref()
1738 if (unlikely(node->ref_mod != 1)) { in run_delayed_tree_ref()
1739 btrfs_err(trans->fs_info, in run_delayed_tree_ref()
1741 node->bytenr, node->ref_mod, node->action, ref_root, in run_delayed_tree_ref()
1743 return -EUCLEAN; in run_delayed_tree_ref()
1745 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { in run_delayed_tree_ref()
1747 .root = href->owning_root, in run_delayed_tree_ref()
1748 .num_bytes = fs_info->nodesize, in run_delayed_tree_ref()
1751 .generation = trans->transid, in run_delayed_tree_ref()
1757 } else if (node->action == BTRFS_ADD_DELAYED_REF) { in run_delayed_tree_ref()
1759 } else if (node->action == BTRFS_DROP_DELAYED_REF) { in run_delayed_tree_ref()
1778 btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); in run_one_delayed_ref()
1779 free_head_ref_squota_rsv(trans->fs_info, href); in run_one_delayed_ref()
1784 if (node->type == BTRFS_TREE_BLOCK_REF_KEY || in run_one_delayed_ref()
1785 node->type == BTRFS_SHARED_BLOCK_REF_KEY) in run_one_delayed_ref()
1788 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || in run_one_delayed_ref()
1789 node->type == BTRFS_SHARED_DATA_REF_KEY) in run_one_delayed_ref()
1792 else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY) in run_one_delayed_ref()
1797 btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); in run_one_delayed_ref()
1799 btrfs_err(trans->fs_info, in run_one_delayed_ref()
1801 node->bytenr, node->num_bytes, node->type, in run_one_delayed_ref()
1802 node->action, node->ref_mod, ret); in run_one_delayed_ref()
1811 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root)) in select_delayed_ref()
1820 if (!list_empty(&head->ref_add_list)) in select_delayed_ref()
1821 return list_first_entry(&head->ref_add_list, in select_delayed_ref()
1824 ref = rb_entry(rb_first_cached(&head->ref_tree), in select_delayed_ref()
1826 ASSERT(list_empty(&ref->add_list)); in select_delayed_ref()
1833 struct btrfs_delayed_extent_op *extent_op = head->extent_op; in cleanup_extent_op()
1838 if (head->must_insert_reserved) { in cleanup_extent_op()
1839 head->extent_op = NULL; in cleanup_extent_op()
1855 head->extent_op = NULL; in run_and_cleanup_extent_op()
1856 spin_unlock(&head->lock); in run_and_cleanup_extent_op()
1872 if (head->total_ref_mod < 0 && head->is_data) { in btrfs_cleanup_ref_head_accounting()
1875 spin_lock(&delayed_refs->lock); in btrfs_cleanup_ref_head_accounting()
1876 delayed_refs->pending_csums -= head->num_bytes; in btrfs_cleanup_ref_head_accounting()
1877 spin_unlock(&delayed_refs->lock); in btrfs_cleanup_ref_head_accounting()
1878 nr_csums = btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes); in btrfs_cleanup_ref_head_accounting()
1885 if (head->must_insert_reserved) in btrfs_cleanup_ref_head_accounting()
1896 struct btrfs_fs_info *fs_info = trans->fs_info; in cleanup_ref_head()
1900 delayed_refs = &trans->transaction->delayed_refs; in cleanup_ref_head()
1912 * Need to drop our head ref lock and re-acquire the delayed ref lock in cleanup_ref_head()
1913 * and then re-check to make sure nobody got added. in cleanup_ref_head()
1915 spin_unlock(&head->lock); in cleanup_ref_head()
1916 spin_lock(&delayed_refs->lock); in cleanup_ref_head()
1917 spin_lock(&head->lock); in cleanup_ref_head()
1918 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) { in cleanup_ref_head()
1919 spin_unlock(&head->lock); in cleanup_ref_head()
1920 spin_unlock(&delayed_refs->lock); in cleanup_ref_head()
1924 spin_unlock(&head->lock); in cleanup_ref_head()
1925 spin_unlock(&delayed_refs->lock); in cleanup_ref_head()
1927 if (head->must_insert_reserved) { in cleanup_ref_head()
1928 btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1); in cleanup_ref_head()
1929 if (head->is_data) { in cleanup_ref_head()
1932 csum_root = btrfs_csum_root(fs_info, head->bytenr); in cleanup_ref_head()
1933 ret = btrfs_del_csums(trans, csum_root, head->bytenr, in cleanup_ref_head()
1934 head->num_bytes); in cleanup_ref_head()
1950 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_run_delayed_refs_for_head()
1957 delayed_refs = &trans->transaction->delayed_refs; in btrfs_run_delayed_refs_for_head()
1959 lockdep_assert_held(&locked_ref->mutex); in btrfs_run_delayed_refs_for_head()
1960 lockdep_assert_held(&locked_ref->lock); in btrfs_run_delayed_refs_for_head()
1963 if (ref->seq && in btrfs_run_delayed_refs_for_head()
1964 btrfs_check_delayed_seq(fs_info, ref->seq)) { in btrfs_run_delayed_refs_for_head()
1965 spin_unlock(&locked_ref->lock); in btrfs_run_delayed_refs_for_head()
1967 return -EAGAIN; in btrfs_run_delayed_refs_for_head()
1970 rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); in btrfs_run_delayed_refs_for_head()
1971 RB_CLEAR_NODE(&ref->ref_node); in btrfs_run_delayed_refs_for_head()
1972 if (!list_empty(&ref->add_list)) in btrfs_run_delayed_refs_for_head()
1973 list_del(&ref->add_list); in btrfs_run_delayed_refs_for_head()
1978 switch (ref->action) { in btrfs_run_delayed_refs_for_head()
1981 locked_ref->ref_mod -= ref->ref_mod; in btrfs_run_delayed_refs_for_head()
1984 locked_ref->ref_mod += ref->ref_mod; in btrfs_run_delayed_refs_for_head()
1994 must_insert_reserved = locked_ref->must_insert_reserved; in btrfs_run_delayed_refs_for_head()
2001 locked_ref->must_insert_reserved = false; in btrfs_run_delayed_refs_for_head()
2003 extent_op = locked_ref->extent_op; in btrfs_run_delayed_refs_for_head()
2004 locked_ref->extent_op = NULL; in btrfs_run_delayed_refs_for_head()
2005 spin_unlock(&locked_ref->lock); in btrfs_run_delayed_refs_for_head()
2022 spin_lock(&locked_ref->lock); in btrfs_run_delayed_refs_for_head()
2031 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2036 struct btrfs_fs_info *fs_info = trans->fs_info; in __btrfs_run_delayed_refs()
2044 delayed_refs = &trans->transaction->delayed_refs; in __btrfs_run_delayed_refs()
2046 max_count = delayed_refs->num_heads_ready; in __btrfs_run_delayed_refs()
2054 if (PTR_ERR(locked_ref) == -EAGAIN) { in __btrfs_run_delayed_refs()
2066 * finish. If we merged anything we need to re-loop so we can in __btrfs_run_delayed_refs()
2074 spin_lock(&locked_ref->lock); in __btrfs_run_delayed_refs()
2078 if (ret < 0 && ret != -EAGAIN) { in __btrfs_run_delayed_refs()
2101 * returned -EAGAIN, meaning we need to select another head in __btrfs_run_delayed_refs()
2121 struct rb_node *n = root->rb_node; in find_middle()
2130 first = entry->bytenr; in find_middle()
2135 last = entry->bytenr; in find_middle()
2137 n = root->rb_node; in find_middle()
2141 WARN_ON(!entry->in_tree); in find_middle()
2143 middle = entry->bytenr; in find_middle()
2146 n = n->rb_left; in find_middle()
2148 n = n->rb_right; in find_middle()
2150 alt = 1 - alt; in find_middle()
2165 * Use (u64)-1 (U64_MAX) to run all existing delayed references
2173 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_run_delayed_refs()
2181 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) in btrfs_run_delayed_refs()
2184 delayed_refs = &trans->transaction->delayed_refs; in btrfs_run_delayed_refs()
2187 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); in btrfs_run_delayed_refs()
2198 spin_lock(&delayed_refs->lock); in btrfs_run_delayed_refs()
2199 if (xa_empty(&delayed_refs->head_refs)) { in btrfs_run_delayed_refs()
2200 spin_unlock(&delayed_refs->lock); in btrfs_run_delayed_refs()
2203 spin_unlock(&delayed_refs->lock); in btrfs_run_delayed_refs()
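
Editor's note: per the comment above, callers pass either a byte budget or U64_MAX. An illustrative call from commit context, grounded in the U64_MAX convention the comment states (the error handling is the usual transaction-abort boilerplate):

	/* flush every delayed ref accumulated in this transaction */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
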
2220 return -ENOMEM; in btrfs_set_disk_extent_flags()
2222 extent_op->flags_to_set = flags; in btrfs_set_disk_extent_flags()
2223 extent_op->update_flags = true; in btrfs_set_disk_extent_flags()
2224 extent_op->update_key = false; in btrfs_set_disk_extent_flags()
2226 ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, in btrfs_set_disk_extent_flags()
2244 spin_lock(&root->fs_info->trans_lock); in check_delayed_ref()
2245 cur_trans = root->fs_info->running_transaction; in check_delayed_ref()
2247 refcount_inc(&cur_trans->use_count); in check_delayed_ref()
2248 spin_unlock(&root->fs_info->trans_lock); in check_delayed_ref()
2252 delayed_refs = &cur_trans->delayed_refs; in check_delayed_ref()
2253 spin_lock(&delayed_refs->lock); in check_delayed_ref()
2254 head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr); in check_delayed_ref()
2256 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
2261 if (!mutex_trylock(&head->mutex)) { in check_delayed_ref()
2262 if (path->nowait) { in check_delayed_ref()
2263 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
2265 return -EAGAIN; in check_delayed_ref()
2268 refcount_inc(&head->refs); in check_delayed_ref()
2269 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
2277 mutex_lock(&head->mutex); in check_delayed_ref()
2278 mutex_unlock(&head->mutex); in check_delayed_ref()
2281 return -EAGAIN; in check_delayed_ref()
2283 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
2285 spin_lock(&head->lock); in check_delayed_ref()
2290 for (node = rb_first_cached(&head->ref_tree); node; in check_delayed_ref()
2297 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { in check_delayed_ref()
2309 if (ref->ref_root != btrfs_root_id(root) || in check_delayed_ref()
2315 spin_unlock(&head->lock); in check_delayed_ref()
2316 mutex_unlock(&head->mutex); in check_delayed_ref()
2326 struct btrfs_fs_info *fs_info = root->fs_info; in check_committed_ref()
2330 struct btrfs_extent_inline_ref *iref; in check_committed_ref() local
2339 key.offset = (u64)-1; in check_committed_ref()
2347 * Key with offset -1 found, there would have to exist an extent in check_committed_ref()
2350 ret = -EUCLEAN; in check_committed_ref()
2354 ret = -ENOENT; in check_committed_ref()
2355 if (path->slots[0] == 0) in check_committed_ref()
2358 path->slots[0]--; in check_committed_ref()
2359 leaf = path->nodes[0]; in check_committed_ref()
2360 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in check_committed_ref()
2366 item_size = btrfs_item_size(leaf, path->slots[0]); in check_committed_ref()
2367 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); in check_committed_ref()
2375 iref = (struct btrfs_extent_inline_ref *)(ei + 1); in check_committed_ref()
2376 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); in check_committed_ref()
2379 iref = (struct btrfs_extent_inline_ref *)(iref + 1); in check_committed_ref()
2392 btrfs_root_last_snapshot(&root->root_item))) in check_committed_ref()
2396 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); in check_committed_ref()
2400 ref = (struct btrfs_extent_data_ref *)(&iref->offset); in check_committed_ref()
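
Editor's note: the fragments above are a specialized form of the general inline-ref walk used throughout this file: start at the byte after the extent item header and advance by btrfs_extent_inline_ref_size(type) until the item end. Sketch (the accessors are the real helpers seen in this listing; ei, leaf and item_size are assumed to come from btrfs_item_ptr()/btrfs_item_size() as shown above):

	struct btrfs_extent_inline_ref *iref;
	unsigned long ptr = (unsigned long)(ei + 1);       /* first inline ref */
	unsigned long end = (unsigned long)ei + item_size; /* end of the item */

	while (ptr < end) {
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;	/* corrupted leaf */

		/* ... inspect the ref according to its type ... */

		ptr += btrfs_extent_inline_ref_size(type);
	}
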
2421 if (ret && ret != -ENOENT) in btrfs_cross_ref_exist()
2425 } while (ret == -EAGAIN && !path->nowait); in btrfs_cross_ref_exist()
2439 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_mod_ref()
2448 int level; in __btrfs_mod_ref() local
2456 level = btrfs_header_level(buf); in __btrfs_mod_ref()
2458 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) in __btrfs_mod_ref()
2462 parent = buf->start; in __btrfs_mod_ref()
2477 if (level == 0) { in __btrfs_mod_ref()
2493 key.offset -= btrfs_file_extent_offset(buf, fi); in __btrfs_mod_ref()
2505 ref.num_bytes = fs_info->nodesize; in __btrfs_mod_ref()
2507 btrfs_init_tree_ref(&ref, level - 1, in __btrfs_mod_ref()
2536 struct btrfs_fs_info *fs_info = root->fs_info; in get_alloc_profile_by_root()
2542 else if (root == fs_info->chunk_root) in get_alloc_profile_by_root()
2556 read_lock(&fs_info->block_group_cache_lock); in first_logical_byte()
2558 leftmost = rb_first_cached(&fs_info->block_group_cache_tree); in first_logical_byte()
2563 bytenr = bg->start; in first_logical_byte()
2565 read_unlock(&fs_info->block_group_cache_lock); in first_logical_byte()
2574 struct btrfs_fs_info *fs_info = cache->fs_info; in pin_down_extent()
2576 spin_lock(&cache->space_info->lock); in pin_down_extent()
2577 spin_lock(&cache->lock); in pin_down_extent()
2578 cache->pinned += num_bytes; in pin_down_extent()
2579 btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info, in pin_down_extent()
2582 cache->reserved -= num_bytes; in pin_down_extent()
2583 cache->space_info->bytes_reserved -= num_bytes; in pin_down_extent()
2585 spin_unlock(&cache->lock); in pin_down_extent()
2586 spin_unlock(&cache->space_info->lock); in pin_down_extent()
2588 set_extent_bit(&trans->transaction->pinned_extents, bytenr, in pin_down_extent()
2589 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); in pin_down_extent()
2598 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); in btrfs_pin_extent()
2613 cache = btrfs_lookup_block_group(trans->fs_info, eb->start); in btrfs_pin_extent_for_log_replay()
2615 return -EINVAL; in btrfs_pin_extent_for_log_replay()
2625 pin_down_extent(trans, cache, eb->start, eb->len, 0); in btrfs_pin_extent_for_log_replay()
2628 ret = btrfs_remove_free_space(cache, eb->start, eb->len); in btrfs_pin_extent_for_log_replay()
2642 return -EINVAL; in __exclude_logged_extent()
2656 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_exclude_logged_extents()
2689 atomic_inc(&bg->reservations); in btrfs_inc_block_group_reservations()
2706 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { in fetch_cluster_info()
2707 ret = &fs_info->meta_alloc_cluster; in fetch_cluster_info()
2712 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && in fetch_cluster_info()
2715 ret = &fs_info->data_alloc_cluster; in fetch_cluster_info()
2727 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in unpin_extent_range()
2738 start >= cache->start + cache->length) { in unpin_extent_range()
2745 ret = -EUCLEAN; in unpin_extent_range()
2750 cache->space_info, in unpin_extent_range()
2755 len = cache->start + cache->length - start; in unpin_extent_range()
2756 len = min(len, end + 1 - start); in unpin_extent_range()
2763 space_info = cache->space_info; in unpin_extent_range()
2771 if (cluster && cluster->fragmented && in unpin_extent_range()
2773 spin_lock(&cluster->lock); in unpin_extent_range()
2774 cluster->fragmented = 0; in unpin_extent_range()
2775 spin_unlock(&cluster->lock); in unpin_extent_range()
2778 spin_lock(&space_info->lock); in unpin_extent_range()
2779 spin_lock(&cache->lock); in unpin_extent_range()
2780 cache->pinned -= len; in unpin_extent_range()
2781 btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); in unpin_extent_range()
2782 space_info->max_extent_size = 0; in unpin_extent_range()
2783 if (cache->ro) { in unpin_extent_range()
2784 space_info->bytes_readonly += len; in unpin_extent_range()
2792 spin_unlock(&cache->lock); in unpin_extent_range()
2794 global_rsv->space_info == space_info) { in unpin_extent_range()
2795 spin_lock(&global_rsv->lock); in unpin_extent_range()
2796 if (!global_rsv->full) { in unpin_extent_range()
2797 u64 to_add = min(len, global_rsv->size - in unpin_extent_range()
2798 global_rsv->reserved); in unpin_extent_range()
2800 global_rsv->reserved += to_add; in unpin_extent_range()
2803 if (global_rsv->reserved >= global_rsv->size) in unpin_extent_range()
2804 global_rsv->full = 1; in unpin_extent_range()
2805 len -= to_add; in unpin_extent_range()
2807 spin_unlock(&global_rsv->lock); in unpin_extent_range()
2812 spin_unlock(&space_info->lock); in unpin_extent_range()
2823 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_finish_extent_commit()
2831 unpin = &trans->transaction->pinned_extents; in btrfs_finish_extent_commit()
2836 mutex_lock(&fs_info->unused_bg_unpin_mutex); in btrfs_finish_extent_commit()
2839 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in btrfs_finish_extent_commit()
2845 end + 1 - start, NULL); in btrfs_finish_extent_commit()
2850 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in btrfs_finish_extent_commit()
2856 btrfs_discard_calc_delay(&fs_info->discard_ctl); in btrfs_finish_extent_commit()
2857 btrfs_discard_schedule_work(&fs_info->discard_ctl, true); in btrfs_finish_extent_commit()
2865 deleted_bgs = &trans->transaction->deleted_bgs; in btrfs_finish_extent_commit()
2869 ret = -EROFS; in btrfs_finish_extent_commit()
2872 block_group->start, in btrfs_finish_extent_commit()
2873 block_group->length, in btrfs_finish_extent_commit()
2876 list_del_init(&block_group->bg_list); in btrfs_finish_extent_commit()
2909 struct btrfs_extent_inline_ref *iref; in btrfs_get_extent_owner_root() local
2926 iref = (struct btrfs_extent_inline_ref *)ptr; in btrfs_get_extent_owner_root()
2927 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY); in btrfs_get_extent_owner_root()
2931 oref = (struct btrfs_extent_owner_ref *)(&iref->offset); in btrfs_get_extent_owner_root()
2943 u64 num_bytes = delta->num_bytes; in do_free_extent_accounting()
2945 if (delta->is_data) { in do_free_extent_accounting()
2948 csum_root = btrfs_csum_root(trans->fs_info, bytenr); in do_free_extent_accounting()
2962 ret = btrfs_record_squota_delta(trans->fs_info, delta); in do_free_extent_accounting()
2983 btrfs_abort_transaction(trans, -EUCLEAN); \
2984 btrfs_print_leaf(path->nodes[0]); \
2985 btrfs_crit(trans->fs_info, fmt, ##args); \
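
Editor's note: the three statements above are the body of a small error-reporting macro defined just before __btrfs_free_extent(). The macro name (abort_and_dump) and parameter list are recalled from the kernel source rather than visible in this listing:

	#define abort_and_dump(trans, path, fmt, args...)	\
	({							\
		btrfs_abort_transaction(trans, -EUCLEAN);	\
		btrfs_print_leaf(path->nodes[0]);		\
		btrfs_crit(trans->fs_info, fmt, ##args);	\
	})
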
3008 * node->bytenr = 13631488
3009 * node->num_bytes = 1048576
3033 * node->bytenr = 13631488
3034 * node->num_bytes = 1048576
3052 struct btrfs_fs_info *info = trans->fs_info; in __btrfs_free_extent()
3058 struct btrfs_extent_inline_ref *iref; in __btrfs_free_extent() local
3064 int refs_to_drop = node->ref_mod; in __btrfs_free_extent()
3067 u64 bytenr = node->bytenr; in __btrfs_free_extent()
3068 u64 num_bytes = node->num_bytes; in __btrfs_free_extent()
3072 u64 delayed_ref_root = href->owning_root; in __btrfs_free_extent()
3079 return -ENOMEM; in __btrfs_free_extent()
3086 node->bytenr, refs_to_drop); in __btrfs_free_extent()
3087 ret = -EINVAL; in __btrfs_free_extent()
3095 ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes, in __btrfs_free_extent()
3096 node->parent, node->ref_root, owner_objectid, in __btrfs_free_extent()
3106 extent_slot = path->slots[0]; in __btrfs_free_extent()
3108 btrfs_item_key_to_cpu(path->nodes[0], &key, in __btrfs_free_extent()
3124 if (path->slots[0] - extent_slot > 5) in __btrfs_free_extent()
3126 extent_slot--; in __btrfs_free_extent()
3130 if (iref) { in __btrfs_free_extent()
3132 "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref", in __btrfs_free_extent()
3133 path->slots[0]); in __btrfs_free_extent()
3134 ret = -EUCLEAN; in __btrfs_free_extent()
3157 &key, path, -1, 1); in __btrfs_free_extent()
3158 if (ret > 0 && skinny_metadata && path->slots[0]) { in __btrfs_free_extent()
3163 path->slots[0]--; in __btrfs_free_extent()
3164 btrfs_item_key_to_cpu(path->nodes[0], &key, in __btrfs_free_extent()
3165 path->slots[0]); in __btrfs_free_extent()
3179 &key, path, -1, 1); in __btrfs_free_extent()
3184 btrfs_print_leaf(path->nodes[0]); in __btrfs_free_extent()
3187 ret, bytenr, path->slots[0]); in __btrfs_free_extent()
3193 extent_slot = path->slots[0]; in __btrfs_free_extent()
3195 } else if (WARN_ON(ret == -ENOENT)) { in __btrfs_free_extent()
3198 bytenr, node->parent, node->ref_root, owner_objectid, in __btrfs_free_extent()
3199 owner_offset, path->slots[0]); in __btrfs_free_extent()
3206 leaf = path->nodes[0]; in __btrfs_free_extent()
3209 ret = -EUCLEAN; in __btrfs_free_extent()
3210 btrfs_err(trans->fs_info, in __btrfs_free_extent()
3226 path->slots[0], owner_objectid, item_size, in __btrfs_free_extent()
3228 ret = -EUCLEAN; in __btrfs_free_extent()
3239 refs_to_drop, refs, bytenr, path->slots[0]); in __btrfs_free_extent()
3240 ret = -EUCLEAN; in __btrfs_free_extent()
3243 refs -= refs_to_drop; in __btrfs_free_extent()
3252 if (iref) { in __btrfs_free_extent()
3255 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u", in __btrfs_free_extent()
3256 path->slots[0]); in __btrfs_free_extent()
3257 ret = -EUCLEAN; in __btrfs_free_extent()
3266 iref, refs_to_drop, is_data); in __btrfs_free_extent()
3284 extent_data_ref_count(path, iref)) { in __btrfs_free_extent()
3287 extent_data_ref_count(path, iref), in __btrfs_free_extent()
3288 refs_to_drop, path->slots[0]); in __btrfs_free_extent()
3289 ret = -EUCLEAN; in __btrfs_free_extent()
3292 if (iref) { in __btrfs_free_extent()
3293 if (path->slots[0] != extent_slot) { in __btrfs_free_extent()
3295 "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref", in __btrfs_free_extent()
3297 key.offset, path->slots[0]); in __btrfs_free_extent()
3298 ret = -EUCLEAN; in __btrfs_free_extent()
3308 if (path->slots[0] != extent_slot + 1) { in __btrfs_free_extent()
3311 path->slots[0]); in __btrfs_free_extent()
3312 ret = -EUCLEAN; in __btrfs_free_extent()
3315 path->slots[0] = extent_slot; in __btrfs_free_extent()
3327 delta.root = btrfs_get_extent_owner_root(trans->fs_info, in __btrfs_free_extent()
3330 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], in __btrfs_free_extent()
3356 struct btrfs_fs_info *fs_info = trans->fs_info; in check_ref_cleanup()
3361 delayed_refs = &trans->transaction->delayed_refs; in check_ref_cleanup()
3362 spin_lock(&delayed_refs->lock); in check_ref_cleanup()
3367 spin_lock(&head->lock); in check_ref_cleanup()
3368 if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root)) in check_ref_cleanup()
3378 if (!mutex_trylock(&head->mutex)) in check_ref_cleanup()
3382 head->processing = false; in check_ref_cleanup()
3384 spin_unlock(&head->lock); in check_ref_cleanup()
3385 spin_unlock(&delayed_refs->lock); in check_ref_cleanup()
3387 BUG_ON(head->extent_op); in check_ref_cleanup()
3388 if (head->must_insert_reserved) in check_ref_cleanup()
3392 mutex_unlock(&head->mutex); in check_ref_cleanup()
3396 spin_unlock(&head->lock); in check_ref_cleanup()
3399 spin_unlock(&delayed_refs->lock); in check_ref_cleanup()
3408 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_free_tree_block()
3415 .bytenr = buf->start, in btrfs_free_tree_block()
3416 .num_bytes = buf->len, in btrfs_free_tree_block()
3440 if (btrfs_header_generation(buf) != trans->transid) in btrfs_free_tree_block()
3444 ret = check_ref_cleanup(trans, buf->start); in btrfs_free_tree_block()
3449 bg = btrfs_lookup_block_group(fs_info, buf->start); in btrfs_free_tree_block()
3452 pin_down_extent(trans, bg, buf->start, buf->len, 1); in btrfs_free_tree_block()
3459 * operations for this node. If we re-allocate this node we in btrfs_free_tree_block()
3474 if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags) in btrfs_free_tree_block()
3476 pin_down_extent(trans, bg, buf->start, buf->len, 1); in btrfs_free_tree_block()
3481 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); in btrfs_free_tree_block()
3483 btrfs_add_free_space(bg, buf->start, buf->len); in btrfs_free_tree_block()
3484 btrfs_free_reserved_bytes(bg, buf->len, 0); in btrfs_free_tree_block()
3486 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); in btrfs_free_tree_block()
3494 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); in btrfs_free_tree_block()
3498 /* Can return -ENOMEM */
3501 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_free_extent()
3511 if (ref->ref_root == BTRFS_TREE_LOG_OBJECTID) { in btrfs_free_extent()
3512 btrfs_pin_extent(trans, ref->bytenr, ref->num_bytes, 1); in btrfs_free_extent()
3514 } else if (ref->type == BTRFS_REF_METADATA) { in btrfs_free_extent()
3520 if (ref->ref_root != BTRFS_TREE_LOG_OBJECTID) in btrfs_free_extent()
3567 down_read(&cache->data_rwsem); in btrfs_lock_block_group()
3575 down_read(&cache->data_rwsem); in btrfs_grab_block_group()
3582 __acquires(&cluster->refill_lock) in btrfs_lock_cluster()
3586 spin_lock(&cluster->refill_lock); in btrfs_lock_cluster()
3588 used_bg = cluster->block_group; in btrfs_lock_cluster()
3600 if (down_read_trylock(&used_bg->data_rwsem)) in btrfs_lock_cluster()
3603 spin_unlock(&cluster->refill_lock); in btrfs_lock_cluster()
3605 /* We should only have one level of nesting. */ in btrfs_lock_cluster()
3606 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); in btrfs_lock_cluster()
3608 spin_lock(&cluster->refill_lock); in btrfs_lock_cluster()
3609 if (used_bg == cluster->block_group) in btrfs_lock_cluster()
3612 up_read(&used_bg->data_rwsem); in btrfs_lock_cluster()
3622 up_read(&cache->data_rwsem); in btrfs_release_block_group()
3629 * Return -ENOENT to inform the caller that we need to fall back to unclustered mode.
3631 * Return 0 when we have found a location and set ffe_ctl->found_offset.
3638 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; in find_free_extent_clustered()
3643 cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc); in find_free_extent_clustered()
3646 if (cluster_bg != bg && (cluster_bg->ro || in find_free_extent_clustered()
3647 !block_group_bits(cluster_bg, ffe_ctl->flags))) in find_free_extent_clustered()
3651 ffe_ctl->num_bytes, cluster_bg->start, in find_free_extent_clustered()
3652 &ffe_ctl->max_extent_size); in find_free_extent_clustered()
3655 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3658 ffe_ctl->found_offset = offset; in find_free_extent_clustered()
3661 WARN_ON(last_ptr->block_group != cluster_bg); in find_free_extent_clustered()
3675 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { in find_free_extent_clustered()
3676 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3677 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); in find_free_extent_clustered()
3678 return -ENOENT; in find_free_extent_clustered()
3685 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); in find_free_extent_clustered()
3688 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { in find_free_extent_clustered()
3689 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3690 return -ENOENT; in find_free_extent_clustered()
3694 ffe_ctl->empty_cluster + ffe_ctl->empty_size, in find_free_extent_clustered()
3695 bg->full_stripe_len); in find_free_extent_clustered()
3696 ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start, in find_free_extent_clustered()
3697 ffe_ctl->num_bytes, aligned_cluster); in find_free_extent_clustered()
3701 ffe_ctl->num_bytes, ffe_ctl->search_start, in find_free_extent_clustered()
3702 &ffe_ctl->max_extent_size); in find_free_extent_clustered()
3705 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3706 ffe_ctl->found_offset = offset; in find_free_extent_clustered()
3717 spin_unlock(&last_ptr->refill_lock); in find_free_extent_clustered()
3723 * Return 0 when we found a free extent and set ffe_ctl->found_offset
3728 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; in find_free_extent_unclustered()
3737 spin_lock(&last_ptr->lock); in find_free_extent_unclustered()
3738 last_ptr->fragmented = 1; in find_free_extent_unclustered()
3739 spin_unlock(&last_ptr->lock); in find_free_extent_unclustered()
3741 if (ffe_ctl->cached) { in find_free_extent_unclustered()
3744 free_space_ctl = bg->free_space_ctl; in find_free_extent_unclustered()
3745 spin_lock(&free_space_ctl->tree_lock); in find_free_extent_unclustered()
3746 if (free_space_ctl->free_space < in find_free_extent_unclustered()
3747 ffe_ctl->num_bytes + ffe_ctl->empty_cluster + in find_free_extent_unclustered()
3748 ffe_ctl->empty_size) { in find_free_extent_unclustered()
3749 ffe_ctl->total_free_space = max_t(u64, in find_free_extent_unclustered()
3750 ffe_ctl->total_free_space, in find_free_extent_unclustered()
3751 free_space_ctl->free_space); in find_free_extent_unclustered()
3752 spin_unlock(&free_space_ctl->tree_lock); in find_free_extent_unclustered()
3755 spin_unlock(&free_space_ctl->tree_lock); in find_free_extent_unclustered()
3758 offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start, in find_free_extent_unclustered()
3759 ffe_ctl->num_bytes, ffe_ctl->empty_size, in find_free_extent_unclustered()
3760 &ffe_ctl->max_extent_size); in find_free_extent_unclustered()
3763 ffe_ctl->found_offset = offset; in find_free_extent_unclustered()
3774 if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { in do_allocation_clustered()
3778 /* ret == -ENOENT case falls through */ in do_allocation_clustered()
3785 * Tree-log block group locking
3790 * for tree-log metadata.
3801 * Simple allocator for a sequential-only block group. It only allows sequential
3809 struct btrfs_fs_info *fs_info = block_group->fs_info; in do_allocation_zoned()
3810 struct btrfs_space_info *space_info = block_group->space_info; in do_allocation_zoned()
3811 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in do_allocation_zoned()
3812 u64 start = block_group->start; in do_allocation_zoned()
3813 u64 num_bytes = ffe_ctl->num_bytes; in do_allocation_zoned()
3815 u64 bytenr = block_group->start; in do_allocation_zoned()
3821 ASSERT(btrfs_is_zoned(block_group->fs_info)); in do_allocation_zoned()
3824 * Do not allow non-tree-log blocks in the dedicated tree-log block in do_allocation_zoned()
3827 spin_lock(&fs_info->treelog_bg_lock); in do_allocation_zoned()
3828 log_bytenr = fs_info->treelog_bg; in do_allocation_zoned()
3829 if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || in do_allocation_zoned()
3830 (!ffe_ctl->for_treelog && bytenr == log_bytenr))) in do_allocation_zoned()
3832 spin_unlock(&fs_info->treelog_bg_lock); in do_allocation_zoned()
3837 * Do not allow non-relocation blocks in the dedicated relocation block in do_allocation_zoned()
3840 spin_lock(&fs_info->relocation_bg_lock); in do_allocation_zoned()
3841 data_reloc_bytenr = fs_info->data_reloc_bg; in do_allocation_zoned()
3843 ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) || in do_allocation_zoned()
3844 (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr))) in do_allocation_zoned()
3846 spin_unlock(&fs_info->relocation_bg_lock); in do_allocation_zoned()
3851 spin_lock(&block_group->lock); in do_allocation_zoned()
3852 if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) { in do_allocation_zoned()
3855 * May need to clear fs_info->{treelog,data_reloc}_bg. in do_allocation_zoned()
3859 spin_unlock(&block_group->lock); in do_allocation_zoned()
3862 if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) && in do_allocation_zoned()
3866 * May need to clear fs_info->{treelog,data_reloc}_bg. in do_allocation_zoned()
3871 spin_lock(&space_info->lock); in do_allocation_zoned()
3872 spin_lock(&block_group->lock); in do_allocation_zoned()
3873 spin_lock(&fs_info->treelog_bg_lock); in do_allocation_zoned()
3874 spin_lock(&fs_info->relocation_bg_lock); in do_allocation_zoned()
3879 ASSERT(!ffe_ctl->for_treelog || in do_allocation_zoned()
3880 block_group->start == fs_info->treelog_bg || in do_allocation_zoned()
3881 fs_info->treelog_bg == 0); in do_allocation_zoned()
3882 ASSERT(!ffe_ctl->for_data_reloc || in do_allocation_zoned()
3883 block_group->start == fs_info->data_reloc_bg || in do_allocation_zoned()
3884 fs_info->data_reloc_bg == 0); in do_allocation_zoned()
3886 if (block_group->ro || in do_allocation_zoned()
3887 (!ffe_ctl->for_data_reloc && in do_allocation_zoned()
3888 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) { in do_allocation_zoned()
3894 * Do not allow currently using block group to be tree-log dedicated in do_allocation_zoned()
3897 if (ffe_ctl->for_treelog && !fs_info->treelog_bg && in do_allocation_zoned()
3898 (block_group->used || block_group->reserved)) { in do_allocation_zoned()
3907 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg && in do_allocation_zoned()
3908 (block_group->used || block_group->reserved)) { in do_allocation_zoned()
3913 WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity); in do_allocation_zoned()
3914 avail = block_group->zone_capacity - block_group->alloc_offset; in do_allocation_zoned()
3916 if (ffe_ctl->max_extent_size < avail) { in do_allocation_zoned()
3921 ffe_ctl->max_extent_size = avail; in do_allocation_zoned()
3922 ffe_ctl->total_free_space = avail; in do_allocation_zoned()
3928 if (ffe_ctl->for_treelog && !fs_info->treelog_bg) in do_allocation_zoned()
3929 fs_info->treelog_bg = block_group->start; in do_allocation_zoned()
3931 if (ffe_ctl->for_data_reloc) { in do_allocation_zoned()
3932 if (!fs_info->data_reloc_bg) in do_allocation_zoned()
3933 fs_info->data_reloc_bg = block_group->start; in do_allocation_zoned()
3936 * for data relocation. Compared to increasing the ->ro, setting in do_allocation_zoned()
3937 * the ->zoned_data_reloc_ongoing flag still allows nocow in do_allocation_zoned()
3941 * regular (non-relocation data) extent. With mix of relocation in do_allocation_zoned()
3949 set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags); in do_allocation_zoned()
3952 ffe_ctl->found_offset = start + block_group->alloc_offset; in do_allocation_zoned()
3953 block_group->alloc_offset += num_bytes; in do_allocation_zoned()
3954 spin_lock(&ctl->tree_lock); in do_allocation_zoned()
3955 ctl->free_space -= num_bytes; in do_allocation_zoned()
3956 spin_unlock(&ctl->tree_lock); in do_allocation_zoned()
3963 ffe_ctl->search_start = ffe_ctl->found_offset; in do_allocation_zoned()
3966 if (ret && ffe_ctl->for_treelog) in do_allocation_zoned()
3967 fs_info->treelog_bg = 0; in do_allocation_zoned()
3968 if (ret && ffe_ctl->for_data_reloc) in do_allocation_zoned()
3969 fs_info->data_reloc_bg = 0; in do_allocation_zoned()
3970 spin_unlock(&fs_info->relocation_bg_lock); in do_allocation_zoned()
3971 spin_unlock(&fs_info->treelog_bg_lock); in do_allocation_zoned()
3972 spin_unlock(&block_group->lock); in do_allocation_zoned()
3973 spin_unlock(&space_info->lock); in do_allocation_zoned()
3981 switch (ffe_ctl->policy) { in do_allocation()
3995 switch (ffe_ctl->policy) { in release_block_group()
3997 ffe_ctl->retry_uncached = false; in release_block_group()
4006 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != in release_block_group()
4007 ffe_ctl->index); in release_block_group()
4014 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; in found_extent_clustered()
4016 if (!ffe_ctl->use_cluster && last_ptr) { in found_extent_clustered()
4017 spin_lock(&last_ptr->lock); in found_extent_clustered()
4018 last_ptr->window_start = ins->objectid; in found_extent_clustered()
4019 spin_unlock(&last_ptr->lock); in found_extent_clustered()
4026 switch (ffe_ctl->policy) { in found_extent()
4042 if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)) in can_allocate_chunk_zoned()
4046 if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags)) in can_allocate_chunk_zoned()
4056 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { in can_allocate_chunk_zoned()
4070 if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size) in can_allocate_chunk_zoned()
4071 return -ENOSPC; in can_allocate_chunk_zoned()
4080 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) in can_allocate_chunk_zoned()
4081 return -EAGAIN; in can_allocate_chunk_zoned()
4094 switch (ffe_ctl->policy) { in can_allocate_chunk()
4105 * Return >0 means the caller needs to re-search for a free extent
4114 struct btrfs_root *root = fs_info->chunk_root; in find_free_extent_update_loop()
4117 if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && in find_free_extent_update_loop()
4118 ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) in find_free_extent_update_loop()
4119 ffe_ctl->orig_have_caching_bg = true; in find_free_extent_update_loop()
4121 if (ins->objectid) { in find_free_extent_update_loop()
4126 if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) in find_free_extent_update_loop()
4129 ffe_ctl->index++; in find_free_extent_update_loop()
4130 if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) in find_free_extent_update_loop()
4134 if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { in find_free_extent_update_loop()
4135 ffe_ctl->index = 0; in find_free_extent_update_loop()
4141 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT && in find_free_extent_update_loop()
4142 (!ffe_ctl->orig_have_caching_bg && full_search)) in find_free_extent_update_loop()
4143 ffe_ctl->loop++; in find_free_extent_update_loop()
4144 ffe_ctl->loop++; in find_free_extent_update_loop()
4146 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { in find_free_extent_update_loop()
4155 trans = current->journal_info; in find_free_extent_update_loop()
4166 ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, in find_free_extent_update_loop()
4170 if (ret == -ENOSPC) { in find_free_extent_update_loop()
4172 ffe_ctl->loop++; in find_free_extent_update_loop()
4184 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { in find_free_extent_update_loop()
4185 if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) in find_free_extent_update_loop()
4186 return -ENOSPC; in find_free_extent_update_loop()
4192 if (ffe_ctl->empty_size == 0 && in find_free_extent_update_loop()
4193 ffe_ctl->empty_cluster == 0) in find_free_extent_update_loop()
4194 return -ENOSPC; in find_free_extent_update_loop()
4195 ffe_ctl->empty_size = 0; in find_free_extent_update_loop()
4196 ffe_ctl->empty_cluster = 0; in find_free_extent_update_loop()
4200 return -ENOSPC; in find_free_extent_update_loop()
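find_free_extent_update_loop() escalates the search one stage at a time, rescanning every raid index between stages. A compact model of that escalation; the stage names mirror the kernel enum as of recent releases, while the real loop additionally forgives -ENOSPC from chunk allocation and only uses LOOP_NO_EMPTY_SIZE for the clustered policy:

#include <stdbool.h>
#include <stdio.h>

/* search_pass() stands in for one full scan of all block groups. */
enum loop_stage {
        LOOP_CACHING_NOWAIT,    /* use only already-cached block groups */
        LOOP_CACHING_WAIT,      /* wait for caching to make progress */
        LOOP_UNSET_SIZE_CLASS,  /* also try block groups w/o a size class */
        LOOP_ALLOC_CHUNK,       /* allocate a brand-new chunk */
        LOOP_WRONG_SIZE_CLASS,  /* ignore size-class matching entirely */
        LOOP_NO_EMPTY_SIZE,     /* clustered only: drop empty_size/cluster */
};

static bool search_pass(enum loop_stage stage)
{
        /* Pretend only a freshly allocated chunk satisfies the request. */
        return stage >= LOOP_ALLOC_CHUNK;
}

int main(void)
{
        enum loop_stage stage = LOOP_CACHING_NOWAIT;

        for (;;) {
                if (search_pass(stage)) {
                        printf("allocated at stage %d\n", (int)stage);
                        return 0;
                }
                if (stage == LOOP_NO_EMPTY_SIZE) {
                        printf("ENOSPC\n");     /* nothing left to try */
                        return 1;
                }
                stage++;  /* push harder, then rescan every raid index */
        }
}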
4206 if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) in find_free_extent_check_size_class()
4210 if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) in find_free_extent_check_size_class()
4212 if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && in find_free_extent_check_size_class()
4213 bg->size_class == BTRFS_BG_SZ_NONE) in find_free_extent_check_size_class()
4215 return ffe_ctl->size_class == bg->size_class; in find_free_extent_check_size_class()
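The size-class gate steers small, medium and large extents into matching block groups to limit fragmentation, until the search gets desperate enough to waive the match. Restated as a predicate with simplified stand-in types and loop constants:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of find_free_extent_check_size_class(); not the kernel types. */
enum size_class { SZ_NONE, SZ_SMALL, SZ_MEDIUM, SZ_LARGE };

#define LOOP_UNSET_SIZE_CLASS 2
#define LOOP_WRONG_SIZE_CLASS 4

static bool size_class_ok(bool zoned, int loop,
                          enum size_class wanted, enum size_class bg)
{
        if (zoned)
                return true;     /* zoned allocation has no size classes */
        if (loop >= LOOP_WRONG_SIZE_CLASS)
                return true;     /* desperate enough to take any group */
        if (loop >= LOOP_UNSET_SIZE_CLASS && bg == SZ_NONE)
                return true;     /* an unclassified group is fair game */
        return wanted == bg;     /* otherwise the classes must match */
}

int main(void)
{
        printf("%d\n", size_class_ok(false, 0, SZ_SMALL, SZ_LARGE)); /* 0 */
        printf("%d\n", size_class_ok(false, 4, SZ_SMALL, SZ_LARGE)); /* 1 */
        return 0;
}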
4233 if (space_info->max_extent_size) { in prepare_allocation_clustered()
4234 spin_lock(&space_info->lock); in prepare_allocation_clustered()
4235 if (space_info->max_extent_size && in prepare_allocation_clustered()
4236 ffe_ctl->num_bytes > space_info->max_extent_size) { in prepare_allocation_clustered()
4237 ins->offset = space_info->max_extent_size; in prepare_allocation_clustered()
4238 spin_unlock(&space_info->lock); in prepare_allocation_clustered()
4239 return -ENOSPC; in prepare_allocation_clustered()
4240 } else if (space_info->max_extent_size) { in prepare_allocation_clustered()
4241 ffe_ctl->use_cluster = false; in prepare_allocation_clustered()
4243 spin_unlock(&space_info->lock); in prepare_allocation_clustered()
4246 ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, in prepare_allocation_clustered()
4247 &ffe_ctl->empty_cluster); in prepare_allocation_clustered()
4248 if (ffe_ctl->last_ptr) { in prepare_allocation_clustered()
4249 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; in prepare_allocation_clustered()
4251 spin_lock(&last_ptr->lock); in prepare_allocation_clustered()
4252 if (last_ptr->block_group) in prepare_allocation_clustered()
4253 ffe_ctl->hint_byte = last_ptr->window_start; in prepare_allocation_clustered()
4254 if (last_ptr->fragmented) { in prepare_allocation_clustered()
4260 ffe_ctl->hint_byte = last_ptr->window_start; in prepare_allocation_clustered()
4261 ffe_ctl->use_cluster = false; in prepare_allocation_clustered()
4263 spin_unlock(&last_ptr->lock); in prepare_allocation_clustered()
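The max_extent_size check above lets an allocation fail fast: a previous unsuccessful search records the largest contiguous free extent it saw in space_info, so a later, larger request can return -ENOSPC immediately and report the size that would fit. A sketch with the locking elided and a stand-in struct:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct space_info_model {
        uint64_t max_extent_size;       /* 0 means unknown, must search */
};

static int check_cached_max(const struct space_info_model *si,
                            uint64_t num_bytes, uint64_t *ins_offset)
{
        if (si->max_extent_size && num_bytes > si->max_extent_size) {
                *ins_offset = si->max_extent_size; /* report what would fit */
                return -ENOSPC;
        }
        return 0;
}

int main(void)
{
        struct space_info_model si = { .max_extent_size = 64 * 1024 };
        uint64_t offset = 0;

        if (check_cached_max(&si, 1 << 20, &offset))
                printf("ENOSPC, largest hole %llu\n",
                       (unsigned long long)offset);
        return 0;
}

When a cached maximum exists but is large enough, the excerpt instead disables clustered allocation (use_cluster = false), since clustering would only fragment an already tight space.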
4272 if (ffe_ctl->for_treelog) { in prepare_allocation_zoned()
4273 spin_lock(&fs_info->treelog_bg_lock); in prepare_allocation_zoned()
4274 if (fs_info->treelog_bg) in prepare_allocation_zoned()
4275 ffe_ctl->hint_byte = fs_info->treelog_bg; in prepare_allocation_zoned()
4276 spin_unlock(&fs_info->treelog_bg_lock); in prepare_allocation_zoned()
4277 } else if (ffe_ctl->for_data_reloc) { in prepare_allocation_zoned()
4278 spin_lock(&fs_info->relocation_bg_lock); in prepare_allocation_zoned()
4279 if (fs_info->data_reloc_bg) in prepare_allocation_zoned()
4280 ffe_ctl->hint_byte = fs_info->data_reloc_bg; in prepare_allocation_zoned()
4281 spin_unlock(&fs_info->relocation_bg_lock); in prepare_allocation_zoned()
4282 } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { in prepare_allocation_zoned()
4285 spin_lock(&fs_info->zone_active_bgs_lock); in prepare_allocation_zoned()
4286 list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) { in prepare_allocation_zoned()
4291 u64 avail = block_group->zone_capacity - block_group->alloc_offset; in prepare_allocation_zoned()
4293 if (block_group_bits(block_group, ffe_ctl->flags) && in prepare_allocation_zoned()
4294 avail >= ffe_ctl->num_bytes) { in prepare_allocation_zoned()
4295 ffe_ctl->hint_byte = block_group->start; in prepare_allocation_zoned()
4299 spin_unlock(&fs_info->zone_active_bgs_lock); in prepare_allocation_zoned()
4310 switch (ffe_ctl->policy) { in prepare_allocation()
4324 * ins->objectid == start position
4325 * ins->flags = BTRFS_EXTENT_ITEM_KEY
4326 * ins->offset == the size of the hole.
4335 * |- Iterate through all block groups
4336 * | |- Get a valid block group
4337 * | |- Try to do clustered allocation in that block group
4338 * | |- Try to do unclustered allocation in that block group
4339 * | |- Check if the result is valid
4340 * | | |- If valid, then exit
4341 * | |- Jump to next block group
4343 * |- Push harder to find free extents
4344 * |- If not found, re-iterate all block groups
4350 struct btrfs_fs_info *fs_info = root->fs_info; in find_free_extent()
4357 WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); in find_free_extent()
4359 ffe_ctl->search_start = 0; in find_free_extent()
4361 ffe_ctl->empty_cluster = 0; in find_free_extent()
4362 ffe_ctl->last_ptr = NULL; in find_free_extent()
4363 ffe_ctl->use_cluster = true; in find_free_extent()
4364 ffe_ctl->have_caching_bg = false; in find_free_extent()
4365 ffe_ctl->orig_have_caching_bg = false; in find_free_extent()
4366 ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags); in find_free_extent()
4367 ffe_ctl->loop = 0; in find_free_extent()
4368 ffe_ctl->retry_uncached = false; in find_free_extent()
4369 ffe_ctl->cached = 0; in find_free_extent()
4370 ffe_ctl->max_extent_size = 0; in find_free_extent()
4371 ffe_ctl->total_free_space = 0; in find_free_extent()
4372 ffe_ctl->found_offset = 0; in find_free_extent()
4373 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; in find_free_extent()
4374 ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes); in find_free_extent()
4377 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; in find_free_extent()
4379 ins->type = BTRFS_EXTENT_ITEM_KEY; in find_free_extent()
4380 ins->objectid = 0; in find_free_extent()
4381 ins->offset = 0; in find_free_extent()
4385 space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags); in find_free_extent()
4387 btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags); in find_free_extent()
4388 return -ENOSPC; in find_free_extent()
4395 ffe_ctl->search_start = max(ffe_ctl->search_start, in find_free_extent()
4397 ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); in find_free_extent()
4398 if (ffe_ctl->search_start == ffe_ctl->hint_byte) { in find_free_extent()
4400 ffe_ctl->search_start); in find_free_extent()
4405 * However if we are re-searching with an ideal block group in find_free_extent()
4408 if (block_group && block_group_bits(block_group, ffe_ctl->flags) && in find_free_extent()
4409 block_group->cached != BTRFS_CACHE_NO) { in find_free_extent()
4410 down_read(&space_info->groups_sem); in find_free_extent()
4411 if (list_empty(&block_group->list) || in find_free_extent()
4412 block_group->ro) { in find_free_extent()
4420 up_read(&space_info->groups_sem); in find_free_extent()
4422 ffe_ctl->index = btrfs_bg_flags_to_raid_index( in find_free_extent()
4423 block_group->flags); in find_free_extent()
4425 ffe_ctl->delalloc); in find_free_extent()
4426 ffe_ctl->hinted = true; in find_free_extent()
4435 ffe_ctl->have_caching_bg = false; in find_free_extent()
4436 if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) || in find_free_extent()
4437 ffe_ctl->index == 0) in find_free_extent()
4439 down_read(&space_info->groups_sem); in find_free_extent()
4441 &space_info->block_groups[ffe_ctl->index], list) { in find_free_extent()
4444 ffe_ctl->hinted = false; in find_free_extent()
4445 /* If the block group is read-only, we can skip it entirely. */ in find_free_extent()
4446 if (unlikely(block_group->ro)) { in find_free_extent()
4447 if (ffe_ctl->for_treelog) in find_free_extent()
4449 if (ffe_ctl->for_data_reloc) in find_free_extent()
4454 btrfs_grab_block_group(block_group, ffe_ctl->delalloc); in find_free_extent()
4455 ffe_ctl->search_start = block_group->start; in find_free_extent()
4462 if (!block_group_bits(block_group, ffe_ctl->flags)) { in find_free_extent()
4473 if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) in find_free_extent()
4481 btrfs_release_block_group(block_group, ffe_ctl->delalloc); in find_free_extent()
4487 ffe_ctl->cached = btrfs_block_group_done(block_group); in find_free_extent()
4488 if (unlikely(!ffe_ctl->cached)) { in find_free_extent()
4489 ffe_ctl->have_caching_bg = true; in find_free_extent()
4508 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { in find_free_extent()
4510 cache_block_group_error = -EIO; in find_free_extent()
4523 btrfs_release_block_group(block_group, ffe_ctl->delalloc); in find_free_extent()
4528 ffe_ctl->search_start = round_up(ffe_ctl->found_offset, in find_free_extent()
4529 fs_info->stripesize); in find_free_extent()
4532 if (ffe_ctl->search_start + ffe_ctl->num_bytes > in find_free_extent()
4533 block_group->start + block_group->length) { in find_free_extent()
4535 ffe_ctl->found_offset, in find_free_extent()
4536 ffe_ctl->num_bytes); in find_free_extent()
4540 if (ffe_ctl->found_offset < ffe_ctl->search_start) in find_free_extent()
4542 ffe_ctl->found_offset, in find_free_extent()
4543 ffe_ctl->search_start - ffe_ctl->found_offset); in find_free_extent()
4545 ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes, in find_free_extent()
4546 ffe_ctl->num_bytes, in find_free_extent()
4547 ffe_ctl->delalloc, in find_free_extent()
4548 ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS); in find_free_extent()
4549 if (ret == -EAGAIN) { in find_free_extent()
4551 ffe_ctl->found_offset, in find_free_extent()
4552 ffe_ctl->num_bytes); in find_free_extent()
4558 ins->objectid = ffe_ctl->search_start; in find_free_extent()
4559 ins->offset = ffe_ctl->num_bytes; in find_free_extent()
4562 btrfs_release_block_group(block_group, ffe_ctl->delalloc); in find_free_extent()
4565 if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && in find_free_extent()
4566 !ffe_ctl->retry_uncached) { in find_free_extent()
4567 ffe_ctl->retry_uncached = true; in find_free_extent()
4569 ffe_ctl->num_bytes + in find_free_extent()
4570 ffe_ctl->empty_cluster + in find_free_extent()
4571 ffe_ctl->empty_size); in find_free_extent()
4574 release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc); in find_free_extent()
4577 up_read(&space_info->groups_sem); in find_free_extent()
4583 if (ret == -ENOSPC && !cache_block_group_error) { in find_free_extent()
4585 * Use ffe_ctl->total_free_space as fallback if we can't find in find_free_extent()
4588 if (!ffe_ctl->max_extent_size) in find_free_extent()
4589 ffe_ctl->max_extent_size = ffe_ctl->total_free_space; in find_free_extent()
4590 spin_lock(&space_info->lock); in find_free_extent()
4591 space_info->max_extent_size = ffe_ctl->max_extent_size; in find_free_extent()
4592 spin_unlock(&space_info->lock); in find_free_extent()
4593 ins->offset = ffe_ctl->max_extent_size; in find_free_extent()
4594 } else if (ret == -ENOSPC) { in find_free_extent()
4604 * @root - The root that will contain this extent
4606 * @ram_bytes - The amount of space in ram that @num_bytes take. This
4610 * @num_bytes - Number of bytes to allocate on-disk.
4612 * @min_alloc_size - Indicates the minimum amount of space that the
4619 * @empty_size - A hint that you plan on doing more COW. This is the
4624 * @hint_byte - Hint to the allocator to start searching above the byte
4627 * @ins - This key is modified to record the found hole. It will
4629 * ins->objectid == start position
4630 * ins->flags = BTRFS_EXTENT_ITEM_KEY
4631 * ins->offset == the size of the hole.
4633 * @is_data - Boolean flag indicating whether an extent is
4636 * @delalloc - Boolean flag indicating whether this allocation is for
4642 * case -ENOSPC is returned then @ins->offset will contain the size of the
4650 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_reserve_extent()
4660 WARN_ON(num_bytes < fs_info->sectorsize); in btrfs_reserve_extent()
4674 btrfs_dec_block_group_reservations(fs_info, ins->objectid); in btrfs_reserve_extent()
4675 } else if (ret == -ENOSPC) { in btrfs_reserve_extent()
4676 if (!final_tried && ins->offset) { in btrfs_reserve_extent()
4677 num_bytes = min(num_bytes >> 1, ins->offset); in btrfs_reserve_extent()
4679 fs_info->sectorsize); in btrfs_reserve_extent()
4690 "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d", in btrfs_reserve_extent()
4710 return -ENOSPC; in btrfs_free_reserved_extent()
4727 cache = btrfs_lookup_block_group(trans->fs_info, eb->start); in btrfs_pin_reserved_extent()
4729 btrfs_err(trans->fs_info, "unable to find block group for %llu", in btrfs_pin_reserved_extent()
4730 eb->start); in btrfs_pin_reserved_extent()
4731 return -ENOSPC; in btrfs_pin_reserved_extent()
4734 ret = pin_down_extent(trans, cache, eb->start, eb->len, 1); in btrfs_pin_reserved_extent()
4742 struct btrfs_fs_info *fs_info = trans->fs_info; in alloc_reserved_extent()
4766 struct btrfs_fs_info *fs_info = trans->fs_info; in alloc_reserved_file_extent()
4771 struct btrfs_extent_inline_ref *iref; in alloc_reserved_file_extent() local
4790 return -ENOMEM; in alloc_reserved_file_extent()
4792 extent_root = btrfs_extent_root(fs_info, ins->objectid); in alloc_reserved_file_extent()
4799 leaf = path->nodes[0]; in alloc_reserved_file_extent()
4800 extent_item = btrfs_item_ptr(leaf, path->slots[0], in alloc_reserved_file_extent()
4803 btrfs_set_extent_generation(leaf, extent_item, trans->transid); in alloc_reserved_file_extent()
4807 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); in alloc_reserved_file_extent()
4809 btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_EXTENT_OWNER_REF_KEY); in alloc_reserved_file_extent()
4810 oref = (struct btrfs_extent_owner_ref *)(&iref->offset); in alloc_reserved_file_extent()
4812 iref = (struct btrfs_extent_inline_ref *)(oref + 1); in alloc_reserved_file_extent()
4814 btrfs_set_extent_inline_ref_type(leaf, iref, type); in alloc_reserved_file_extent()
4818 ref = (struct btrfs_shared_data_ref *)(iref + 1); in alloc_reserved_file_extent()
4819 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); in alloc_reserved_file_extent()
4823 ref = (struct btrfs_extent_data_ref *)(&iref->offset); in alloc_reserved_file_extent()
4830 btrfs_mark_buffer_dirty(trans, path->nodes[0]); in alloc_reserved_file_extent()
4833 return alloc_reserved_extent(trans, ins->objectid, ins->offset); in alloc_reserved_file_extent()
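alloc_reserved_file_extent() reserves a single item sized for the extent item plus its inline references, then lays the refs out back to back exactly as the pointer casts above show; with simple quotas an owner ref (9 bytes: type plus root id) is squeezed in first. The size arithmetic, replicated in user space with packed stand-in structs whose field widths match the on-disk format:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
struct extent_item       { uint64_t refs, generation, flags; };
struct extent_inline_ref { uint8_t type; uint64_t offset; };
struct extent_data_ref   { uint64_t root, objectid, offset; uint32_t count; };
struct shared_data_ref   { uint32_t count; };
#pragma pack(pop)

enum { TREE_BLOCK_REF, SHARED_BLOCK_REF, EXTENT_DATA_REF, SHARED_DATA_REF };

/* Bytes one inline ref occupies after the extent item, in the spirit of
 * btrfs_extent_inline_ref_size(). */
static size_t inline_ref_size(int type)
{
        switch (type) {
        case TREE_BLOCK_REF:
        case SHARED_BLOCK_REF:
                return sizeof(struct extent_inline_ref);
        case SHARED_DATA_REF:
                /* the shared ref follows a full inline ref (offset = parent) */
                return sizeof(struct extent_inline_ref) +
                       sizeof(struct shared_data_ref);
        case EXTENT_DATA_REF:
                /* the data ref body replaces the 64-bit offset field */
                return offsetof(struct extent_inline_ref, offset) +
                       sizeof(struct extent_data_ref);
        }
        return 0;
}

int main(void)
{
        printf("keyed data ref item:  %zu bytes\n",
               sizeof(struct extent_item) + inline_ref_size(EXTENT_DATA_REF));
        printf("shared data ref item: %zu bytes\n",
               sizeof(struct extent_item) + inline_ref_size(SHARED_DATA_REF));
        return 0;
}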
4840 struct btrfs_fs_info *fs_info = trans->fs_info; in alloc_reserved_tree_block()
4846 struct btrfs_extent_inline_ref *iref; in alloc_reserved_tree_block() local
4849 u32 size = sizeof(*extent_item) + sizeof(*iref); in alloc_reserved_tree_block()
4850 const u64 flags = (extent_op ? extent_op->flags_to_set : 0); in alloc_reserved_tree_block()
4851 /* The owner of a tree block is the level. */ in alloc_reserved_tree_block()
4852 int level = btrfs_delayed_ref_owner(node); in alloc_reserved_tree_block() local
4855 extent_key.objectid = node->bytenr; in alloc_reserved_tree_block()
4857 /* The owner of a tree block is the level. */ in alloc_reserved_tree_block()
4858 extent_key.offset = level; in alloc_reserved_tree_block()
4861 extent_key.offset = node->num_bytes; in alloc_reserved_tree_block()
4868 return -ENOMEM; in alloc_reserved_tree_block()
4878 leaf = path->nodes[0]; in alloc_reserved_tree_block()
4879 extent_item = btrfs_item_ptr(leaf, path->slots[0], in alloc_reserved_tree_block()
4882 btrfs_set_extent_generation(leaf, extent_item, trans->transid); in alloc_reserved_tree_block()
4887 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); in alloc_reserved_tree_block()
4890 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key); in alloc_reserved_tree_block()
4891 btrfs_set_tree_block_level(leaf, block_info, level); in alloc_reserved_tree_block()
4892 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); in alloc_reserved_tree_block()
4895 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { in alloc_reserved_tree_block()
4896 btrfs_set_extent_inline_ref_type(leaf, iref, in alloc_reserved_tree_block()
4898 btrfs_set_extent_inline_ref_offset(leaf, iref, node->parent); in alloc_reserved_tree_block()
4900 btrfs_set_extent_inline_ref_type(leaf, iref, in alloc_reserved_tree_block()
4902 btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root); in alloc_reserved_tree_block()
4908 return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize); in alloc_reserved_tree_block()
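The skinny-metadata split above decides the key format for a tree block: with the SKINNY_METADATA incompat flag the level travels in the key offset of a METADATA_ITEM, otherwise an EXTENT_ITEM keyed by byte length is followed by a btrfs_tree_block_info carrying the key and level, which is why block_info is only written in that branch. A sketch of the key choice; the key-type values are the on-disk constants, the structs are simplified:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXTENT_ITEM_KEY   168
#define METADATA_ITEM_KEY 169

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

static struct key tree_block_key(uint64_t bytenr, uint64_t num_bytes,
                                 int level, bool skinny_metadata)
{
        struct key k = { .objectid = bytenr };

        if (skinny_metadata) {
                k.type = METADATA_ITEM_KEY;
                k.offset = level;      /* the level rides in the key offset */
        } else {
                k.type = EXTENT_ITEM_KEY;
                k.offset = num_bytes;  /* level goes in a tree_block_info */
        }
        return k;
}

int main(void)
{
        struct key k = tree_block_key(1ull << 30, 16384, 2, true);

        printf("(%llu, %u, %llu)\n", (unsigned long long)k.objectid,
               k.type, (unsigned long long)k.offset);
        return 0;
}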
4918 .bytenr = ins->objectid, in btrfs_alloc_reserved_file_extent()
4919 .num_bytes = ins->offset, in btrfs_alloc_reserved_file_extent()
4926 if (btrfs_is_data_reloc_root(root) && is_fstree(root->relocation_src_root)) in btrfs_alloc_reserved_file_extent()
4927 generic_ref.owning_root = root->relocation_src_root; in btrfs_alloc_reserved_file_extent()
4930 btrfs_ref_tree_mod(root->fs_info, &generic_ref); in btrfs_alloc_reserved_file_extent()
4944 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_alloc_logged_file_extent()
4950 .num_bytes = ins->offset, in btrfs_alloc_logged_file_extent()
4951 .generation = trans->transid, in btrfs_alloc_logged_file_extent()
4961 ret = __exclude_logged_extent(fs_info, ins->objectid, in btrfs_alloc_logged_file_extent()
4962 ins->offset); in btrfs_alloc_logged_file_extent()
4967 block_group = btrfs_lookup_block_group(fs_info, ins->objectid); in btrfs_alloc_logged_file_extent()
4969 return -EINVAL; in btrfs_alloc_logged_file_extent()
4971 space_info = block_group->space_info; in btrfs_alloc_logged_file_extent()
4972 spin_lock(&space_info->lock); in btrfs_alloc_logged_file_extent()
4973 spin_lock(&block_group->lock); in btrfs_alloc_logged_file_extent()
4974 space_info->bytes_reserved += ins->offset; in btrfs_alloc_logged_file_extent()
4975 block_group->reserved += ins->offset; in btrfs_alloc_logged_file_extent()
4976 spin_unlock(&block_group->lock); in btrfs_alloc_logged_file_extent()
4977 spin_unlock(&space_info->lock); in btrfs_alloc_logged_file_extent()
4982 btrfs_pin_extent(trans, ins->objectid, ins->offset, 1); in btrfs_alloc_logged_file_extent()
4995 if (eb->lock_owner == current->pid) { in check_eb_lock_owner()
4996 btrfs_err_rl(eb->fs_info, in check_eb_lock_owner()
4998 eb->start, btrfs_header_owner(eb), current->pid); in check_eb_lock_owner()
5012 u64 bytenr, int level, u64 owner, in btrfs_init_new_buffer() argument
5015 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_init_new_buffer()
5019 buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level); in btrfs_init_new_buffer()
5025 return ERR_PTR(-EUCLEAN); in btrfs_init_new_buffer()
5039 !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) in btrfs_init_new_buffer()
5043 btrfs_set_header_generation(buf, trans->transid); in btrfs_init_new_buffer()
5048 * set to the appropriate level and owner. in btrfs_init_new_buffer()
5050 btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level); in btrfs_init_new_buffer()
5054 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); in btrfs_init_new_buffer()
5055 clear_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &buf->bflags); in btrfs_init_new_buffer()
5060 btrfs_set_header_level(buf, level); in btrfs_init_new_buffer()
5061 btrfs_set_header_bytenr(buf, buf->start); in btrfs_init_new_buffer()
5062 btrfs_set_header_generation(buf, trans->transid); in btrfs_init_new_buffer()
5065 write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid); in btrfs_init_new_buffer()
5066 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid); in btrfs_init_new_buffer()
5068 buf->log_index = root->log_transid % 2; in btrfs_init_new_buffer()
5073 if (buf->log_index == 0) in btrfs_init_new_buffer()
5074 set_extent_bit(&root->dirty_log_pages, buf->start, in btrfs_init_new_buffer()
5075 buf->start + buf->len - 1, in btrfs_init_new_buffer()
5078 set_extent_bit(&root->dirty_log_pages, buf->start, in btrfs_init_new_buffer()
5079 buf->start + buf->len - 1, in btrfs_init_new_buffer()
5082 buf->log_index = -1; in btrfs_init_new_buffer()
5083 set_extent_bit(&trans->transaction->dirty_pages, buf->start, in btrfs_init_new_buffer()
5084 buf->start + buf->len - 1, EXTENT_DIRTY, NULL); in btrfs_init_new_buffer()
5098 int level, u64 hint, in btrfs_alloc_tree_block() argument
5103 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_alloc_tree_block()
5109 u32 blocksize = fs_info->nodesize; in btrfs_alloc_tree_block()
5115 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, in btrfs_alloc_tree_block()
5116 level, root_objectid, nest); in btrfs_alloc_tree_block()
5118 root->alloc_bytenr += blocksize; in btrfs_alloc_tree_block()
5132 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level, in btrfs_alloc_tree_block()
5162 ret = -ENOMEM; in btrfs_alloc_tree_block()
5166 memcpy(&extent_op->key, key, sizeof(extent_op->key)); in btrfs_alloc_tree_block()
5168 memset(&extent_op->key, 0, sizeof(extent_op->key)); in btrfs_alloc_tree_block()
5169 extent_op->flags_to_set = flags; in btrfs_alloc_tree_block()
5170 extent_op->update_key = (skinny_metadata ? false : true); in btrfs_alloc_tree_block()
5171 extent_op->update_flags = (flags != 0); in btrfs_alloc_tree_block()
5176 btrfs_init_tree_ref(&generic_ref, level, btrfs_root_id(root), false); in btrfs_alloc_tree_block()
5203 int level; member
5236 * @refs: the number of refs for wc->level - 1
5237 * @flags: the flags for wc->level - 1
5241 * wc->level should be read and walked into, or if we can simply delete our
5254 int level = wc->level; in visit_node_for_delete() local
5256 ASSERT(level > 0); in visit_node_for_delete()
5257 ASSERT(wc->refs[level - 1] > 0); in visit_node_for_delete()
5263 if (wc->stage == UPDATE_BACKREF) { in visit_node_for_delete()
5264 if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) in visit_node_for_delete()
5273 if (wc->refs[level - 1] == 1) in visit_node_for_delete()
5280 if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) in visit_node_for_delete()
5288 if (!wc->update_ref || generation <= btrfs_root_origin_generation(root)) in visit_node_for_delete()
5296 if (btrfs_comp_cpu_keys(&key, &wc->update_progress) < 0) in visit_node_for_delete()
5308 struct btrfs_fs_info *fs_info = root->fs_info; in reada_walk_down()
5319 if (path->slots[wc->level] < wc->reada_slot) { in reada_walk_down()
5320 wc->reada_count = wc->reada_count * 2 / 3; in reada_walk_down()
5321 wc->reada_count = max(wc->reada_count, 2); in reada_walk_down()
5323 wc->reada_count = wc->reada_count * 3 / 2; in reada_walk_down()
5324 wc->reada_count = min_t(int, wc->reada_count, in reada_walk_down()
5328 eb = path->nodes[wc->level]; in reada_walk_down()
5331 for (slot = path->slots[wc->level]; slot < nritems; slot++) { in reada_walk_down()
5332 if (nread >= wc->reada_count) in reada_walk_down()
5339 if (slot == path->slots[wc->level]) in reada_walk_down()
5342 if (wc->stage == UPDATE_BACKREF && in reada_walk_down()
5348 wc->level - 1, 1, &refs, in reada_walk_down()
5370 wc->reada_slot = slot; in reada_walk_down()
5376 * when wc->stage == UPDATE_BACKREF, this function updates
5386 struct btrfs_fs_info *fs_info = root->fs_info; in walk_down_proc()
5387 int level = wc->level; in walk_down_proc() local
5388 struct extent_buffer *eb = path->nodes[level]; in walk_down_proc()
5392 if (wc->stage == UPDATE_BACKREF && btrfs_header_owner(eb) != btrfs_root_id(root)) in walk_down_proc()
5399 if (wc->lookup_info && in walk_down_proc()
5400 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || in walk_down_proc()
5401 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { in walk_down_proc()
5402 ASSERT(path->locks[level]); in walk_down_proc()
5404 eb->start, level, 1, in walk_down_proc()
5405 &wc->refs[level], in walk_down_proc()
5406 &wc->flags[level], in walk_down_proc()
5410 if (unlikely(wc->refs[level] == 0)) { in walk_down_proc()
5412 eb->start); in walk_down_proc()
5413 return -EUCLEAN; in walk_down_proc()
5417 if (wc->stage == DROP_REFERENCE) { in walk_down_proc()
5418 if (wc->refs[level] > 1) in walk_down_proc()
5421 if (path->locks[level] && !wc->keep_locks) { in walk_down_proc()
5422 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_down_proc()
5423 path->locks[level] = 0; in walk_down_proc()
5428 /* wc->stage == UPDATE_BACKREF */ in walk_down_proc()
5429 if (!(wc->flags[level] & flag)) { in walk_down_proc()
5430 ASSERT(path->locks[level]); in walk_down_proc()
5446 wc->flags[level] |= flag; in walk_down_proc()
5453 if (path->locks[level] && level > 0) { in walk_down_proc()
5454 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_down_proc()
5455 path->locks[level] = 0; in walk_down_proc()
5466 int level) in check_ref_exists() argument
5471 struct btrfs_extent_inline_ref *iref; in check_ref_exists() local
5477 return -ENOMEM; in check_ref_exists()
5479 ret = lookup_extent_backref(trans, path, &iref, bytenr, in check_ref_exists()
5480 root->fs_info->nodesize, parent, in check_ref_exists()
5481 btrfs_root_id(root), level, 0); in check_ref_exists()
5482 if (ret != -ENOENT) { in check_ref_exists()
5485 * return the error if it's not -ENOENT; in check_ref_exists()
5496 delayed_refs = &trans->transaction->delayed_refs; in check_ref_exists()
5497 spin_lock(&delayed_refs->lock); in check_ref_exists()
5498 head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr); in check_ref_exists()
5501 if (!mutex_trylock(&head->mutex)) { in check_ref_exists()
5507 refcount_inc(&head->refs); in check_ref_exists()
5508 spin_unlock(&delayed_refs->lock); in check_ref_exists()
5512 mutex_lock(&head->mutex); in check_ref_exists()
5513 mutex_unlock(&head->mutex); in check_ref_exists()
5518 exists = btrfs_find_delayed_tree_ref(head, root->root_key.objectid, parent); in check_ref_exists()
5519 mutex_unlock(&head->mutex); in check_ref_exists()
5521 spin_unlock(&delayed_refs->lock); in check_ref_exists()
5528 * block we need to drop the lock, read it off the disk, re-lock it and
5539 int level = wc->level; in check_next_block_uptodate() local
5544 generation = btrfs_node_ptr_generation(path->nodes[level], path->slots[level]); in check_next_block_uptodate()
5549 check.level = level - 1; in check_next_block_uptodate()
5553 btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, path->slots[level]); in check_next_block_uptodate()
5556 if (level == 1) in check_next_block_uptodate()
5564 wc->lookup_info = 1; in check_next_block_uptodate()
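Before the walk descends, the freshly read child has to match what the parent recorded about it. A simplified restatement of that gate; the kernel compares a full btrfs_key (and the read path also verifies checksums), so the single u64 first key here is only a stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct expected { uint64_t transid; int level; uint64_t first_key; };

/* The child must carry the generation stored in the parent's pointer, sit
 * exactly one level below, and begin with the key the parent points at. */
static bool child_usable(const struct expected *want,
                         uint64_t hdr_gen, int hdr_level, uint64_t first_key)
{
        return hdr_gen == want->transid &&
               hdr_level == want->level &&
               first_key == want->first_key;
}

int main(void)
{
        struct expected want = { .transid = 12345, .level = 0, .first_key = 257 };

        printf("%s\n", child_usable(&want, 12345, 0, 257) ? "ok" : "stale");
        printf("%s\n", child_usable(&want, 12000, 0, 257) ? "ok" : "stale");
        return 0;
}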
5569 * If we determine that we don't have to visit wc->level - 1 then we need to
5584 .bytenr = next->start, in maybe_drop_reference()
5585 .num_bytes = root->fs_info->nodesize, in maybe_drop_reference()
5589 int level = wc->level; in maybe_drop_reference() local
5593 if (wc->stage == UPDATE_BACKREF) in maybe_drop_reference()
5596 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { in maybe_drop_reference()
5597 ref.parent = path->nodes[level]->start; in maybe_drop_reference()
5599 ASSERT(btrfs_root_id(root) == btrfs_header_owner(path->nodes[level])); in maybe_drop_reference()
5600 if (btrfs_root_id(root) != btrfs_header_owner(path->nodes[level])) { in maybe_drop_reference()
5601 btrfs_err(root->fs_info, "mismatched block owner"); in maybe_drop_reference()
5602 return -EIO; in maybe_drop_reference()
5610 * ->restarted flag. in maybe_drop_reference()
5612 if (wc->restarted) { in maybe_drop_reference()
5613 ret = check_ref_exists(trans, root, next->start, ref.parent, in maybe_drop_reference()
5614 level - 1); in maybe_drop_reference()
5618 wc->restarted = 0; in maybe_drop_reference()
5627 wc->refs[level - 1] > 1) { in maybe_drop_reference()
5628 u64 generation = btrfs_node_ptr_generation(path->nodes[level], in maybe_drop_reference()
5629 path->slots[level]); in maybe_drop_reference()
5631 ret = btrfs_qgroup_trace_subtree(trans, next, generation, level - 1); in maybe_drop_reference()
5633 btrfs_err_rl(root->fs_info, in maybe_drop_reference()
5645 wc->drop_level = level; in maybe_drop_reference()
5646 find_next_key(path, level, &wc->drop_progress); in maybe_drop_reference()
5648 btrfs_init_tree_ref(&ref, level - 1, 0, false); in maybe_drop_reference()
5655 * when wc->stage == DROP_REFERENCE, this function checks
5658 * rooted at the block, this function changes wc->stage to
5670 struct btrfs_fs_info *fs_info = root->fs_info; in do_walk_down()
5675 int level = wc->level; in do_walk_down() local
5678 generation = btrfs_node_ptr_generation(path->nodes[level], in do_walk_down()
5679 path->slots[level]); in do_walk_down()
5681 * if the lower level block was created before the snapshot in do_walk_down()
5685 if (wc->stage == UPDATE_BACKREF && in do_walk_down()
5687 wc->lookup_info = 1; in do_walk_down()
5691 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); in do_walk_down()
5694 level - 1); in do_walk_down()
5700 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1, in do_walk_down()
5701 &wc->refs[level - 1], in do_walk_down()
5702 &wc->flags[level - 1], in do_walk_down()
5707 if (unlikely(wc->refs[level - 1] == 0)) { in do_walk_down()
5710 ret = -EUCLEAN; in do_walk_down()
5713 wc->lookup_info = 0; in do_walk_down()
5716 if (!visit_node_for_delete(root, wc, path->nodes[level], in do_walk_down()
5717 wc->flags[level - 1], path->slots[level])) in do_walk_down()
5725 if (wc->stage == DROP_REFERENCE && wc->refs[level - 1] > 1) { in do_walk_down()
5726 wc->stage = UPDATE_BACKREF; in do_walk_down()
5727 wc->shared_level = level - 1; in do_walk_down()
5734 level--; in do_walk_down()
5735 ASSERT(level == btrfs_header_level(next)); in do_walk_down()
5736 if (level != btrfs_header_level(next)) { in do_walk_down()
5737 btrfs_err(root->fs_info, "mismatched level"); in do_walk_down()
5738 ret = -EIO; in do_walk_down()
5741 path->nodes[level] = next; in do_walk_down()
5742 path->slots[level] = 0; in do_walk_down()
5743 path->locks[level] = BTRFS_WRITE_LOCK; in do_walk_down()
5744 wc->level = level; in do_walk_down()
5745 if (wc->level == 1) in do_walk_down()
5746 wc->reada_slot = 0; in do_walk_down()
5752 wc->refs[level - 1] = 0; in do_walk_down()
5753 wc->flags[level - 1] = 0; in do_walk_down()
5754 wc->lookup_info = 1; in do_walk_down()
5767 * when wc->stage == DROP_REFERENCE, this function drops
5770 * when wc->stage == UPDATE_BACKREF, this function changes
5771 * wc->stage back to DROP_REFERENCE if we changed wc->stage
5781 struct btrfs_fs_info *fs_info = root->fs_info; in walk_up_proc()
5783 int level = wc->level; in walk_up_proc() local
5784 struct extent_buffer *eb = path->nodes[level]; in walk_up_proc()
5787 if (wc->stage == UPDATE_BACKREF) { in walk_up_proc()
5788 ASSERT(wc->shared_level >= level); in walk_up_proc()
5789 if (level < wc->shared_level) in walk_up_proc()
5792 ret = find_next_key(path, level + 1, &wc->update_progress); in walk_up_proc()
5794 wc->update_ref = 0; in walk_up_proc()
5796 wc->stage = DROP_REFERENCE; in walk_up_proc()
5797 wc->shared_level = -1; in walk_up_proc()
5798 path->slots[level] = 0; in walk_up_proc()
5805 if (!path->locks[level]) { in walk_up_proc()
5806 ASSERT(level > 0); in walk_up_proc()
5808 path->locks[level] = BTRFS_WRITE_LOCK; in walk_up_proc()
5811 eb->start, level, 1, in walk_up_proc()
5812 &wc->refs[level], in walk_up_proc()
5813 &wc->flags[level], in walk_up_proc()
5816 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_up_proc()
5817 path->locks[level] = 0; in walk_up_proc()
5820 if (unlikely(wc->refs[level] == 0)) { in walk_up_proc()
5821 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_up_proc()
5823 eb->start); in walk_up_proc()
5824 return -EUCLEAN; in walk_up_proc()
5826 if (wc->refs[level] == 1) { in walk_up_proc()
5827 btrfs_tree_unlock_rw(eb, path->locks[level]); in walk_up_proc()
5828 path->locks[level] = 0; in walk_up_proc()
5834 /* wc->stage == DROP_REFERENCE */ in walk_up_proc()
5835 ASSERT(path->locks[level] || wc->refs[level] == 1); in walk_up_proc()
5837 if (wc->refs[level] == 1) { in walk_up_proc()
5838 if (level == 0) { in walk_up_proc()
5839 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) in walk_up_proc()
5857 if (!path->locks[level]) { in walk_up_proc()
5859 path->locks[level] = BTRFS_WRITE_LOCK; in walk_up_proc()
5864 if (eb == root->node) { in walk_up_proc()
5865 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) in walk_up_proc()
5866 parent = eb->start; in walk_up_proc()
5870 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) in walk_up_proc()
5871 parent = path->nodes[level + 1]->start; in walk_up_proc()
5873 btrfs_header_owner(path->nodes[level + 1])) in walk_up_proc()
5878 wc->refs[level] == 1); in walk_up_proc()
5882 wc->refs[level] = 0; in walk_up_proc()
5883 wc->flags[level] = 0; in walk_up_proc()
5889 return -EUCLEAN; in walk_up_proc()
5896 * wc->level. At this point path->nodes[wc->level] should be populated and
5905 * our current path->nodes[wc->level]. For DROP_REFERENCE that means dropping
5918 int level = wc->level; in walk_down_tree() local
5921 wc->lookup_info = 1; in walk_down_tree()
5922 while (level >= 0) { in walk_down_tree()
5927 if (level == 0) in walk_down_tree()
5930 if (path->slots[level] >= in walk_down_tree()
5931 btrfs_header_nritems(path->nodes[level])) in walk_down_tree()
5936 path->slots[level]++; in walk_down_tree()
5940 level = wc->level; in walk_down_tree()
5951 * UPDATE_BACKREF. If we wc->level is currently less than our wc->shared_level
5954 * wc->shared_level. Once we're at or above our wc->shared_level we can switch
5958 * If we're level 0 then we need to btrfs_dec_ref() on all of the data extents
5967 int level = wc->level; in walk_up_tree() local
5970 path->slots[level] = btrfs_header_nritems(path->nodes[level]); in walk_up_tree()
5971 while (level < max_level && path->nodes[level]) { in walk_up_tree()
5972 wc->level = level; in walk_up_tree()
5973 if (path->slots[level] + 1 < in walk_up_tree()
5974 btrfs_header_nritems(path->nodes[level])) { in walk_up_tree()
5975 path->slots[level]++; in walk_up_tree()
5984 if (path->locks[level]) { in walk_up_tree()
5985 btrfs_tree_unlock_rw(path->nodes[level], in walk_up_tree()
5986 path->locks[level]); in walk_up_tree()
5987 path->locks[level] = 0; in walk_up_tree()
5989 free_extent_buffer(path->nodes[level]); in walk_up_tree()
5990 path->nodes[level] = NULL; in walk_up_tree()
5991 level++; in walk_up_tree()
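walk_down_tree() and walk_up_tree() together form an iterative depth-first traversal driven by per-level slot cursors, the same shape as path->nodes[]/path->slots[]. The cursor mechanics in isolation, with the ref-count checks, locking and freeing stripped out; a user-space model over a fixed fan-out tree:

#include <stdio.h>

#define MAX_LEVEL   3           /* root at level 2, leaves at level 0 */
#define NR_CHILDREN 2

int main(void)
{
        int slots[MAX_LEVEL] = { 0 }; /* slots[L] = child chosen at level L */
        int level = MAX_LEVEL - 1;    /* start at the root */

        while (level < MAX_LEVEL) {
                /* walk_down: descend along the slot cursors to a leaf */
                while (level > 0) {
                        level--;
                        if (level > 0)
                                slots[level] = 0;
                }
                /* with MAX_LEVEL == 3 a leaf's path is (slots[2], slots[1]) */
                printf("visit leaf (%d,%d)\n", slots[2], slots[1]);

                /* walk_up: climb past exhausted nodes, then advance a slot */
                level = 1;
                while (level < MAX_LEVEL && slots[level] + 1 >= NR_CHILDREN) {
                        slots[level] = 0;
                        level++;
                }
                if (level == MAX_LEVEL)
                        break;  /* root exhausted: the tree is fully walked */
                slots[level]++; /* step to the next sibling and dive again */
        }
        return 0;
}

Running it visits the leaves in order: (0,0), (0,1), (1,0), (1,1). In the kernel the "visit" step is where references are dropped or converted, and climbing past an exhausted node is where that node itself is freed.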
6005 * also make sure backrefs for the shared block and all lower level
6008 * If called with for_reloc == 0, may exit early with -EAGAIN
6013 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_drop_snapshot()
6016 struct btrfs_root *tree_root = fs_info->tree_root; in btrfs_drop_snapshot()
6017 struct btrfs_root_item *root_item = &root->root_item; in btrfs_drop_snapshot()
6022 int level; in btrfs_drop_snapshot() local
6030 ret = -ENOMEM; in btrfs_drop_snapshot()
6037 ret = -ENOMEM; in btrfs_drop_snapshot()
6066 set_bit(BTRFS_ROOT_DELETING, &root->state); in btrfs_drop_snapshot()
6067 unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state); in btrfs_drop_snapshot()
6069 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { in btrfs_drop_snapshot()
6070 level = btrfs_header_level(root->node); in btrfs_drop_snapshot()
6071 path->nodes[level] = btrfs_lock_root_node(root); in btrfs_drop_snapshot()
6072 path->slots[level] = 0; in btrfs_drop_snapshot()
6073 path->locks[level] = BTRFS_WRITE_LOCK; in btrfs_drop_snapshot()
6074 memset(&wc->update_progress, 0, in btrfs_drop_snapshot()
6075 sizeof(wc->update_progress)); in btrfs_drop_snapshot()
6077 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); in btrfs_drop_snapshot()
6078 memcpy(&wc->update_progress, &key, in btrfs_drop_snapshot()
6079 sizeof(wc->update_progress)); in btrfs_drop_snapshot()
6081 level = btrfs_root_drop_level(root_item); in btrfs_drop_snapshot()
6082 BUG_ON(level == 0); in btrfs_drop_snapshot()
6083 path->lowest_level = level; in btrfs_drop_snapshot()
6085 path->lowest_level = 0; in btrfs_drop_snapshot()
6098 level = btrfs_header_level(root->node); in btrfs_drop_snapshot()
6100 btrfs_tree_lock(path->nodes[level]); in btrfs_drop_snapshot()
6101 path->locks[level] = BTRFS_WRITE_LOCK; in btrfs_drop_snapshot()
6108 path->nodes[level]->start, in btrfs_drop_snapshot()
6109 level, 1, &wc->refs[level], in btrfs_drop_snapshot()
6110 &wc->flags[level], NULL); in btrfs_drop_snapshot()
6114 BUG_ON(wc->refs[level] == 0); in btrfs_drop_snapshot()
6116 if (level == btrfs_root_drop_level(root_item)) in btrfs_drop_snapshot()
6119 btrfs_tree_unlock(path->nodes[level]); in btrfs_drop_snapshot()
6120 path->locks[level] = 0; in btrfs_drop_snapshot()
6121 WARN_ON(wc->refs[level] != 1); in btrfs_drop_snapshot()
6122 level--; in btrfs_drop_snapshot()
6126 wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state); in btrfs_drop_snapshot()
6127 wc->level = level; in btrfs_drop_snapshot()
6128 wc->shared_level = -1; in btrfs_drop_snapshot()
6129 wc->stage = DROP_REFERENCE; in btrfs_drop_snapshot()
6130 wc->update_ref = update_ref; in btrfs_drop_snapshot()
6131 wc->keep_locks = 0; in btrfs_drop_snapshot()
6132 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); in btrfs_drop_snapshot()
6149 BUG_ON(wc->stage != DROP_REFERENCE); in btrfs_drop_snapshot()
6154 if (wc->stage == DROP_REFERENCE) { in btrfs_drop_snapshot()
6155 wc->drop_level = wc->level; in btrfs_drop_snapshot()
6156 btrfs_node_key_to_cpu(path->nodes[wc->drop_level], in btrfs_drop_snapshot()
6157 &wc->drop_progress, in btrfs_drop_snapshot()
6158 path->slots[wc->drop_level]); in btrfs_drop_snapshot()
6160 btrfs_cpu_key_to_disk(&root_item->drop_progress, in btrfs_drop_snapshot()
6161 &wc->drop_progress); in btrfs_drop_snapshot()
6162 btrfs_set_root_drop_level(root_item, wc->drop_level); in btrfs_drop_snapshot()
6164 BUG_ON(wc->level == 0); in btrfs_drop_snapshot()
6168 &root->root_key, in btrfs_drop_snapshot()
6176 btrfs_set_last_root_drop_gen(fs_info, trans->transid); in btrfs_drop_snapshot()
6182 ret = -EAGAIN; in btrfs_drop_snapshot()
6205 ret = btrfs_del_root(trans, &root->root_key); in btrfs_drop_snapshot()
6212 ret = btrfs_find_root(tree_root, &root->root_key, path, in btrfs_drop_snapshot()
6223 * The most common failure here is just -ENOENT. in btrfs_drop_snapshot()
6237 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) in btrfs_drop_snapshot()
6244 btrfs_set_last_root_drop_gen(fs_info, trans->transid); in btrfs_drop_snapshot()
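What makes the drop restartable is the (drop_progress, drop_level) cursor persisted in the root item between transactions, as the excerpt shows: a zero objectid means the walk starts from the root, anything else re-seeks to the saved key. A minimal model of that save/resume contract; the structs are illustrative stand-ins and key type 108 is only an example value:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cursor { uint64_t objectid; uint8_t type; uint64_t offset; };

struct root_item_model {
        struct cursor drop_progress;
        uint8_t drop_level;
};

static void save_progress(struct root_item_model *ri,
                          const struct cursor *pos, int level)
{
        ri->drop_progress = *pos;
        ri->drop_level = (uint8_t)level;
        /* the kernel then writes the root item inside the transaction */
}

static int resume_needed(const struct root_item_model *ri)
{
        return ri->drop_progress.objectid != 0; /* 0 means "start fresh" */
}

int main(void)
{
        struct root_item_model ri;

        memset(&ri, 0, sizeof(ri));
        printf("fresh drop? %s\n", resume_needed(&ri) ? "no" : "yes");

        struct cursor pos = { .objectid = 257, .type = 108, .offset = 4096 };
        save_progress(&ri, &pos, 1);
        printf("fresh drop? %s\n", resume_needed(&ri) ? "no" : "yes");
        return 0;
}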
6289 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_drop_subtree()
6292 int level; in btrfs_drop_subtree() local
6300 return -ENOMEM; in btrfs_drop_subtree()
6305 return -ENOMEM; in btrfs_drop_subtree()
6310 atomic_inc(&parent->refs); in btrfs_drop_subtree()
6311 path->nodes[parent_level] = parent; in btrfs_drop_subtree()
6312 path->slots[parent_level] = btrfs_header_nritems(parent); in btrfs_drop_subtree()
6315 level = btrfs_header_level(node); in btrfs_drop_subtree()
6316 path->nodes[level] = node; in btrfs_drop_subtree()
6317 path->slots[level] = 0; in btrfs_drop_subtree()
6318 path->locks[level] = BTRFS_WRITE_LOCK; in btrfs_drop_subtree()
6320 wc->refs[parent_level] = 1; in btrfs_drop_subtree()
6321 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; in btrfs_drop_subtree()
6322 wc->level = level; in btrfs_drop_subtree()
6323 wc->shared_level = -1; in btrfs_drop_subtree()
6324 wc->stage = DROP_REFERENCE; in btrfs_drop_subtree()
6325 wc->update_ref = 0; in btrfs_drop_subtree()
6326 wc->keep_locks = 1; in btrfs_drop_subtree()
6327 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); in btrfs_drop_subtree()
6384 if (!bdev_max_discard_sectors(device->bdev)) in btrfs_trim_free_extents()
6388 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) in btrfs_trim_free_extents()
6392 if (device->total_bytes <= device->bytes_used) in btrfs_trim_free_extents()
6398 struct btrfs_fs_info *fs_info = device->fs_info; in btrfs_trim_free_extents()
6401 ret = mutex_lock_interruptible(&fs_info->chunk_mutex); in btrfs_trim_free_extents()
6405 find_first_clear_extent_bit(&device->alloc_state, start, in btrfs_trim_free_extents()
6410 if (start > device->total_bytes) { in btrfs_trim_free_extents()
6414 start, end - start + 1, in btrfs_trim_free_extents()
6416 device->total_bytes); in btrfs_trim_free_extents()
6417 mutex_unlock(&fs_info->chunk_mutex); in btrfs_trim_free_extents()
6427 * end of the device it will set end to -1; in this case it's up in btrfs_trim_free_extents()
6430 end = min(end, device->total_bytes - 1); in btrfs_trim_free_extents()
6432 len = end - start + 1; in btrfs_trim_free_extents()
6436 mutex_unlock(&fs_info->chunk_mutex); in btrfs_trim_free_extents()
6441 ret = btrfs_issue_discard(device->bdev, start, len, in btrfs_trim_free_extents()
6444 set_extent_bit(&device->alloc_state, start, in btrfs_trim_free_extents()
6445 start + bytes - 1, CHUNK_TRIMMED, NULL); in btrfs_trim_free_extents()
6446 mutex_unlock(&fs_info->chunk_mutex); in btrfs_trim_free_extents()
6455 ret = -ERESTARTSYS; in btrfs_trim_free_extents()
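btrfs_trim_free_extents() repeatedly asks the device's alloc_state tree for the next clear range, clamps it to the device size, discards it, and marks it CHUNK_TRIMMED so it is skipped on the next pass. A toy version of that scan using one flag per 1 MiB unit instead of the kernel's byte ranges:

#include <stdint.h>
#include <stdio.h>

#define NR_UNITS  16
#define UNIT_SIZE (1u << 20)

/* 1 = allocated (skip), 0 = unallocated (candidate for discard). */
static const unsigned char allocated[NR_UNITS] = {
        1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1,
};

int main(void)
{
        uint64_t trimmed = 0;

        for (int i = 0; i < NR_UNITS; ) {
                if (allocated[i]) {
                        i++;
                        continue;
                }
                int start = i;

                while (i < NR_UNITS && !allocated[i])
                        i++;
                /* the kernel issues a discard and sets CHUNK_TRIMMED here */
                printf("discard [%u MiB, %u MiB)\n", start, i);
                trimmed += (uint64_t)(i - start) * UNIT_SIZE;
        }
        printf("trimmed %llu bytes\n", (unsigned long long)trimmed);
        return 0;
}

The real loop also drops chunk_mutex between ranges and honors fatal signals, returning -ERESTARTSYS as shown above so a long trim stays interruptible.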
6476 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; in btrfs_trim_fs()
6490 if (range->start == U64_MAX) in btrfs_trim_fs()
6491 return -EINVAL; in btrfs_trim_fs()
6494 * Check range overflow if range->len is set. in btrfs_trim_fs()
6495 * The default range->len is U64_MAX. in btrfs_trim_fs()
6497 if (range->len != U64_MAX && in btrfs_trim_fs()
6498 check_add_overflow(range->start, range->len, &range_end)) in btrfs_trim_fs()
6499 return -EINVAL; in btrfs_trim_fs()
6501 cache = btrfs_lookup_first_block_group(fs_info, range->start); in btrfs_trim_fs()
6503 if (cache->start >= range_end) { in btrfs_trim_fs()
6508 start = max(range->start, cache->start); in btrfs_trim_fs()
6509 end = min(range_end, cache->start + cache->length); in btrfs_trim_fs()
6511 if (end - start >= range->minlen) { in btrfs_trim_fs()
6524 range->minlen); in btrfs_trim_fs()
6540 mutex_lock(&fs_devices->device_list_mutex); in btrfs_trim_fs()
6541 list_for_each_entry(device, &fs_devices->devices, dev_list) { in btrfs_trim_fs()
6542 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) in btrfs_trim_fs()
6554 mutex_unlock(&fs_devices->device_list_mutex); in btrfs_trim_fs()
6560 range->len = trimmed; in btrfs_trim_fs()
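The range validation at the top of btrfs_trim_fs(), restated in user space: check_add_overflow() is modeled with the GCC/Clang __builtin_add_overflow() builtin, and len == U64_MAX keeps the default trim-to-end behavior.

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when the (start, len) trim range is usable, 0 for -EINVAL. */
static int trim_range_valid(uint64_t start, uint64_t len, uint64_t *end)
{
        if (start == UINT64_MAX)
                return 0;                      /* -EINVAL in the kernel */
        if (len != UINT64_MAX &&
            __builtin_add_overflow(start, len, end))
                return 0;                      /* start + len wrapped */
        if (len == UINT64_MAX)
                *end = UINT64_MAX;             /* default: trim to the end */
        return 1;
}

int main(void)
{
        uint64_t end;

        printf("%d\n", trim_range_valid(1ull << 63, 1ull << 63, &end)); /* 0 */
        printf("%d\n", trim_range_valid(4096, UINT64_MAX, &end));       /* 1 */
        return 0;
}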