Lines Matching +full:reference +full:- +full:div +full:- +full:factor

1 // SPDX-License-Identifier: GPL-2.0
7 #include "block-group.h"
8 #include "space-info.h"
9 #include "disk-io.h"
10 #include "free-space-cache.h"
11 #include "free-space-tree.h"
14 #include "ref-verify.h"
16 #include "tree-log.h"
17 #include "delalloc-space.h"
23 #include "extent-tree.h"
28 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_should_fragment_free_space()
31 block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || in btrfs_should_fragment_free_space()
33 block_group->flags & BTRFS_BLOCK_GROUP_DATA); in btrfs_should_fragment_free_space()
45 const struct btrfs_balance_control *bctl = fs_info->balance_ctl; in get_restripe_target()
52 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
53 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; in get_restripe_target()
55 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
56 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; in get_restripe_target()
58 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
59 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; in get_restripe_target()
74 u64 num_devices = fs_info->fs_devices->rw_devices; in btrfs_reduce_alloc_profile()
83 spin_lock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
86 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
89 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
98 /* Select the highest-redundancy RAID level. */ in btrfs_reduce_alloc_profile()
128 seq = read_seqbegin(&fs_info->profiles_lock); in btrfs_get_alloc_profile()
131 flags |= fs_info->avail_data_alloc_bits; in btrfs_get_alloc_profile()
133 flags |= fs_info->avail_system_alloc_bits; in btrfs_get_alloc_profile()
135 flags |= fs_info->avail_metadata_alloc_bits; in btrfs_get_alloc_profile()
136 } while (read_seqretry(&fs_info->profiles_lock, seq)); in btrfs_get_alloc_profile()
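The do/while above is the standard Linux seqlock reader pattern: the profile bits are read locklessly and the read retries if a writer raced with it. A minimal sketch of both sides (variable names assumed; the writer side matches set_avail_alloc_bits()/clear_avail_alloc_bits() further down in these results):

        do {
                seq = read_seqbegin(&fs_info->profiles_lock);
                flags = orig_flags | fs_info->avail_data_alloc_bits;  /* lockless read */
        } while (read_seqretry(&fs_info->profiles_lock, seq));        /* retry on writer race */

        write_seqlock(&fs_info->profiles_lock);                       /* writer side */
        fs_info->avail_data_alloc_bits |= extra_flags;
        write_sequnlock(&fs_info->profiles_lock);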
143 refcount_inc(&cache->refs); in btrfs_get_block_group()
148 if (refcount_dec_and_test(&cache->refs)) { in btrfs_put_block_group()
149 WARN_ON(cache->pinned > 0); in btrfs_put_block_group()
157 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) || in btrfs_put_block_group()
158 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info)) in btrfs_put_block_group()
159 WARN_ON(cache->reserved > 0); in btrfs_put_block_group()
166 if (WARN_ON(!list_empty(&cache->discard_list))) in btrfs_put_block_group()
167 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl, in btrfs_put_block_group()
170 kfree(cache->free_space_ctl); in btrfs_put_block_group()
171 btrfs_free_chunk_map(cache->physical_map); in btrfs_put_block_group()
187 ASSERT(block_group->length != 0); in btrfs_add_block_group_cache()
189 write_lock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
190 p = &info->block_group_cache_tree.rb_root.rb_node; in btrfs_add_block_group_cache()
195 if (block_group->start < cache->start) { in btrfs_add_block_group_cache()
196 p = &(*p)->rb_left; in btrfs_add_block_group_cache()
197 } else if (block_group->start > cache->start) { in btrfs_add_block_group_cache()
198 p = &(*p)->rb_right; in btrfs_add_block_group_cache()
201 write_unlock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
202 return -EEXIST; in btrfs_add_block_group_cache()
206 rb_link_node(&block_group->cache_node, parent, p); in btrfs_add_block_group_cache()
207 rb_insert_color_cached(&block_group->cache_node, in btrfs_add_block_group_cache()
208 &info->block_group_cache_tree, leftmost); in btrfs_add_block_group_cache()
210 write_unlock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
226 read_lock(&info->block_group_cache_lock); in block_group_cache_tree_search()
227 n = info->block_group_cache_tree.rb_root.rb_node; in block_group_cache_tree_search()
231 end = cache->start + cache->length - 1; in block_group_cache_tree_search()
232 start = cache->start; in block_group_cache_tree_search()
235 if (!contains && (!ret || start < ret->start)) in block_group_cache_tree_search()
237 n = n->rb_left; in block_group_cache_tree_search()
243 n = n->rb_right; in block_group_cache_tree_search()
251 read_unlock(&info->block_group_cache_lock); in block_group_cache_tree_search()
277 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_next_block_group()
280 read_lock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
283 if (RB_EMPTY_NODE(&cache->cache_node)) { in btrfs_next_block_group()
284 const u64 next_bytenr = cache->start + cache->length; in btrfs_next_block_group()
286 read_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
290 node = rb_next(&cache->cache_node); in btrfs_next_block_group()
297 read_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
309 * as the block group exists and it's currently not in read-only mode.
311 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
326 spin_lock(&bg->lock); in btrfs_inc_nocow_writers()
327 if (bg->ro) in btrfs_inc_nocow_writers()
330 atomic_inc(&bg->nocow_writers); in btrfs_inc_nocow_writers()
331 spin_unlock(&bg->lock); in btrfs_inc_nocow_writers()
351 * to use it, then it should get a reference on it before calling this function.
355 if (atomic_dec_and_test(&bg->nocow_writers)) in btrfs_dec_nocow_writers()
356 wake_up_var(&bg->nocow_writers); in btrfs_dec_nocow_writers()
364 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); in btrfs_wait_nocow_writers()
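Together these fragments form a pin/wake protocol: btrfs_inc_nocow_writers() pins the block group against read-only transitions, btrfs_dec_nocow_writers() drops the pin and wakes waiters, and btrfs_wait_nocow_writers() blocks until all pins are gone. A hypothetical caller sketch (control flow assumed, not from this file):

        bg = btrfs_inc_nocow_writers(fs_info, bytenr);
        if (bg) {
                /* ... submit the NOCOW write inside this block group ... */
                btrfs_dec_nocow_writers(bg);  /* wakes btrfs_wait_nocow_writers() */
        } else {
                /* block group is read-only (or gone): fall back to a COW write */
        }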
374 if (atomic_dec_and_test(&bg->reservations)) in btrfs_dec_block_group_reservations()
375 wake_up_var(&bg->reservations); in btrfs_dec_block_group_reservations()
381 struct btrfs_space_info *space_info = bg->space_info; in btrfs_wait_block_group_reservations()
383 ASSERT(bg->ro); in btrfs_wait_block_group_reservations()
385 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) in btrfs_wait_block_group_reservations()
398 down_write(&space_info->groups_sem); in btrfs_wait_block_group_reservations()
399 up_write(&space_info->groups_sem); in btrfs_wait_block_group_reservations()
401 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); in btrfs_wait_block_group_reservations()
409 spin_lock(&cache->lock); in btrfs_get_caching_control()
410 if (!cache->caching_ctl) { in btrfs_get_caching_control()
411 spin_unlock(&cache->lock); in btrfs_get_caching_control()
415 ctl = cache->caching_ctl; in btrfs_get_caching_control()
416 refcount_inc(&ctl->count); in btrfs_get_caching_control()
417 spin_unlock(&cache->lock); in btrfs_get_caching_control()
423 if (refcount_dec_and_test(&ctl->count)) in btrfs_put_caching_control()
437 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
456 progress = atomic_read(&caching_ctl->progress); in btrfs_wait_block_group_cache_progress()
458 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || in btrfs_wait_block_group_cache_progress()
459 (progress != atomic_read(&caching_ctl->progress) && in btrfs_wait_block_group_cache_progress()
460 (cache->free_space_ctl->free_space >= num_bytes))); in btrfs_wait_block_group_cache_progress()
468 wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); in btrfs_caching_ctl_wait_done()
469 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0; in btrfs_caching_ctl_wait_done()
479 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; in btrfs_wait_block_group_cache_done()
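As the comment at line 437 notes, callers must check for BTRFS_CACHE_ERROR; both wait helpers above translate that state into -EIO. A minimal caller sketch:

        ret = btrfs_wait_block_group_cache_done(cache);
        if (ret)        /* -EIO: cache->cached == BTRFS_CACHE_ERROR */
                return ret;
        /* free space accounting for this block group is now fully loaded */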
488 struct btrfs_fs_info *fs_info = block_group->fs_info; in fragment_free_space()
489 u64 start = block_group->start; in fragment_free_space()
490 u64 len = block_group->length; in fragment_free_space()
491 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? in fragment_free_space()
492 fs_info->nodesize : fs_info->sectorsize; in fragment_free_space()
501 len -= step; in fragment_free_space()
522 struct btrfs_fs_info *info = block_group->fs_info; in btrfs_add_new_free_space()
530 if (!find_first_extent_bit(&info->excluded_extents, start, in btrfs_add_new_free_space()
539 size = extent_start - start; in btrfs_add_new_free_space()
553 size = end - start; in btrfs_add_new_free_space()
573 * Pre-conditions on indices:
585 struct btrfs_fs_info *fs_info = block_group->fs_info; in sample_block_group_extent_item()
588 u64 search_end = block_group->start + block_group->length; in sample_block_group_extent_item()
596 lockdep_assert_held(&caching_ctl->mutex); in sample_block_group_extent_item()
597 lockdep_assert_held_read(&fs_info->commit_root_sem); in sample_block_group_extent_item()
601 return -ENOMEM; in sample_block_group_extent_item()
603 extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start, in sample_block_group_extent_item()
606 path->skip_locking = 1; in sample_block_group_extent_item()
607 path->search_commit_root = 1; in sample_block_group_extent_item()
608 path->reada = READA_FORWARD; in sample_block_group_extent_item()
610 search_offset = index * div_u64(block_group->length, max_index); in sample_block_group_extent_item()
611 search_key.objectid = block_group->start + search_offset; in sample_block_group_extent_item()
617 if (found_key->type == BTRFS_EXTENT_ITEM_KEY && in sample_block_group_extent_item()
618 found_key->objectid >= block_group->start && in sample_block_group_extent_item()
619 found_key->objectid + found_key->offset <= search_end) in sample_block_group_extent_item()
623 if (found_key->objectid >= search_end) { in sample_block_group_extent_item()
629 lockdep_assert_held(&caching_ctl->mutex); in sample_block_group_extent_item()
630 lockdep_assert_held_read(&fs_info->commit_root_sem); in sample_block_group_extent_item()
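The search offset at line 610 divides the block group into max_index equal slices and starts the index-th sample search at the beginning of its slice. A worked example with illustrative numbers:

        /*
         * search_offset = index * div_u64(block_group->length, max_index);
         * with length = 1 GiB and max_index = 4:
         *   index 0 -> 0, index 1 -> 256 MiB, index 2 -> 512 MiB, index 3 -> 768 MiB
         * so the sampled extent items are spread evenly across the block group.
         */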
672 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_block_group_size_class()
675 u64 min_size = block_group->length; in load_block_group_size_class()
682 lockdep_assert_held(&caching_ctl->mutex); in load_block_group_size_class()
683 lockdep_assert_held_read(&fs_info->commit_root_sem); in load_block_group_size_class()
694 spin_lock(&block_group->lock); in load_block_group_size_class()
695 block_group->size_class = size_class; in load_block_group_size_class()
696 spin_unlock(&block_group->lock); in load_block_group_size_class()
704 struct btrfs_block_group *block_group = caching_ctl->block_group; in load_extent_tree_free()
705 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_extent_tree_free()
718 return -ENOMEM; in load_extent_tree_free()
720 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); in load_extent_tree_free()
736 * root, since it's read-only in load_extent_tree_free()
738 path->skip_locking = 1; in load_extent_tree_free()
739 path->search_commit_root = 1; in load_extent_tree_free()
740 path->reada = READA_FORWARD; in load_extent_tree_free()
751 leaf = path->nodes[0]; in load_extent_tree_free()
756 last = (u64)-1; in load_extent_tree_free()
760 if (path->slots[0] < nritems) { in load_extent_tree_free()
761 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in load_extent_tree_free()
768 rwsem_is_contended(&fs_info->commit_root_sem)) { in load_extent_tree_free()
770 up_read(&fs_info->commit_root_sem); in load_extent_tree_free()
771 mutex_unlock(&caching_ctl->mutex); in load_extent_tree_free()
773 mutex_lock(&caching_ctl->mutex); in load_extent_tree_free()
774 down_read(&fs_info->commit_root_sem); in load_extent_tree_free()
783 leaf = path->nodes[0]; in load_extent_tree_free()
796 if (key.objectid < block_group->start) { in load_extent_tree_free()
797 path->slots[0]++; in load_extent_tree_free()
801 if (key.objectid >= block_group->start + block_group->length) in load_extent_tree_free()
815 fs_info->nodesize; in load_extent_tree_free()
822 atomic_inc(&caching_ctl->progress); in load_extent_tree_free()
823 wake_up(&caching_ctl->wait); in load_extent_tree_free()
827 path->slots[0]++; in load_extent_tree_free()
831 block_group->start + block_group->length, in load_extent_tree_free()
840 clear_extent_bits(&bg->fs_info->excluded_extents, bg->start, in btrfs_free_excluded_extents()
841 bg->start + bg->length - 1, EXTENT_UPTODATE); in btrfs_free_excluded_extents()
852 block_group = caching_ctl->block_group; in caching_thread()
853 fs_info = block_group->fs_info; in caching_thread()
855 mutex_lock(&caching_ctl->mutex); in caching_thread()
856 down_read(&fs_info->commit_root_sem); in caching_thread()
870 spin_lock(&block_group->lock); in caching_thread()
871 block_group->cached = BTRFS_CACHE_STARTED; in caching_thread()
872 spin_unlock(&block_group->lock); in caching_thread()
873 wake_up(&caching_ctl->wait); in caching_thread()
884 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) in caching_thread()
889 spin_lock(&block_group->lock); in caching_thread()
890 block_group->caching_ctl = NULL; in caching_thread()
891 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; in caching_thread()
892 spin_unlock(&block_group->lock); in caching_thread()
898 spin_lock(&block_group->space_info->lock); in caching_thread()
899 spin_lock(&block_group->lock); in caching_thread()
900 bytes_used = block_group->length - block_group->used; in caching_thread()
901 block_group->space_info->bytes_used += bytes_used >> 1; in caching_thread()
902 spin_unlock(&block_group->lock); in caching_thread()
903 spin_unlock(&block_group->space_info->lock); in caching_thread()
908 up_read(&fs_info->commit_root_sem); in caching_thread()
910 mutex_unlock(&caching_ctl->mutex); in caching_thread()
912 wake_up(&caching_ctl->wait); in caching_thread()
920 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_cache_block_group()
930 return -ENOMEM; in btrfs_cache_block_group()
932 INIT_LIST_HEAD(&caching_ctl->list); in btrfs_cache_block_group()
933 mutex_init(&caching_ctl->mutex); in btrfs_cache_block_group()
934 init_waitqueue_head(&caching_ctl->wait); in btrfs_cache_block_group()
935 caching_ctl->block_group = cache; in btrfs_cache_block_group()
936 refcount_set(&caching_ctl->count, 2); in btrfs_cache_block_group()
937 atomic_set(&caching_ctl->progress, 0); in btrfs_cache_block_group()
938 btrfs_init_work(&caching_ctl->work, caching_thread, NULL); in btrfs_cache_block_group()
940 spin_lock(&cache->lock); in btrfs_cache_block_group()
941 if (cache->cached != BTRFS_CACHE_NO) { in btrfs_cache_block_group()
944 caching_ctl = cache->caching_ctl; in btrfs_cache_block_group()
946 refcount_inc(&caching_ctl->count); in btrfs_cache_block_group()
947 spin_unlock(&cache->lock); in btrfs_cache_block_group()
950 WARN_ON(cache->caching_ctl); in btrfs_cache_block_group()
951 cache->caching_ctl = caching_ctl; in btrfs_cache_block_group()
952 cache->cached = BTRFS_CACHE_STARTED; in btrfs_cache_block_group()
953 spin_unlock(&cache->lock); in btrfs_cache_block_group()
955 write_lock(&fs_info->block_group_cache_lock); in btrfs_cache_block_group()
956 refcount_inc(&caching_ctl->count); in btrfs_cache_block_group()
957 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); in btrfs_cache_block_group()
958 write_unlock(&fs_info->block_group_cache_lock); in btrfs_cache_block_group()
962 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); in btrfs_cache_block_group()
977 write_seqlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
979 fs_info->avail_data_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
981 fs_info->avail_metadata_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
983 fs_info->avail_system_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
984 write_sequnlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
990 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
993 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
1003 struct list_head *head = &fs_info->space_info; in clear_incompat_bg_bits()
1007 down_read(&sinfo->groups_sem); in clear_incompat_bg_bits()
1008 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) in clear_incompat_bg_bits()
1010 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) in clear_incompat_bg_bits()
1012 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3])) in clear_incompat_bg_bits()
1014 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4])) in clear_incompat_bg_bits()
1016 up_read(&sinfo->groups_sem); in clear_incompat_bg_bits()
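The loop fragments above scan every space_info's per-profile block group lists; per the comment at lines 990-993, the incompat bits may only be dropped once no block group of the corresponding profiles exists anywhere on the filesystem. A sketch of the assumed overall shape (the found_* flags are hypothetical):

        list_for_each_entry(sinfo, &fs_info->space_info, list) {
                down_read(&sinfo->groups_sem);
                if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]) ||
                    !list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
                        found_raid56 = true;
                if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]) ||
                    !list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
                        found_raid1c34 = true;
                up_read(&sinfo->groups_sem);
        }
        if (!found_raid56)
                btrfs_clear_fs_incompat(fs_info, RAID56);
        if (!found_raid1c34)
                btrfs_clear_fs_incompat(fs_info, RAID1C34);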
1028 return fs_info->block_group_root; in btrfs_block_group_root()
1036 struct btrfs_fs_info *fs_info = trans->fs_info; in remove_block_group_item()
1042 key.objectid = block_group->start; in remove_block_group_item()
1044 key.offset = block_group->length; in remove_block_group_item()
1046 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in remove_block_group_item()
1048 ret = -ENOENT; in remove_block_group_item()
1059 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_remove_block_group()
1067 int factor; in btrfs_remove_block_group() local
1072 block_group = btrfs_lookup_block_group(fs_info, map->start); in btrfs_remove_block_group()
1074 return -ENOENT; in btrfs_remove_block_group()
1076 BUG_ON(!block_group->ro); in btrfs_remove_block_group()
1084 btrfs_free_ref_tree_range(fs_info, block_group->start, in btrfs_remove_block_group()
1085 block_group->length); in btrfs_remove_block_group()
1087 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_remove_block_group()
1088 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_remove_block_group()
1091 cluster = &fs_info->data_alloc_cluster; in btrfs_remove_block_group()
1092 spin_lock(&cluster->refill_lock); in btrfs_remove_block_group()
1094 spin_unlock(&cluster->refill_lock); in btrfs_remove_block_group()
1100 cluster = &fs_info->meta_alloc_cluster; in btrfs_remove_block_group()
1101 spin_lock(&cluster->refill_lock); in btrfs_remove_block_group()
1103 spin_unlock(&cluster->refill_lock); in btrfs_remove_block_group()
1110 ret = -ENOMEM; in btrfs_remove_block_group()
1120 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_remove_block_group()
1125 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1126 if (!list_empty(&block_group->io_list)) { in btrfs_remove_block_group()
1127 list_del_init(&block_group->io_list); in btrfs_remove_block_group()
1129 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); in btrfs_remove_block_group()
1131 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1134 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1137 if (!list_empty(&block_group->dirty_list)) { in btrfs_remove_block_group()
1138 list_del_init(&block_group->dirty_list); in btrfs_remove_block_group()
1142 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1143 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_remove_block_group()
1149 write_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1150 rb_erase_cached(&block_group->cache_node, in btrfs_remove_block_group()
1151 &fs_info->block_group_cache_tree); in btrfs_remove_block_group()
1152 RB_CLEAR_NODE(&block_group->cache_node); in btrfs_remove_block_group()
1157 write_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1159 down_write(&block_group->space_info->groups_sem); in btrfs_remove_block_group()
1164 list_del_init(&block_group->list); in btrfs_remove_block_group()
1165 if (list_empty(&block_group->space_info->block_groups[index])) { in btrfs_remove_block_group()
1166 kobj = block_group->space_info->block_group_kobjs[index]; in btrfs_remove_block_group()
1167 block_group->space_info->block_group_kobjs[index] = NULL; in btrfs_remove_block_group()
1168 clear_avail_alloc_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
1170 up_write(&block_group->space_info->groups_sem); in btrfs_remove_block_group()
1171 clear_incompat_bg_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
1177 if (block_group->cached == BTRFS_CACHE_STARTED) in btrfs_remove_block_group()
1180 write_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1185 list_for_each_entry(ctl, &fs_info->caching_block_groups, list) { in btrfs_remove_block_group()
1186 if (ctl->block_group == block_group) { in btrfs_remove_block_group()
1188 refcount_inc(&caching_ctl->count); in btrfs_remove_block_group()
1194 list_del_init(&caching_ctl->list); in btrfs_remove_block_group()
1195 write_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1203 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1204 WARN_ON(!list_empty(&block_group->dirty_list)); in btrfs_remove_block_group()
1205 WARN_ON(!list_empty(&block_group->io_list)); in btrfs_remove_block_group()
1206 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1210 spin_lock(&block_group->space_info->lock); in btrfs_remove_block_group()
1211 list_del_init(&block_group->ro_list); in btrfs_remove_block_group()
1214 WARN_ON(block_group->space_info->total_bytes in btrfs_remove_block_group()
1215 < block_group->length); in btrfs_remove_block_group()
1216 WARN_ON(block_group->space_info->bytes_readonly in btrfs_remove_block_group()
1217 < block_group->length - block_group->zone_unusable); in btrfs_remove_block_group()
1218 WARN_ON(block_group->space_info->bytes_zone_unusable in btrfs_remove_block_group()
1219 < block_group->zone_unusable); in btrfs_remove_block_group()
1220 WARN_ON(block_group->space_info->disk_total in btrfs_remove_block_group()
1221 < block_group->length * factor); in btrfs_remove_block_group()
1223 block_group->space_info->total_bytes -= block_group->length; in btrfs_remove_block_group()
1224 block_group->space_info->bytes_readonly -= in btrfs_remove_block_group()
1225 (block_group->length - block_group->zone_unusable); in btrfs_remove_block_group()
1226 btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info, in btrfs_remove_block_group()
1227 -block_group->zone_unusable); in btrfs_remove_block_group()
1228 block_group->space_info->disk_total -= block_group->length * factor; in btrfs_remove_block_group()
1230 spin_unlock(&block_group->space_info->lock); in btrfs_remove_block_group()
1237 * allocating a new block group - the unfreeze task ends up removing in btrfs_remove_block_group()
1241 * item key (and failing with -EEXIST and a transaction abort). in btrfs_remove_block_group()
1251 spin_lock(&block_group->lock); in btrfs_remove_block_group()
1252 set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags); in btrfs_remove_block_group()
1257 * fs_info->block_group_cache_tree so no one can find it anymore and in btrfs_remove_block_group()
1259 * from the rbtree, they have already incremented block_group->frozen - in btrfs_remove_block_group()
1264 * And we must not remove the chunk map from the fs_info->mapping_tree in btrfs_remove_block_group()
1276 * is mounted with -odiscard. The same protections must remain in btrfs_remove_block_group()
1280 remove_map = (atomic_read(&block_group->frozen) == 0); in btrfs_remove_block_group()
1281 spin_unlock(&block_group->lock); in btrfs_remove_block_group()
1287 /* Once for the lookup reference */ in btrfs_remove_block_group()
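The removal path above only drops the chunk map once block_group->frozen reaches zero; per the comments at lines 1257-1280, tasks that must keep the map alive across a concurrent removal take a freeze reference first. A hypothetical sketch of that protocol, using the freeze helpers this file pairs with:

        btrfs_freeze_block_group(block_group);    /* atomic_inc(&bg->frozen) */
        /* ... scrub/relocation work that dereferences the chunk map ... */
        btrfs_unfreeze_block_group(block_group);  /* if this was the last freeze and
                                                   * BLOCK_GROUP_FLAG_REMOVED is set,
                                                   * the chunk map is dropped here */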
1304 ASSERT(map->start == chunk_offset); in btrfs_start_trans_remove_block_group()
1325 num_items = 3 + map->num_stripes; in btrfs_start_trans_remove_block_group()
1332 * Mark block group @cache read-only, so later write won't happen to block
1346 struct btrfs_space_info *sinfo = cache->space_info; in inc_block_group_ro()
1348 int ret = -ENOSPC; in inc_block_group_ro()
1350 spin_lock(&sinfo->lock); in inc_block_group_ro()
1351 spin_lock(&cache->lock); in inc_block_group_ro()
1353 if (cache->swap_extents) { in inc_block_group_ro()
1354 ret = -ETXTBSY; in inc_block_group_ro()
1358 if (cache->ro) { in inc_block_group_ro()
1359 cache->ro++; in inc_block_group_ro()
1364 num_bytes = cache->length - cache->reserved - cache->pinned - in inc_block_group_ro()
1365 cache->bytes_super - cache->zone_unusable - cache->used; in inc_block_group_ro()
1373 } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { in inc_block_group_ro()
1380 if (sinfo_used + num_bytes <= sinfo->total_bytes) in inc_block_group_ro()
1389 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, in inc_block_group_ro()
1395 sinfo->bytes_readonly += num_bytes; in inc_block_group_ro()
1396 if (btrfs_is_zoned(cache->fs_info)) { in inc_block_group_ro()
1398 sinfo->bytes_readonly += cache->zone_unusable; in inc_block_group_ro()
1399 btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo, in inc_block_group_ro()
1400 -cache->zone_unusable); in inc_block_group_ro()
1401 cache->zone_unusable = 0; in inc_block_group_ro()
1403 cache->ro++; in inc_block_group_ro()
1404 list_add_tail(&cache->ro_list, &sinfo->ro_bgs); in inc_block_group_ro()
1407 spin_unlock(&cache->lock); in inc_block_group_ro()
1408 spin_unlock(&sinfo->lock); in inc_block_group_ro()
1409 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { in inc_block_group_ro()
1410 btrfs_info(cache->fs_info, in inc_block_group_ro()
1411 "unable to make block group %llu ro", cache->start); in inc_block_group_ro()
1412 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); in inc_block_group_ro()
1420 struct btrfs_fs_info *fs_info = trans->fs_info; in clean_pinned_extents()
1422 const u64 start = bg->start; in clean_pinned_extents()
1423 const u64 end = start + bg->length - 1; in clean_pinned_extents()
1426 spin_lock(&fs_info->trans_lock); in clean_pinned_extents()
1427 if (trans->transaction->list.prev != &fs_info->trans_list) { in clean_pinned_extents()
1428 prev_trans = list_last_entry(&trans->transaction->list, in clean_pinned_extents()
1430 refcount_inc(&prev_trans->use_count); in clean_pinned_extents()
1432 spin_unlock(&fs_info->trans_lock); in clean_pinned_extents()
1438 * transaction N - 1, and have seen a range belonging to the block in clean_pinned_extents()
1444 mutex_lock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1446 ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, in clean_pinned_extents()
1452 ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, in clean_pinned_extents()
1455 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1475 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_delete_unused_bgs()
1485 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) in btrfs_delete_unused_bgs()
1488 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1489 while (!list_empty(&fs_info->unused_bgs)) { in btrfs_delete_unused_bgs()
1493 block_group = list_first_entry(&fs_info->unused_bgs, in btrfs_delete_unused_bgs()
1496 list_del_init(&block_group->bg_list); in btrfs_delete_unused_bgs()
1498 space_info = block_group->space_info; in btrfs_delete_unused_bgs()
1504 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1506 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); in btrfs_delete_unused_bgs()
1509 down_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1519 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1521 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1526 spin_lock(&space_info->lock); in btrfs_delete_unused_bgs()
1527 spin_lock(&block_group->lock); in btrfs_delete_unused_bgs()
1528 if (btrfs_is_block_group_used(block_group) || block_group->ro || in btrfs_delete_unused_bgs()
1529 list_is_singular(&block_group->list)) { in btrfs_delete_unused_bgs()
1538 * information from fs_info->avail_*_alloc_bits and the in btrfs_delete_unused_bgs()
1541 * fs_info->avail_*_alloc_bits would be 0. in btrfs_delete_unused_bgs()
1544 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1545 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1546 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1553 * space_info->bytes_may_use was incremented by a task but no in btrfs_delete_unused_bgs()
1563 * space_info - if that's the case, then it means we have tasks in btrfs_delete_unused_bgs()
1570 if (space_info->total_bytes - block_group->length < used && in btrfs_delete_unused_bgs()
1571 block_group->zone_unusable < block_group->length) { in btrfs_delete_unused_bgs()
1573 * Add a reference for the list, compensate for the ref in btrfs_delete_unused_bgs()
1575 * fs_info->unused_bgs list. in btrfs_delete_unused_bgs()
1578 list_add_tail(&block_group->bg_list, &retry_list); in btrfs_delete_unused_bgs()
1581 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1582 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1583 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
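The condition at line 1570 defers deletion when the space left after removing this block group could not cover current usage (the second clause additionally skips groups that are almost fully zone_unusable on zoned filesystems). A worked example with illustrative numbers, where "used" stands for the space_info usage computed just before the check:

        /*
         * space_info->total_bytes - block_group->length < used
         * total_bytes = 10 GiB, block_group->length = 1 GiB, used = 9.5 GiB:
         *   10 GiB - 1 GiB = 9 GiB < 9.5 GiB
         * deleting now could lead to ENOSPC, so the group goes onto
         * retry_list instead of being removed.
         */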
1587 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1588 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1592 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1601 if (ret == -EAGAIN) in btrfs_delete_unused_bgs()
1611 block_group->start); in btrfs_delete_unused_bgs()
1634 spin_lock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1635 if (!list_empty(&block_group->discard_list)) { in btrfs_delete_unused_bgs()
1636 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1638 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1642 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1645 spin_lock(&space_info->lock); in btrfs_delete_unused_bgs()
1646 spin_lock(&block_group->lock); in btrfs_delete_unused_bgs()
1649 -block_group->pinned); in btrfs_delete_unused_bgs()
1650 space_info->bytes_readonly += block_group->pinned; in btrfs_delete_unused_bgs()
1651 block_group->pinned = 0; in btrfs_delete_unused_bgs()
1653 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1654 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1668 * need to reset sequential-required zones. in btrfs_delete_unused_bgs()
1681 ret = btrfs_remove_chunk(trans, block_group->start); in btrfs_delete_unused_bgs()
1690 * If we're not mounted with -odiscard, we can just forget in btrfs_delete_unused_bgs()
1695 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1698 * fs_info->unused_bgs, so use a list_move operation in btrfs_delete_unused_bgs()
1701 list_move(&block_group->bg_list, in btrfs_delete_unused_bgs()
1702 &trans->transaction->deleted_bgs); in btrfs_delete_unused_bgs()
1703 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1710 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1712 list_splice_tail(&retry_list, &fs_info->unused_bgs); in btrfs_delete_unused_bgs()
1713 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1714 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_delete_unused_bgs()
1719 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1720 list_splice_tail(&retry_list, &fs_info->unused_bgs); in btrfs_delete_unused_bgs()
1721 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1722 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_delete_unused_bgs()
1729 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_unused()
1731 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1732 if (list_empty(&bg->bg_list)) { in btrfs_mark_bg_unused()
1735 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); in btrfs_mark_bg_unused()
1736 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { in btrfs_mark_bg_unused()
1739 list_move_tail(&bg->bg_list, &fs_info->unused_bgs); in btrfs_mark_bg_unused()
1741 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1756 return bg1->used > bg2->used; in reclaim_bgs_cmp()
1768 const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info); in should_reclaim_block_group()
1769 u64 thresh_bytes = mult_perc(bg->length, thresh_pct); in should_reclaim_block_group()
1770 const u64 new_val = bg->used; in should_reclaim_block_group()
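should_reclaim_block_group() compares usage against a percentage watermark of the block group size; assuming the elided body treats thresh_bytes as a "reclaim once usage drops below" watermark, a worked example:

        /*
         * thresh_bytes = mult_perc(bg->length, thresh_pct);
         * bg->length = 1 GiB, thresh_pct = 75  ->  thresh_bytes = 768 MiB.
         * A group whose bg->used just dropped from 800 MiB to 700 MiB falls
         * below the watermark and becomes a reclaim candidate.
         */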
1795 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_reclaim_bgs_work()
1804 sb_start_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1807 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1815 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { in btrfs_reclaim_bgs_work()
1817 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1821 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1827 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); in btrfs_reclaim_bgs_work()
1828 while (!list_empty(&fs_info->reclaim_bgs)) { in btrfs_reclaim_bgs_work()
1833 bg = list_first_entry(&fs_info->reclaim_bgs, in btrfs_reclaim_bgs_work()
1836 list_del_init(&bg->bg_list); in btrfs_reclaim_bgs_work()
1838 space_info = bg->space_info; in btrfs_reclaim_bgs_work()
1839 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1842 down_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1844 spin_lock(&space_info->lock); in btrfs_reclaim_bgs_work()
1845 spin_lock(&bg->lock); in btrfs_reclaim_bgs_work()
1846 if (bg->reserved || bg->pinned || bg->ro) { in btrfs_reclaim_bgs_work()
1853 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1854 spin_unlock(&space_info->lock); in btrfs_reclaim_bgs_work()
1855 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1858 if (bg->used == 0) { in btrfs_reclaim_bgs_work()
1866 * for the non-existent extents and running some extra in btrfs_reclaim_bgs_work()
1872 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1873 spin_unlock(&space_info->lock); in btrfs_reclaim_bgs_work()
1874 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1888 if (!should_reclaim_block_group(bg, bg->length)) { in btrfs_reclaim_bgs_work()
1889 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1890 spin_unlock(&space_info->lock); in btrfs_reclaim_bgs_work()
1891 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1894 spin_unlock(&bg->lock); in btrfs_reclaim_bgs_work()
1895 spin_unlock(&space_info->lock); in btrfs_reclaim_bgs_work()
1898 * Get out fast, in case we're read-only or unmounting the in btrfs_reclaim_bgs_work()
1900 * for the read-only case. As we did sb_start_write(), in btrfs_reclaim_bgs_work()
1901 * "mount -o remount,ro" won't happen and read-only filesystem in btrfs_reclaim_bgs_work()
1902 * means it is forced read-only due to a fatal error. So, it in btrfs_reclaim_bgs_work()
1903 * never gets back to read-write to let us reclaim again. in btrfs_reclaim_bgs_work()
1906 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1913 * zone_unusable value gets moved to the block group's read-only in btrfs_reclaim_bgs_work()
1916 zone_unusable = bg->zone_unusable; in btrfs_reclaim_bgs_work()
1918 up_write(&space_info->groups_sem); in btrfs_reclaim_bgs_work()
1924 bg->start, in btrfs_reclaim_bgs_work()
1925 div64_u64(bg->used * 100, bg->length), in btrfs_reclaim_bgs_work()
1926 div64_u64(zone_unusable * 100, bg->length)); in btrfs_reclaim_bgs_work()
1928 reclaimed = bg->used; in btrfs_reclaim_bgs_work()
1929 ret = btrfs_relocate_chunk(fs_info, bg->start); in btrfs_reclaim_bgs_work()
1933 bg->start); in btrfs_reclaim_bgs_work()
1935 spin_lock(&space_info->lock); in btrfs_reclaim_bgs_work()
1936 space_info->reclaim_errors++; in btrfs_reclaim_bgs_work()
1937 if (READ_ONCE(space_info->periodic_reclaim)) in btrfs_reclaim_bgs_work()
1938 space_info->periodic_reclaim_ready = false; in btrfs_reclaim_bgs_work()
1939 spin_unlock(&space_info->lock); in btrfs_reclaim_bgs_work()
1941 spin_lock(&space_info->lock); in btrfs_reclaim_bgs_work()
1942 space_info->reclaim_count++; in btrfs_reclaim_bgs_work()
1943 space_info->reclaim_bytes += reclaimed; in btrfs_reclaim_bgs_work()
1944 spin_unlock(&space_info->lock); in btrfs_reclaim_bgs_work()
1947 if (ret && !READ_ONCE(space_info->periodic_reclaim)) { in btrfs_reclaim_bgs_work()
1949 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1955 if (list_empty(&bg->bg_list)) { in btrfs_reclaim_bgs_work()
1957 list_add_tail(&bg->bg_list, &retry_list); in btrfs_reclaim_bgs_work()
1959 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1963 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_reclaim_bgs_work()
1973 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) in btrfs_reclaim_bgs_work()
1975 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1977 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1978 mutex_unlock(&fs_info->reclaim_bgs_lock); in btrfs_reclaim_bgs_work()
1980 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1981 list_splice_tail(&retry_list, &fs_info->reclaim_bgs); in btrfs_reclaim_bgs_work()
1982 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs_work()
1984 sb_end_write(fs_info->sb); in btrfs_reclaim_bgs_work()
1990 spin_lock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs()
1991 if (!list_empty(&fs_info->reclaim_bgs)) in btrfs_reclaim_bgs()
1992 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); in btrfs_reclaim_bgs()
1993 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_reclaim_bgs()
1998 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_to_reclaim()
2000 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_to_reclaim()
2001 if (list_empty(&bg->bg_list)) { in btrfs_mark_bg_to_reclaim()
2004 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); in btrfs_mark_bg_to_reclaim()
2006 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_to_reclaim()
2019 slot = path->slots[0]; in read_bg_from_eb()
2020 leaf = path->nodes[0]; in read_bg_from_eb()
2022 map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset); in read_bg_from_eb()
2026 key->objectid, key->offset); in read_bg_from_eb()
2027 return -ENOENT; in read_bg_from_eb()
2030 if (map->start != key->objectid || map->chunk_len != key->offset) { in read_bg_from_eb()
2033 key->objectid, key->offset, map->start, map->chunk_len); in read_bg_from_eb()
2034 ret = -EUCLEAN; in read_bg_from_eb()
2043 if (flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { in read_bg_from_eb()
2046 key->objectid, key->offset, flags, in read_bg_from_eb()
2047 (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type)); in read_bg_from_eb()
2048 ret = -EUCLEAN; in read_bg_from_eb()
2065 if (found_key.objectid >= key->objectid && in find_first_block_group()
2078 write_seqlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
2080 fs_info->avail_data_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2082 fs_info->avail_metadata_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2084 fs_info->avail_system_alloc_bits |= extra_flags; in set_avail_alloc_bits()
2085 write_sequnlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
2115 return -EIO; in btrfs_rmap_block()
2117 data_stripe_length = map->stripe_size; in btrfs_rmap_block()
2119 chunk_start = map->start; in btrfs_rmap_block()
2122 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) in btrfs_rmap_block()
2125 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); in btrfs_rmap_block()
2127 ret = -ENOMEM; in btrfs_rmap_block()
2131 for (i = 0; i < map->num_stripes; i++) { in btrfs_rmap_block()
2137 if (!in_range(physical, map->stripes[i].physical, in btrfs_rmap_block()
2141 stripe_nr = (physical - map->stripes[i].physical) >> in btrfs_rmap_block()
2143 offset = (physical - map->stripes[i].physical) & in btrfs_rmap_block()
2146 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | in btrfs_rmap_block()
2148 stripe_nr = div_u64(stripe_nr * map->num_stripes + i, in btrfs_rmap_block()
2149 map->sub_stripes); in btrfs_rmap_block()
2153 * instead of map->stripe_len in btrfs_rmap_block()
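The stripe arithmetic above reverses the chunk mapping, converting a physical device address back into a logical address. A worked example (illustrative; assumes the elided shift and mask use the 64 KiB BTRFS_STRIPE_LEN):

        /*
         * RAID0 chunk, num_stripes = 2, sub_stripes = 1.  A physical address
         * 3 stripes + 4 KiB into device stripe i = 1:
         *   stripe_nr = 3, offset = 4 KiB
         *   stripe_nr = (3 * 2 + 1) / 1 = 7
         * so the logical address is chunk_start + 7 * 64 KiB + 4 KiB.
         */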
2179 struct btrfs_fs_info *fs_info = cache->fs_info; in exclude_super_stripes()
2186 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { in exclude_super_stripes()
2187 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; in exclude_super_stripes()
2188 cache->bytes_super += stripe_len; in exclude_super_stripes()
2189 ret = set_extent_bit(&fs_info->excluded_extents, cache->start, in exclude_super_stripes()
2190 cache->start + stripe_len - 1, in exclude_super_stripes()
2198 ret = btrfs_rmap_block(fs_info, cache->start, in exclude_super_stripes()
2208 cache->start); in exclude_super_stripes()
2209 return -EUCLEAN; in exclude_super_stripes()
2212 while (nr--) { in exclude_super_stripes()
2214 cache->start + cache->length - logical[nr]); in exclude_super_stripes()
2216 cache->bytes_super += len; in exclude_super_stripes()
2217 ret = set_extent_bit(&fs_info->excluded_extents, logical[nr], in exclude_super_stripes()
2218 logical[nr] + len - 1, in exclude_super_stripes()
2240 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), in btrfs_create_block_group_cache()
2242 if (!cache->free_space_ctl) { in btrfs_create_block_group_cache()
2247 cache->start = start; in btrfs_create_block_group_cache()
2249 cache->fs_info = fs_info; in btrfs_create_block_group_cache()
2250 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); in btrfs_create_block_group_cache()
2252 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; in btrfs_create_block_group_cache()
2254 refcount_set(&cache->refs, 1); in btrfs_create_block_group_cache()
2255 spin_lock_init(&cache->lock); in btrfs_create_block_group_cache()
2256 init_rwsem(&cache->data_rwsem); in btrfs_create_block_group_cache()
2257 INIT_LIST_HEAD(&cache->list); in btrfs_create_block_group_cache()
2258 INIT_LIST_HEAD(&cache->cluster_list); in btrfs_create_block_group_cache()
2259 INIT_LIST_HEAD(&cache->bg_list); in btrfs_create_block_group_cache()
2260 INIT_LIST_HEAD(&cache->ro_list); in btrfs_create_block_group_cache()
2261 INIT_LIST_HEAD(&cache->discard_list); in btrfs_create_block_group_cache()
2262 INIT_LIST_HEAD(&cache->dirty_list); in btrfs_create_block_group_cache()
2263 INIT_LIST_HEAD(&cache->io_list); in btrfs_create_block_group_cache()
2264 INIT_LIST_HEAD(&cache->active_bg_list); in btrfs_create_block_group_cache()
2265 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); in btrfs_create_block_group_cache()
2266 atomic_set(&cache->frozen, 0); in btrfs_create_block_group_cache()
2267 mutex_init(&cache->free_space_lock); in btrfs_create_block_group_cache()
2294 bg = btrfs_lookup_block_group(fs_info, map->start); in check_chunk_block_group_mappings()
2298 map->start, map->chunk_len); in check_chunk_block_group_mappings()
2299 ret = -EUCLEAN; in check_chunk_block_group_mappings()
2303 if (bg->start != map->start || bg->length != map->chunk_len || in check_chunk_block_group_mappings()
2304 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != in check_chunk_block_group_mappings()
2305 (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { in check_chunk_block_group_mappings()
2308 map->start, map->chunk_len, in check_chunk_block_group_mappings()
2309 map->type & BTRFS_BLOCK_GROUP_TYPE_MASK, in check_chunk_block_group_mappings()
2310 bg->start, bg->length, in check_chunk_block_group_mappings()
2311 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); in check_chunk_block_group_mappings()
2312 ret = -EUCLEAN; in check_chunk_block_group_mappings()
2317 start = map->start + map->chunk_len; in check_chunk_block_group_mappings()
2333 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); in read_one_block_group()
2335 cache = btrfs_create_block_group_cache(info, key->objectid); in read_one_block_group()
2337 return -ENOMEM; in read_one_block_group()
2339 cache->length = key->offset; in read_one_block_group()
2340 cache->used = btrfs_stack_block_group_used(bgi); in read_one_block_group()
2341 cache->commit_used = cache->used; in read_one_block_group()
2342 cache->flags = btrfs_stack_block_group_flags(bgi); in read_one_block_group()
2343 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); in read_one_block_group()
2359 cache->disk_cache_state = BTRFS_DC_CLEAR; in read_one_block_group()
2361 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && in read_one_block_group()
2362 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { in read_one_block_group()
2365 cache->start); in read_one_block_group()
2366 ret = -EINVAL; in read_one_block_group()
2373 cache->start); in read_one_block_group()
2405 } else if (cache->length == cache->used) { in read_one_block_group()
2406 cache->cached = BTRFS_CACHE_FINISHED; in read_one_block_group()
2408 } else if (cache->used == 0) { in read_one_block_group()
2409 cache->cached = BTRFS_CACHE_FINISHED; in read_one_block_group()
2410 ret = btrfs_add_new_free_space(cache, cache->start, in read_one_block_group()
2411 cache->start + cache->length, NULL); in read_one_block_group()
2425 set_avail_alloc_bits(info, cache->flags); in read_one_block_group()
2426 if (btrfs_chunk_writeable(info, cache->start)) { in read_one_block_group()
2427 if (cache->used == 0) { in read_one_block_group()
2428 ASSERT(list_empty(&cache->bg_list)); in read_one_block_group()
2430 btrfs_discard_queue_work(&info->discard_ctl, cache); in read_one_block_group()
2449 for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) { in fill_dummy_bgs()
2454 bg = btrfs_create_block_group_cache(fs_info, map->start); in fill_dummy_bgs()
2456 ret = -ENOMEM; in fill_dummy_bgs()
2461 bg->length = map->chunk_len; in fill_dummy_bgs()
2462 bg->flags = map->type; in fill_dummy_bgs()
2463 bg->cached = BTRFS_CACHE_FINISHED; in fill_dummy_bgs()
2464 bg->used = map->chunk_len; in fill_dummy_bgs()
2465 bg->flags = map->type; in fill_dummy_bgs()
2471 if (ret == -EEXIST) { in fill_dummy_bgs()
2485 set_avail_alloc_bits(fs_info, bg->flags); in fill_dummy_bgs()
2505 * unsupported RO options. The fs can never be mounted read-write, so no in btrfs_read_block_groups()
2511 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & in btrfs_read_block_groups()
2520 return -ENOMEM; in btrfs_read_block_groups()
2522 cache_gen = btrfs_super_cache_generation(info->super_copy); in btrfs_read_block_groups()
2524 btrfs_super_generation(info->super_copy) != cache_gen) in btrfs_read_block_groups()
2540 leaf = path->nodes[0]; in btrfs_read_block_groups()
2541 slot = path->slots[0]; in btrfs_read_block_groups()
2556 list_for_each_entry(space_info, &info->space_info, list) { in btrfs_read_block_groups()
2560 if (list_empty(&space_info->block_groups[i])) in btrfs_read_block_groups()
2562 cache = list_first_entry(&space_info->block_groups[i], in btrfs_read_block_groups()
2568 if (!(btrfs_get_alloc_profile(info, space_info->flags) & in btrfs_read_block_groups()
2575 * Avoid allocating from un-mirrored block group if there are in btrfs_read_block_groups()
2579 &space_info->block_groups[BTRFS_RAID_RAID0], in btrfs_read_block_groups()
2583 &space_info->block_groups[BTRFS_RAID_SINGLE], in btrfs_read_block_groups()
2613 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_block_group_item()
2620 spin_lock(&block_group->lock); in insert_block_group_item()
2621 btrfs_set_stack_block_group_used(&bgi, block_group->used); in insert_block_group_item()
2623 block_group->global_root_id); in insert_block_group_item()
2624 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); in insert_block_group_item()
2625 old_commit_used = block_group->commit_used; in insert_block_group_item()
2626 block_group->commit_used = block_group->used; in insert_block_group_item()
2627 key.objectid = block_group->start; in insert_block_group_item()
2629 key.offset = block_group->length; in insert_block_group_item()
2630 spin_unlock(&block_group->lock); in insert_block_group_item()
2634 spin_lock(&block_group->lock); in insert_block_group_item()
2635 block_group->commit_used = old_commit_used; in insert_block_group_item()
2636 spin_unlock(&block_group->lock); in insert_block_group_item()
2646 struct btrfs_fs_info *fs_info = device->fs_info; in insert_dev_extent()
2647 struct btrfs_root *root = fs_info->dev_root; in insert_dev_extent()
2654 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); in insert_dev_extent()
2655 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); in insert_dev_extent()
2658 return -ENOMEM; in insert_dev_extent()
2660 key.objectid = device->devid; in insert_dev_extent()
2667 leaf = path->nodes[0]; in insert_dev_extent()
2668 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); in insert_dev_extent()
2690 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_dev_extents()
2706 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the in insert_dev_extents()
2710 mutex_lock(&fs_info->fs_devices->device_list_mutex); in insert_dev_extents()
2711 for (i = 0; i < map->num_stripes; i++) { in insert_dev_extents()
2712 device = map->stripes[i].dev; in insert_dev_extents()
2713 dev_offset = map->stripes[i].physical; in insert_dev_extents()
2716 map->stripe_size); in insert_dev_extents()
2720 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in insert_dev_extents()
2735 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_create_pending_block_groups()
2739 while (!list_empty(&trans->new_bgs)) { in btrfs_create_pending_block_groups()
2742 block_group = list_first_entry(&trans->new_bgs, in btrfs_create_pending_block_groups()
2748 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_create_pending_block_groups()
2754 &block_group->runtime_flags)) { in btrfs_create_pending_block_groups()
2755 mutex_lock(&fs_info->chunk_mutex); in btrfs_create_pending_block_groups()
2757 mutex_unlock(&fs_info->chunk_mutex); in btrfs_create_pending_block_groups()
2761 ret = insert_dev_extents(trans, block_group->start, in btrfs_create_pending_block_groups()
2762 block_group->length); in btrfs_create_pending_block_groups()
2773 if (block_group->space_info->block_group_kobjs[index] == NULL) in btrfs_create_pending_block_groups()
2779 list_del_init(&block_group->bg_list); in btrfs_create_pending_block_groups()
2780 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); in btrfs_create_pending_block_groups()
2805 spin_lock(&block_group->lock); in btrfs_create_pending_block_groups()
2807 spin_unlock(&block_group->lock); in btrfs_create_pending_block_groups()
2817 * For extent tree v2 we use the block_group_item->chunk_offset to point at our
2822 u64 div = SZ_1G; in calculate_global_root_id() local
2829 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) in calculate_global_root_id()
2830 div = SZ_128M; in calculate_global_root_id()
2832 offset = div64_u64(offset, div); in calculate_global_root_id()
2833 div64_u64_rem(offset, fs_info->nr_global_roots, &index); in calculate_global_root_id()
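calculate_global_root_id() maps a block group's start offset onto one of the extent tree v2 global root copies: divide by a slice size (1 GiB, or 128 MiB on filesystems of at most 10 GiB) and take the remainder modulo nr_global_roots. A worked example with illustrative numbers:

        /*
         * filesystem > 10 GiB, so div = SZ_1G; block group start = 5 GiB,
         * fs_info->nr_global_roots = 4:
         *   div64_u64(5 GiB, SZ_1G) = 5,  5 mod 4 = 1  ->  global root id 1
         * consecutive 1 GiB block groups therefore round-robin across the
         * global root copies.
         */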
2841 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_make_block_group()
2849 return ERR_PTR(-ENOMEM); in btrfs_make_block_group()
2856 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); in btrfs_make_block_group()
2858 cache->length = size; in btrfs_make_block_group()
2860 cache->flags = type; in btrfs_make_block_group()
2861 cache->cached = BTRFS_CACHE_FINISHED; in btrfs_make_block_group()
2862 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); in btrfs_make_block_group()
2865 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); in btrfs_make_block_group()
2891 * with its ->space_info set. in btrfs_make_block_group()
2893 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); in btrfs_make_block_group()
2894 ASSERT(cache->space_info); in btrfs_make_block_group()
2904 * Now that our block group has its ->space_info set and is inserted in in btrfs_make_block_group()
2913 cache->space_info->bytes_used += size >> 1; in btrfs_make_block_group()
2918 list_add_tail(&cache->bg_list, &trans->new_bgs); in btrfs_make_block_group()
2930 * @do_chunk_alloc: whether we need to do chunk pre-allocation, this is to in btrfs_inc_block_group_ro()
2937 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_inc_block_group_ro()
2945 * This can only happen when we are doing read-only scrub on read-only in btrfs_inc_block_group_ro()
2947 * In that case we should not start a new transaction on read-only fs. in btrfs_inc_block_group_ro()
2950 if (sb_rdonly(fs_info->sb)) { in btrfs_inc_block_group_ro()
2951 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2953 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2969 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2970 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { in btrfs_inc_block_group_ro()
2971 u64 transid = trans->transid; in btrfs_inc_block_group_ro()
2973 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2988 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2989 if (alloc_flags != cache->flags) { in btrfs_inc_block_group_ro()
2996 if (ret == -ENOSPC) in btrfs_inc_block_group_ro()
3006 if (ret == -ETXTBSY) in btrfs_inc_block_group_ro()
3012 * we still want to try our best to mark the block group read-only. in btrfs_inc_block_group_ro()
3014 if (!do_chunk_alloc && ret == -ENOSPC && in btrfs_inc_block_group_ro()
3015 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) in btrfs_inc_block_group_ro()
3018 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); in btrfs_inc_block_group_ro()
3026 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); in btrfs_inc_block_group_ro()
3031 if (ret == -ETXTBSY) in btrfs_inc_block_group_ro()
3034 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { in btrfs_inc_block_group_ro()
3035 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
3036 mutex_lock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
3038 mutex_unlock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
3041 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
3049 struct btrfs_space_info *sinfo = cache->space_info; in btrfs_dec_block_group_ro()
3052 BUG_ON(!cache->ro); in btrfs_dec_block_group_ro()
3054 spin_lock(&sinfo->lock); in btrfs_dec_block_group_ro()
3055 spin_lock(&cache->lock); in btrfs_dec_block_group_ro()
3056 if (!--cache->ro) { in btrfs_dec_block_group_ro()
3057 if (btrfs_is_zoned(cache->fs_info)) { in btrfs_dec_block_group_ro()
3059 cache->zone_unusable = in btrfs_dec_block_group_ro()
3060 (cache->alloc_offset - cache->used - cache->pinned - in btrfs_dec_block_group_ro()
3061 cache->reserved) + in btrfs_dec_block_group_ro()
3062 (cache->length - cache->zone_capacity); in btrfs_dec_block_group_ro()
3063 btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo, in btrfs_dec_block_group_ro()
3064 cache->zone_unusable); in btrfs_dec_block_group_ro()
3065 sinfo->bytes_readonly -= cache->zone_unusable; in btrfs_dec_block_group_ro()
3067 num_bytes = cache->length - cache->reserved - in btrfs_dec_block_group_ro()
3068 cache->pinned - cache->bytes_super - in btrfs_dec_block_group_ro()
3069 cache->zone_unusable - cache->used; in btrfs_dec_block_group_ro()
3070 sinfo->bytes_readonly -= num_bytes; in btrfs_dec_block_group_ro()
3071 list_del_init(&cache->ro_list); in btrfs_dec_block_group_ro()
3073 spin_unlock(&cache->lock); in btrfs_dec_block_group_ro()
3074 spin_unlock(&sinfo->lock); in btrfs_dec_block_group_ro()
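btrfs_inc_block_group_ro() and btrfs_dec_block_group_ro() bracket work that must not race with allocations in the block group. A hypothetical caller sketch (error handling trimmed):

        ret = btrfs_inc_block_group_ro(cache, true);  /* may pre-allocate a chunk */
        if (ret)
                return ret;
        /* ... scrub or relocate while writes to this group are excluded ... */
        btrfs_dec_block_group_ro(cache);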
3081 struct btrfs_fs_info *fs_info = trans->fs_info; in update_block_group_item()
3094 * We cannot use cache->used directly outside of the spin lock, as it in update_block_group_item()
3097 spin_lock(&cache->lock); in update_block_group_item()
3098 old_commit_used = cache->commit_used; in update_block_group_item()
3099 used = cache->used; in update_block_group_item()
3101 if (cache->commit_used == used) { in update_block_group_item()
3102 spin_unlock(&cache->lock); in update_block_group_item()
3105 cache->commit_used = used; in update_block_group_item()
3106 spin_unlock(&cache->lock); in update_block_group_item()
3108 key.objectid = cache->start; in update_block_group_item()
3110 key.offset = cache->length; in update_block_group_item()
3115 ret = -ENOENT; in update_block_group_item()
3119 leaf = path->nodes[0]; in update_block_group_item()
3120 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); in update_block_group_item()
3123 cache->global_root_id); in update_block_group_item()
3124 btrfs_set_stack_block_group_flags(&bgi, cache->flags); in update_block_group_item()
3131 * unless the block group item didn't exist yet - this is to prevent a in update_block_group_item()
3135 * insertion set it to a value greater than 0 - if the block group later in update_block_group_item()
3138 if (ret < 0 && ret != -ENOENT) { in update_block_group_item()
3139 spin_lock(&cache->lock); in update_block_group_item()
3140 cache->commit_used = old_commit_used; in update_block_group_item()
3141 spin_unlock(&cache->lock); in update_block_group_item()
/* In cache_save_setup(): */
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	/* ... */

	/*
	 * Don't bother keeping a free space cache for block groups smaller
	 * than 100MB, just mark the cache as written.
	 */
	if (block_group->length < (100 * SZ_1M)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		/* ... */
	}
	/* ... */
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		/* ... */
	}
	/* ... */
	if (block_group->ro)
		goto out_put;

	BTRFS_I(inode)->generation = 0;
	/* ... */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}
	/* ... */
	ret = btrfs_check_trunc_cache_free_space(fs_info,
						 &fs_info->global_block_rsv);
	/* ... */
	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
		/* ... */
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);
	/* ... */

	/*
	 * We hit an ENOSPC when setting up the cache in this transaction, just
	 * skip doing the setup, we've already cleared the cache so we're safe.
	 */
	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
		ret = -ENOSPC;
		goto out_put;
	}
	/* ... */
	cache_size = div_u64(block_group->length, SZ_256M);
	/* ... */
	cache_size *= fs_info->sectorsize;
	/* ... */
	else if (ret == -ENOSPC)
		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
	/* ... */
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);
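/*
 * Worked example for the sizing above (assuming the elided intermediate
 * scaling of 16 units per 256MiB found upstream, and a 4KiB sectorsize):
 * a 1GiB block group yields div_u64(SZ_1G, SZ_256M) = 4, then
 * 4 * 16 * 4096 = 256KiB preallocated for its free space cache file.
 * Groups under 100MiB skip the cache entirely, per the check above.
 */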
/* In btrfs_setup_space_cache(): */
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	/* ... */
	if (list_empty(&cur_trans->dirty_bgs) ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE))
		return 0;
	/* ... */
		return -ENOMEM;
	/* ... */
	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(trans, cache, path);
	}
/* In btrfs_start_dirty_block_groups(): */
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	/* ... */
	struct list_head *io = &cur_trans->io_bgs;
	/* ... */
	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);
	/* ... */
			ret = -ENOMEM;
	/* ... */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		/* ... */
		/*
		 * This can happen if something re-dirties a block group that
		 * is already under IO. Just wait for it to finish and then
		 * do it all again.
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			/* ... */
		}

		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
		 * it should update the cache_state, so don't delete from the
		 * dirty list until after we wait.
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		/* ... */
		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				/* ... */
				list_add_tail(&cache->io_list, io);
			}
			/* ... */
		}
		if (!ret) {
			ret = update_block_group_item(trans, path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree; ignore the error, we will try again later in
			 * the critical section of the transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					/* ... */
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			}
			/* ... */
		}
		/* ... */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		/* ... */
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);
	/* ... */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	/* ... */
	if (ret < 0) {
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&dirty, &cur_trans->dirty_bgs);
		spin_unlock(&cur_trans->dirty_bgs_lock);
	}
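/*
 * A sketch of the protocol above, matching the upstream behaviour: the
 * dirty list is spliced to a private list so dirty_bgs_lock is held only
 * for list surgery; cache_write_mutex is dropped and retaken once per
 * group so balance or empty-group removal can make progress; and a single
 * "goto again" pass picks up groups re-dirtied while we were writing. On
 * error, everything left on the private list is spliced back so the
 * commit critical section can retry the writes.
 */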
/* In btrfs_write_dirty_block_groups(): */
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	/* ... */
	struct list_head *io = &cur_trans->io_bgs;
	/* ... */
		return -ENOMEM;
	/* ... */
	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		/*
		 * This can happen if cache_save_setup re-dirties a block group
		 * that is already under IO. Just wait for it to finish and
		 * then do it all again.
		 */
		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			/* ... */
			spin_lock(&cur_trans->dirty_bgs_lock);
		}
		/* ... */
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		/* ... */
		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				/* ... */
				list_add_tail(&cache->io_list, io);
			}
			/* ... */
		}
		/* ... */
			if (ret == -ENOENT) {
				wait_event(cur_trans->writer_wait,
					   atomic_read(&cur_trans->num_writers) == 1);
				/* ... */
			}
		/* ... */
		spin_lock(&cur_trans->dirty_bgs_lock);
		/* ... */
		spin_unlock(&cur_trans->dirty_bgs_lock);
	}
	/* ... */
	while (!list_empty(io)) {
		/* ... */
		list_del_init(&cache->io_list);
		/* ... */
	}
/* In btrfs_update_block_group(): */
	struct btrfs_fs_info *info = trans->fs_info;
	/* ... */
	int factor;

	/* Block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	cache = btrfs_lookup_block_group(info, bytenr);
	if (!cache)
		return -ENOENT;

	/* An extent can not span multiple block groups. */
	ASSERT(bytenr + num_bytes <= cache->start + cache->length);

	space_info = cache->space_info;
	factor = btrfs_bg_type_to_factor(cache->flags);
	/* ... */
	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);

	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    cache->disk_cache_state < BTRFS_DC_CLEAR)
		cache->disk_cache_state = BTRFS_DC_CLEAR;

	old_val = cache->used;
	if (alloc) {
		old_val += num_bytes;
		cache->used = old_val;
		cache->reserved -= num_bytes;
		cache->reclaim_mark = 0;
		space_info->bytes_reserved -= num_bytes;
		space_info->bytes_used += num_bytes;
		space_info->disk_used += num_bytes * factor;
		if (READ_ONCE(space_info->periodic_reclaim))
			btrfs_space_info_update_reclaimable(space_info, -num_bytes);
		spin_unlock(&cache->lock);
		spin_unlock(&space_info->lock);
	} else {
		old_val -= num_bytes;
		cache->used = old_val;
		cache->pinned += num_bytes;
		/* ... */
		space_info->bytes_used -= num_bytes;
		space_info->disk_used -= num_bytes * factor;
		if (READ_ONCE(space_info->periodic_reclaim))
			/* ... */;
		/* ... */
		spin_unlock(&cache->lock);
		spin_unlock(&space_info->lock);

		set_extent_bit(&trans->transaction->pinned_extents, bytenr,
			       bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (list_empty(&cache->dirty_list)) {
		list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
		/* ... */
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
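/*
 * Worked example for the factor above: btrfs_bg_type_to_factor() returns
 * the number of copies a profile keeps, so for RAID1 or DUP it is 2.
 * Allocating a 1MiB extent in a RAID1 data block group therefore moves
 * space_info->bytes_used by 1MiB while space_info->disk_used moves by
 * 2MiB; SINGLE and striped-only profiles use factor 1.
 */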
/*
 * [...] This is called by the allocator when it reserves space. If the
 * block group has become read-only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
/* In btrfs_add_reserved_bytes(): */
	struct btrfs_space_info *space_info = cache->space_info;
	/* ... */
	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
	} else {
		/* ... */
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;
		trace_btrfs_space_reservation(cache->fs_info, "space_info",
					      space_info->flags, num_bytes, 1);
		btrfs_space_info_update_bytes_may_use(cache->fs_info,
						      space_info, -ram_bytes);
		if (delalloc)
			cache->delalloc_bytes += num_bytes;
		/* ... */
		btrfs_try_granting_tickets(cache->fs_info, space_info);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
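/*
 * Lifecycle of reserved space, as visible in the code above and around it:
 * a reservation first sits in space_info->bytes_may_use; the helper above
 * moves it to bytes_reserved once an extent is actually carved out of a
 * block group; and btrfs_update_block_group() moves it to bytes_used
 * (scaled by the RAID factor for disk_used) when the extent is referenced
 * by the btrees. The undo side of this pairing is
 * btrfs_free_reserved_bytes() below.
 */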
/* In btrfs_free_reserved_bytes(): */
	struct btrfs_space_info *space_info = cache->space_info;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	else if (btrfs_is_zoned(cache->fs_info))
		space_info->bytes_zone_unusable += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;
	space_info->max_extent_size = 0;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);

	btrfs_try_granting_tickets(cache->fs_info, space_info);
	spin_unlock(&space_info->lock);
/* In force_metadata_allocation(): */
	struct list_head *head = &info->space_info;
	/* ... */
	list_for_each_entry(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
/* In should_alloc_chunk(): */
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		/* ... */
		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	/* ... */
	if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
		return 0;
	return 1;
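/*
 * Worked example for the 80% rule above: with sinfo->total_bytes = 10GiB,
 * mult_perc(sinfo->total_bytes, 80) = 8GiB, so while bytes_used + 2MiB
 * stays below 8GiB no new chunk is allocated; once usage crosses that
 * line the function returns 1 and a new chunk may be allocated.
 */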
/* In btrfs_force_chunk_alloc(): */
	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
/* In do_chunk_alloc(): */
	/*
	 * Normally we are not expected to fail with -ENOSPC here, since we have
	 * previously reserved space in the system space_info. [...] Finding a
	 * free device extent
	 * can be slow on very large filesystems, so we tolerate the -ENOSPC and
	 * allocate a new system chunk before retrying, as below.
	 */
	/* ... */
	if (ret == -ENOSPC) {
		const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
		/* ... */
	}
/*
 * Chunk allocation is done in two phases:
 *
 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
 *    the chunk, the chunk mapping, create its block group and add the items
 *    that belong in the chunk btree to it - more specifically, we need to
 *    update device items in the chunk btree and add a new chunk item to it.
 *
 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
 *    group item to the extent btree and the device extent items to the
 *    devices btree.
 *
 * [...] tasks that started a transaction before and didn't allocate chunks
 * meanwhile - this typically happens with tasks that don't reserve space
 * properly. [...]
 *
 * [...] when we create a filesystem from
 * a seed device - we must create new metadata and system chunks without adding
 * any of their items to the chunk and extent
 * btrees, since all the chunks from the seed device are read-only.
 *
 * [...] hence the distinction between btrfs_chunk_alloc() and
 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
 * metadata chunk, while the later is used when we need to reserve space for
 * a modification to the chunk btree - use cases for the later are adding,
 * removing and resizing a device, as well as relocating a system chunk.
 *
 * [...] all updates and insertions into the chunk btree must be done while
 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
 * an extent buffer of the chunk btree we don't trigger a nested system chunk
 * allocation, which would deadlock on
 * that mutex. The same logic applies to removing chunks - we must reserve system
 * space and do all the chunk btree updates
 * while holding fs_info->chunk_mutex.
 *
 * If @force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If @force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
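/*
 * Illustrative call flow for the two phases (a sketch, not code from this
 * file; assumes the upstream entry points named in the comment above):
 *
 *	Phase 1, in the task needing space:
 *		btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_NO_FORCE)
 *			-> allocate device extents and the chunk mapping,
 *			   create the block group and chunk btree items,
 *			   queue the new group on the transaction handle
 *
 *	Phase 2, when the transaction handle is ended or committed:
 *		btrfs_create_pending_block_groups(trans)
 *			-> insert the block group item in the extent btree
 *			   and the device extent items in the devices btree
 */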
/* In btrfs_chunk_alloc(): */
	struct btrfs_fs_info *fs_info = trans->fs_info;
	/* ... */

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	/*
	 * System chunks can not be allocated through this path: another task
	 * modifying the chunk btree may be COWing an extent buffer holding a
	 * lock on it and on its parent - if the COW operation triggers a system
	 * chunk allocation we could deadlock. [...] A system chunk allocation
	 * can therefore never be triggered
	 * here - this happens in the cases described above at do_chunk_alloc().
	 */
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return -ENOSPC;
	/* ... */
	do {
		spin_lock(&space_info->lock);
		if (force < space_info->force_alloc)
			force = space_info->force_alloc;
		/* ... */
		if (space_info->full) {
			if (should_alloc)
				ret = -ENOSPC;
			else
				ret = 0;
			spin_unlock(&space_info->lock);
			return ret;
		} else if (!should_alloc) {
			spin_unlock(&space_info->lock);
			return 0;
		} else if (space_info->chunk_alloc) {
			/*
			 * Someone is already allocating, so we need to block
			 * until this someone is finished and then loop to
			 * recheck if we should continue with our allocation
			 * attempt.
			 */
			/* ... */
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			space_info->chunk_alloc = 1;
			/* ... */
			spin_unlock(&space_info->lock);
		}
		/* ... */
	} while (wait_for_alloc);

	mutex_lock(&fs_info->chunk_mutex);
	trans->allocating_chunk = true;
	/* ... */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}
	/* ... */
	trans->allocating_chunk = false;
	/* ... */
	spin_lock(&space_info->lock);
	if (ret < 0) {
		if (ret == -ENOSPC)
			space_info->full = 1;
		/* ... */
	} else {
		/* ... */
		space_info->max_extent_size = 0;
	}

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	/* ... */
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
/* In get_profile_num_devs(): */
		num_dev = fs_info->fs_devices->rw_devices;
/* In reserve_chunk_space(): */
	struct btrfs_fs_info *fs_info = trans->fs_info;
	/* ... */

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);
	/* ... */
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);
	/* ... */

	/*
	 * Ignore failure to create the system chunk here: we might end up not
	 * needing it, [...] as in
	 * the cases described at do_chunk_alloc() - the system
	 * chunk can still be allocated later when it is actually needed.
	 */
	/* ... */
		ret = btrfs_block_rsv_add(fs_info,
					  &fs_info->chunk_block_rsv,
					  /* ... */);
		if (!ret)
			trans->chunk_bytes_reserved += bytes;
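/*
 * Sizing note, grounded in the helpers above: the number of bytes reserved
 * here is derived from the worst case chunk btree footprint, roughly one
 * device item update per writeable device (see get_profile_num_devs()
 * above) plus one chunk item insertion or removal, converted to bytes with
 * the metadata size helpers. The exact arithmetic lives in the elided
 * lines of check_system_chunk() below.
 */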
/*
 * Reserve space in the system space for allocating or removing a chunk.
 * The caller must be holding fs_info->chunk_mutex.
 */
/* In check_system_chunk(): */
	struct btrfs_fs_info *fs_info = trans->fs_info;
/* In btrfs_reserve_chunk_metadata(): */
	struct btrfs_fs_info *fs_info = trans->fs_info;
	/* ... */
	mutex_lock(&fs_info->chunk_mutex);
	reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
	mutex_unlock(&fs_info->chunk_mutex);
/* In btrfs_put_block_group_cache(): */
		spin_lock(&block_group->lock);
		if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
				       &block_group->runtime_flags)) {
			struct btrfs_inode *inode = block_group->inode;

			block_group->inode = NULL;
			spin_unlock(&block_group->lock);

			ASSERT(block_group->io_ctl.inode == NULL);
			iput(&inode->vfs_inode);
		} else {
			spin_unlock(&block_group->lock);
		}
/* In btrfs_free_block_groups(): */
	if (info->active_meta_bg) {
		btrfs_put_block_group(info->active_meta_bg);
		info->active_meta_bg = NULL;
	}
	if (info->active_system_bg) {
		btrfs_put_block_group(info->active_system_bg);
		info->active_system_bg = NULL;
	}
	/* ... */
	write_lock(&info->block_group_cache_lock);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		btrfs_put_caching_control(caching_ctl);
	}
	write_unlock(&info->block_group_cache_lock);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}

	while (!list_empty(&info->reclaim_bgs)) {
		block_group = list_first_entry(&info->reclaim_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->zone_active_bgs_lock);
	while (!list_empty(&info->zone_active_bgs)) {
		block_group = list_first_entry(&info->zone_active_bgs,
					       struct btrfs_block_group,
					       active_bg_list);
		list_del_init(&block_group->active_bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->zone_active_bgs_lock);

	write_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group,
				       cache_node);
		rb_erase_cached(&block_group->cache_node,
				&info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		write_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);
		/* ... */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			btrfs_free_excluded_extents(block_group);
		/* ... */
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(refcount_read(&block_group->refs) == 1);
		ASSERT(block_group->swap_extents == 0);
		/* ... */
		write_lock(&info->block_group_cache_lock);
	}
	write_unlock(&info->block_group_cache_lock);
	/* ... */
	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		/* ... */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);
		/* ... */
		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
			if (WARN_ON(space_info->bytes_reserved > 0))
				btrfs_dump_space_info(info, space_info, 0, 0);
		}
		/* ... */
		WARN_ON(space_info->reclaim_size > 0);
		list_del(&space_info->list);
		/* ... */
	}
void btrfs_freeze_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->frozen);
}

/* In btrfs_unfreeze_block_group(): */
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	/* ... */
	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
	spin_unlock(&block_group->lock);

	if (cleanup) {
		/* ... */
		map = btrfs_find_chunk_map(fs_info, block_group->start, 1);
		/* ... */
		/* Once for our lookup reference. */
		btrfs_free_chunk_map(map);
		/* ... */
	}
/* In btrfs_inc_block_group_swap_extents(): */
	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		bg->swap_extents++;
	spin_unlock(&bg->lock);

/* In btrfs_dec_block_group_swap_extents(): */
	spin_lock(&bg->lock);
	ASSERT(!bg->ro);
	ASSERT(bg->swap_extents >= amount);
	bg->swap_extents -= amount;
	spin_unlock(&bg->lock);
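/*
 * Illustrative pairing, not from this file: swapfile activation takes one
 * reference per activated extent and must treat a false return as "this
 * block group can no longer back swap" (it raced with being marked RO),
 * for example:
 *
 *	if (!btrfs_inc_block_group_swap_extents(bg))
 *		bail out with an error (exact errno choice is hypothetical);
 *	... use the extent for swap ...
 *	btrfs_dec_block_group_swap_extents(bg, 1);
 */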
/*
 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
 * case of a race that leads to the wrong size class without
 * force_wrong_size_class set. [...]
 */
/* In btrfs_use_block_group_size_class(): */
	/* The new allocation is already in the right size class. */
	if (bg->size_class == size_class)
		return 0;

	/*
	 * The new allocation is in a mismatched size class; the loser of the
	 * race has to retry unless it is desperate enough to force it.
	 */
	if (bg->size_class != BTRFS_BG_SZ_NONE) {
		if (force_wrong_size_class)
			return 0;
		return -EAGAIN;
	}
	/* First allocation in a fresh block group sets its size class. */
	bg->size_class = size_class;
/* In btrfs_block_group_should_use_size_class(): */
	if (btrfs_is_zoned(bg->fs_info))
		return false;