// SPDX-License-Identifier: GPL-2.0

#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}
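
/*
 * Example (added, illustrative; not part of the original source): while a
 * balance is converting data block groups to RAID1,
 * get_restripe_target(fs_info, BTRFS_BLOCK_GROUP_DATA) returns
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1, while the metadata and
 * system chunk types yield 0 unless they too have a convert target.
 */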

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused), pick the target profile (if it's already
 * available), otherwise fall back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	/* Select the highest-redundancy RAID level. */
	if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
		allowed = BTRFS_BLOCK_GROUP_RAID1C4;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
		allowed = BTRFS_BLOCK_GROUP_RAID1C3;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_DUP)
		allowed = BTRFS_BLOCK_GROUP_DUP;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
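
/*
 * Worked example (added, illustrative): with two rw devices and extended
 * flags DATA | RAID6 | RAID1, the devs_min mask drops RAID6 (it needs at
 * least three devices), the if-chain above then picks RAID1 as the remaining
 * highest-redundancy profile, and the result is chunk-format DATA | RAID1.
 */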

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		/*
		 * If there was a failure to cleanup a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on reserved > 0 in that
		 * case.
		 */
		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
			WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		kfree(cache->free_space_ctl);
		kfree(cache->physical_map);
		kfree(cache);
	}
}
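
/*
 * Usage sketch (added, illustrative): the lookup helpers below return a
 * referenced block group, so a typical caller pairs them with
 * btrfs_put_block_group():
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (bg) {
 *		... use bg ...
 *		btrfs_put_block_group(bg);
 *	}
 */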

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;
	bool leftmost = true;

	ASSERT(block_group->length != 0);

	write_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_root.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node,
			       &info->block_group_cache_tree, leftmost);

	write_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	read_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_root.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	read_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
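
/*
 * Iteration sketch (added, illustrative): btrfs_next_block_group() below
 * drops the reference on the group it was given and returns the next one
 * already referenced, so walking all block groups is typically
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... use bg ...
 *	}
 */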

struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	read_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:	The filesystem information object.
 * @bytenr:	Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increment the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write; the caller
 * is responsible for calling btrfs_dec_nocow_writers() later.
 *
 * Or NULL if we cannot do a NOCOW write
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}
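
/*
 * Typical pairing (added, illustrative sketch): a NOCOW write path would do
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, disk_bytenr);
 *	if (!bg)
 *		... fall back to a COW write ...
 *	... create the ordered extent for the NOCOW write ...
 *	btrfs_dec_nocow_writers(bg);
 */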

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it wants
 * to use it, then it should get a reference on it before calling this function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}
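
/*
 * Refcounting sketch (added, illustrative): btrfs_get_caching_control()
 * returns a referenced control, or NULL when no caching is in progress, so
 * callers typically do
 *
 *	ctl = btrfs_get_caching_control(cache);
 *	if (ctl) {
 *		wait_event(ctl->wait, btrfs_block_group_done(cache));
 *		btrfs_put_caching_control(ctl);
 *	}
 */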

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	int progress;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	/*
	 * We've already failed to allocate from this block group, so even if
	 * there's enough space in the block group it isn't contiguous enough to
	 * allow for an allocation, so wait for at least the next wakeup tick,
	 * or for the thing to be done.
	 */
	progress = atomic_read(&caching_ctl->progress);

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (progress != atomic_read(&caching_ctl->progress) &&
		    (cache->free_space_ctl->free_space >= num_bytes)));

	btrfs_put_caching_control(caching_ctl);
}

static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
				       struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
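/*
 * Behaviour sketch (added, illustrative; not original documentation): this
 * debug helper removes every other chunk-sized range from the block group's
 * free space, e.g. with a 4K chunk the range [0, 16K) keeps only [4K, 8K)
 * and [12K, 16K) as free space.
 */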
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group. Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't be
 * used yet, since their free space will be released as soon as the transaction
 * commits.
 */
int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end,
		       u64 *total_added_ret)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size;
	int ret;

	if (total_added_ret)
		*total_added_ret = 0;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			if (ret)
				return ret;
			if (total_added_ret)
				*total_added_ret += size;
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		if (ret)
			return ret;
		if (total_added_ret)
			*total_added_ret += size;
	}

	return 0;
}
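
/*
 * Worked example (added, illustrative): for [start, end) = [0, 1M) with a
 * single excluded extent covering [256K, 512K), the loop adds [0, 256K) as
 * free space, skips the excluded range, the tail adds [512K, 1M), and
 * *total_added_ret ends up as 768K.
 */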

/*
 * Get an arbitrary extent item, index / max_index of the way through the
 * block group.
 *
 * @block_group: the block group to sample from
 * @index:       the integral step through the block group to grab from
 * @max_index:   the granularity of the sampling
 * @key:         return value parameter for the item we find
 *
 * Pre-conditions on indices:
 * 0 <= index <= max_index
 * 0 < max_index
 *
 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative
 * error code on error.
 */
static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
					  struct btrfs_block_group *block_group,
					  int index, int max_index,
					  struct btrfs_key *found_key)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	u64 search_offset;
	u64 search_end = block_group->start + block_group->length;
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret = 0;

	ASSERT(index >= 0);
	ASSERT(index <= max_index);
	ASSERT(max_index > 0);
	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
						       BTRFS_SUPER_INFO_OFFSET));

	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	search_offset = index * div_u64(block_group->length, max_index);
	search_key.objectid = block_group->start + search_offset;
	search_key.type = BTRFS_EXTENT_ITEM_KEY;
	search_key.offset = 0;

	btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
		/* Success; sampled an extent item in the block group */
		if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key->objectid >= block_group->start &&
		    found_key->objectid + found_key->offset <= search_end)
			break;

		/* We can't possibly find a valid extent item anymore */
		if (found_key->objectid >= search_end) {
			ret = 1;
			break;
		}
	}

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	btrfs_free_path(path);
	return ret;
}
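
/*
 * Sampling positions (added, illustrative): with max_index == 5, the computed
 * search offsets land at 0%, 20%, 40%, 60% and 80% of the block group's
 * length, so calls with index 0..4 probe evenly spaced points in it.
 */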

/*
 * Best effort attempt to compute a block group's size class while caching it.
 *
 * @block_group: the block group we are caching
 *
 * We cannot infer the size class while adding free space extents, because that
 * logic doesn't care about contiguous file extents (it doesn't differentiate
 * between a 100M extent and 100 contiguous 1M extents). So we need to read the
 * file extent items. Reading all of them is quite wasteful, because usually
 * only a handful are enough to give a good answer. Therefore, we just grab 5 of
 * them at even steps through the block group and pick the smallest size class
 * we see. Since size class is best effort, and not guaranteed in general,
 * inaccuracy is acceptable.
 *
 * To be more explicit about why this algorithm makes sense:
 *
 * If we are caching in a block group from disk, then there are three major cases
 * to consider:
 * 1. the block group is well behaved and all extents in it are the same size
 *    class.
 * 2. the block group is mostly one size class with rare exceptions for last
 *    ditch allocations
 * 3. the block group was populated before size classes and can have a totally
 *    arbitrary mix of size classes.
 *
 * In case 1, looking at any extent in the block group will yield the correct
 * result. For the mixed cases, taking the minimum size class seems like a good
 * approximation, since gaps from frees will be usable by that size class. For
 * 2, a small handful of file extents is likely to yield the right answer. For
 * 3, we can either read every file extent, or admit that this is best effort
 * anyway and try to stay fast.
 *
 * Returns: 0 on success, negative error code on error.
 */
static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
				       struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_key key;
	int i;
	u64 min_size = block_group->length;
	enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
	int ret;

	if (!btrfs_block_group_should_use_size_class(block_group))
		return 0;

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	for (i = 0; i < 5; ++i) {
		ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
		if (ret < 0)
			goto out;
		if (ret > 0)
			continue;
		min_size = min_t(u64, min_size, key.offset);
		size_class = btrfs_calc_block_group_size_class(min_size);
	}
	if (size_class != BTRFS_BG_SZ_NONE) {
		spin_lock(&block_group->lock);
		block_group->size_class = size_class;
		spin_unlock(&block_group->lock);
	}
out:
	return ret;
}
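
/*
 * Example (added, illustrative): if the five samples return extents of 8K,
 * 12M, 48K, 96K and 1M, min_size ends up as 8K and the block group is
 * classed by btrfs_calc_block_group_size_class(8K); the smallest sampled
 * extent decides the size class.
 */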

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
	extent_root = btrfs_extent_root(fs_info, last);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			u64 space_added;

			ret = add_new_free_space(block_group, last, key.objectid,
						 &space_added);
			if (ret)
				goto out;
			total_found += space_added;
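			/*
			 * Note (added, illustrative): for a METADATA_ITEM the
			 * key's offset is the tree level, not a byte count, so
			 * the extent spans exactly one nodesize.
			 */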
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup) {
					atomic_inc(&caching_ctl->progress);
					wake_up(&caching_ctl->wait);
				}
			}
		}
		path->slots[0]++;
	}

	ret = add_new_free_space(block_group, last,
				 block_group->start + block_group->length,
				 NULL);
out:
	btrfs_free_path(path);
	return ret;
}
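
/*
 * Note (added, illustrative): CACHING_CTL_WAKE_UP batches the wakeups above,
 * so waiters in btrfs_wait_block_group_cache_progress() only run after a
 * meaningful amount of new free space has been found rather than after every
 * extent item.
 */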

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	load_block_group_size_class(caching_ctl, block_group);
	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching. Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	refcount_set(&caching_ctl->count, 2);
	atomic_set(&caching_ctl->progress, 0);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	write_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	write_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (wait && caching_ctl)
		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *   in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}
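
/*
 * Usage sketch (added, illustrative): callers of btrfs_cache_block_group()
 * above can either kick off caching asynchronously or block until it
 * finishes, e.g.
 *
 *	ret = btrfs_cache_block_group(bg, false);	// start async caching
 *	...
 *	ret = btrfs_cache_block_group(bg, true);	// wait for done/error
 */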

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = btrfs_block_group_root(fs_info);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}
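
/*
 * Key layout note (added, illustrative): block group items are keyed as
 * (start, BTRFS_BLOCK_GROUP_ITEM_KEY, length), which is why the search above
 * uses the group's start as the objectid and its length as the offset.
 */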

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;
1127e3e0520bSJosef Bacik 112816b0c258SFilipe Manana write_lock(&fs_info->block_group_cache_lock); 112908dddb29SFilipe Manana rb_erase_cached(&block_group->cache_node, 1130e3e0520bSJosef Bacik &fs_info->block_group_cache_tree); 1131e3e0520bSJosef Bacik RB_CLEAR_NODE(&block_group->cache_node); 1132e3e0520bSJosef Bacik 11339fecd132SFilipe Manana /* Once for the block groups rbtree */ 11349fecd132SFilipe Manana btrfs_put_block_group(block_group); 11359fecd132SFilipe Manana 113616b0c258SFilipe Manana write_unlock(&fs_info->block_group_cache_lock); 1137e3e0520bSJosef Bacik 1138e3e0520bSJosef Bacik down_write(&block_group->space_info->groups_sem); 1139e3e0520bSJosef Bacik /* 1140e3e0520bSJosef Bacik * we must use list_del_init so people can check to see if they 1141e3e0520bSJosef Bacik * are still on the list after taking the semaphore 1142e3e0520bSJosef Bacik */ 1143e3e0520bSJosef Bacik list_del_init(&block_group->list); 1144e3e0520bSJosef Bacik if (list_empty(&block_group->space_info->block_groups[index])) { 1145e3e0520bSJosef Bacik kobj = block_group->space_info->block_group_kobjs[index]; 1146e3e0520bSJosef Bacik block_group->space_info->block_group_kobjs[index] = NULL; 1147e3e0520bSJosef Bacik clear_avail_alloc_bits(fs_info, block_group->flags); 1148e3e0520bSJosef Bacik } 1149e3e0520bSJosef Bacik up_write(&block_group->space_info->groups_sem); 1150e3e0520bSJosef Bacik clear_incompat_bg_bits(fs_info, block_group->flags); 1151e3e0520bSJosef Bacik if (kobj) { 1152e3e0520bSJosef Bacik kobject_del(kobj); 1153e3e0520bSJosef Bacik kobject_put(kobj); 1154e3e0520bSJosef Bacik } 1155e3e0520bSJosef Bacik 1156e3e0520bSJosef Bacik if (block_group->cached == BTRFS_CACHE_STARTED) 1157e3e0520bSJosef Bacik btrfs_wait_block_group_cache_done(block_group); 11587b9c293bSJosef Bacik 115916b0c258SFilipe Manana write_lock(&fs_info->block_group_cache_lock); 11607b9c293bSJosef Bacik caching_ctl = btrfs_get_caching_control(block_group); 1161e3e0520bSJosef Bacik if (!caching_ctl) { 1162e3e0520bSJosef Bacik struct btrfs_caching_control *ctl; 1163e3e0520bSJosef Bacik 11647b9c293bSJosef Bacik list_for_each_entry(ctl, &fs_info->caching_block_groups, list) { 1165e3e0520bSJosef Bacik if (ctl->block_group == block_group) { 1166e3e0520bSJosef Bacik caching_ctl = ctl; 1167e3e0520bSJosef Bacik refcount_inc(&caching_ctl->count); 1168e3e0520bSJosef Bacik break; 1169e3e0520bSJosef Bacik } 1170e3e0520bSJosef Bacik } 11717b9c293bSJosef Bacik } 1172e3e0520bSJosef Bacik if (caching_ctl) 1173e3e0520bSJosef Bacik list_del_init(&caching_ctl->list); 117416b0c258SFilipe Manana write_unlock(&fs_info->block_group_cache_lock); 11757b9c293bSJosef Bacik 1176e3e0520bSJosef Bacik if (caching_ctl) { 1177e3e0520bSJosef Bacik /* Once for the caching bgs list and once for us. 
*/ 1178e3e0520bSJosef Bacik btrfs_put_caching_control(caching_ctl); 1179e3e0520bSJosef Bacik btrfs_put_caching_control(caching_ctl); 1180e3e0520bSJosef Bacik } 1181e3e0520bSJosef Bacik 1182e3e0520bSJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 1183e3e0520bSJosef Bacik WARN_ON(!list_empty(&block_group->dirty_list)); 1184e3e0520bSJosef Bacik WARN_ON(!list_empty(&block_group->io_list)); 1185e3e0520bSJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 1186e3e0520bSJosef Bacik 1187e3e0520bSJosef Bacik btrfs_remove_free_space_cache(block_group); 1188e3e0520bSJosef Bacik 1189e3e0520bSJosef Bacik spin_lock(&block_group->space_info->lock); 1190e3e0520bSJosef Bacik list_del_init(&block_group->ro_list); 1191e3e0520bSJosef Bacik 1192e3e0520bSJosef Bacik if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 1193e3e0520bSJosef Bacik WARN_ON(block_group->space_info->total_bytes 1194b3470b5dSDavid Sterba < block_group->length); 1195e3e0520bSJosef Bacik WARN_ON(block_group->space_info->bytes_readonly 1196169e0da9SNaohiro Aota < block_group->length - block_group->zone_unusable); 1197169e0da9SNaohiro Aota WARN_ON(block_group->space_info->bytes_zone_unusable 1198169e0da9SNaohiro Aota < block_group->zone_unusable); 1199e3e0520bSJosef Bacik WARN_ON(block_group->space_info->disk_total 1200b3470b5dSDavid Sterba < block_group->length * factor); 1201e3e0520bSJosef Bacik } 1202b3470b5dSDavid Sterba block_group->space_info->total_bytes -= block_group->length; 1203169e0da9SNaohiro Aota block_group->space_info->bytes_readonly -= 1204169e0da9SNaohiro Aota (block_group->length - block_group->zone_unusable); 1205169e0da9SNaohiro Aota block_group->space_info->bytes_zone_unusable -= 1206169e0da9SNaohiro Aota block_group->zone_unusable; 1207b3470b5dSDavid Sterba block_group->space_info->disk_total -= block_group->length * factor; 1208e3e0520bSJosef Bacik 1209e3e0520bSJosef Bacik spin_unlock(&block_group->space_info->lock); 1210e3e0520bSJosef Bacik 1211ffcb9d44SFilipe Manana /* 1212ffcb9d44SFilipe Manana * Remove the free space for the block group from the free space tree 1213ffcb9d44SFilipe Manana * and the block group's item from the extent tree before marking the 1214ffcb9d44SFilipe Manana * block group as removed. This is to prevent races with tasks that 1215ffcb9d44SFilipe Manana * freeze and unfreeze a block group, this task and another task 1216ffcb9d44SFilipe Manana * allocating a new block group - the unfreeze task ends up removing 1217ffcb9d44SFilipe Manana * the block group's extent map before the task calling this function 1218ffcb9d44SFilipe Manana * deletes the block group item from the extent tree, allowing for 1219ffcb9d44SFilipe Manana * another task to attempt to create another block group with the same 1220ffcb9d44SFilipe Manana * item key (and failing with -EEXIST and a transaction abort). 
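 *
 * (Editor's note: illustrative sketch.) The item key in question is the
 * block group item key built by remove_block_group_item() above:
 *
 *     key.objectid = block_group->start;
 *     key.type     = BTRFS_BLOCK_GROUP_ITEM_KEY;
 *     key.offset   = block_group->length;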
1221ffcb9d44SFilipe Manana */
1222ffcb9d44SFilipe Manana ret = remove_block_group_free_space(trans, block_group);
1223ffcb9d44SFilipe Manana if (ret)
1224ffcb9d44SFilipe Manana goto out;
1225ffcb9d44SFilipe Manana 
1226ffcb9d44SFilipe Manana ret = remove_block_group_item(trans, path, block_group);
1227ffcb9d44SFilipe Manana if (ret < 0)
1228ffcb9d44SFilipe Manana goto out;
1229ffcb9d44SFilipe Manana 
1230e3e0520bSJosef Bacik spin_lock(&block_group->lock);
12313349b57fSJosef Bacik set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
12323349b57fSJosef Bacik 
1233e3e0520bSJosef Bacik /*
12346b7304afSFilipe Manana * At this point trimming or scrub can't start on this block group,
12356b7304afSFilipe Manana * because we removed the block group from the rbtree
12366b7304afSFilipe Manana * fs_info->block_group_cache_tree so no one can find it anymore, and
12376b7304afSFilipe Manana * even if someone already got this block group before we removed it
12386b7304afSFilipe Manana * from the rbtree, they have already incremented block_group->frozen -
12396b7304afSFilipe Manana * if they didn't, for the trimming case they won't find any free space
12406b7304afSFilipe Manana * entries because we already removed them all when we called
12416b7304afSFilipe Manana * btrfs_remove_free_space_cache().
1242e3e0520bSJosef Bacik *
1243e3e0520bSJosef Bacik * And we must not remove the extent map from the fs_info->mapping_tree
1244e3e0520bSJosef Bacik * to prevent the same logical address range and physical device space
12456b7304afSFilipe Manana * ranges from being reused for a new block group. This is needed to
12466b7304afSFilipe Manana * avoid races with trimming and scrub.
12476b7304afSFilipe Manana *
12486b7304afSFilipe Manana * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1249e3e0520bSJosef Bacik * completely transactionless, so while it is trimming a range the
1250e3e0520bSJosef Bacik * currently running transaction might finish and a new one start,
1251e3e0520bSJosef Bacik * allowing for new block groups to be created that can reuse the same
1252e3e0520bSJosef Bacik * physical device locations unless we take this special care.
1253e3e0520bSJosef Bacik *
1254e3e0520bSJosef Bacik * There may also be an implicit trim operation if the file system
1255e3e0520bSJosef Bacik * is mounted with -odiscard. The same protections must remain
1256e3e0520bSJosef Bacik * in place until the extents have been discarded completely when
1257e3e0520bSJosef Bacik * the transaction commit has completed.
1258e3e0520bSJosef Bacik */ 12596b7304afSFilipe Manana remove_em = (atomic_read(&block_group->frozen) == 0); 1260e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1261e3e0520bSJosef Bacik 1262e3e0520bSJosef Bacik if (remove_em) { 1263e3e0520bSJosef Bacik struct extent_map_tree *em_tree; 1264e3e0520bSJosef Bacik 1265e3e0520bSJosef Bacik em_tree = &fs_info->mapping_tree; 1266e3e0520bSJosef Bacik write_lock(&em_tree->lock); 1267e3e0520bSJosef Bacik remove_extent_mapping(em_tree, em); 1268e3e0520bSJosef Bacik write_unlock(&em_tree->lock); 1269e3e0520bSJosef Bacik /* once for the tree */ 1270e3e0520bSJosef Bacik free_extent_map(em); 1271e3e0520bSJosef Bacik } 1272f6033c5eSXiyu Yang 12739fecd132SFilipe Manana out: 1274f6033c5eSXiyu Yang /* Once for the lookup reference */ 1275f6033c5eSXiyu Yang btrfs_put_block_group(block_group); 1276e3e0520bSJosef Bacik if (remove_rsv) 1277e3e0520bSJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 1278e3e0520bSJosef Bacik btrfs_free_path(path); 1279e3e0520bSJosef Bacik return ret; 1280e3e0520bSJosef Bacik } 1281e3e0520bSJosef Bacik 1282e3e0520bSJosef Bacik struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 1283e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info, const u64 chunk_offset) 1284e3e0520bSJosef Bacik { 1285dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 1286e3e0520bSJosef Bacik struct extent_map_tree *em_tree = &fs_info->mapping_tree; 1287e3e0520bSJosef Bacik struct extent_map *em; 1288e3e0520bSJosef Bacik struct map_lookup *map; 1289e3e0520bSJosef Bacik unsigned int num_items; 1290e3e0520bSJosef Bacik 1291e3e0520bSJosef Bacik read_lock(&em_tree->lock); 1292e3e0520bSJosef Bacik em = lookup_extent_mapping(em_tree, chunk_offset, 1); 1293e3e0520bSJosef Bacik read_unlock(&em_tree->lock); 1294e3e0520bSJosef Bacik ASSERT(em && em->start == chunk_offset); 1295e3e0520bSJosef Bacik 1296e3e0520bSJosef Bacik /* 1297e3e0520bSJosef Bacik * We need to reserve 3 + N units from the metadata space info in order 1298e3e0520bSJosef Bacik * to remove a block group (done at btrfs_remove_chunk() and at 1299e3e0520bSJosef Bacik * btrfs_remove_block_group()), which are used for: 1300e3e0520bSJosef Bacik * 1301e3e0520bSJosef Bacik * 1 unit for adding the free space inode's orphan (located in the tree 1302e3e0520bSJosef Bacik * of tree roots). 1303e3e0520bSJosef Bacik * 1 unit for deleting the block group item (located in the extent 1304e3e0520bSJosef Bacik * tree). 1305e3e0520bSJosef Bacik * 1 unit for deleting the free space item (located in tree of tree 1306e3e0520bSJosef Bacik * roots). 1307e3e0520bSJosef Bacik * N units for deleting N device extent items corresponding to each 1308e3e0520bSJosef Bacik * stripe (located in the device tree). 1309e3e0520bSJosef Bacik * 1310e3e0520bSJosef Bacik * In order to remove a block group we also need to reserve units in the 1311e3e0520bSJosef Bacik * system space info in order to update the chunk tree (update one or 1312e3e0520bSJosef Bacik * more device items and remove one chunk item), but this is done at 1313e3e0520bSJosef Bacik * btrfs_remove_chunk() through a call to check_system_chunk(). 
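 *
 * (Editor's example, hypothetical numbers.) For a RAID1 chunk with two
 * stripes this works out to num_items = 3 + map->num_stripes = 3 + 2 = 5
 * metadata units reserved before the transaction is started.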
1314e3e0520bSJosef Bacik */
1315e3e0520bSJosef Bacik map = em->map_lookup;
1316e3e0520bSJosef Bacik num_items = 3 + map->num_stripes;
1317e3e0520bSJosef Bacik free_extent_map(em);
1318e3e0520bSJosef Bacik 
1319dfe8aec4SJosef Bacik return btrfs_start_transaction_fallback_global_rsv(root, num_items);
1320e3e0520bSJosef Bacik }
1321e3e0520bSJosef Bacik 
1322e3e0520bSJosef Bacik /*
132326ce2095SJosef Bacik * Mark block group @cache read-only, so later writes won't happen to block
132426ce2095SJosef Bacik * group @cache.
132526ce2095SJosef Bacik *
132626ce2095SJosef Bacik * If @force is not set, this function will only mark the block group readonly
132726ce2095SJosef Bacik * if we have enough free space (1M) in other metadata/system block groups.
132826ce2095SJosef Bacik * If @force is set, this function will mark the block group readonly
132926ce2095SJosef Bacik * without checking free space.
133026ce2095SJosef Bacik *
133126ce2095SJosef Bacik * NOTE: This function doesn't care if other block groups can contain all the
133226ce2095SJosef Bacik * data in this block group. That check should be done by relocation routine,
133326ce2095SJosef Bacik * not this function.
133426ce2095SJosef Bacik */
133532da5386SDavid Sterba static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
133626ce2095SJosef Bacik {
133726ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info;
133826ce2095SJosef Bacik u64 num_bytes;
133926ce2095SJosef Bacik int ret = -ENOSPC;
134026ce2095SJosef Bacik 
134126ce2095SJosef Bacik spin_lock(&sinfo->lock);
134226ce2095SJosef Bacik spin_lock(&cache->lock);
134326ce2095SJosef Bacik 
1344195a49eaSFilipe Manana if (cache->swap_extents) {
1345195a49eaSFilipe Manana ret = -ETXTBSY;
1346195a49eaSFilipe Manana goto out;
1347195a49eaSFilipe Manana }
1348195a49eaSFilipe Manana 
134926ce2095SJosef Bacik if (cache->ro) {
135026ce2095SJosef Bacik cache->ro++;
135126ce2095SJosef Bacik ret = 0;
135226ce2095SJosef Bacik goto out;
135326ce2095SJosef Bacik }
135426ce2095SJosef Bacik 
1355b3470b5dSDavid Sterba num_bytes = cache->length - cache->reserved - cache->pinned -
1356169e0da9SNaohiro Aota cache->bytes_super - cache->zone_unusable - cache->used;
135726ce2095SJosef Bacik 
135826ce2095SJosef Bacik /*
1359a30a3d20SJosef Bacik * Data never overcommits, even in mixed mode, so do just the straight
1360a30a3d20SJosef Bacik * check of left over space in how much we have allocated.
1361a30a3d20SJosef Bacik */
1362a30a3d20SJosef Bacik if (force) {
1363a30a3d20SJosef Bacik ret = 0;
1364a30a3d20SJosef Bacik } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
1365a30a3d20SJosef Bacik u64 sinfo_used = btrfs_space_info_used(sinfo, true);
1366a30a3d20SJosef Bacik 
1367a30a3d20SJosef Bacik /*
136826ce2095SJosef Bacik * Here we make sure if we mark this bg RO, we still have enough
1369f8935566SJosef Bacik * free space as buffer.
137026ce2095SJosef Bacik */
1371a30a3d20SJosef Bacik if (sinfo_used + num_bytes <= sinfo->total_bytes)
1372a30a3d20SJosef Bacik ret = 0;
1373a30a3d20SJosef Bacik } else {
1374a30a3d20SJosef Bacik /*
1375a30a3d20SJosef Bacik * We overcommit metadata, so we need to do the
1376a30a3d20SJosef Bacik * btrfs_can_overcommit check here, and we need to pass in
1377a30a3d20SJosef Bacik * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
1378a30a3d20SJosef Bacik * leeway to allow us to mark this block group as read only.
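 *
 * (Editor's sketch of the two checks implemented just below:)
 *
 *     data (never overcommits):
 *         btrfs_space_info_used(sinfo, true) + num_bytes <= sinfo->total_bytes
 *     metadata/system (may overcommit):
 *         btrfs_can_overcommit(fs_info, sinfo, num_bytes, BTRFS_RESERVE_NO_FLUSH)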
1379a30a3d20SJosef Bacik */
1380a30a3d20SJosef Bacik if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
1381a30a3d20SJosef Bacik BTRFS_RESERVE_NO_FLUSH))
1382a30a3d20SJosef Bacik ret = 0;
1383a30a3d20SJosef Bacik }
1384a30a3d20SJosef Bacik 
1385a30a3d20SJosef Bacik if (!ret) {
138626ce2095SJosef Bacik sinfo->bytes_readonly += num_bytes;
1387169e0da9SNaohiro Aota if (btrfs_is_zoned(cache->fs_info)) {
1388169e0da9SNaohiro Aota /* Migrate zone_unusable bytes to readonly */
1389169e0da9SNaohiro Aota sinfo->bytes_readonly += cache->zone_unusable;
1390169e0da9SNaohiro Aota sinfo->bytes_zone_unusable -= cache->zone_unusable;
1391169e0da9SNaohiro Aota cache->zone_unusable = 0;
1392169e0da9SNaohiro Aota }
139326ce2095SJosef Bacik cache->ro++;
139426ce2095SJosef Bacik list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
139526ce2095SJosef Bacik }
139626ce2095SJosef Bacik out:
139726ce2095SJosef Bacik spin_unlock(&cache->lock);
139826ce2095SJosef Bacik spin_unlock(&sinfo->lock);
139926ce2095SJosef Bacik if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
140026ce2095SJosef Bacik btrfs_info(cache->fs_info,
1401b3470b5dSDavid Sterba "unable to make block group %llu ro", cache->start);
140226ce2095SJosef Bacik btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
140326ce2095SJosef Bacik }
140426ce2095SJosef Bacik return ret;
140526ce2095SJosef Bacik }
140626ce2095SJosef Bacik 
1407fe119a6eSNikolay Borisov static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
1408fe119a6eSNikolay Borisov struct btrfs_block_group *bg)
140945bb5d6aSNikolay Borisov {
141045bb5d6aSNikolay Borisov struct btrfs_fs_info *fs_info = bg->fs_info;
1411fe119a6eSNikolay Borisov struct btrfs_transaction *prev_trans = NULL;
141245bb5d6aSNikolay Borisov const u64 start = bg->start;
141345bb5d6aSNikolay Borisov const u64 end = start + bg->length - 1;
141445bb5d6aSNikolay Borisov int ret;
141545bb5d6aSNikolay Borisov 
1416fe119a6eSNikolay Borisov spin_lock(&fs_info->trans_lock);
1417fe119a6eSNikolay Borisov if (trans->transaction->list.prev != &fs_info->trans_list) {
1418fe119a6eSNikolay Borisov prev_trans = list_last_entry(&trans->transaction->list,
1419fe119a6eSNikolay Borisov struct btrfs_transaction, list);
1420fe119a6eSNikolay Borisov refcount_inc(&prev_trans->use_count);
1421fe119a6eSNikolay Borisov }
1422fe119a6eSNikolay Borisov spin_unlock(&fs_info->trans_lock);
1423fe119a6eSNikolay Borisov 
142445bb5d6aSNikolay Borisov /*
142545bb5d6aSNikolay Borisov * Hold the unused_bg_unpin_mutex lock to avoid racing with
142645bb5d6aSNikolay Borisov * btrfs_finish_extent_commit(). If we are at transaction N, another
142745bb5d6aSNikolay Borisov * task might be running finish_extent_commit() for the previous
142845bb5d6aSNikolay Borisov * transaction N - 1, and have seen a range belonging to the block
1429fe119a6eSNikolay Borisov * group in pinned_extents before we were able to clear the whole block
1430fe119a6eSNikolay Borisov * group range from pinned_extents. This means that task can look up
1431fe119a6eSNikolay Borisov * the block group after we unpinned it from pinned_extents and removed
1432fe119a6eSNikolay Borisov * it, leading to a BUG_ON() at unpin_extent_range().
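 *
 * (Editor's sketch.) The guarded sequence below is therefore:
 *
 *     mutex_lock(&fs_info->unused_bg_unpin_mutex);
 *     clear_extent_bits(&prev_trans->pinned_extents, start, end, EXTENT_DIRTY);
 *     clear_extent_bits(&trans->transaction->pinned_extents, start, end, EXTENT_DIRTY);
 *     mutex_unlock(&fs_info->unused_bg_unpin_mutex);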
143345bb5d6aSNikolay Borisov */ 143445bb5d6aSNikolay Borisov mutex_lock(&fs_info->unused_bg_unpin_mutex); 1435fe119a6eSNikolay Borisov if (prev_trans) { 1436fe119a6eSNikolay Borisov ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, 143745bb5d6aSNikolay Borisov EXTENT_DIRTY); 143845bb5d6aSNikolay Borisov if (ret) 1439534cf531SFilipe Manana goto out; 1440fe119a6eSNikolay Borisov } 144145bb5d6aSNikolay Borisov 1442fe119a6eSNikolay Borisov ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, 144345bb5d6aSNikolay Borisov EXTENT_DIRTY); 1444534cf531SFilipe Manana out: 144545bb5d6aSNikolay Borisov mutex_unlock(&fs_info->unused_bg_unpin_mutex); 14465150bf19SFilipe Manana if (prev_trans) 14475150bf19SFilipe Manana btrfs_put_transaction(prev_trans); 144845bb5d6aSNikolay Borisov 1449534cf531SFilipe Manana return ret == 0; 145045bb5d6aSNikolay Borisov } 145145bb5d6aSNikolay Borisov 145226ce2095SJosef Bacik /* 1453e3e0520bSJosef Bacik * Process the unused_bgs list and remove any that don't have any allocated 1454e3e0520bSJosef Bacik * space inside of them. 1455e3e0520bSJosef Bacik */ 1456e3e0520bSJosef Bacik void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1457e3e0520bSJosef Bacik { 145832da5386SDavid Sterba struct btrfs_block_group *block_group; 1459e3e0520bSJosef Bacik struct btrfs_space_info *space_info; 1460e3e0520bSJosef Bacik struct btrfs_trans_handle *trans; 14616e80d4f8SDennis Zhou const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); 1462e3e0520bSJosef Bacik int ret = 0; 1463e3e0520bSJosef Bacik 1464e3e0520bSJosef Bacik if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1465e3e0520bSJosef Bacik return; 1466e3e0520bSJosef Bacik 14672f12741fSJosef Bacik if (btrfs_fs_closing(fs_info)) 14682f12741fSJosef Bacik return; 14692f12741fSJosef Bacik 1470ddfd08cbSJosef Bacik /* 1471ddfd08cbSJosef Bacik * Long running balances can keep us blocked here for eternity, so 1472ddfd08cbSJosef Bacik * simply skip deletion if we're unable to get the mutex. 1473ddfd08cbSJosef Bacik */ 1474f3372065SJohannes Thumshirn if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1475ddfd08cbSJosef Bacik return; 1476ddfd08cbSJosef Bacik 1477e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1478e3e0520bSJosef Bacik while (!list_empty(&fs_info->unused_bgs)) { 1479e3e0520bSJosef Bacik int trimming; 1480e3e0520bSJosef Bacik 1481e3e0520bSJosef Bacik block_group = list_first_entry(&fs_info->unused_bgs, 148232da5386SDavid Sterba struct btrfs_block_group, 1483e3e0520bSJosef Bacik bg_list); 1484e3e0520bSJosef Bacik list_del_init(&block_group->bg_list); 1485e3e0520bSJosef Bacik 1486e3e0520bSJosef Bacik space_info = block_group->space_info; 1487e3e0520bSJosef Bacik 1488e3e0520bSJosef Bacik if (ret || btrfs_mixed_space_info(space_info)) { 1489e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1490e3e0520bSJosef Bacik continue; 1491e3e0520bSJosef Bacik } 1492e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1493e3e0520bSJosef Bacik 1494b0643e59SDennis Zhou btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 1495b0643e59SDennis Zhou 1496e3e0520bSJosef Bacik /* Don't want to race with allocators so take the groups_sem */ 1497e3e0520bSJosef Bacik down_write(&space_info->groups_sem); 14986e80d4f8SDennis Zhou 14996e80d4f8SDennis Zhou /* 15006e80d4f8SDennis Zhou * Async discard moves the final block group discard to be prior 15016e80d4f8SDennis Zhou * to the unused_bgs code path. 
Therefore, if it's not fully 15026e80d4f8SDennis Zhou * trimmed, punt it back to the async discard lists. 15036e80d4f8SDennis Zhou */ 15046e80d4f8SDennis Zhou if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && 15056e80d4f8SDennis Zhou !btrfs_is_free_space_trimmed(block_group)) { 15066e80d4f8SDennis Zhou trace_btrfs_skip_unused_block_group(block_group); 15076e80d4f8SDennis Zhou up_write(&space_info->groups_sem); 15086e80d4f8SDennis Zhou /* Requeue if we failed because of async discard */ 15096e80d4f8SDennis Zhou btrfs_discard_queue_work(&fs_info->discard_ctl, 15106e80d4f8SDennis Zhou block_group); 15116e80d4f8SDennis Zhou goto next; 15126e80d4f8SDennis Zhou } 15136e80d4f8SDennis Zhou 1514e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1515e3e0520bSJosef Bacik if (block_group->reserved || block_group->pinned || 1516bf38be65SDavid Sterba block_group->used || block_group->ro || 1517e3e0520bSJosef Bacik list_is_singular(&block_group->list)) { 1518e3e0520bSJosef Bacik /* 1519e3e0520bSJosef Bacik * We want to bail if we made new allocations or have 1520e3e0520bSJosef Bacik * outstanding allocations in this block group. We do 1521e3e0520bSJosef Bacik * the ro check in case balance is currently acting on 1522e3e0520bSJosef Bacik * this block group. 1523e3e0520bSJosef Bacik */ 1524e3e0520bSJosef Bacik trace_btrfs_skip_unused_block_group(block_group); 1525e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1526e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1527e3e0520bSJosef Bacik goto next; 1528e3e0520bSJosef Bacik } 1529e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1530e3e0520bSJosef Bacik 1531e3e0520bSJosef Bacik /* We don't want to force the issue, only flip if it's ok. */ 1532e11c0406SJosef Bacik ret = inc_block_group_ro(block_group, 0); 1533e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1534e3e0520bSJosef Bacik if (ret < 0) { 1535e3e0520bSJosef Bacik ret = 0; 1536e3e0520bSJosef Bacik goto next; 1537e3e0520bSJosef Bacik } 1538e3e0520bSJosef Bacik 153974e91b12SNaohiro Aota ret = btrfs_zone_finish(block_group); 154074e91b12SNaohiro Aota if (ret < 0) { 154174e91b12SNaohiro Aota btrfs_dec_block_group_ro(block_group); 154274e91b12SNaohiro Aota if (ret == -EAGAIN) 154374e91b12SNaohiro Aota ret = 0; 154474e91b12SNaohiro Aota goto next; 154574e91b12SNaohiro Aota } 154674e91b12SNaohiro Aota 1547e3e0520bSJosef Bacik /* 1548e3e0520bSJosef Bacik * Want to do this before we do anything else so we can recover 1549e3e0520bSJosef Bacik * properly if we fail to join the transaction. 1550e3e0520bSJosef Bacik */ 1551e3e0520bSJosef Bacik trans = btrfs_start_trans_remove_block_group(fs_info, 1552b3470b5dSDavid Sterba block_group->start); 1553e3e0520bSJosef Bacik if (IS_ERR(trans)) { 1554e3e0520bSJosef Bacik btrfs_dec_block_group_ro(block_group); 1555e3e0520bSJosef Bacik ret = PTR_ERR(trans); 1556e3e0520bSJosef Bacik goto next; 1557e3e0520bSJosef Bacik } 1558e3e0520bSJosef Bacik 1559e3e0520bSJosef Bacik /* 1560e3e0520bSJosef Bacik * We could have pending pinned extents for this block group, 1561e3e0520bSJosef Bacik * just delete them, we don't care about them anymore. 1562e3e0520bSJosef Bacik */ 1563534cf531SFilipe Manana if (!clean_pinned_extents(trans, block_group)) { 1564534cf531SFilipe Manana btrfs_dec_block_group_ro(block_group); 1565e3e0520bSJosef Bacik goto end_trans; 1566534cf531SFilipe Manana } 1567e3e0520bSJosef Bacik 1568b0643e59SDennis Zhou /* 1569b0643e59SDennis Zhou * At this point, the block_group is read only and should fail 1570b0643e59SDennis Zhou * new allocations. 
However, btrfs_finish_extent_commit() can 1571b0643e59SDennis Zhou * cause this block_group to be placed back on the discard 1572b0643e59SDennis Zhou * lists because now the block_group isn't fully discarded. 1573b0643e59SDennis Zhou * Bail here and try again later after discarding everything. 1574b0643e59SDennis Zhou */ 1575b0643e59SDennis Zhou spin_lock(&fs_info->discard_ctl.lock); 1576b0643e59SDennis Zhou if (!list_empty(&block_group->discard_list)) { 1577b0643e59SDennis Zhou spin_unlock(&fs_info->discard_ctl.lock); 1578b0643e59SDennis Zhou btrfs_dec_block_group_ro(block_group); 1579b0643e59SDennis Zhou btrfs_discard_queue_work(&fs_info->discard_ctl, 1580b0643e59SDennis Zhou block_group); 1581b0643e59SDennis Zhou goto end_trans; 1582b0643e59SDennis Zhou } 1583b0643e59SDennis Zhou spin_unlock(&fs_info->discard_ctl.lock); 1584b0643e59SDennis Zhou 1585e3e0520bSJosef Bacik /* Reset pinned so btrfs_put_block_group doesn't complain */ 1586e3e0520bSJosef Bacik spin_lock(&space_info->lock); 1587e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1588e3e0520bSJosef Bacik 1589e3e0520bSJosef Bacik btrfs_space_info_update_bytes_pinned(fs_info, space_info, 1590e3e0520bSJosef Bacik -block_group->pinned); 1591e3e0520bSJosef Bacik space_info->bytes_readonly += block_group->pinned; 1592e3e0520bSJosef Bacik block_group->pinned = 0; 1593e3e0520bSJosef Bacik 1594e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1595e3e0520bSJosef Bacik spin_unlock(&space_info->lock); 1596e3e0520bSJosef Bacik 15976e80d4f8SDennis Zhou /* 15986e80d4f8SDennis Zhou * The normal path here is an unused block group is passed here, 15996e80d4f8SDennis Zhou * then trimming is handled in the transaction commit path. 16006e80d4f8SDennis Zhou * Async discard interposes before this to do the trimming 16016e80d4f8SDennis Zhou * before coming down the unused block group path as trimming 16026e80d4f8SDennis Zhou * will no longer be done later in the transaction commit path. 16036e80d4f8SDennis Zhou */ 16046e80d4f8SDennis Zhou if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) 16056e80d4f8SDennis Zhou goto flip_async; 16066e80d4f8SDennis Zhou 1607dcba6e48SNaohiro Aota /* 1608dcba6e48SNaohiro Aota * DISCARD can flip during remount. On zoned filesystems, we 1609dcba6e48SNaohiro Aota * need to reset sequential-required zones. 1610dcba6e48SNaohiro Aota */ 1611dcba6e48SNaohiro Aota trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) || 1612dcba6e48SNaohiro Aota btrfs_is_zoned(fs_info); 1613e3e0520bSJosef Bacik 1614e3e0520bSJosef Bacik /* Implicit trim during transaction commit. */ 1615e3e0520bSJosef Bacik if (trimming) 16166b7304afSFilipe Manana btrfs_freeze_block_group(block_group); 1617e3e0520bSJosef Bacik 1618e3e0520bSJosef Bacik /* 1619e3e0520bSJosef Bacik * Btrfs_remove_chunk will abort the transaction if things go 1620e3e0520bSJosef Bacik * horribly wrong. 1621e3e0520bSJosef Bacik */ 1622b3470b5dSDavid Sterba ret = btrfs_remove_chunk(trans, block_group->start); 1623e3e0520bSJosef Bacik 1624e3e0520bSJosef Bacik if (ret) { 1625e3e0520bSJosef Bacik if (trimming) 16266b7304afSFilipe Manana btrfs_unfreeze_block_group(block_group); 1627e3e0520bSJosef Bacik goto end_trans; 1628e3e0520bSJosef Bacik } 1629e3e0520bSJosef Bacik 1630e3e0520bSJosef Bacik /* 1631e3e0520bSJosef Bacik * If we're not mounted with -odiscard, we can just forget 1632e3e0520bSJosef Bacik * about this block group. Otherwise we'll need to wait 1633e3e0520bSJosef Bacik * until transaction commit to do the actual discard. 
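 *
 * (Editor's sketch, assuming the frozen-counter behaviour described in
 * btrfs_remove_block_group() above:)
 *
 *     btrfs_freeze_block_group(bg);    // frozen > 0 keeps the extent map
 *     ...                              // trim may span a transaction commit
 *     btrfs_unfreeze_block_group(bg);  // last unfreeze of a removed group
 *                                      // can finally drop the extent map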
1634e3e0520bSJosef Bacik */ 1635e3e0520bSJosef Bacik if (trimming) { 1636e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1637e3e0520bSJosef Bacik /* 1638e3e0520bSJosef Bacik * A concurrent scrub might have added us to the list 1639e3e0520bSJosef Bacik * fs_info->unused_bgs, so use a list_move operation 1640e3e0520bSJosef Bacik * to add the block group to the deleted_bgs list. 1641e3e0520bSJosef Bacik */ 1642e3e0520bSJosef Bacik list_move(&block_group->bg_list, 1643e3e0520bSJosef Bacik &trans->transaction->deleted_bgs); 1644e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1645e3e0520bSJosef Bacik btrfs_get_block_group(block_group); 1646e3e0520bSJosef Bacik } 1647e3e0520bSJosef Bacik end_trans: 1648e3e0520bSJosef Bacik btrfs_end_transaction(trans); 1649e3e0520bSJosef Bacik next: 1650e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1651e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1652e3e0520bSJosef Bacik } 1653e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1654f3372065SJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 16556e80d4f8SDennis Zhou return; 16566e80d4f8SDennis Zhou 16576e80d4f8SDennis Zhou flip_async: 16586e80d4f8SDennis Zhou btrfs_end_transaction(trans); 1659f3372065SJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 16606e80d4f8SDennis Zhou btrfs_put_block_group(block_group); 16616e80d4f8SDennis Zhou btrfs_discard_punt_unused_bgs_list(fs_info); 1662e3e0520bSJosef Bacik } 1663e3e0520bSJosef Bacik 166432da5386SDavid Sterba void btrfs_mark_bg_unused(struct btrfs_block_group *bg) 1665e3e0520bSJosef Bacik { 1666e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info = bg->fs_info; 1667e3e0520bSJosef Bacik 1668e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1669e3e0520bSJosef Bacik if (list_empty(&bg->bg_list)) { 1670e3e0520bSJosef Bacik btrfs_get_block_group(bg); 16710657b20cSFilipe Manana trace_btrfs_add_unused_block_group(bg); 1672e3e0520bSJosef Bacik list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 16730657b20cSFilipe Manana } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { 1674a9f18971SNaohiro Aota /* Pull out the block group from the reclaim_bgs list. */ 16750657b20cSFilipe Manana trace_btrfs_add_unused_block_group(bg); 1676a9f18971SNaohiro Aota list_move_tail(&bg->bg_list, &fs_info->unused_bgs); 1677e3e0520bSJosef Bacik } 1678e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1679e3e0520bSJosef Bacik } 16804358d963SJosef Bacik 16812ca0ec77SJohannes Thumshirn /* 16822ca0ec77SJohannes Thumshirn * We want block groups with a low number of used bytes to be in the beginning 16832ca0ec77SJohannes Thumshirn * of the list, so they will get reclaimed first. 
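 *
 * (Editor's note.) list_sort() treats a positive comparator return as
 * "a sorts after b", so returning bg1->used > bg2->used below yields
 * ascending order by used bytes.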
16842ca0ec77SJohannes Thumshirn */
16852ca0ec77SJohannes Thumshirn static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
16862ca0ec77SJohannes Thumshirn const struct list_head *b)
16872ca0ec77SJohannes Thumshirn {
16882ca0ec77SJohannes Thumshirn const struct btrfs_block_group *bg1, *bg2;
16892ca0ec77SJohannes Thumshirn 
16902ca0ec77SJohannes Thumshirn bg1 = list_entry(a, struct btrfs_block_group, bg_list);
16912ca0ec77SJohannes Thumshirn bg2 = list_entry(b, struct btrfs_block_group, bg_list);
16922ca0ec77SJohannes Thumshirn 
16932ca0ec77SJohannes Thumshirn return bg1->used > bg2->used;
16942ca0ec77SJohannes Thumshirn }
16952ca0ec77SJohannes Thumshirn 
16963687fcb0SJohannes Thumshirn static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
16973687fcb0SJohannes Thumshirn {
16983687fcb0SJohannes Thumshirn if (btrfs_is_zoned(fs_info))
16993687fcb0SJohannes Thumshirn return btrfs_zoned_should_reclaim(fs_info);
17003687fcb0SJohannes Thumshirn return true;
17013687fcb0SJohannes Thumshirn }
17023687fcb0SJohannes Thumshirn 
170381531225SBoris Burkov static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
170481531225SBoris Burkov {
170581531225SBoris Burkov const struct btrfs_space_info *space_info = bg->space_info;
170681531225SBoris Burkov const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
170781531225SBoris Burkov const u64 new_val = bg->used;
170881531225SBoris Burkov const u64 old_val = new_val + bytes_freed;
170981531225SBoris Burkov u64 thresh;
171081531225SBoris Burkov 
171181531225SBoris Burkov if (reclaim_thresh == 0)
171281531225SBoris Burkov return false;
171381531225SBoris Burkov 
1714428c8e03SDavid Sterba thresh = mult_perc(bg->length, reclaim_thresh);
171581531225SBoris Burkov 
171681531225SBoris Burkov /*
171781531225SBoris Burkov * If we were below the threshold before, don't reclaim: we are likely a
171881531225SBoris Burkov * brand new block group and we don't want to relocate new block groups.
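 *
 * (Editor's example, made-up numbers: for a 1 GiB block group with
 * bg_reclaim_threshold == 75, thresh is 768 MiB. A group that held
 * 800 MiB and dropped to 700 MiB is reclaimed; one created with only
 * 10 MiB used never had old_val >= thresh and is skipped.)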
171981531225SBoris Burkov */ 172081531225SBoris Burkov if (old_val < thresh) 172181531225SBoris Burkov return false; 172281531225SBoris Burkov if (new_val >= thresh) 172381531225SBoris Burkov return false; 172481531225SBoris Burkov return true; 172581531225SBoris Burkov } 172681531225SBoris Burkov 172718bb8bbfSJohannes Thumshirn void btrfs_reclaim_bgs_work(struct work_struct *work) 172818bb8bbfSJohannes Thumshirn { 172918bb8bbfSJohannes Thumshirn struct btrfs_fs_info *fs_info = 173018bb8bbfSJohannes Thumshirn container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 173118bb8bbfSJohannes Thumshirn struct btrfs_block_group *bg; 173218bb8bbfSJohannes Thumshirn struct btrfs_space_info *space_info; 173318bb8bbfSJohannes Thumshirn 173418bb8bbfSJohannes Thumshirn if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 173518bb8bbfSJohannes Thumshirn return; 173618bb8bbfSJohannes Thumshirn 17372f12741fSJosef Bacik if (btrfs_fs_closing(fs_info)) 17382f12741fSJosef Bacik return; 17392f12741fSJosef Bacik 17403687fcb0SJohannes Thumshirn if (!btrfs_should_reclaim(fs_info)) 17413687fcb0SJohannes Thumshirn return; 17423687fcb0SJohannes Thumshirn 1743ca5e4ea0SNaohiro Aota sb_start_write(fs_info->sb); 1744ca5e4ea0SNaohiro Aota 1745ca5e4ea0SNaohiro Aota if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 1746ca5e4ea0SNaohiro Aota sb_end_write(fs_info->sb); 174718bb8bbfSJohannes Thumshirn return; 1748ca5e4ea0SNaohiro Aota } 174918bb8bbfSJohannes Thumshirn 17509cc0b837SJohannes Thumshirn /* 17519cc0b837SJohannes Thumshirn * Long running balances can keep us blocked here for eternity, so 17529cc0b837SJohannes Thumshirn * simply skip reclaim if we're unable to get the mutex. 17539cc0b837SJohannes Thumshirn */ 17549cc0b837SJohannes Thumshirn if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { 17559cc0b837SJohannes Thumshirn btrfs_exclop_finish(fs_info); 1756ca5e4ea0SNaohiro Aota sb_end_write(fs_info->sb); 17579cc0b837SJohannes Thumshirn return; 17589cc0b837SJohannes Thumshirn } 17599cc0b837SJohannes Thumshirn 176018bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 17612ca0ec77SJohannes Thumshirn /* 17622ca0ec77SJohannes Thumshirn * Sort happens under lock because we can't simply splice it and sort. 17632ca0ec77SJohannes Thumshirn * The block groups might still be in use and reachable via bg_list, 17642ca0ec77SJohannes Thumshirn * and their presence in the reclaim_bgs list must be preserved. 
17652ca0ec77SJohannes Thumshirn */ 17662ca0ec77SJohannes Thumshirn list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); 176718bb8bbfSJohannes Thumshirn while (!list_empty(&fs_info->reclaim_bgs)) { 17685f93e776SJohannes Thumshirn u64 zone_unusable; 17691cea5cf0SFilipe Manana int ret = 0; 17701cea5cf0SFilipe Manana 177118bb8bbfSJohannes Thumshirn bg = list_first_entry(&fs_info->reclaim_bgs, 177218bb8bbfSJohannes Thumshirn struct btrfs_block_group, 177318bb8bbfSJohannes Thumshirn bg_list); 177418bb8bbfSJohannes Thumshirn list_del_init(&bg->bg_list); 177518bb8bbfSJohannes Thumshirn 177618bb8bbfSJohannes Thumshirn space_info = bg->space_info; 177718bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 177818bb8bbfSJohannes Thumshirn 177918bb8bbfSJohannes Thumshirn /* Don't race with allocators so take the groups_sem */ 178018bb8bbfSJohannes Thumshirn down_write(&space_info->groups_sem); 178118bb8bbfSJohannes Thumshirn 178218bb8bbfSJohannes Thumshirn spin_lock(&bg->lock); 178318bb8bbfSJohannes Thumshirn if (bg->reserved || bg->pinned || bg->ro) { 178418bb8bbfSJohannes Thumshirn /* 178518bb8bbfSJohannes Thumshirn * We want to bail if we made new allocations or have 178618bb8bbfSJohannes Thumshirn * outstanding allocations in this block group. We do 178718bb8bbfSJohannes Thumshirn * the ro check in case balance is currently acting on 178818bb8bbfSJohannes Thumshirn * this block group. 178918bb8bbfSJohannes Thumshirn */ 179018bb8bbfSJohannes Thumshirn spin_unlock(&bg->lock); 179118bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem); 179218bb8bbfSJohannes Thumshirn goto next; 179318bb8bbfSJohannes Thumshirn } 1794cc4804bfSBoris Burkov if (bg->used == 0) { 1795cc4804bfSBoris Burkov /* 1796cc4804bfSBoris Burkov * It is possible that we trigger relocation on a block 1797cc4804bfSBoris Burkov * group as its extents are deleted and it first goes 1798cc4804bfSBoris Burkov * below the threshold, then shortly after goes empty. 1799cc4804bfSBoris Burkov * 1800cc4804bfSBoris Burkov * In this case, relocating it does delete it, but has 1801cc4804bfSBoris Burkov * some overhead in relocation specific metadata, looking 1802cc4804bfSBoris Burkov * for the non-existent extents and running some extra 1803cc4804bfSBoris Burkov * transactions, which we can avoid by using one of the 1804cc4804bfSBoris Burkov * other mechanisms for dealing with empty block groups. 1805cc4804bfSBoris Burkov */ 1806cc4804bfSBoris Burkov if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1807cc4804bfSBoris Burkov btrfs_mark_bg_unused(bg); 1808cc4804bfSBoris Burkov spin_unlock(&bg->lock); 1809cc4804bfSBoris Burkov up_write(&space_info->groups_sem); 1810cc4804bfSBoris Burkov goto next; 181181531225SBoris Burkov 181281531225SBoris Burkov } 181381531225SBoris Burkov /* 181481531225SBoris Burkov * The block group might no longer meet the reclaim condition by 181581531225SBoris Burkov * the time we get around to reclaiming it, so to avoid 181681531225SBoris Burkov * reclaiming overly full block_groups, skip reclaiming them. 181781531225SBoris Burkov * 181881531225SBoris Burkov * Since the decision making process also depends on the amount 181981531225SBoris Burkov * being freed, pass in a fake giant value to skip that extra 182081531225SBoris Burkov * check, which is more meaningful when adding to the list in 182181531225SBoris Burkov * the first place. 
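 *
 * (Editor's note.) Passing bg->length as bytes_freed makes
 * old_val = used + length, which cannot fall below thresh as long as
 * the threshold is at most 100%, so only the new_val >= thresh test
 * (or a zero threshold) can reject a group here.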
182281531225SBoris Burkov */
182381531225SBoris Burkov if (!should_reclaim_block_group(bg, bg->length)) {
182481531225SBoris Burkov spin_unlock(&bg->lock);
182581531225SBoris Burkov up_write(&space_info->groups_sem);
182681531225SBoris Burkov goto next;
1827cc4804bfSBoris Burkov }
182818bb8bbfSJohannes Thumshirn spin_unlock(&bg->lock);
182918bb8bbfSJohannes Thumshirn 
183093463ff7SNaohiro Aota /*
183193463ff7SNaohiro Aota * Get out fast, in case we're read-only or unmounting the
183293463ff7SNaohiro Aota * filesystem. It is OK to drop block groups from the list even
183393463ff7SNaohiro Aota * for the read-only case. As we did sb_start_write(),
183493463ff7SNaohiro Aota * "mount -o remount,ro" won't happen and read-only filesystem
183593463ff7SNaohiro Aota * means it is forced read-only due to a fatal error. So, it
183693463ff7SNaohiro Aota * never gets back to read-write to let us reclaim again.
183793463ff7SNaohiro Aota */
183893463ff7SNaohiro Aota if (btrfs_need_cleaner_sleep(fs_info)) {
183918bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem);
184018bb8bbfSJohannes Thumshirn goto next;
184118bb8bbfSJohannes Thumshirn }
184218bb8bbfSJohannes Thumshirn 
18435f93e776SJohannes Thumshirn /*
18445f93e776SJohannes Thumshirn * Cache the zone_unusable value before turning the block group
18455f93e776SJohannes Thumshirn * to read only. As soon as the block group is read only its
18465f93e776SJohannes Thumshirn * zone_unusable value gets moved to the block group's read-only
18475f93e776SJohannes Thumshirn * bytes and isn't available for calculations anymore.
18485f93e776SJohannes Thumshirn */
18495f93e776SJohannes Thumshirn zone_unusable = bg->zone_unusable;
185018bb8bbfSJohannes Thumshirn ret = inc_block_group_ro(bg, 0);
185118bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem);
185218bb8bbfSJohannes Thumshirn if (ret < 0)
185318bb8bbfSJohannes Thumshirn goto next;
185418bb8bbfSJohannes Thumshirn 
18555f93e776SJohannes Thumshirn btrfs_info(fs_info,
18565f93e776SJohannes Thumshirn "reclaiming chunk %llu with %llu%% used %llu%% unusable",
185795cd356cSJohannes Thumshirn bg->start,
185895cd356cSJohannes Thumshirn div64_u64(bg->used * 100, bg->length),
18595f93e776SJohannes Thumshirn div64_u64(zone_unusable * 100, bg->length));
186018bb8bbfSJohannes Thumshirn trace_btrfs_reclaim_block_group(bg);
186118bb8bbfSJohannes Thumshirn ret = btrfs_relocate_chunk(fs_info, bg->start);
186274944c87SJosef Bacik if (ret) {
186374944c87SJosef Bacik btrfs_dec_block_group_ro(bg);
186418bb8bbfSJohannes Thumshirn btrfs_err(fs_info, "error relocating chunk %llu",
186518bb8bbfSJohannes Thumshirn bg->start);
186674944c87SJosef Bacik }
186718bb8bbfSJohannes Thumshirn 
186818bb8bbfSJohannes Thumshirn next:
18697e271809SNaohiro Aota if (ret)
18707e271809SNaohiro Aota btrfs_mark_bg_to_reclaim(bg);
18711cea5cf0SFilipe Manana btrfs_put_block_group(bg);
18723ed01616SNaohiro Aota 
18733ed01616SNaohiro Aota mutex_unlock(&fs_info->reclaim_bgs_lock);
18743ed01616SNaohiro Aota /*
18753ed01616SNaohiro Aota * Reclaiming all the block groups in the list can take really
18763ed01616SNaohiro Aota * long. Prioritize cleaning up unused block groups.
18773ed01616SNaohiro Aota */
18783ed01616SNaohiro Aota btrfs_delete_unused_bgs(fs_info);
18793ed01616SNaohiro Aota /*
18803ed01616SNaohiro Aota * If we are interrupted by a balance, we can just bail out. The
18813ed01616SNaohiro Aota * cleaner thread will restart it again if necessary.
18823ed01616SNaohiro Aota */ 18833ed01616SNaohiro Aota if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 18843ed01616SNaohiro Aota goto end; 1885d96b3424SFilipe Manana spin_lock(&fs_info->unused_bgs_lock); 188618bb8bbfSJohannes Thumshirn } 188718bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 188818bb8bbfSJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 18893ed01616SNaohiro Aota end: 189018bb8bbfSJohannes Thumshirn btrfs_exclop_finish(fs_info); 1891ca5e4ea0SNaohiro Aota sb_end_write(fs_info->sb); 189218bb8bbfSJohannes Thumshirn } 189318bb8bbfSJohannes Thumshirn 189418bb8bbfSJohannes Thumshirn void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 189518bb8bbfSJohannes Thumshirn { 189618bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 189718bb8bbfSJohannes Thumshirn if (!list_empty(&fs_info->reclaim_bgs)) 189818bb8bbfSJohannes Thumshirn queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); 189918bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 190018bb8bbfSJohannes Thumshirn } 190118bb8bbfSJohannes Thumshirn 190218bb8bbfSJohannes Thumshirn void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 190318bb8bbfSJohannes Thumshirn { 190418bb8bbfSJohannes Thumshirn struct btrfs_fs_info *fs_info = bg->fs_info; 190518bb8bbfSJohannes Thumshirn 190618bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 190718bb8bbfSJohannes Thumshirn if (list_empty(&bg->bg_list)) { 190818bb8bbfSJohannes Thumshirn btrfs_get_block_group(bg); 190918bb8bbfSJohannes Thumshirn trace_btrfs_add_reclaim_block_group(bg); 191018bb8bbfSJohannes Thumshirn list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); 191118bb8bbfSJohannes Thumshirn } 191218bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 191318bb8bbfSJohannes Thumshirn } 191418bb8bbfSJohannes Thumshirn 1915e3ba67a1SJohannes Thumshirn static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 1916e3ba67a1SJohannes Thumshirn struct btrfs_path *path) 1917e3ba67a1SJohannes Thumshirn { 1918e3ba67a1SJohannes Thumshirn struct extent_map_tree *em_tree; 1919e3ba67a1SJohannes Thumshirn struct extent_map *em; 1920e3ba67a1SJohannes Thumshirn struct btrfs_block_group_item bg; 1921e3ba67a1SJohannes Thumshirn struct extent_buffer *leaf; 1922e3ba67a1SJohannes Thumshirn int slot; 1923e3ba67a1SJohannes Thumshirn u64 flags; 1924e3ba67a1SJohannes Thumshirn int ret = 0; 1925e3ba67a1SJohannes Thumshirn 1926e3ba67a1SJohannes Thumshirn slot = path->slots[0]; 1927e3ba67a1SJohannes Thumshirn leaf = path->nodes[0]; 1928e3ba67a1SJohannes Thumshirn 1929e3ba67a1SJohannes Thumshirn em_tree = &fs_info->mapping_tree; 1930e3ba67a1SJohannes Thumshirn read_lock(&em_tree->lock); 1931e3ba67a1SJohannes Thumshirn em = lookup_extent_mapping(em_tree, key->objectid, key->offset); 1932e3ba67a1SJohannes Thumshirn read_unlock(&em_tree->lock); 1933e3ba67a1SJohannes Thumshirn if (!em) { 1934e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1935e3ba67a1SJohannes Thumshirn "logical %llu len %llu found bg but no related chunk", 1936e3ba67a1SJohannes Thumshirn key->objectid, key->offset); 1937e3ba67a1SJohannes Thumshirn return -ENOENT; 1938e3ba67a1SJohannes Thumshirn } 1939e3ba67a1SJohannes Thumshirn 1940e3ba67a1SJohannes Thumshirn if (em->start != key->objectid || em->len != key->offset) { 1941e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1942e3ba67a1SJohannes Thumshirn "block group %llu len %llu mismatch with chunk %llu len %llu", 1943e3ba67a1SJohannes Thumshirn key->objectid, key->offset, em->start, 
em->len); 1944e3ba67a1SJohannes Thumshirn ret = -EUCLEAN; 1945e3ba67a1SJohannes Thumshirn goto out_free_em; 1946e3ba67a1SJohannes Thumshirn } 1947e3ba67a1SJohannes Thumshirn 1948e3ba67a1SJohannes Thumshirn read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 1949e3ba67a1SJohannes Thumshirn sizeof(bg)); 1950e3ba67a1SJohannes Thumshirn flags = btrfs_stack_block_group_flags(&bg) & 1951e3ba67a1SJohannes Thumshirn BTRFS_BLOCK_GROUP_TYPE_MASK; 1952e3ba67a1SJohannes Thumshirn 1953e3ba67a1SJohannes Thumshirn if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 1954e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1955e3ba67a1SJohannes Thumshirn "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 1956e3ba67a1SJohannes Thumshirn key->objectid, key->offset, flags, 1957e3ba67a1SJohannes Thumshirn (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); 1958e3ba67a1SJohannes Thumshirn ret = -EUCLEAN; 1959e3ba67a1SJohannes Thumshirn } 1960e3ba67a1SJohannes Thumshirn 1961e3ba67a1SJohannes Thumshirn out_free_em: 1962e3ba67a1SJohannes Thumshirn free_extent_map(em); 1963e3ba67a1SJohannes Thumshirn return ret; 1964e3ba67a1SJohannes Thumshirn } 1965e3ba67a1SJohannes Thumshirn 19664358d963SJosef Bacik static int find_first_block_group(struct btrfs_fs_info *fs_info, 19674358d963SJosef Bacik struct btrfs_path *path, 19684358d963SJosef Bacik struct btrfs_key *key) 19694358d963SJosef Bacik { 1970dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 1971e3ba67a1SJohannes Thumshirn int ret; 19724358d963SJosef Bacik struct btrfs_key found_key; 19734358d963SJosef Bacik 197436dfbbe2SGabriel Niebler btrfs_for_each_slot(root, key, &found_key, path, ret) { 19754358d963SJosef Bacik if (found_key.objectid >= key->objectid && 19764358d963SJosef Bacik found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 197736dfbbe2SGabriel Niebler return read_bg_from_eb(fs_info, &found_key, path); 1978e3ba67a1SJohannes Thumshirn } 19794358d963SJosef Bacik } 19804358d963SJosef Bacik return ret; 19814358d963SJosef Bacik } 19824358d963SJosef Bacik 19834358d963SJosef Bacik static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 19844358d963SJosef Bacik { 19854358d963SJosef Bacik u64 extra_flags = chunk_to_extended(flags) & 19864358d963SJosef Bacik BTRFS_EXTENDED_PROFILE_MASK; 19874358d963SJosef Bacik 19884358d963SJosef Bacik write_seqlock(&fs_info->profiles_lock); 19894358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA) 19904358d963SJosef Bacik fs_info->avail_data_alloc_bits |= extra_flags; 19914358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_METADATA) 19924358d963SJosef Bacik fs_info->avail_metadata_alloc_bits |= extra_flags; 19934358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 19944358d963SJosef Bacik fs_info->avail_system_alloc_bits |= extra_flags; 19954358d963SJosef Bacik write_sequnlock(&fs_info->profiles_lock); 19964358d963SJosef Bacik } 19974358d963SJosef Bacik 199843dd529aSDavid Sterba /* 199943dd529aSDavid Sterba * Map a physical disk address to a list of logical addresses. 
20009ee9b979SNikolay Borisov * 20019ee9b979SNikolay Borisov * @fs_info: the filesystem 200296a14336SNikolay Borisov * @chunk_start: logical address of block group 200396a14336SNikolay Borisov * @physical: physical address to map to logical addresses 200496a14336SNikolay Borisov * @logical: return array of logical addresses which map to @physical 200596a14336SNikolay Borisov * @naddrs: length of @logical 200696a14336SNikolay Borisov * @stripe_len: size of IO stripe for the given block group 200796a14336SNikolay Borisov * 200896a14336SNikolay Borisov * Maps a particular @physical disk address to a list of @logical addresses. 200996a14336SNikolay Borisov * Used primarily to exclude those portions of a block group that contain super 201096a14336SNikolay Borisov * block copies. 201196a14336SNikolay Borisov */ 201296a14336SNikolay Borisov int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 20131eb82ef8SChristoph Hellwig u64 physical, u64 **logical, int *naddrs, int *stripe_len) 201496a14336SNikolay Borisov { 201596a14336SNikolay Borisov struct extent_map *em; 201696a14336SNikolay Borisov struct map_lookup *map; 201796a14336SNikolay Borisov u64 *buf; 201896a14336SNikolay Borisov u64 bytenr; 20191776ad17SNikolay Borisov u64 data_stripe_length; 20201776ad17SNikolay Borisov u64 io_stripe_size; 20211776ad17SNikolay Borisov int i, nr = 0; 20221776ad17SNikolay Borisov int ret = 0; 202396a14336SNikolay Borisov 202496a14336SNikolay Borisov em = btrfs_get_chunk_map(fs_info, chunk_start, 1); 202596a14336SNikolay Borisov if (IS_ERR(em)) 202696a14336SNikolay Borisov return -EIO; 202796a14336SNikolay Borisov 202896a14336SNikolay Borisov map = em->map_lookup; 20299e22b925SNikolay Borisov data_stripe_length = em->orig_block_len; 2030a97699d1SQu Wenruo io_stripe_size = BTRFS_STRIPE_LEN; 2031138082f3SNaohiro Aota chunk_start = em->start; 203296a14336SNikolay Borisov 20339e22b925SNikolay Borisov /* For RAID5/6 adjust to a full IO stripe length */ 20349e22b925SNikolay Borisov if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2035a97699d1SQu Wenruo io_stripe_size = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT; 203696a14336SNikolay Borisov 203796a14336SNikolay Borisov buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 20381776ad17SNikolay Borisov if (!buf) { 20391776ad17SNikolay Borisov ret = -ENOMEM; 20401776ad17SNikolay Borisov goto out; 20411776ad17SNikolay Borisov } 204296a14336SNikolay Borisov 204396a14336SNikolay Borisov for (i = 0; i < map->num_stripes; i++) { 20441776ad17SNikolay Borisov bool already_inserted = false; 20456ded22c1SQu Wenruo u32 stripe_nr; 20466ded22c1SQu Wenruo u32 offset; 20471776ad17SNikolay Borisov int j; 20481776ad17SNikolay Borisov 20491776ad17SNikolay Borisov if (!in_range(physical, map->stripes[i].physical, 20501776ad17SNikolay Borisov data_stripe_length)) 205196a14336SNikolay Borisov continue; 205296a14336SNikolay Borisov 2053a97699d1SQu Wenruo stripe_nr = (physical - map->stripes[i].physical) >> 2054a97699d1SQu Wenruo BTRFS_STRIPE_LEN_SHIFT; 2055a97699d1SQu Wenruo offset = (physical - map->stripes[i].physical) & 2056a97699d1SQu Wenruo BTRFS_STRIPE_LEN_MASK; 205796a14336SNikolay Borisov 2058ac067734SDavid Sterba if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 20596ded22c1SQu Wenruo BTRFS_BLOCK_GROUP_RAID10)) 20606ded22c1SQu Wenruo stripe_nr = div_u64(stripe_nr * map->num_stripes + i, 20616ded22c1SQu Wenruo map->sub_stripes); 206296a14336SNikolay Borisov /* 206396a14336SNikolay Borisov * The remaining case would be for RAID56, multiply by 206496a14336SNikolay Borisov * 
nr_data_stripes(). Alternatively, just use rmap_len below 206596a14336SNikolay Borisov * instead of map->stripe_len 206696a14336SNikolay Borisov */ 2067138082f3SNaohiro Aota bytenr = chunk_start + stripe_nr * io_stripe_size + offset; 20681776ad17SNikolay Borisov 20691776ad17SNikolay Borisov /* Ensure we don't add duplicate addresses */ 207096a14336SNikolay Borisov for (j = 0; j < nr; j++) { 20711776ad17SNikolay Borisov if (buf[j] == bytenr) { 20721776ad17SNikolay Borisov already_inserted = true; 207396a14336SNikolay Borisov break; 207496a14336SNikolay Borisov } 207596a14336SNikolay Borisov } 20761776ad17SNikolay Borisov 20771776ad17SNikolay Borisov if (!already_inserted) 20781776ad17SNikolay Borisov buf[nr++] = bytenr; 207996a14336SNikolay Borisov } 208096a14336SNikolay Borisov 208196a14336SNikolay Borisov *logical = buf; 208296a14336SNikolay Borisov *naddrs = nr; 20831776ad17SNikolay Borisov *stripe_len = io_stripe_size; 20841776ad17SNikolay Borisov out: 208596a14336SNikolay Borisov free_extent_map(em); 20861776ad17SNikolay Borisov return ret; 208796a14336SNikolay Borisov } 208896a14336SNikolay Borisov 208932da5386SDavid Sterba static int exclude_super_stripes(struct btrfs_block_group *cache) 20904358d963SJosef Bacik { 20914358d963SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 209212659251SNaohiro Aota const bool zoned = btrfs_is_zoned(fs_info); 20934358d963SJosef Bacik u64 bytenr; 20944358d963SJosef Bacik u64 *logical; 20954358d963SJosef Bacik int stripe_len; 20964358d963SJosef Bacik int i, nr, ret; 20974358d963SJosef Bacik 2098b3470b5dSDavid Sterba if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 2099b3470b5dSDavid Sterba stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 21004358d963SJosef Bacik cache->bytes_super += stripe_len; 2101b3470b5dSDavid Sterba ret = btrfs_add_excluded_extent(fs_info, cache->start, 21024358d963SJosef Bacik stripe_len); 21034358d963SJosef Bacik if (ret) 21044358d963SJosef Bacik return ret; 21054358d963SJosef Bacik } 21064358d963SJosef Bacik 21074358d963SJosef Bacik for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 21084358d963SJosef Bacik bytenr = btrfs_sb_offset(i); 21091eb82ef8SChristoph Hellwig ret = btrfs_rmap_block(fs_info, cache->start, 21104358d963SJosef Bacik bytenr, &logical, &nr, &stripe_len); 21114358d963SJosef Bacik if (ret) 21124358d963SJosef Bacik return ret; 21134358d963SJosef Bacik 211412659251SNaohiro Aota /* Shouldn't have super stripes in sequential zones */ 211512659251SNaohiro Aota if (zoned && nr) { 2116f1a07c2bSFilipe Manana kfree(logical); 211712659251SNaohiro Aota btrfs_err(fs_info, 211812659251SNaohiro Aota "zoned: block group %llu must not contain super block", 211912659251SNaohiro Aota cache->start); 212012659251SNaohiro Aota return -EUCLEAN; 212112659251SNaohiro Aota } 212212659251SNaohiro Aota 21234358d963SJosef Bacik while (nr--) { 212496f9b0f2SNikolay Borisov u64 len = min_t(u64, stripe_len, 212596f9b0f2SNikolay Borisov cache->start + cache->length - logical[nr]); 21264358d963SJosef Bacik 21274358d963SJosef Bacik cache->bytes_super += len; 212896f9b0f2SNikolay Borisov ret = btrfs_add_excluded_extent(fs_info, logical[nr], 212996f9b0f2SNikolay Borisov len); 21304358d963SJosef Bacik if (ret) { 21314358d963SJosef Bacik kfree(logical); 21324358d963SJosef Bacik return ret; 21334358d963SJosef Bacik } 21344358d963SJosef Bacik } 21354358d963SJosef Bacik 21364358d963SJosef Bacik kfree(logical); 21374358d963SJosef Bacik } 21384358d963SJosef Bacik return 0; 21394358d963SJosef Bacik } 21404358d963SJosef Bacik 214132da5386SDavid 
Sterba static struct btrfs_block_group *btrfs_create_block_group_cache( 21429afc6649SQu Wenruo struct btrfs_fs_info *fs_info, u64 start) 21434358d963SJosef Bacik { 214432da5386SDavid Sterba struct btrfs_block_group *cache; 21454358d963SJosef Bacik 21464358d963SJosef Bacik cache = kzalloc(sizeof(*cache), GFP_NOFS); 21474358d963SJosef Bacik if (!cache) 21484358d963SJosef Bacik return NULL; 21494358d963SJosef Bacik 21504358d963SJosef Bacik cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 21514358d963SJosef Bacik GFP_NOFS); 21524358d963SJosef Bacik if (!cache->free_space_ctl) { 21534358d963SJosef Bacik kfree(cache); 21544358d963SJosef Bacik return NULL; 21554358d963SJosef Bacik } 21564358d963SJosef Bacik 2157b3470b5dSDavid Sterba cache->start = start; 21584358d963SJosef Bacik 21594358d963SJosef Bacik cache->fs_info = fs_info; 21604358d963SJosef Bacik cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 21614358d963SJosef Bacik 21626e80d4f8SDennis Zhou cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 21636e80d4f8SDennis Zhou 216448aaeebeSJosef Bacik refcount_set(&cache->refs, 1); 21654358d963SJosef Bacik spin_lock_init(&cache->lock); 21664358d963SJosef Bacik init_rwsem(&cache->data_rwsem); 21674358d963SJosef Bacik INIT_LIST_HEAD(&cache->list); 21684358d963SJosef Bacik INIT_LIST_HEAD(&cache->cluster_list); 21694358d963SJosef Bacik INIT_LIST_HEAD(&cache->bg_list); 21704358d963SJosef Bacik INIT_LIST_HEAD(&cache->ro_list); 2171b0643e59SDennis Zhou INIT_LIST_HEAD(&cache->discard_list); 21724358d963SJosef Bacik INIT_LIST_HEAD(&cache->dirty_list); 21734358d963SJosef Bacik INIT_LIST_HEAD(&cache->io_list); 2174afba2bc0SNaohiro Aota INIT_LIST_HEAD(&cache->active_bg_list); 2175cd79909bSJosef Bacik btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 21766b7304afSFilipe Manana atomic_set(&cache->frozen, 0); 21774358d963SJosef Bacik mutex_init(&cache->free_space_lock); 21784358d963SJosef Bacik 21794358d963SJosef Bacik return cache; 21804358d963SJosef Bacik } 21814358d963SJosef Bacik 21824358d963SJosef Bacik /* 21834358d963SJosef Bacik * Iterate all chunks and verify that each of them has the corresponding block 21844358d963SJosef Bacik * group 21854358d963SJosef Bacik */ 21864358d963SJosef Bacik static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 21874358d963SJosef Bacik { 21884358d963SJosef Bacik struct extent_map_tree *map_tree = &fs_info->mapping_tree; 21894358d963SJosef Bacik struct extent_map *em; 219032da5386SDavid Sterba struct btrfs_block_group *bg; 21914358d963SJosef Bacik u64 start = 0; 21924358d963SJosef Bacik int ret = 0; 21934358d963SJosef Bacik 21944358d963SJosef Bacik while (1) { 21954358d963SJosef Bacik read_lock(&map_tree->lock); 21964358d963SJosef Bacik /* 21974358d963SJosef Bacik * lookup_extent_mapping will return the first extent map 21984358d963SJosef Bacik * intersecting the range, so setting @len to 1 is enough to 21994358d963SJosef Bacik * get the first chunk. 
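 *
 * (Editor's example.) With chunks mapped at [0, 1G) and [1G, 2G) the
 * loop below visits each mapping exactly once, advancing
 * start = em->start + em->len past the chunk it just verified.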
22004358d963SJosef Bacik */ 22014358d963SJosef Bacik em = lookup_extent_mapping(map_tree, start, 1); 22024358d963SJosef Bacik read_unlock(&map_tree->lock); 22034358d963SJosef Bacik if (!em) 22044358d963SJosef Bacik break; 22054358d963SJosef Bacik 22064358d963SJosef Bacik bg = btrfs_lookup_block_group(fs_info, em->start); 22074358d963SJosef Bacik if (!bg) { 22084358d963SJosef Bacik btrfs_err(fs_info, 22094358d963SJosef Bacik "chunk start=%llu len=%llu doesn't have corresponding block group", 22104358d963SJosef Bacik em->start, em->len); 22114358d963SJosef Bacik ret = -EUCLEAN; 22124358d963SJosef Bacik free_extent_map(em); 22134358d963SJosef Bacik break; 22144358d963SJosef Bacik } 2215b3470b5dSDavid Sterba if (bg->start != em->start || bg->length != em->len || 22164358d963SJosef Bacik (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 22174358d963SJosef Bacik (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 22184358d963SJosef Bacik btrfs_err(fs_info, 22194358d963SJosef Bacik "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 22204358d963SJosef Bacik em->start, em->len, 22214358d963SJosef Bacik em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 2222b3470b5dSDavid Sterba bg->start, bg->length, 22234358d963SJosef Bacik bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 22244358d963SJosef Bacik ret = -EUCLEAN; 22254358d963SJosef Bacik free_extent_map(em); 22264358d963SJosef Bacik btrfs_put_block_group(bg); 22274358d963SJosef Bacik break; 22284358d963SJosef Bacik } 22294358d963SJosef Bacik start = em->start + em->len; 22304358d963SJosef Bacik free_extent_map(em); 22314358d963SJosef Bacik btrfs_put_block_group(bg); 22324358d963SJosef Bacik } 22334358d963SJosef Bacik return ret; 22344358d963SJosef Bacik } 22354358d963SJosef Bacik 2236ffb9e0f0SQu Wenruo static int read_one_block_group(struct btrfs_fs_info *info, 22374afd2fe8SJohannes Thumshirn struct btrfs_block_group_item *bgi, 2238d49a2ddbSQu Wenruo const struct btrfs_key *key, 2239ffb9e0f0SQu Wenruo int need_clear) 2240ffb9e0f0SQu Wenruo { 224132da5386SDavid Sterba struct btrfs_block_group *cache; 2242ffb9e0f0SQu Wenruo const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 2243ffb9e0f0SQu Wenruo int ret; 2244ffb9e0f0SQu Wenruo 2245d49a2ddbSQu Wenruo ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 2246ffb9e0f0SQu Wenruo 22479afc6649SQu Wenruo cache = btrfs_create_block_group_cache(info, key->objectid); 2248ffb9e0f0SQu Wenruo if (!cache) 2249ffb9e0f0SQu Wenruo return -ENOMEM; 2250ffb9e0f0SQu Wenruo 22514afd2fe8SJohannes Thumshirn cache->length = key->offset; 22524afd2fe8SJohannes Thumshirn cache->used = btrfs_stack_block_group_used(bgi); 22537248e0ceSQu Wenruo cache->commit_used = cache->used; 22544afd2fe8SJohannes Thumshirn cache->flags = btrfs_stack_block_group_flags(bgi); 2255f7238e50SJosef Bacik cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); 22569afc6649SQu Wenruo 2257e3e39c72SMarcos Paulo de Souza set_free_space_tree_thresholds(cache); 2258e3e39c72SMarcos Paulo de Souza 2259ffb9e0f0SQu Wenruo if (need_clear) { 2260ffb9e0f0SQu Wenruo /* 2261ffb9e0f0SQu Wenruo * When we mount with old space cache, we need to 2262ffb9e0f0SQu Wenruo * set BTRFS_DC_CLEAR and set dirty flag. 2263ffb9e0f0SQu Wenruo * 2264ffb9e0f0SQu Wenruo * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 2265ffb9e0f0SQu Wenruo * truncate the old free space cache inode and 2266ffb9e0f0SQu Wenruo * setup a new one. 
2267ffb9e0f0SQu Wenruo * b) Setting 'dirty flag' makes sure that we flush 2268ffb9e0f0SQu Wenruo * the new space cache info onto disk. 2269ffb9e0f0SQu Wenruo */ 2270ffb9e0f0SQu Wenruo if (btrfs_test_opt(info, SPACE_CACHE)) 2271ffb9e0f0SQu Wenruo cache->disk_cache_state = BTRFS_DC_CLEAR; 2272ffb9e0f0SQu Wenruo } 2273ffb9e0f0SQu Wenruo if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 2274ffb9e0f0SQu Wenruo (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 2275ffb9e0f0SQu Wenruo btrfs_err(info, 2276ffb9e0f0SQu Wenruo "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 2277ffb9e0f0SQu Wenruo cache->start); 2278ffb9e0f0SQu Wenruo ret = -EINVAL; 2279ffb9e0f0SQu Wenruo goto error; 2280ffb9e0f0SQu Wenruo } 2281ffb9e0f0SQu Wenruo 2282a94794d5SNaohiro Aota ret = btrfs_load_block_group_zone_info(cache, false); 228308e11a3dSNaohiro Aota if (ret) { 228408e11a3dSNaohiro Aota btrfs_err(info, "zoned: failed to load zone info of bg %llu", 228508e11a3dSNaohiro Aota cache->start); 228608e11a3dSNaohiro Aota goto error; 228708e11a3dSNaohiro Aota } 228808e11a3dSNaohiro Aota 2289ffb9e0f0SQu Wenruo /* 2290ffb9e0f0SQu Wenruo * We need to exclude the super stripes now so that the space info has 2291ffb9e0f0SQu Wenruo * super bytes accounted for, otherwise we'll think we have more space 2292ffb9e0f0SQu Wenruo * than we actually do. 2293ffb9e0f0SQu Wenruo */ 2294ffb9e0f0SQu Wenruo ret = exclude_super_stripes(cache); 2295ffb9e0f0SQu Wenruo if (ret) { 2296ffb9e0f0SQu Wenruo /* We may have excluded something, so call this just in case. */ 2297ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 2298ffb9e0f0SQu Wenruo goto error; 2299ffb9e0f0SQu Wenruo } 2300ffb9e0f0SQu Wenruo 2301ffb9e0f0SQu Wenruo /* 2302169e0da9SNaohiro Aota * For zoned filesystem, space after the allocation offset is the only 2303169e0da9SNaohiro Aota * free space for a block group. So, we don't need any caching work. 2304169e0da9SNaohiro Aota * btrfs_calc_zone_unusable() will set the amount of free space and 2305169e0da9SNaohiro Aota * zone_unusable space. 2306169e0da9SNaohiro Aota * 2307169e0da9SNaohiro Aota * For regular filesystem, check for two cases, either we are full, and 2308169e0da9SNaohiro Aota * therefore don't need to bother with the caching work since we won't 2309169e0da9SNaohiro Aota * find any space, or we are empty, and we can just add all the space 2310169e0da9SNaohiro Aota * in and be done with it. This saves us _a_lot_ of time, particularly 2311169e0da9SNaohiro Aota * in the full case. 2312ffb9e0f0SQu Wenruo */ 2313169e0da9SNaohiro Aota if (btrfs_is_zoned(info)) { 2314169e0da9SNaohiro Aota btrfs_calc_zone_unusable(cache); 2315c46c4247SNaohiro Aota /* Should not have any excluded extents. Just in case, though. 
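 * (For zoned block groups the free vs. zone_unusable split was just
 * computed by btrfs_calc_zone_unusable() above, so there is no caching
 * work left to do.)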
*/ 2316c46c4247SNaohiro Aota btrfs_free_excluded_extents(cache); 2317169e0da9SNaohiro Aota } else if (cache->length == cache->used) { 2318ffb9e0f0SQu Wenruo cache->cached = BTRFS_CACHE_FINISHED; 2319ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 2320ffb9e0f0SQu Wenruo } else if (cache->used == 0) { 2321ffb9e0f0SQu Wenruo cache->cached = BTRFS_CACHE_FINISHED; 2322d8ccbd21SFilipe Manana ret = add_new_free_space(cache, cache->start, 2323d8ccbd21SFilipe Manana cache->start + cache->length, NULL); 2324ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 2325d8ccbd21SFilipe Manana if (ret) 2326d8ccbd21SFilipe Manana goto error; 2327ffb9e0f0SQu Wenruo } 2328ffb9e0f0SQu Wenruo 2329ffb9e0f0SQu Wenruo ret = btrfs_add_block_group_cache(info, cache); 2330ffb9e0f0SQu Wenruo if (ret) { 2331ffb9e0f0SQu Wenruo btrfs_remove_free_space_cache(cache); 2332ffb9e0f0SQu Wenruo goto error; 2333ffb9e0f0SQu Wenruo } 2334ffb9e0f0SQu Wenruo trace_btrfs_add_block_group(info, cache, 0); 2335723de71dSJosef Bacik btrfs_add_bg_to_space_info(info, cache); 2336ffb9e0f0SQu Wenruo 2337ffb9e0f0SQu Wenruo set_avail_alloc_bits(info, cache->flags); 2338a09f23c3SAnand Jain if (btrfs_chunk_writeable(info, cache->start)) { 2339a09f23c3SAnand Jain if (cache->used == 0) { 2340ffb9e0f0SQu Wenruo ASSERT(list_empty(&cache->bg_list)); 23416e80d4f8SDennis Zhou if (btrfs_test_opt(info, DISCARD_ASYNC)) 23426e80d4f8SDennis Zhou btrfs_discard_queue_work(&info->discard_ctl, cache); 23436e80d4f8SDennis Zhou else 2344ffb9e0f0SQu Wenruo btrfs_mark_bg_unused(cache); 2345ffb9e0f0SQu Wenruo } 2346a09f23c3SAnand Jain } else { 2347a09f23c3SAnand Jain inc_block_group_ro(cache, 1); 2348a09f23c3SAnand Jain } 2349a09f23c3SAnand Jain 2350ffb9e0f0SQu Wenruo return 0; 2351ffb9e0f0SQu Wenruo error: 2352ffb9e0f0SQu Wenruo btrfs_put_block_group(cache); 2353ffb9e0f0SQu Wenruo return ret; 2354ffb9e0f0SQu Wenruo } 2355ffb9e0f0SQu Wenruo 235642437a63SJosef Bacik static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 235742437a63SJosef Bacik { 235842437a63SJosef Bacik struct extent_map_tree *em_tree = &fs_info->mapping_tree; 235942437a63SJosef Bacik struct rb_node *node; 236042437a63SJosef Bacik int ret = 0; 236142437a63SJosef Bacik 236242437a63SJosef Bacik for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 236342437a63SJosef Bacik struct extent_map *em; 236442437a63SJosef Bacik struct map_lookup *map; 236542437a63SJosef Bacik struct btrfs_block_group *bg; 236642437a63SJosef Bacik 236742437a63SJosef Bacik em = rb_entry(node, struct extent_map, rb_node); 236842437a63SJosef Bacik map = em->map_lookup; 236942437a63SJosef Bacik bg = btrfs_create_block_group_cache(fs_info, em->start); 237042437a63SJosef Bacik if (!bg) { 237142437a63SJosef Bacik ret = -ENOMEM; 237242437a63SJosef Bacik break; 237342437a63SJosef Bacik } 237442437a63SJosef Bacik 237542437a63SJosef Bacik /* Fill dummy cache as FULL */ 237642437a63SJosef Bacik bg->length = em->len; 237742437a63SJosef Bacik bg->flags = map->type; 237842437a63SJosef Bacik bg->cached = BTRFS_CACHE_FINISHED; 237942437a63SJosef Bacik bg->used = em->len; 238142437a63SJosef Bacik ret = btrfs_add_block_group_cache(fs_info, bg); 23822b29726cSQu Wenruo /* 23832b29726cSQu Wenruo * We may have some valid block group cache added already, in 23842b29726cSQu Wenruo * that case we skip to the next one.
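 * (btrfs_add_block_group_cache() signals that with -EEXIST, which is
 * treated as success below.)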
23852b29726cSQu Wenruo */ 23862b29726cSQu Wenruo if (ret == -EEXIST) { 23872b29726cSQu Wenruo ret = 0; 23882b29726cSQu Wenruo btrfs_put_block_group(bg); 23892b29726cSQu Wenruo continue; 23902b29726cSQu Wenruo } 23912b29726cSQu Wenruo 239242437a63SJosef Bacik if (ret) { 239342437a63SJosef Bacik btrfs_remove_free_space_cache(bg); 239442437a63SJosef Bacik btrfs_put_block_group(bg); 239542437a63SJosef Bacik break; 239642437a63SJosef Bacik } 23972b29726cSQu Wenruo 2398723de71dSJosef Bacik btrfs_add_bg_to_space_info(fs_info, bg); 239942437a63SJosef Bacik 240042437a63SJosef Bacik set_avail_alloc_bits(fs_info, bg->flags); 240142437a63SJosef Bacik } 240242437a63SJosef Bacik if (!ret) 240342437a63SJosef Bacik btrfs_init_global_block_rsv(fs_info); 240442437a63SJosef Bacik return ret; 240542437a63SJosef Bacik } 240642437a63SJosef Bacik 24074358d963SJosef Bacik int btrfs_read_block_groups(struct btrfs_fs_info *info) 24084358d963SJosef Bacik { 2409dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(info); 24104358d963SJosef Bacik struct btrfs_path *path; 24114358d963SJosef Bacik int ret; 241232da5386SDavid Sterba struct btrfs_block_group *cache; 24134358d963SJosef Bacik struct btrfs_space_info *space_info; 24144358d963SJosef Bacik struct btrfs_key key; 24154358d963SJosef Bacik int need_clear = 0; 24164358d963SJosef Bacik u64 cache_gen; 24174358d963SJosef Bacik 241881d5d614SQu Wenruo /* 241981d5d614SQu Wenruo * Either no extent root (with ibadroots rescue option) or we have 242081d5d614SQu Wenruo * unsupported RO options. The fs can never be mounted read-write, so no 242181d5d614SQu Wenruo * need to waste time searching block group items. 242281d5d614SQu Wenruo * 242381d5d614SQu Wenruo * This also allows new extent tree related changes to be RO compat, 242481d5d614SQu Wenruo * no need for a full incompat flag. 
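 * In that case fill_dummy_bgs() below builds in-memory block groups
 * straight from the chunk mappings, so the filesystem stays usable
 * read-only.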
242581d5d614SQu Wenruo */ 242681d5d614SQu Wenruo if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & 242781d5d614SQu Wenruo ~BTRFS_FEATURE_COMPAT_RO_SUPP)) 242842437a63SJosef Bacik return fill_dummy_bgs(info); 242942437a63SJosef Bacik 24304358d963SJosef Bacik key.objectid = 0; 24314358d963SJosef Bacik key.offset = 0; 24324358d963SJosef Bacik key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 24334358d963SJosef Bacik path = btrfs_alloc_path(); 24344358d963SJosef Bacik if (!path) 24354358d963SJosef Bacik return -ENOMEM; 24364358d963SJosef Bacik 24374358d963SJosef Bacik cache_gen = btrfs_super_cache_generation(info->super_copy); 24384358d963SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 24394358d963SJosef Bacik btrfs_super_generation(info->super_copy) != cache_gen) 24404358d963SJosef Bacik need_clear = 1; 24414358d963SJosef Bacik if (btrfs_test_opt(info, CLEAR_CACHE)) 24424358d963SJosef Bacik need_clear = 1; 24434358d963SJosef Bacik 24444358d963SJosef Bacik while (1) { 24454afd2fe8SJohannes Thumshirn struct btrfs_block_group_item bgi; 24464afd2fe8SJohannes Thumshirn struct extent_buffer *leaf; 24474afd2fe8SJohannes Thumshirn int slot; 24484afd2fe8SJohannes Thumshirn 24494358d963SJosef Bacik ret = find_first_block_group(info, path, &key); 24504358d963SJosef Bacik if (ret > 0) 24514358d963SJosef Bacik break; 24524358d963SJosef Bacik if (ret != 0) 24534358d963SJosef Bacik goto error; 24544358d963SJosef Bacik 24554afd2fe8SJohannes Thumshirn leaf = path->nodes[0]; 24564afd2fe8SJohannes Thumshirn slot = path->slots[0]; 24574afd2fe8SJohannes Thumshirn 24584afd2fe8SJohannes Thumshirn read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 24594afd2fe8SJohannes Thumshirn sizeof(bgi)); 24604afd2fe8SJohannes Thumshirn 24614afd2fe8SJohannes Thumshirn btrfs_item_key_to_cpu(leaf, &key, slot); 24624afd2fe8SJohannes Thumshirn btrfs_release_path(path); 24634afd2fe8SJohannes Thumshirn ret = read_one_block_group(info, &bgi, &key, need_clear); 2464ffb9e0f0SQu Wenruo if (ret < 0) 24654358d963SJosef Bacik goto error; 2466ffb9e0f0SQu Wenruo key.objectid += key.offset; 2467ffb9e0f0SQu Wenruo key.offset = 0; 24684358d963SJosef Bacik } 24697837fa88SJosef Bacik btrfs_release_path(path); 24704358d963SJosef Bacik 247172804905SJosef Bacik list_for_each_entry(space_info, &info->space_info, list) { 247249ea112dSJosef Bacik int i; 247349ea112dSJosef Bacik 247449ea112dSJosef Bacik for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 247549ea112dSJosef Bacik if (list_empty(&space_info->block_groups[i])) 247649ea112dSJosef Bacik continue; 247749ea112dSJosef Bacik cache = list_first_entry(&space_info->block_groups[i], 247849ea112dSJosef Bacik struct btrfs_block_group, 247949ea112dSJosef Bacik list); 248049ea112dSJosef Bacik btrfs_sysfs_add_block_group_type(cache); 248149ea112dSJosef Bacik } 248249ea112dSJosef Bacik 24834358d963SJosef Bacik if (!(btrfs_get_alloc_profile(info, space_info->flags) & 24844358d963SJosef Bacik (BTRFS_BLOCK_GROUP_RAID10 | 24854358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID1_MASK | 24864358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID56_MASK | 24874358d963SJosef Bacik BTRFS_BLOCK_GROUP_DUP))) 24884358d963SJosef Bacik continue; 24894358d963SJosef Bacik /* 24904358d963SJosef Bacik * Avoid allocating from un-mirrored block group if there are 24914358d963SJosef Bacik * mirrored block groups. 
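 * Marking the RAID0 and SINGLE lists read-only below steers any new
 * allocation towards the mirrored profiles.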
24924358d963SJosef Bacik */ 24934358d963SJosef Bacik list_for_each_entry(cache, 24944358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_RAID0], 24954358d963SJosef Bacik list) 2496e11c0406SJosef Bacik inc_block_group_ro(cache, 1); 24974358d963SJosef Bacik list_for_each_entry(cache, 24984358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_SINGLE], 24994358d963SJosef Bacik list) 2500e11c0406SJosef Bacik inc_block_group_ro(cache, 1); 25014358d963SJosef Bacik } 25024358d963SJosef Bacik 25034358d963SJosef Bacik btrfs_init_global_block_rsv(info); 25044358d963SJosef Bacik ret = check_chunk_block_group_mappings(info); 25054358d963SJosef Bacik error: 25064358d963SJosef Bacik btrfs_free_path(path); 25072b29726cSQu Wenruo /* 25082b29726cSQu Wenruo * We've hit some error while reading the extent tree, and have 25092b29726cSQu Wenruo * rescue=ibadroots mount option. 25102b29726cSQu Wenruo * Try to fill the tree using dummy block groups so that the user can 25112b29726cSQu Wenruo * continue to mount and grab their data. 25122b29726cSQu Wenruo */ 25132b29726cSQu Wenruo if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) 25142b29726cSQu Wenruo ret = fill_dummy_bgs(info); 25154358d963SJosef Bacik return ret; 25164358d963SJosef Bacik } 25174358d963SJosef Bacik 251879bd3712SFilipe Manana /* 251979bd3712SFilipe Manana * This function, insert_block_group_item(), belongs to the phase 2 of chunk 252079bd3712SFilipe Manana * allocation. 252179bd3712SFilipe Manana * 252279bd3712SFilipe Manana * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 252379bd3712SFilipe Manana * phases. 252479bd3712SFilipe Manana */ 252597f4728aSQu Wenruo static int insert_block_group_item(struct btrfs_trans_handle *trans, 252697f4728aSQu Wenruo struct btrfs_block_group *block_group) 252797f4728aSQu Wenruo { 252897f4728aSQu Wenruo struct btrfs_fs_info *fs_info = trans->fs_info; 252997f4728aSQu Wenruo struct btrfs_block_group_item bgi; 2530dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 253197f4728aSQu Wenruo struct btrfs_key key; 2532675dfe12SFilipe Manana u64 old_commit_used; 2533675dfe12SFilipe Manana int ret; 253497f4728aSQu Wenruo 253597f4728aSQu Wenruo spin_lock(&block_group->lock); 253697f4728aSQu Wenruo btrfs_set_stack_block_group_used(&bgi, block_group->used); 253797f4728aSQu Wenruo btrfs_set_stack_block_group_chunk_objectid(&bgi, 2538f7238e50SJosef Bacik block_group->global_root_id); 253997f4728aSQu Wenruo btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 2540675dfe12SFilipe Manana old_commit_used = block_group->commit_used; 2541675dfe12SFilipe Manana block_group->commit_used = block_group->used; 254297f4728aSQu Wenruo key.objectid = block_group->start; 254397f4728aSQu Wenruo key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 254497f4728aSQu Wenruo key.offset = block_group->length; 254597f4728aSQu Wenruo spin_unlock(&block_group->lock); 254697f4728aSQu Wenruo 2547675dfe12SFilipe Manana ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 2548675dfe12SFilipe Manana if (ret < 0) { 2549675dfe12SFilipe Manana spin_lock(&block_group->lock); 2550675dfe12SFilipe Manana block_group->commit_used = old_commit_used; 2551675dfe12SFilipe Manana spin_unlock(&block_group->lock); 2552675dfe12SFilipe Manana } 2553675dfe12SFilipe Manana 2554675dfe12SFilipe Manana return ret; 255597f4728aSQu Wenruo } 255697f4728aSQu Wenruo 25572eadb9e7SNikolay Borisov static int insert_dev_extent(struct btrfs_trans_handle *trans, 25582eadb9e7SNikolay Borisov struct btrfs_device *device, 
u64 chunk_offset, 25592eadb9e7SNikolay Borisov u64 start, u64 num_bytes) 25602eadb9e7SNikolay Borisov { 25612eadb9e7SNikolay Borisov struct btrfs_fs_info *fs_info = device->fs_info; 25622eadb9e7SNikolay Borisov struct btrfs_root *root = fs_info->dev_root; 25632eadb9e7SNikolay Borisov struct btrfs_path *path; 25642eadb9e7SNikolay Borisov struct btrfs_dev_extent *extent; 25652eadb9e7SNikolay Borisov struct extent_buffer *leaf; 25662eadb9e7SNikolay Borisov struct btrfs_key key; 25672eadb9e7SNikolay Borisov int ret; 25682eadb9e7SNikolay Borisov 25692eadb9e7SNikolay Borisov WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 25702eadb9e7SNikolay Borisov WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 25712eadb9e7SNikolay Borisov path = btrfs_alloc_path(); 25722eadb9e7SNikolay Borisov if (!path) 25732eadb9e7SNikolay Borisov return -ENOMEM; 25742eadb9e7SNikolay Borisov 25752eadb9e7SNikolay Borisov key.objectid = device->devid; 25762eadb9e7SNikolay Borisov key.type = BTRFS_DEV_EXTENT_KEY; 25772eadb9e7SNikolay Borisov key.offset = start; 25782eadb9e7SNikolay Borisov ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); 25792eadb9e7SNikolay Borisov if (ret) 25802eadb9e7SNikolay Borisov goto out; 25812eadb9e7SNikolay Borisov 25822eadb9e7SNikolay Borisov leaf = path->nodes[0]; 25832eadb9e7SNikolay Borisov extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); 25842eadb9e7SNikolay Borisov btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); 25852eadb9e7SNikolay Borisov btrfs_set_dev_extent_chunk_objectid(leaf, extent, 25862eadb9e7SNikolay Borisov BTRFS_FIRST_CHUNK_TREE_OBJECTID); 25872eadb9e7SNikolay Borisov btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 25882eadb9e7SNikolay Borisov 25892eadb9e7SNikolay Borisov btrfs_set_dev_extent_length(leaf, extent, num_bytes); 25902eadb9e7SNikolay Borisov btrfs_mark_buffer_dirty(leaf); 25912eadb9e7SNikolay Borisov out: 25922eadb9e7SNikolay Borisov btrfs_free_path(path); 25932eadb9e7SNikolay Borisov return ret; 25942eadb9e7SNikolay Borisov } 25952eadb9e7SNikolay Borisov 25962eadb9e7SNikolay Borisov /* 25972eadb9e7SNikolay Borisov * This function belongs to phase 2. 25982eadb9e7SNikolay Borisov * 25992eadb9e7SNikolay Borisov * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 26002eadb9e7SNikolay Borisov * phases. 
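 * It persists one dev extent item per stripe of the newly allocated
 * chunk.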
26012eadb9e7SNikolay Borisov */ 26022eadb9e7SNikolay Borisov static int insert_dev_extents(struct btrfs_trans_handle *trans, 26032eadb9e7SNikolay Borisov u64 chunk_offset, u64 chunk_size) 26042eadb9e7SNikolay Borisov { 26052eadb9e7SNikolay Borisov struct btrfs_fs_info *fs_info = trans->fs_info; 26062eadb9e7SNikolay Borisov struct btrfs_device *device; 26072eadb9e7SNikolay Borisov struct extent_map *em; 26082eadb9e7SNikolay Borisov struct map_lookup *map; 26092eadb9e7SNikolay Borisov u64 dev_offset; 26102eadb9e7SNikolay Borisov u64 stripe_size; 26112eadb9e7SNikolay Borisov int i; 26122eadb9e7SNikolay Borisov int ret = 0; 26132eadb9e7SNikolay Borisov 26142eadb9e7SNikolay Borisov em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 26152eadb9e7SNikolay Borisov if (IS_ERR(em)) 26162eadb9e7SNikolay Borisov return PTR_ERR(em); 26172eadb9e7SNikolay Borisov 26182eadb9e7SNikolay Borisov map = em->map_lookup; 26192eadb9e7SNikolay Borisov stripe_size = em->orig_block_len; 26202eadb9e7SNikolay Borisov 26212eadb9e7SNikolay Borisov /* 26222eadb9e7SNikolay Borisov * Take the device list mutex to prevent races with the final phase of 26232eadb9e7SNikolay Borisov * a device replace operation that replaces the device object associated 26242eadb9e7SNikolay Borisov * with the map's stripes, because the device object's id can change 26252eadb9e7SNikolay Borisov * at any time during that final phase of the device replace operation 26262eadb9e7SNikolay Borisov * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 26272eadb9e7SNikolay Borisov * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 26282eadb9e7SNikolay Borisov * resulting in persisting a device extent item with such ID. 26292eadb9e7SNikolay Borisov */ 26302eadb9e7SNikolay Borisov mutex_lock(&fs_info->fs_devices->device_list_mutex); 26312eadb9e7SNikolay Borisov for (i = 0; i < map->num_stripes; i++) { 26322eadb9e7SNikolay Borisov device = map->stripes[i].dev; 26332eadb9e7SNikolay Borisov dev_offset = map->stripes[i].physical; 26342eadb9e7SNikolay Borisov 26352eadb9e7SNikolay Borisov ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, 26362eadb9e7SNikolay Borisov stripe_size); 26372eadb9e7SNikolay Borisov if (ret) 26382eadb9e7SNikolay Borisov break; 26392eadb9e7SNikolay Borisov } 26402eadb9e7SNikolay Borisov mutex_unlock(&fs_info->fs_devices->device_list_mutex); 26412eadb9e7SNikolay Borisov 26422eadb9e7SNikolay Borisov free_extent_map(em); 26432eadb9e7SNikolay Borisov return ret; 26442eadb9e7SNikolay Borisov } 26452eadb9e7SNikolay Borisov 264679bd3712SFilipe Manana /* 264779bd3712SFilipe Manana * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of 264879bd3712SFilipe Manana * chunk allocation. 264979bd3712SFilipe Manana * 265079bd3712SFilipe Manana * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 265179bd3712SFilipe Manana * phases. 
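 * For every block group queued on trans->new_bgs this inserts the block
 * group item, the chunk item (if not already inserted) and the device
 * extent items into the respective trees.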
265279bd3712SFilipe Manana */ 26534358d963SJosef Bacik void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 26544358d963SJosef Bacik { 26554358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 265632da5386SDavid Sterba struct btrfs_block_group *block_group; 26574358d963SJosef Bacik int ret = 0; 26584358d963SJosef Bacik 26594358d963SJosef Bacik while (!list_empty(&trans->new_bgs)) { 266049ea112dSJosef Bacik int index; 266149ea112dSJosef Bacik 26624358d963SJosef Bacik block_group = list_first_entry(&trans->new_bgs, 266332da5386SDavid Sterba struct btrfs_block_group, 26644358d963SJosef Bacik bg_list); 26654358d963SJosef Bacik if (ret) 26664358d963SJosef Bacik goto next; 26674358d963SJosef Bacik 266849ea112dSJosef Bacik index = btrfs_bg_flags_to_raid_index(block_group->flags); 266949ea112dSJosef Bacik 267097f4728aSQu Wenruo ret = insert_block_group_item(trans, block_group); 26714358d963SJosef Bacik if (ret) 26724358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 26733349b57fSJosef Bacik if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, 26743349b57fSJosef Bacik &block_group->runtime_flags)) { 267579bd3712SFilipe Manana mutex_lock(&fs_info->chunk_mutex); 267679bd3712SFilipe Manana ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); 267779bd3712SFilipe Manana mutex_unlock(&fs_info->chunk_mutex); 267879bd3712SFilipe Manana if (ret) 267979bd3712SFilipe Manana btrfs_abort_transaction(trans, ret); 268079bd3712SFilipe Manana } 26812eadb9e7SNikolay Borisov ret = insert_dev_extents(trans, block_group->start, 268297f4728aSQu Wenruo block_group->length); 26834358d963SJosef Bacik if (ret) 26844358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 26854358d963SJosef Bacik add_block_group_free_space(trans, block_group); 268649ea112dSJosef Bacik 268749ea112dSJosef Bacik /* 268849ea112dSJosef Bacik * If we restriped during balance, we may have added a new raid 268949ea112dSJosef Bacik * type, so now add the sysfs entries when it is safe to do so. 269049ea112dSJosef Bacik * We don't have to worry about locking here as it's handled in 269149ea112dSJosef Bacik * btrfs_sysfs_add_block_group_type. 269249ea112dSJosef Bacik */ 269349ea112dSJosef Bacik if (block_group->space_info->block_group_kobjs[index] == NULL) 269449ea112dSJosef Bacik btrfs_sysfs_add_block_group_type(block_group); 269549ea112dSJosef Bacik 26964358d963SJosef Bacik /* Already aborted the transaction if it failed. */ 26974358d963SJosef Bacik next: 26984358d963SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 26994358d963SJosef Bacik list_del_init(&block_group->bg_list); 27000657b20cSFilipe Manana clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); 27014358d963SJosef Bacik } 27024358d963SJosef Bacik btrfs_trans_release_chunk_metadata(trans); 27034358d963SJosef Bacik } 27044358d963SJosef Bacik 2705f7238e50SJosef Bacik /* 2706f7238e50SJosef Bacik * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2707f7238e50SJosef Bacik * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 
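 * A worked example with illustrative numbers: on a filesystem larger
 * than 10GiB, div is 1GiB, so with nr_global_roots == 4 a block group
 * at offset 5GiB maps to global root id (5GiB / 1GiB) % 4 == 1.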
2708f7238e50SJosef Bacik */ 2709f7238e50SJosef Bacik static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) 2710f7238e50SJosef Bacik { 2711f7238e50SJosef Bacik u64 div = SZ_1G; 2712f7238e50SJosef Bacik u64 index; 2713f7238e50SJosef Bacik 2714f7238e50SJosef Bacik if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) 2715f7238e50SJosef Bacik return BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2716f7238e50SJosef Bacik 2717f7238e50SJosef Bacik /* If we have a smaller fs, index based on 128MiB instead. */ 2718f7238e50SJosef Bacik if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) 2719f7238e50SJosef Bacik div = SZ_128M; 2720f7238e50SJosef Bacik 2721f7238e50SJosef Bacik offset = div64_u64(offset, div); 2722f7238e50SJosef Bacik div64_u64_rem(offset, fs_info->nr_global_roots, &index); 2723f7238e50SJosef Bacik return index; 2724f7238e50SJosef Bacik } 2725f7238e50SJosef Bacik 272679bd3712SFilipe Manana struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, 27275758d1bdSFilipe Manana u64 type, 272879bd3712SFilipe Manana u64 chunk_offset, u64 size) 27294358d963SJosef Bacik { 27304358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 273132da5386SDavid Sterba struct btrfs_block_group *cache; 27324358d963SJosef Bacik int ret; 27334358d963SJosef Bacik 27344358d963SJosef Bacik btrfs_set_log_full_commit(trans); 27354358d963SJosef Bacik 27369afc6649SQu Wenruo cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 27374358d963SJosef Bacik if (!cache) 273879bd3712SFilipe Manana return ERR_PTR(-ENOMEM); 27394358d963SJosef Bacik 27400657b20cSFilipe Manana /* 27410657b20cSFilipe Manana * Mark it as new before adding it to the rbtree of block groups or any 27420657b20cSFilipe Manana * list, so that no other task finds it and calls btrfs_mark_bg_unused() 27430657b20cSFilipe Manana * before the new flag is set.
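 * The flag is cleared in btrfs_create_pending_block_groups() once the
 * block group item has been persisted.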
27440657b20cSFilipe Manana */ 27450657b20cSFilipe Manana set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); 27460657b20cSFilipe Manana 27479afc6649SQu Wenruo cache->length = size; 2748e3e39c72SMarcos Paulo de Souza set_free_space_tree_thresholds(cache); 27494358d963SJosef Bacik cache->flags = type; 27504358d963SJosef Bacik cache->cached = BTRFS_CACHE_FINISHED; 2751f7238e50SJosef Bacik cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2752f7238e50SJosef Bacik 2753997e3e2eSBoris Burkov if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 27540d7764ffSDavid Sterba set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); 275508e11a3dSNaohiro Aota 2756a94794d5SNaohiro Aota ret = btrfs_load_block_group_zone_info(cache, true); 275708e11a3dSNaohiro Aota if (ret) { 275808e11a3dSNaohiro Aota btrfs_put_block_group(cache); 275979bd3712SFilipe Manana return ERR_PTR(ret); 276008e11a3dSNaohiro Aota } 276108e11a3dSNaohiro Aota 27624358d963SJosef Bacik ret = exclude_super_stripes(cache); 27634358d963SJosef Bacik if (ret) { 27644358d963SJosef Bacik /* We may have excluded something, so call this just in case */ 27654358d963SJosef Bacik btrfs_free_excluded_extents(cache); 27664358d963SJosef Bacik btrfs_put_block_group(cache); 276779bd3712SFilipe Manana return ERR_PTR(ret); 27684358d963SJosef Bacik } 27694358d963SJosef Bacik 2770d8ccbd21SFilipe Manana ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); 27714358d963SJosef Bacik btrfs_free_excluded_extents(cache); 2772d8ccbd21SFilipe Manana if (ret) { 2773d8ccbd21SFilipe Manana btrfs_put_block_group(cache); 2774d8ccbd21SFilipe Manana return ERR_PTR(ret); 2775d8ccbd21SFilipe Manana } 27764358d963SJosef Bacik 27774358d963SJosef Bacik /* 27784358d963SJosef Bacik * Ensure the corresponding space_info object is created and 27794358d963SJosef Bacik * assigned to our block group. We want our bg to be added to the rbtree 27804358d963SJosef Bacik * with its ->space_info set. 27814358d963SJosef Bacik */ 27824358d963SJosef Bacik cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 27834358d963SJosef Bacik ASSERT(cache->space_info); 27844358d963SJosef Bacik 27854358d963SJosef Bacik ret = btrfs_add_block_group_cache(fs_info, cache); 27864358d963SJosef Bacik if (ret) { 27874358d963SJosef Bacik btrfs_remove_free_space_cache(cache); 27884358d963SJosef Bacik btrfs_put_block_group(cache); 278979bd3712SFilipe Manana return ERR_PTR(ret); 27904358d963SJosef Bacik } 27914358d963SJosef Bacik 27924358d963SJosef Bacik /* 27934358d963SJosef Bacik * Now that our block group has its ->space_info set and is inserted in 27944358d963SJosef Bacik * the rbtree, update the space info's counters. 
27954358d963SJosef Bacik */ 27964358d963SJosef Bacik trace_btrfs_add_block_group(fs_info, cache, 1); 2797723de71dSJosef Bacik btrfs_add_bg_to_space_info(fs_info, cache); 27984358d963SJosef Bacik btrfs_update_global_block_rsv(fs_info); 27994358d963SJosef Bacik 28009d4b0a12SJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 28019d4b0a12SJosef Bacik if (btrfs_should_fragment_free_space(cache)) { 28025758d1bdSFilipe Manana cache->space_info->bytes_used += size >> 1; 28039d4b0a12SJosef Bacik fragment_free_space(cache); 28049d4b0a12SJosef Bacik } 28059d4b0a12SJosef Bacik #endif 28064358d963SJosef Bacik 28074358d963SJosef Bacik list_add_tail(&cache->bg_list, &trans->new_bgs); 28084358d963SJosef Bacik trans->delayed_ref_updates++; 28094358d963SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 28104358d963SJosef Bacik 28114358d963SJosef Bacik set_avail_alloc_bits(fs_info, type); 281279bd3712SFilipe Manana return cache; 28134358d963SJosef Bacik } 281426ce2095SJosef Bacik 2815b12de528SQu Wenruo /* 2816b12de528SQu Wenruo * Mark one block group RO, can be called several times for the same block 2817b12de528SQu Wenruo * group. 2818b12de528SQu Wenruo * 2819b12de528SQu Wenruo * @cache: the destination block group 2820b12de528SQu Wenruo * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to 2821b12de528SQu Wenruo * ensure we still have some free space after marking this 2822b12de528SQu Wenruo * block group RO. 2823b12de528SQu Wenruo */ 2824b12de528SQu Wenruo int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2825b12de528SQu Wenruo bool do_chunk_alloc) 282626ce2095SJosef Bacik { 282726ce2095SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 282826ce2095SJosef Bacik struct btrfs_trans_handle *trans; 2829dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 283026ce2095SJosef Bacik u64 alloc_flags; 283126ce2095SJosef Bacik int ret; 2832b6e9f16cSNikolay Borisov bool dirty_bg_running; 283326ce2095SJosef Bacik 28342d192fc4SQu Wenruo /* 28352d192fc4SQu Wenruo * This can only happen when we are doing read-only scrub on read-only 28362d192fc4SQu Wenruo * mount. 28372d192fc4SQu Wenruo * In that case we should not start a new transaction on read-only fs. 28382d192fc4SQu Wenruo * Thus here we skip all chunk allocations. 28392d192fc4SQu Wenruo */ 28402d192fc4SQu Wenruo if (sb_rdonly(fs_info->sb)) { 28412d192fc4SQu Wenruo mutex_lock(&fs_info->ro_block_group_mutex); 28422d192fc4SQu Wenruo ret = inc_block_group_ro(cache, 0); 28432d192fc4SQu Wenruo mutex_unlock(&fs_info->ro_block_group_mutex); 28442d192fc4SQu Wenruo return ret; 28452d192fc4SQu Wenruo } 28462d192fc4SQu Wenruo 2847b6e9f16cSNikolay Borisov do { 2848dfe8aec4SJosef Bacik trans = btrfs_join_transaction(root); 284926ce2095SJosef Bacik if (IS_ERR(trans)) 285026ce2095SJosef Bacik return PTR_ERR(trans); 285126ce2095SJosef Bacik 2852b6e9f16cSNikolay Borisov dirty_bg_running = false; 2853b6e9f16cSNikolay Borisov 285426ce2095SJosef Bacik /* 2855b6e9f16cSNikolay Borisov * We're not allowed to set block groups readonly after the dirty 2856b6e9f16cSNikolay Borisov * block group cache has started writing. If it already started, 2857b6e9f16cSNikolay Borisov * back off and let this transaction commit. 
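 * btrfs_wait_for_commit() below waits for that commit to finish, after
 * which the loop retries with a fresh transaction.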
285826ce2095SJosef Bacik */ 285926ce2095SJosef Bacik mutex_lock(&fs_info->ro_block_group_mutex); 286026ce2095SJosef Bacik if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 286126ce2095SJosef Bacik u64 transid = trans->transid; 286226ce2095SJosef Bacik 286326ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 286426ce2095SJosef Bacik btrfs_end_transaction(trans); 286526ce2095SJosef Bacik 286626ce2095SJosef Bacik ret = btrfs_wait_for_commit(fs_info, transid); 286726ce2095SJosef Bacik if (ret) 286826ce2095SJosef Bacik return ret; 2869b6e9f16cSNikolay Borisov dirty_bg_running = true; 287026ce2095SJosef Bacik } 2871b6e9f16cSNikolay Borisov } while (dirty_bg_running); 287226ce2095SJosef Bacik 2873b12de528SQu Wenruo if (do_chunk_alloc) { 287426ce2095SJosef Bacik /* 2875b12de528SQu Wenruo * If we are changing raid levels, try to allocate a 2876b12de528SQu Wenruo * corresponding block group with the new raid level. 287726ce2095SJosef Bacik */ 2878349e120eSJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 287926ce2095SJosef Bacik if (alloc_flags != cache->flags) { 2880b12de528SQu Wenruo ret = btrfs_chunk_alloc(trans, alloc_flags, 2881b12de528SQu Wenruo CHUNK_ALLOC_FORCE); 288226ce2095SJosef Bacik /* 288326ce2095SJosef Bacik * ENOSPC is allowed here, we may have enough space 2884b12de528SQu Wenruo * already allocated at the new raid level to carry on 288526ce2095SJosef Bacik */ 288626ce2095SJosef Bacik if (ret == -ENOSPC) 288726ce2095SJosef Bacik ret = 0; 288826ce2095SJosef Bacik if (ret < 0) 288926ce2095SJosef Bacik goto out; 289026ce2095SJosef Bacik } 2891b12de528SQu Wenruo } 289226ce2095SJosef Bacik 2893a7a63accSJosef Bacik ret = inc_block_group_ro(cache, 0); 289426ce2095SJosef Bacik if (!ret) 289526ce2095SJosef Bacik goto out; 28967561551eSQu Wenruo if (ret == -ETXTBSY) 28977561551eSQu Wenruo goto unlock_out; 28987561551eSQu Wenruo 28997561551eSQu Wenruo /* 29007561551eSQu Wenruo * Skip chunk allocation if the bg is SYSTEM, this is to avoid a system 29017561551eSQu Wenruo * chunk allocation storm exhausting the system chunk array. Otherwise 29027561551eSQu Wenruo * we still want to try our best to mark the block group read-only. 29037561551eSQu Wenruo */ 29047561551eSQu Wenruo if (!do_chunk_alloc && ret == -ENOSPC && 29057561551eSQu Wenruo (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) 29067561551eSQu Wenruo goto unlock_out; 29077561551eSQu Wenruo 290826ce2095SJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 290926ce2095SJosef Bacik ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 291026ce2095SJosef Bacik if (ret < 0) 291126ce2095SJosef Bacik goto out; 2912b6a98021SNaohiro Aota /* 2913b6a98021SNaohiro Aota * We have allocated a new chunk. We also need to activate that chunk to 2914b6a98021SNaohiro Aota * grant metadata tickets for a zoned filesystem.
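 * (On regular, non-zoned filesystems the activation is a no-op.)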
2915b6a98021SNaohiro Aota */ 2916b6a98021SNaohiro Aota ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); 2917b6a98021SNaohiro Aota if (ret < 0) 2918b6a98021SNaohiro Aota goto out; 2919b6a98021SNaohiro Aota 2920e11c0406SJosef Bacik ret = inc_block_group_ro(cache, 0); 2921195a49eaSFilipe Manana if (ret == -ETXTBSY) 2922195a49eaSFilipe Manana goto unlock_out; 292326ce2095SJosef Bacik out: 292426ce2095SJosef Bacik if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 2925349e120eSJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 292626ce2095SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 292726ce2095SJosef Bacik check_system_chunk(trans, alloc_flags); 292826ce2095SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 292926ce2095SJosef Bacik } 2930b12de528SQu Wenruo unlock_out: 293126ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 293226ce2095SJosef Bacik 293326ce2095SJosef Bacik btrfs_end_transaction(trans); 293426ce2095SJosef Bacik return ret; 293526ce2095SJosef Bacik } 293626ce2095SJosef Bacik 293732da5386SDavid Sterba void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 293826ce2095SJosef Bacik { 293926ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info; 294026ce2095SJosef Bacik u64 num_bytes; 294126ce2095SJosef Bacik 294226ce2095SJosef Bacik BUG_ON(!cache->ro); 294326ce2095SJosef Bacik 294426ce2095SJosef Bacik spin_lock(&sinfo->lock); 294526ce2095SJosef Bacik spin_lock(&cache->lock); 294626ce2095SJosef Bacik if (!--cache->ro) { 2947169e0da9SNaohiro Aota if (btrfs_is_zoned(cache->fs_info)) { 2948169e0da9SNaohiro Aota /* Migrate zone_unusable bytes back */ 294998173255SNaohiro Aota cache->zone_unusable = 295098173255SNaohiro Aota (cache->alloc_offset - cache->used) + 295198173255SNaohiro Aota (cache->length - cache->zone_capacity); 2952169e0da9SNaohiro Aota sinfo->bytes_zone_unusable += cache->zone_unusable; 2953169e0da9SNaohiro Aota sinfo->bytes_readonly -= cache->zone_unusable; 2954169e0da9SNaohiro Aota } 2955f9f28e5bSNaohiro Aota num_bytes = cache->length - cache->reserved - 2956f9f28e5bSNaohiro Aota cache->pinned - cache->bytes_super - 2957f9f28e5bSNaohiro Aota cache->zone_unusable - cache->used; 2958f9f28e5bSNaohiro Aota sinfo->bytes_readonly -= num_bytes; 295926ce2095SJosef Bacik list_del_init(&cache->ro_list); 296026ce2095SJosef Bacik } 296126ce2095SJosef Bacik spin_unlock(&cache->lock); 296226ce2095SJosef Bacik spin_unlock(&sinfo->lock); 296326ce2095SJosef Bacik } 296477745c05SJosef Bacik 29653be4d8efSQu Wenruo static int update_block_group_item(struct btrfs_trans_handle *trans, 296677745c05SJosef Bacik struct btrfs_path *path, 296732da5386SDavid Sterba struct btrfs_block_group *cache) 296877745c05SJosef Bacik { 296977745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 297077745c05SJosef Bacik int ret; 2971dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 297277745c05SJosef Bacik unsigned long bi; 297377745c05SJosef Bacik struct extent_buffer *leaf; 2974bf38be65SDavid Sterba struct btrfs_block_group_item bgi; 2975b3470b5dSDavid Sterba struct btrfs_key key; 29767248e0ceSQu Wenruo u64 old_commit_used; 29777248e0ceSQu Wenruo u64 used; 29787248e0ceSQu Wenruo 29797248e0ceSQu Wenruo /* 29807248e0ceSQu Wenruo * Block group items update can be triggered out of commit transaction 29817248e0ceSQu Wenruo * critical section, thus we need a consistent view of used bytes. 
29827248e0ceSQu Wenruo * We cannot use cache->used directly outside of the spin lock, as it 29837248e0ceSQu Wenruo * may be changed. 29847248e0ceSQu Wenruo */ 29857248e0ceSQu Wenruo spin_lock(&cache->lock); 29867248e0ceSQu Wenruo old_commit_used = cache->commit_used; 29877248e0ceSQu Wenruo used = cache->used; 29887248e0ceSQu Wenruo /* No change in used bytes, can safely skip it. */ 29897248e0ceSQu Wenruo if (cache->commit_used == used) { 29907248e0ceSQu Wenruo spin_unlock(&cache->lock); 29917248e0ceSQu Wenruo return 0; 29927248e0ceSQu Wenruo } 29937248e0ceSQu Wenruo cache->commit_used = used; 29947248e0ceSQu Wenruo spin_unlock(&cache->lock); 299577745c05SJosef Bacik 2996b3470b5dSDavid Sterba key.objectid = cache->start; 2997b3470b5dSDavid Sterba key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2998b3470b5dSDavid Sterba key.offset = cache->length; 2999b3470b5dSDavid Sterba 30003be4d8efSQu Wenruo ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 300177745c05SJosef Bacik if (ret) { 300277745c05SJosef Bacik if (ret > 0) 300377745c05SJosef Bacik ret = -ENOENT; 300477745c05SJosef Bacik goto fail; 300577745c05SJosef Bacik } 300677745c05SJosef Bacik 300777745c05SJosef Bacik leaf = path->nodes[0]; 300877745c05SJosef Bacik bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 30097248e0ceSQu Wenruo btrfs_set_stack_block_group_used(&bgi, used); 3010de0dc456SDavid Sterba btrfs_set_stack_block_group_chunk_objectid(&bgi, 3011f7238e50SJosef Bacik cache->global_root_id); 3012de0dc456SDavid Sterba btrfs_set_stack_block_group_flags(&bgi, cache->flags); 3013bf38be65SDavid Sterba write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 301477745c05SJosef Bacik btrfs_mark_buffer_dirty(leaf); 301577745c05SJosef Bacik fail: 301677745c05SJosef Bacik btrfs_release_path(path); 30177248e0ceSQu Wenruo /* We didn't update the block group item, need to revert @commit_used. */ 30187248e0ceSQu Wenruo if (ret < 0) { 30197248e0ceSQu Wenruo spin_lock(&cache->lock); 30207248e0ceSQu Wenruo cache->commit_used = old_commit_used; 30217248e0ceSQu Wenruo spin_unlock(&cache->lock); 30227248e0ceSQu Wenruo } 302377745c05SJosef Bacik return ret; 302477745c05SJosef Bacik 302577745c05SJosef Bacik } 302677745c05SJosef Bacik 302732da5386SDavid Sterba static int cache_save_setup(struct btrfs_block_group *block_group, 302877745c05SJosef Bacik struct btrfs_trans_handle *trans, 302977745c05SJosef Bacik struct btrfs_path *path) 303077745c05SJosef Bacik { 303177745c05SJosef Bacik struct btrfs_fs_info *fs_info = block_group->fs_info; 303277745c05SJosef Bacik struct btrfs_root *root = fs_info->tree_root; 303377745c05SJosef Bacik struct inode *inode = NULL; 303477745c05SJosef Bacik struct extent_changeset *data_reserved = NULL; 303577745c05SJosef Bacik u64 alloc_hint = 0; 303677745c05SJosef Bacik int dcs = BTRFS_DC_ERROR; 30370044ae11SQu Wenruo u64 cache_size = 0; 303877745c05SJosef Bacik int retries = 0; 303977745c05SJosef Bacik int ret = 0; 304077745c05SJosef Bacik 3041af456a2cSBoris Burkov if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 3042af456a2cSBoris Burkov return 0; 3043af456a2cSBoris Burkov 304477745c05SJosef Bacik /* 304577745c05SJosef Bacik * If this block group is smaller than 100 megs don't bother caching the 304677745c05SJosef Bacik * block group. 
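 * (The setup and writeback cost of the free space cache presumably
 * outweighs any benefit for groups that small.)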
304777745c05SJosef Bacik */ 3048b3470b5dSDavid Sterba if (block_group->length < (100 * SZ_1M)) { 304977745c05SJosef Bacik spin_lock(&block_group->lock); 305077745c05SJosef Bacik block_group->disk_cache_state = BTRFS_DC_WRITTEN; 305177745c05SJosef Bacik spin_unlock(&block_group->lock); 305277745c05SJosef Bacik return 0; 305377745c05SJosef Bacik } 305477745c05SJosef Bacik 3055bf31f87fSDavid Sterba if (TRANS_ABORTED(trans)) 305677745c05SJosef Bacik return 0; 305777745c05SJosef Bacik again: 305877745c05SJosef Bacik inode = lookup_free_space_inode(block_group, path); 305977745c05SJosef Bacik if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 306077745c05SJosef Bacik ret = PTR_ERR(inode); 306177745c05SJosef Bacik btrfs_release_path(path); 306277745c05SJosef Bacik goto out; 306377745c05SJosef Bacik } 306477745c05SJosef Bacik 306577745c05SJosef Bacik if (IS_ERR(inode)) { 306677745c05SJosef Bacik BUG_ON(retries); 306777745c05SJosef Bacik retries++; 306877745c05SJosef Bacik 306977745c05SJosef Bacik if (block_group->ro) 307077745c05SJosef Bacik goto out_free; 307177745c05SJosef Bacik 307277745c05SJosef Bacik ret = create_free_space_inode(trans, block_group, path); 307377745c05SJosef Bacik if (ret) 307477745c05SJosef Bacik goto out_free; 307577745c05SJosef Bacik goto again; 307677745c05SJosef Bacik } 307777745c05SJosef Bacik 307877745c05SJosef Bacik /* 307977745c05SJosef Bacik * We want to set the generation to 0, that way if anything goes wrong 308077745c05SJosef Bacik * from here on out we know not to trust this cache when we load up next 308177745c05SJosef Bacik * time. 308277745c05SJosef Bacik */ 308377745c05SJosef Bacik BTRFS_I(inode)->generation = 0; 30849a56fcd1SNikolay Borisov ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 308577745c05SJosef Bacik if (ret) { 308677745c05SJosef Bacik /* 308777745c05SJosef Bacik * So theoretically we could recover from this, simply set the 308877745c05SJosef Bacik * super cache generation to 0 so we know to invalidate the 308977745c05SJosef Bacik * cache, but then we'd have to keep track of the block groups 309077745c05SJosef Bacik * that fail this way so we know we _have_ to reset this cache 309177745c05SJosef Bacik * before the next commit or risk reading stale cache. So to 309277745c05SJosef Bacik * limit our exposure to horrible edge cases lets just abort the 309377745c05SJosef Bacik * transaction, this only happens in really bad situations 309477745c05SJosef Bacik * anyway. 
309577745c05SJosef Bacik */ 309677745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 309777745c05SJosef Bacik goto out_put; 309877745c05SJosef Bacik } 309977745c05SJosef Bacik WARN_ON(ret); 310077745c05SJosef Bacik 310177745c05SJosef Bacik /* We've already set up this transaction, go ahead and exit */ 310277745c05SJosef Bacik if (block_group->cache_generation == trans->transid && 310377745c05SJosef Bacik i_size_read(inode)) { 310477745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 310577745c05SJosef Bacik goto out_put; 310677745c05SJosef Bacik } 310777745c05SJosef Bacik 310877745c05SJosef Bacik if (i_size_read(inode) > 0) { 310977745c05SJosef Bacik ret = btrfs_check_trunc_cache_free_space(fs_info, 311077745c05SJosef Bacik &fs_info->global_block_rsv); 311177745c05SJosef Bacik if (ret) 311277745c05SJosef Bacik goto out_put; 311377745c05SJosef Bacik 311477745c05SJosef Bacik ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 311577745c05SJosef Bacik if (ret) 311677745c05SJosef Bacik goto out_put; 311777745c05SJosef Bacik } 311877745c05SJosef Bacik 311977745c05SJosef Bacik spin_lock(&block_group->lock); 312077745c05SJosef Bacik if (block_group->cached != BTRFS_CACHE_FINISHED || 312177745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) { 312277745c05SJosef Bacik /* 312377745c05SJosef Bacik * Don't bother trying to write stuff out _if_ 312477745c05SJosef Bacik * a) we're not cached, 312577745c05SJosef Bacik * b) we're using the nospace_cache mount option, 312677745c05SJosef Bacik * c) we're using the v2 space_cache (FREE_SPACE_TREE). 312777745c05SJosef Bacik */ 312877745c05SJosef Bacik dcs = BTRFS_DC_WRITTEN; 312977745c05SJosef Bacik spin_unlock(&block_group->lock); 313077745c05SJosef Bacik goto out_put; 313177745c05SJosef Bacik } 313277745c05SJosef Bacik spin_unlock(&block_group->lock); 313377745c05SJosef Bacik 313477745c05SJosef Bacik /* 313577745c05SJosef Bacik * We hit an ENOSPC when setting up the cache in this transaction, just 313677745c05SJosef Bacik * skip doing the setup, we've already cleared the cache so we're safe. 313777745c05SJosef Bacik */ 313877745c05SJosef Bacik if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 313977745c05SJosef Bacik ret = -ENOSPC; 314077745c05SJosef Bacik goto out_put; 314177745c05SJosef Bacik } 314277745c05SJosef Bacik 314377745c05SJosef Bacik /* 314477745c05SJosef Bacik * Try to preallocate enough space based on how big the block group is. 314577745c05SJosef Bacik * Keep in mind this has to include any pinned space which could end up 314677745c05SJosef Bacik * taking up quite a bit since it's not folded into the other space 314777745c05SJosef Bacik * cache.
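 * A worked example: a 1GiB block group on a filesystem with 4KiB
 * sectors gets (1GiB / 256MiB) * 16 * 4KiB = 256KiB preallocated for
 * the cache inode below.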
314877745c05SJosef Bacik */ 31490044ae11SQu Wenruo cache_size = div_u64(block_group->length, SZ_256M); 31500044ae11SQu Wenruo if (!cache_size) 31510044ae11SQu Wenruo cache_size = 1; 315277745c05SJosef Bacik 31530044ae11SQu Wenruo cache_size *= 16; 31540044ae11SQu Wenruo cache_size *= fs_info->sectorsize; 315577745c05SJosef Bacik 315636ea6f3eSNikolay Borisov ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 31571daedb1dSJosef Bacik cache_size, false); 315877745c05SJosef Bacik if (ret) 315977745c05SJosef Bacik goto out_put; 316077745c05SJosef Bacik 31610044ae11SQu Wenruo ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, 31620044ae11SQu Wenruo cache_size, cache_size, 316377745c05SJosef Bacik &alloc_hint); 316477745c05SJosef Bacik /* 316577745c05SJosef Bacik * Our cache requires contiguous chunks so that we don't modify a bunch 316677745c05SJosef Bacik * of metadata or split extents when writing the cache out, which means 316777745c05SJosef Bacik * we can enospc if we are heavily fragmented in addition to just normal 316877745c05SJosef Bacik * out of space conditions. So if we hit this just skip setting up any 316977745c05SJosef Bacik * other block groups for this transaction, maybe we'll unpin enough 317077745c05SJosef Bacik * space the next time around. 317177745c05SJosef Bacik */ 317277745c05SJosef Bacik if (!ret) 317377745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 317477745c05SJosef Bacik else if (ret == -ENOSPC) 317577745c05SJosef Bacik set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 317677745c05SJosef Bacik 317777745c05SJosef Bacik out_put: 317877745c05SJosef Bacik iput(inode); 317977745c05SJosef Bacik out_free: 318077745c05SJosef Bacik btrfs_release_path(path); 318177745c05SJosef Bacik out: 318277745c05SJosef Bacik spin_lock(&block_group->lock); 318377745c05SJosef Bacik if (!ret && dcs == BTRFS_DC_SETUP) 318477745c05SJosef Bacik block_group->cache_generation = trans->transid; 318577745c05SJosef Bacik block_group->disk_cache_state = dcs; 318677745c05SJosef Bacik spin_unlock(&block_group->lock); 318777745c05SJosef Bacik 318877745c05SJosef Bacik extent_changeset_free(data_reserved); 318977745c05SJosef Bacik return ret; 319077745c05SJosef Bacik } 319177745c05SJosef Bacik 319277745c05SJosef Bacik int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 319377745c05SJosef Bacik { 319477745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 319532da5386SDavid Sterba struct btrfs_block_group *cache, *tmp; 319677745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 319777745c05SJosef Bacik struct btrfs_path *path; 319877745c05SJosef Bacik 319977745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs) || 320077745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) 320177745c05SJosef Bacik return 0; 320277745c05SJosef Bacik 320377745c05SJosef Bacik path = btrfs_alloc_path(); 320477745c05SJosef Bacik if (!path) 320577745c05SJosef Bacik return -ENOMEM; 320677745c05SJosef Bacik 320777745c05SJosef Bacik /* Could add new block groups, use _safe just in case */ 320877745c05SJosef Bacik list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 320977745c05SJosef Bacik dirty_list) { 321077745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_CLEAR) 321177745c05SJosef Bacik cache_save_setup(cache, trans, path); 321277745c05SJosef Bacik } 321377745c05SJosef Bacik 321477745c05SJosef Bacik btrfs_free_path(path); 321577745c05SJosef Bacik return 0; 321677745c05SJosef Bacik } 321777745c05SJosef Bacik 
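/*
 * A quick sketch of the v1 space cache states as driven by
 * cache_save_setup() above: a block group enters the transaction as
 * BTRFS_DC_CLEAR (stale cache, reset it), moves to BTRFS_DC_SETUP once
 * its cache inode has been preallocated, and ends up as BTRFS_DC_WRITTEN
 * when there is nothing (more) to write, or BTRFS_DC_ERROR if the setup
 * failed.
 */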
321877745c05SJosef Bacik /* 321977745c05SJosef Bacik * Transaction commit does final block group cache writeback during a critical 322077745c05SJosef Bacik * section where nothing is allowed to change the FS. This is required in 322177745c05SJosef Bacik * order for the cache to actually match the block group, but can introduce a 322277745c05SJosef Bacik * lot of latency into the commit. 322377745c05SJosef Bacik * 322477745c05SJosef Bacik * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 322577745c05SJosef Bacik * There's a chance we'll have to redo some of it if the block group changes 322677745c05SJosef Bacik * again during the commit, but it greatly reduces the commit latency by 322777745c05SJosef Bacik * getting rid of the easy block groups while we're still allowing others to 322877745c05SJosef Bacik * join the commit. 322977745c05SJosef Bacik */ 323077745c05SJosef Bacik int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 323177745c05SJosef Bacik { 323277745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 323332da5386SDavid Sterba struct btrfs_block_group *cache; 323477745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 323577745c05SJosef Bacik int ret = 0; 323677745c05SJosef Bacik int should_put; 323777745c05SJosef Bacik struct btrfs_path *path = NULL; 323877745c05SJosef Bacik LIST_HEAD(dirty); 323977745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 324077745c05SJosef Bacik int loops = 0; 324177745c05SJosef Bacik 324277745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 324377745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs)) { 324477745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 324577745c05SJosef Bacik return 0; 324677745c05SJosef Bacik } 324777745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 324877745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 324977745c05SJosef Bacik 325077745c05SJosef Bacik again: 325177745c05SJosef Bacik /* Make sure all the block groups on our dirty list actually exist */ 325277745c05SJosef Bacik btrfs_create_pending_block_groups(trans); 325377745c05SJosef Bacik 325477745c05SJosef Bacik if (!path) { 325577745c05SJosef Bacik path = btrfs_alloc_path(); 3256938fcbfbSJosef Bacik if (!path) { 3257938fcbfbSJosef Bacik ret = -ENOMEM; 3258938fcbfbSJosef Bacik goto out; 3259938fcbfbSJosef Bacik } 326077745c05SJosef Bacik } 326177745c05SJosef Bacik 326277745c05SJosef Bacik /* 326377745c05SJosef Bacik * cache_write_mutex is here only to save us from balance or automatic 326477745c05SJosef Bacik * removal of empty block groups deleting this block group while we are 326577745c05SJosef Bacik * writing out the cache 326677745c05SJosef Bacik */ 326777745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 326877745c05SJosef Bacik while (!list_empty(&dirty)) { 326977745c05SJosef Bacik bool drop_reserve = true; 327077745c05SJosef Bacik 327132da5386SDavid Sterba cache = list_first_entry(&dirty, struct btrfs_block_group, 327277745c05SJosef Bacik dirty_list); 327377745c05SJosef Bacik /* 327477745c05SJosef Bacik * This can happen if something re-dirties a block group that 327577745c05SJosef Bacik * is already under IO. 
Just wait for it to finish and then do 327677745c05SJosef Bacik * it all again 327777745c05SJosef Bacik */ 327877745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 327977745c05SJosef Bacik list_del_init(&cache->io_list); 328077745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 328177745c05SJosef Bacik btrfs_put_block_group(cache); 328277745c05SJosef Bacik } 328377745c05SJosef Bacik 328477745c05SJosef Bacik 328577745c05SJosef Bacik /* 328677745c05SJosef Bacik * btrfs_wait_cache_io uses the cache->dirty_list to decide if 328777745c05SJosef Bacik * it should update the cache_state. Don't delete until after 328877745c05SJosef Bacik * we wait. 328977745c05SJosef Bacik * 329077745c05SJosef Bacik * Since we're not running in the commit critical section 329177745c05SJosef Bacik * we need the dirty_bgs_lock to protect from update_block_group 329277745c05SJosef Bacik */ 329377745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 329477745c05SJosef Bacik list_del_init(&cache->dirty_list); 329577745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 329677745c05SJosef Bacik 329777745c05SJosef Bacik should_put = 1; 329877745c05SJosef Bacik 329977745c05SJosef Bacik cache_save_setup(cache, trans, path); 330077745c05SJosef Bacik 330177745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_SETUP) { 330277745c05SJosef Bacik cache->io_ctl.inode = NULL; 330377745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 330477745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 330577745c05SJosef Bacik should_put = 0; 330677745c05SJosef Bacik 330777745c05SJosef Bacik /* 330877745c05SJosef Bacik * The cache_write_mutex is protecting the 330977745c05SJosef Bacik * io_list, also refer to the definition of 331077745c05SJosef Bacik * btrfs_transaction::io_bgs for more details 331177745c05SJosef Bacik */ 331277745c05SJosef Bacik list_add_tail(&cache->io_list, io); 331377745c05SJosef Bacik } else { 331477745c05SJosef Bacik /* 331577745c05SJosef Bacik * If we failed to write the cache, the 331677745c05SJosef Bacik * generation will be bad and life goes on 331777745c05SJosef Bacik */ 331877745c05SJosef Bacik ret = 0; 331977745c05SJosef Bacik } 332077745c05SJosef Bacik } 332177745c05SJosef Bacik if (!ret) { 33223be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 332377745c05SJosef Bacik /* 332477745c05SJosef Bacik * Our block group might still be attached to the list 332577745c05SJosef Bacik * of new block groups in the transaction handle of some 332677745c05SJosef Bacik * other task (struct btrfs_trans_handle->new_bgs). This 332777745c05SJosef Bacik * means its block group item isn't yet in the extent 332877745c05SJosef Bacik * tree. If this happens ignore the error, as we will 332977745c05SJosef Bacik * try again later in the critical section of the 333077745c05SJosef Bacik * transaction commit. 
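 * (That is the -ENOENT case handled right below, where the block group
 * is put back on the transaction's dirty list to be retried.)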
333177745c05SJosef Bacik */ 333277745c05SJosef Bacik if (ret == -ENOENT) { 333377745c05SJosef Bacik ret = 0; 333477745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 333577745c05SJosef Bacik if (list_empty(&cache->dirty_list)) { 333677745c05SJosef Bacik list_add_tail(&cache->dirty_list, 333777745c05SJosef Bacik &cur_trans->dirty_bgs); 333877745c05SJosef Bacik btrfs_get_block_group(cache); 333977745c05SJosef Bacik drop_reserve = false; 334077745c05SJosef Bacik } 334177745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 334277745c05SJosef Bacik } else if (ret) { 334377745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 334477745c05SJosef Bacik } 334577745c05SJosef Bacik } 334677745c05SJosef Bacik 334777745c05SJosef Bacik /* If it's not on the io list, we need to put the block group */ 334877745c05SJosef Bacik if (should_put) 334977745c05SJosef Bacik btrfs_put_block_group(cache); 335077745c05SJosef Bacik if (drop_reserve) 335177745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 335277745c05SJosef Bacik /* 335377745c05SJosef Bacik * Avoid blocking other tasks for too long. It might even save 335477745c05SJosef Bacik * us from writing caches for block groups that are going to be 335577745c05SJosef Bacik * removed. 335677745c05SJosef Bacik */ 335777745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 3358938fcbfbSJosef Bacik if (ret) 3359938fcbfbSJosef Bacik goto out; 336077745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 336177745c05SJosef Bacik } 336277745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 336377745c05SJosef Bacik 336477745c05SJosef Bacik /* 336577745c05SJosef Bacik * Go through delayed refs for all the stuff we've just kicked off 336677745c05SJosef Bacik * and then loop back (just once) 336777745c05SJosef Bacik */ 336834d1eb0eSJosef Bacik if (!ret) 336977745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 0); 337077745c05SJosef Bacik if (!ret && loops == 0) { 337177745c05SJosef Bacik loops++; 337277745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 337377745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 337477745c05SJosef Bacik /* 337577745c05SJosef Bacik * dirty_bgs_lock protects us from concurrent block group 337677745c05SJosef Bacik * deletes too (not just cache_write_mutex). 
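 * A minimal sketch of the splice-and-restore pattern this function is
 * built around, with generic names standing in for the btrfs ones:
 *
 *	LIST_HEAD(local);
 *
 *	spin_lock(&shared_lock);
 *	list_splice_init(&shared, &local);       take the whole list at once
 *	spin_unlock(&shared_lock);
 *	ret = process(&local);                   work without the spinlock held
 *	if (ret < 0) {
 *		spin_lock(&shared_lock);
 *		list_splice_init(&local, &shared);   put leftovers back
 *		spin_unlock(&shared_lock);
 *	}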
337777745c05SJosef Bacik */ 337877745c05SJosef Bacik if (!list_empty(&dirty)) { 337977745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 338077745c05SJosef Bacik goto again; 338177745c05SJosef Bacik } 338277745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 3383938fcbfbSJosef Bacik } 3384938fcbfbSJosef Bacik out: 3385938fcbfbSJosef Bacik if (ret < 0) { 3386938fcbfbSJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 3387938fcbfbSJosef Bacik list_splice_init(&dirty, &cur_trans->dirty_bgs); 3388938fcbfbSJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 338977745c05SJosef Bacik btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 339077745c05SJosef Bacik } 339177745c05SJosef Bacik 339277745c05SJosef Bacik btrfs_free_path(path); 339377745c05SJosef Bacik return ret; 339477745c05SJosef Bacik } 339577745c05SJosef Bacik 339677745c05SJosef Bacik int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 339777745c05SJosef Bacik { 339877745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 339932da5386SDavid Sterba struct btrfs_block_group *cache; 340077745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 340177745c05SJosef Bacik int ret = 0; 340277745c05SJosef Bacik int should_put; 340377745c05SJosef Bacik struct btrfs_path *path; 340477745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 340577745c05SJosef Bacik 340677745c05SJosef Bacik path = btrfs_alloc_path(); 340777745c05SJosef Bacik if (!path) 340877745c05SJosef Bacik return -ENOMEM; 340977745c05SJosef Bacik 341077745c05SJosef Bacik /* 341177745c05SJosef Bacik * Even though we are in the critical section of the transaction commit, 341277745c05SJosef Bacik * we can still have concurrent tasks adding elements to this 341377745c05SJosef Bacik * transaction's list of dirty block groups. These tasks correspond to 341477745c05SJosef Bacik * endio free space workers started when writeback finishes for a 341577745c05SJosef Bacik * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 341677745c05SJosef Bacik * allocate new block groups as a result of COWing nodes of the root 341777745c05SJosef Bacik * tree when updating the free space inode. The writeback for the space 341877745c05SJosef Bacik * caches is triggered by an earlier call to 341977745c05SJosef Bacik * btrfs_start_dirty_block_groups() and iterations of the following 342077745c05SJosef Bacik * loop. 342177745c05SJosef Bacik * Also we want to do the cache_save_setup first and then run the 342277745c05SJosef Bacik * delayed refs to make sure we have the best chance at doing this all 342377745c05SJosef Bacik * in one shot. 342477745c05SJosef Bacik */ 342577745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 342677745c05SJosef Bacik while (!list_empty(&cur_trans->dirty_bgs)) { 342777745c05SJosef Bacik cache = list_first_entry(&cur_trans->dirty_bgs, 342832da5386SDavid Sterba struct btrfs_block_group, 342977745c05SJosef Bacik dirty_list); 343077745c05SJosef Bacik 343177745c05SJosef Bacik /* 343277745c05SJosef Bacik * This can happen if cache_save_setup re-dirties a block group 343377745c05SJosef Bacik * that is already under IO. 
Just wait for it to finish and 343477745c05SJosef Bacik * then do it all again 343577745c05SJosef Bacik */ 343677745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 343777745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 343877745c05SJosef Bacik list_del_init(&cache->io_list); 343977745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 344077745c05SJosef Bacik btrfs_put_block_group(cache); 344177745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 344277745c05SJosef Bacik } 344377745c05SJosef Bacik 344477745c05SJosef Bacik /* 344577745c05SJosef Bacik * Don't remove from the dirty list until after we've waited on 344677745c05SJosef Bacik * any pending IO 344777745c05SJosef Bacik */ 344877745c05SJosef Bacik list_del_init(&cache->dirty_list); 344977745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 345077745c05SJosef Bacik should_put = 1; 345177745c05SJosef Bacik 345277745c05SJosef Bacik cache_save_setup(cache, trans, path); 345377745c05SJosef Bacik 345477745c05SJosef Bacik if (!ret) 345577745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 345677745c05SJosef Bacik (unsigned long) -1); 345777745c05SJosef Bacik 345877745c05SJosef Bacik if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 345977745c05SJosef Bacik cache->io_ctl.inode = NULL; 346077745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 346177745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 346277745c05SJosef Bacik should_put = 0; 346377745c05SJosef Bacik list_add_tail(&cache->io_list, io); 346477745c05SJosef Bacik } else { 346577745c05SJosef Bacik /* 346677745c05SJosef Bacik * If we failed to write the cache, the 346777745c05SJosef Bacik * generation will be bad and life goes on 346877745c05SJosef Bacik */ 346977745c05SJosef Bacik ret = 0; 347077745c05SJosef Bacik } 347177745c05SJosef Bacik } 347277745c05SJosef Bacik if (!ret) { 34733be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 347477745c05SJosef Bacik /* 347577745c05SJosef Bacik * One of the free space endio workers might have 347677745c05SJosef Bacik * created a new block group while updating a free space 347777745c05SJosef Bacik * cache's inode (at inode.c:btrfs_finish_ordered_io()) 347877745c05SJosef Bacik * and hasn't released its transaction handle yet, in 347977745c05SJosef Bacik * which case the new block group is still attached to 348077745c05SJosef Bacik * its transaction handle and its creation has not 348177745c05SJosef Bacik * finished yet (no block group item in the extent tree 348277745c05SJosef Bacik * yet, etc). If this is the case, wait for all free 348377745c05SJosef Bacik * space endio workers to finish and retry. This is a 3484260db43cSRandy Dunlap * very rare case so no need for a more efficient and 348577745c05SJosef Bacik * complex approach. 
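 * (Waiting for cur_trans->num_writers to drop to 1 means every other
 *  task holding a transaction handle, the endio workers included, has
 *  released it, so the pending block group creation has finished and
 *  the item update can be retried safely.)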
348677745c05SJosef Bacik */ 348777745c05SJosef Bacik if (ret == -ENOENT) { 348877745c05SJosef Bacik wait_event(cur_trans->writer_wait, 348977745c05SJosef Bacik atomic_read(&cur_trans->num_writers) == 1); 34903be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 349177745c05SJosef Bacik } 349277745c05SJosef Bacik if (ret) 349377745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 349477745c05SJosef Bacik } 349577745c05SJosef Bacik 349677745c05SJosef Bacik /* If it's not on the io list, we need to put the block group */ 349777745c05SJosef Bacik if (should_put) 349877745c05SJosef Bacik btrfs_put_block_group(cache); 349977745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 350077745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 350177745c05SJosef Bacik } 350277745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 350377745c05SJosef Bacik 350477745c05SJosef Bacik /* 350577745c05SJosef Bacik * Refer to the definition of the io_bgs member for details on why it's safe 350677745c05SJosef Bacik * to use it without any locking 350777745c05SJosef Bacik */ 350877745c05SJosef Bacik while (!list_empty(io)) { 350932da5386SDavid Sterba cache = list_first_entry(io, struct btrfs_block_group, 351077745c05SJosef Bacik io_list); 351177745c05SJosef Bacik list_del_init(&cache->io_list); 351277745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 351377745c05SJosef Bacik btrfs_put_block_group(cache); 351477745c05SJosef Bacik } 351577745c05SJosef Bacik 351677745c05SJosef Bacik btrfs_free_path(path); 351777745c05SJosef Bacik return ret; 351877745c05SJosef Bacik } 3519606d1bf1SJosef Bacik 3520606d1bf1SJosef Bacik int btrfs_update_block_group(struct btrfs_trans_handle *trans, 352111b66fa6SAnand Jain u64 bytenr, u64 num_bytes, bool alloc) 3522606d1bf1SJosef Bacik { 3523606d1bf1SJosef Bacik struct btrfs_fs_info *info = trans->fs_info; 352432da5386SDavid Sterba struct btrfs_block_group *cache = NULL; 3525606d1bf1SJosef Bacik u64 total = num_bytes; 3526606d1bf1SJosef Bacik u64 old_val; 3527606d1bf1SJosef Bacik u64 byte_in_group; 3528606d1bf1SJosef Bacik int factor; 3529606d1bf1SJosef Bacik int ret = 0; 3530606d1bf1SJosef Bacik 3531606d1bf1SJosef Bacik /* Block accounting for super block */ 3532606d1bf1SJosef Bacik spin_lock(&info->delalloc_root_lock); 3533606d1bf1SJosef Bacik old_val = btrfs_super_bytes_used(info->super_copy); 3534606d1bf1SJosef Bacik if (alloc) 3535606d1bf1SJosef Bacik old_val += num_bytes; 3536606d1bf1SJosef Bacik else 3537606d1bf1SJosef Bacik old_val -= num_bytes; 3538606d1bf1SJosef Bacik btrfs_set_super_bytes_used(info->super_copy, old_val); 3539606d1bf1SJosef Bacik spin_unlock(&info->delalloc_root_lock); 3540606d1bf1SJosef Bacik 3541606d1bf1SJosef Bacik while (total) { 3542df384da5SJosef Bacik struct btrfs_space_info *space_info; 3543efbf35a1SJosef Bacik bool reclaim = false; 3544ac2f1e63SJosef Bacik 3545606d1bf1SJosef Bacik cache = btrfs_lookup_block_group(info, bytenr); 3546606d1bf1SJosef Bacik if (!cache) { 3547606d1bf1SJosef Bacik ret = -ENOENT; 3548606d1bf1SJosef Bacik break; 3549606d1bf1SJosef Bacik } 3550df384da5SJosef Bacik space_info = cache->space_info; 3551606d1bf1SJosef Bacik factor = btrfs_bg_type_to_factor(cache->flags); 3552606d1bf1SJosef Bacik 3553606d1bf1SJosef Bacik /* 3554606d1bf1SJosef Bacik * If this block group has free space cache written out, we 3555606d1bf1SJosef Bacik * need to make sure to load it if we are removing space.
This 3556606d1bf1SJosef Bacik * is because we need the unpinning stage to actually add the 3557606d1bf1SJosef Bacik * space back to the block group, otherwise we will leak space. 3558606d1bf1SJosef Bacik */ 355932da5386SDavid Sterba if (!alloc && !btrfs_block_group_done(cache)) 3560ced8ecf0SOmar Sandoval btrfs_cache_block_group(cache, true); 3561606d1bf1SJosef Bacik 3562b3470b5dSDavid Sterba byte_in_group = bytenr - cache->start; 3563b3470b5dSDavid Sterba WARN_ON(byte_in_group > cache->length); 3564606d1bf1SJosef Bacik 3565df384da5SJosef Bacik spin_lock(&space_info->lock); 3566606d1bf1SJosef Bacik spin_lock(&cache->lock); 3567606d1bf1SJosef Bacik 3568606d1bf1SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 3569606d1bf1SJosef Bacik cache->disk_cache_state < BTRFS_DC_CLEAR) 3570606d1bf1SJosef Bacik cache->disk_cache_state = BTRFS_DC_CLEAR; 3571606d1bf1SJosef Bacik 3572bf38be65SDavid Sterba old_val = cache->used; 3573b3470b5dSDavid Sterba num_bytes = min(total, cache->length - byte_in_group); 3574606d1bf1SJosef Bacik if (alloc) { 3575606d1bf1SJosef Bacik old_val += num_bytes; 3576bf38be65SDavid Sterba cache->used = old_val; 3577606d1bf1SJosef Bacik cache->reserved -= num_bytes; 3578df384da5SJosef Bacik space_info->bytes_reserved -= num_bytes; 3579df384da5SJosef Bacik space_info->bytes_used += num_bytes; 3580df384da5SJosef Bacik space_info->disk_used += num_bytes * factor; 3581606d1bf1SJosef Bacik spin_unlock(&cache->lock); 3582df384da5SJosef Bacik spin_unlock(&space_info->lock); 3583606d1bf1SJosef Bacik } else { 3584606d1bf1SJosef Bacik old_val -= num_bytes; 3585bf38be65SDavid Sterba cache->used = old_val; 3586606d1bf1SJosef Bacik cache->pinned += num_bytes; 3587df384da5SJosef Bacik btrfs_space_info_update_bytes_pinned(info, space_info, 3588df384da5SJosef Bacik num_bytes); 3589df384da5SJosef Bacik space_info->bytes_used -= num_bytes; 3590df384da5SJosef Bacik space_info->disk_used -= num_bytes * factor; 3591ac2f1e63SJosef Bacik 3592ac2f1e63SJosef Bacik reclaim = should_reclaim_block_group(cache, num_bytes); 359352bb7a21SBoris Burkov 3594606d1bf1SJosef Bacik spin_unlock(&cache->lock); 3595df384da5SJosef Bacik spin_unlock(&space_info->lock); 3596606d1bf1SJosef Bacik 3597fe1a598cSDavid Sterba set_extent_bit(&trans->transaction->pinned_extents, 3598606d1bf1SJosef Bacik bytenr, bytenr + num_bytes - 1, 35991d126800SDavid Sterba EXTENT_DIRTY, NULL); 3600606d1bf1SJosef Bacik } 3601606d1bf1SJosef Bacik 3602606d1bf1SJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 3603606d1bf1SJosef Bacik if (list_empty(&cache->dirty_list)) { 3604606d1bf1SJosef Bacik list_add_tail(&cache->dirty_list, 3605606d1bf1SJosef Bacik &trans->transaction->dirty_bgs); 3606606d1bf1SJosef Bacik trans->delayed_ref_updates++; 3607606d1bf1SJosef Bacik btrfs_get_block_group(cache); 3608606d1bf1SJosef Bacik } 3609606d1bf1SJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 3610606d1bf1SJosef Bacik 3611606d1bf1SJosef Bacik /* 3612606d1bf1SJosef Bacik * No longer have used bytes in this block group, queue it for 3613606d1bf1SJosef Bacik * deletion. We do this after adding the block group to the 3614606d1bf1SJosef Bacik * dirty list to avoid races between cleaner kthread and space 3615606d1bf1SJosef Bacik * cache writeout. 
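 * The decision below, summarized (purely illustrative):
 *
 *   used dropped to 0, async discard off -> btrfs_mark_bg_unused()
 *   used dropped to 0, async discard on  -> nothing here; the discard
 *                       code marks the group unused once it is trimmed
 *   used still > 0, should_reclaim_block_group() returned true
 *                                         -> btrfs_mark_bg_to_reclaim()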
3616606d1bf1SJosef Bacik */ 36176e80d4f8SDennis Zhou if (!alloc && old_val == 0) { 36186e80d4f8SDennis Zhou if (!btrfs_test_opt(info, DISCARD_ASYNC)) 3619606d1bf1SJosef Bacik btrfs_mark_bg_unused(cache); 3620ac2f1e63SJosef Bacik } else if (!alloc && reclaim) { 3621ac2f1e63SJosef Bacik btrfs_mark_bg_to_reclaim(cache); 36226e80d4f8SDennis Zhou } 3623606d1bf1SJosef Bacik 3624606d1bf1SJosef Bacik btrfs_put_block_group(cache); 3625606d1bf1SJosef Bacik total -= num_bytes; 3626606d1bf1SJosef Bacik bytenr += num_bytes; 3627606d1bf1SJosef Bacik } 3628606d1bf1SJosef Bacik 3629606d1bf1SJosef Bacik /* Modified block groups are accounted for in the delayed_refs_rsv. */ 3630606d1bf1SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 3631606d1bf1SJosef Bacik return ret; 3632606d1bf1SJosef Bacik } 3633606d1bf1SJosef Bacik 363443dd529aSDavid Sterba /* 363543dd529aSDavid Sterba * Update the block_group and space info counters. 363643dd529aSDavid Sterba * 3637606d1bf1SJosef Bacik * @cache: The cache we are manipulating 3638606d1bf1SJosef Bacik * @ram_bytes: The number of bytes of file content, and will be the same as 3639606d1bf1SJosef Bacik * @num_bytes except for the compress path. 3640606d1bf1SJosef Bacik * @num_bytes: The number of bytes in question 3641606d1bf1SJosef Bacik * @delalloc: The blocks are allocated for the delalloc write 3642606d1bf1SJosef Bacik * 3643606d1bf1SJosef Bacik * This is called by the allocator when it reserves space. If this is a 3644606d1bf1SJosef Bacik * reservation and the block group has become read only we cannot make the 3645606d1bf1SJosef Bacik * reservation and return -EAGAIN, otherwise this function always succeeds. 3646606d1bf1SJosef Bacik */ 364732da5386SDavid Sterba int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, 364852bb7a21SBoris Burkov u64 ram_bytes, u64 num_bytes, int delalloc, 364952bb7a21SBoris Burkov bool force_wrong_size_class) 3650606d1bf1SJosef Bacik { 3651606d1bf1SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 365252bb7a21SBoris Burkov enum btrfs_block_group_size_class size_class; 3653606d1bf1SJosef Bacik int ret = 0; 3654606d1bf1SJosef Bacik 3655606d1bf1SJosef Bacik spin_lock(&space_info->lock); 3656606d1bf1SJosef Bacik spin_lock(&cache->lock); 3657606d1bf1SJosef Bacik if (cache->ro) { 3658606d1bf1SJosef Bacik ret = -EAGAIN; 365952bb7a21SBoris Burkov goto out; 366052bb7a21SBoris Burkov } 366152bb7a21SBoris Burkov 3662cb0922f2SBoris Burkov if (btrfs_block_group_should_use_size_class(cache)) { 366352bb7a21SBoris Burkov size_class = btrfs_calc_block_group_size_class(num_bytes); 366452bb7a21SBoris Burkov ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class); 366552bb7a21SBoris Burkov if (ret) 366652bb7a21SBoris Burkov goto out; 366752bb7a21SBoris Burkov } 3668606d1bf1SJosef Bacik cache->reserved += num_bytes; 3669606d1bf1SJosef Bacik space_info->bytes_reserved += num_bytes; 3670a43c3835SJosef Bacik trace_btrfs_space_reservation(cache->fs_info, "space_info", 3671a43c3835SJosef Bacik space_info->flags, num_bytes, 1); 3672606d1bf1SJosef Bacik btrfs_space_info_update_bytes_may_use(cache->fs_info, 3673606d1bf1SJosef Bacik space_info, -ram_bytes); 3674606d1bf1SJosef Bacik if (delalloc) 3675606d1bf1SJosef Bacik cache->delalloc_bytes += num_bytes; 367699ffb43eSJosef Bacik 367799ffb43eSJosef Bacik /* 367852bb7a21SBoris Burkov * Compression can use less space than we reserved, so wake tickets if 367952bb7a21SBoris Burkov * that happens.
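 * A worked example with made-up numbers: a buffered write reserves
 * ram_bytes = 128K of data space up front (bytes_may_use += 128K), but
 * after compression the extent needs only num_bytes = 32K on disk.
 * This function then adds 32K to bytes_reserved while releasing the
 * full 128K from bytes_may_use, and the net 96K freed up may be enough
 * to satisfy a waiting reservation ticket.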
368099ffb43eSJosef Bacik */ 368199ffb43eSJosef Bacik if (num_bytes < ram_bytes) 368299ffb43eSJosef Bacik btrfs_try_granting_tickets(cache->fs_info, space_info); 368352bb7a21SBoris Burkov out: 3684606d1bf1SJosef Bacik spin_unlock(&cache->lock); 3685606d1bf1SJosef Bacik spin_unlock(&space_info->lock); 3686606d1bf1SJosef Bacik return ret; 3687606d1bf1SJosef Bacik } 3688606d1bf1SJosef Bacik 368943dd529aSDavid Sterba /* 369043dd529aSDavid Sterba * Update the block_group and space info counters. 369143dd529aSDavid Sterba * 3692606d1bf1SJosef Bacik * @cache: The cache we are manipulating 3693606d1bf1SJosef Bacik * @num_bytes: The number of bytes in question 3694606d1bf1SJosef Bacik * @delalloc: The blocks are allocated for the delalloc write 3695606d1bf1SJosef Bacik * 3696606d1bf1SJosef Bacik * This is called by somebody who is freeing space that was never actually used 3697606d1bf1SJosef Bacik * on disk. For example if you reserve some space for a new leaf in transaction 3698606d1bf1SJosef Bacik * A and before transaction A commits you free that leaf, you call this with 3699606d1bf1SJosef Bacik * reserve set to 0 in order to clear the reservation. 3700606d1bf1SJosef Bacik */ 370132da5386SDavid Sterba void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, 3702606d1bf1SJosef Bacik u64 num_bytes, int delalloc) 3703606d1bf1SJosef Bacik { 3704606d1bf1SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 3705606d1bf1SJosef Bacik 3706606d1bf1SJosef Bacik spin_lock(&space_info->lock); 3707606d1bf1SJosef Bacik spin_lock(&cache->lock); 3708606d1bf1SJosef Bacik if (cache->ro) 3709606d1bf1SJosef Bacik space_info->bytes_readonly += num_bytes; 3710606d1bf1SJosef Bacik cache->reserved -= num_bytes; 3711606d1bf1SJosef Bacik space_info->bytes_reserved -= num_bytes; 3712606d1bf1SJosef Bacik space_info->max_extent_size = 0; 3713606d1bf1SJosef Bacik 3714606d1bf1SJosef Bacik if (delalloc) 3715606d1bf1SJosef Bacik cache->delalloc_bytes -= num_bytes; 3716606d1bf1SJosef Bacik spin_unlock(&cache->lock); 37173308234aSJosef Bacik 37183308234aSJosef Bacik btrfs_try_granting_tickets(cache->fs_info, space_info); 3719606d1bf1SJosef Bacik spin_unlock(&space_info->lock); 3720606d1bf1SJosef Bacik } 372107730d87SJosef Bacik 372207730d87SJosef Bacik static void force_metadata_allocation(struct btrfs_fs_info *info) 372307730d87SJosef Bacik { 372407730d87SJosef Bacik struct list_head *head = &info->space_info; 372507730d87SJosef Bacik struct btrfs_space_info *found; 372607730d87SJosef Bacik 372772804905SJosef Bacik list_for_each_entry(found, head, list) { 372807730d87SJosef Bacik if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 372907730d87SJosef Bacik found->force_alloc = CHUNK_ALLOC_FORCE; 373007730d87SJosef Bacik } 373107730d87SJosef Bacik } 373207730d87SJosef Bacik 373307730d87SJosef Bacik static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 373407730d87SJosef Bacik struct btrfs_space_info *sinfo, int force) 373507730d87SJosef Bacik { 373607730d87SJosef Bacik u64 bytes_used = btrfs_space_info_used(sinfo, false); 373707730d87SJosef Bacik u64 thresh; 373807730d87SJosef Bacik 373907730d87SJosef Bacik if (force == CHUNK_ALLOC_FORCE) 374007730d87SJosef Bacik return 1; 374107730d87SJosef Bacik 374207730d87SJosef Bacik /* 374307730d87SJosef Bacik * in limited mode, we want to have some free space up to 374407730d87SJosef Bacik * about 1% of the FS size. 
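 * A worked example with made-up sizes: on a 1 TiB filesystem the
 * limited-mode threshold is max(64M, 1% of 1 TiB), roughly 10.2G, so a
 * new chunk is allocated once the free space of this space_info falls
 * below that. Outside limited mode (see below), we only allocate once
 * usage crosses about 80% of the space already provisioned.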
374507730d87SJosef Bacik */ 374607730d87SJosef Bacik if (force == CHUNK_ALLOC_LIMITED) { 374707730d87SJosef Bacik thresh = btrfs_super_total_bytes(fs_info->super_copy); 3748428c8e03SDavid Sterba thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1)); 374907730d87SJosef Bacik 375007730d87SJosef Bacik if (sinfo->total_bytes - bytes_used < thresh) 375107730d87SJosef Bacik return 1; 375207730d87SJosef Bacik } 375307730d87SJosef Bacik 3754428c8e03SDavid Sterba if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) 375507730d87SJosef Bacik return 0; 375607730d87SJosef Bacik return 1; 375707730d87SJosef Bacik } 375807730d87SJosef Bacik 375907730d87SJosef Bacik int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) 376007730d87SJosef Bacik { 376107730d87SJosef Bacik u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); 376207730d87SJosef Bacik 376307730d87SJosef Bacik return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 376407730d87SJosef Bacik } 376507730d87SJosef Bacik 3766820c363bSNaohiro Aota static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags) 376779bd3712SFilipe Manana { 376879bd3712SFilipe Manana struct btrfs_block_group *bg; 376979bd3712SFilipe Manana int ret; 377079bd3712SFilipe Manana 377107730d87SJosef Bacik /* 377279bd3712SFilipe Manana * Check if we have enough space in the system space info because we 377379bd3712SFilipe Manana * will need to update device items in the chunk btree and insert a new 377479bd3712SFilipe Manana * chunk item in the chunk btree as well. This will allocate a new 377579bd3712SFilipe Manana * system block group if needed. 377679bd3712SFilipe Manana */ 377779bd3712SFilipe Manana check_system_chunk(trans, flags); 377879bd3712SFilipe Manana 3779f6f39f7aSNikolay Borisov bg = btrfs_create_chunk(trans, flags); 378079bd3712SFilipe Manana if (IS_ERR(bg)) { 378179bd3712SFilipe Manana ret = PTR_ERR(bg); 378279bd3712SFilipe Manana goto out; 378379bd3712SFilipe Manana } 378479bd3712SFilipe Manana 378579bd3712SFilipe Manana ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 378679bd3712SFilipe Manana /* 378779bd3712SFilipe Manana * Normally we are not expected to fail with -ENOSPC here, since we have 378879bd3712SFilipe Manana * previously reserved space in the system space_info and allocated one 3789ecd84d54SFilipe Manana * new system chunk if necessary. However there are three exceptions: 379079bd3712SFilipe Manana * 379179bd3712SFilipe Manana * 1) We may have enough free space in the system space_info but all the 379279bd3712SFilipe Manana * existing system block groups have a profile which can not be used 379379bd3712SFilipe Manana * for extent allocation. 379479bd3712SFilipe Manana * 379579bd3712SFilipe Manana * This happens when mounting in degraded mode. For example we have a 379679bd3712SFilipe Manana * RAID1 filesystem with 2 devices, lose one device and mount the fs 379779bd3712SFilipe Manana * using the other device in degraded mode. If we then allocate a chunk, 379879bd3712SFilipe Manana * we may have enough free space in the existing system space_info, but 379979bd3712SFilipe Manana * none of the block groups can be used for extent allocation since they 380079bd3712SFilipe Manana * have a RAID1 profile, and because we are in degraded mode with a 380179bd3712SFilipe Manana * single device, we are forced to allocate a new system chunk with a 380279bd3712SFilipe Manana * SINGLE profile. 
Making check_system_chunk() iterate over all system 380379bd3712SFilipe Manana * block groups and check if they have a usable profile and enough space 380479bd3712SFilipe Manana * can be slow on very large filesystems, so we tolerate the -ENOSPC and 380579bd3712SFilipe Manana * try again after forcing allocation of a new system chunk. Like this 380679bd3712SFilipe Manana * we avoid paying the cost of that search in normal circumstances, when 380779bd3712SFilipe Manana * we were not mounted in degraded mode; 380879bd3712SFilipe Manana * 380979bd3712SFilipe Manana * 2) We had enough free space in the system space_info, and one suitable 381079bd3712SFilipe Manana * block group to allocate from when we called check_system_chunk() 381179bd3712SFilipe Manana * above. However right after we called it, the only system block group 381279bd3712SFilipe Manana * with enough free space got turned into RO mode by a running scrub, 381379bd3712SFilipe Manana * and in this case we have to allocate a new one and retry. We only 381479bd3712SFilipe Manana * need to do this allocate and retry once, since we have a transaction 3815ecd84d54SFilipe Manana * handle and scrub uses the commit root to search for block groups; 3816ecd84d54SFilipe Manana * 3817ecd84d54SFilipe Manana * 3) We had one system block group with enough free space when we called 3818ecd84d54SFilipe Manana * check_system_chunk(), but after that, right before we tried to 3819ecd84d54SFilipe Manana * allocate the last extent buffer we needed, a discard operation came 3820ecd84d54SFilipe Manana * in and it temporarily removed the last free space entry from the 3821ecd84d54SFilipe Manana * block group (discard removes a free space entry, discards it, and 3822ecd84d54SFilipe Manana * then adds back the entry to the block group cache).
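 * In all three cases the recovery below is the same: force the
 * allocation of one more system chunk, insert its chunk item, and then
 * retry inserting the chunk item of the block group we created above.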
382379bd3712SFilipe Manana */ 382479bd3712SFilipe Manana if (ret == -ENOSPC) { 382579bd3712SFilipe Manana const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); 382679bd3712SFilipe Manana struct btrfs_block_group *sys_bg; 382779bd3712SFilipe Manana 3828f6f39f7aSNikolay Borisov sys_bg = btrfs_create_chunk(trans, sys_flags); 382979bd3712SFilipe Manana if (IS_ERR(sys_bg)) { 383079bd3712SFilipe Manana ret = PTR_ERR(sys_bg); 383179bd3712SFilipe Manana btrfs_abort_transaction(trans, ret); 383279bd3712SFilipe Manana goto out; 383379bd3712SFilipe Manana } 383479bd3712SFilipe Manana 383579bd3712SFilipe Manana ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 383679bd3712SFilipe Manana if (ret) { 383779bd3712SFilipe Manana btrfs_abort_transaction(trans, ret); 383879bd3712SFilipe Manana goto out; 383979bd3712SFilipe Manana } 384079bd3712SFilipe Manana 384179bd3712SFilipe Manana ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 384279bd3712SFilipe Manana if (ret) { 384379bd3712SFilipe Manana btrfs_abort_transaction(trans, ret); 384479bd3712SFilipe Manana goto out; 384579bd3712SFilipe Manana } 384679bd3712SFilipe Manana } else if (ret) { 384779bd3712SFilipe Manana btrfs_abort_transaction(trans, ret); 384879bd3712SFilipe Manana goto out; 384979bd3712SFilipe Manana } 385079bd3712SFilipe Manana out: 385179bd3712SFilipe Manana btrfs_trans_release_chunk_metadata(trans); 385279bd3712SFilipe Manana 3853820c363bSNaohiro Aota if (ret) 3854820c363bSNaohiro Aota return ERR_PTR(ret); 3855820c363bSNaohiro Aota 3856820c363bSNaohiro Aota btrfs_get_block_group(bg); 3857820c363bSNaohiro Aota return bg; 385879bd3712SFilipe Manana } 385979bd3712SFilipe Manana 386079bd3712SFilipe Manana /* 386179bd3712SFilipe Manana * Chunk allocation is done in 2 phases: 386279bd3712SFilipe Manana * 386379bd3712SFilipe Manana * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for 386479bd3712SFilipe Manana * the chunk, the chunk mapping, create its block group and add the items 386579bd3712SFilipe Manana * that belong in the chunk btree to it - more specifically, we need to 386679bd3712SFilipe Manana * update device items in the chunk btree and add a new chunk item to it. 386779bd3712SFilipe Manana * 386879bd3712SFilipe Manana * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block 386979bd3712SFilipe Manana * group item to the extent btree and the device extent items to the devices 387079bd3712SFilipe Manana * btree. 387179bd3712SFilipe Manana * 387279bd3712SFilipe Manana * This is done to prevent deadlocks. For example when COWing a node from the 387379bd3712SFilipe Manana * extent btree we are holding a write lock on the node's parent and if we 387479bd3712SFilipe Manana * trigger chunk allocation and attempt to insert the new block group item 387579bd3712SFilipe Manana * in the extent btree right away, we could deadlock because the path for the 387679bd3712SFilipe Manana * insertion can include that parent node.
At first glance it seems impossible 387779bd3712SFilipe Manana * to trigger chunk allocation after starting a transaction since tasks should 387879bd3712SFilipe Manana * reserve enough transaction units (metadata space), however while that is true 387979bd3712SFilipe Manana * most of the time, chunk allocation may still be triggered for several reasons: 388079bd3712SFilipe Manana * 388179bd3712SFilipe Manana * 1) When reserving metadata, we check if there is enough free space in the 388279bd3712SFilipe Manana * metadata space_info and therefore don't trigger allocation of a new chunk. 388379bd3712SFilipe Manana * However later when the task actually tries to COW an extent buffer from 388479bd3712SFilipe Manana * the extent btree or from the device btree for example, it is forced to 388579bd3712SFilipe Manana * allocate a new block group (chunk) because the only one that had enough 388679bd3712SFilipe Manana * free space was just turned to RO mode by a running scrub for example (or 388779bd3712SFilipe Manana * device replace, block group reclaim thread, etc), so we can not use it 388879bd3712SFilipe Manana * for allocating an extent and end up being forced to allocate a new one; 388979bd3712SFilipe Manana * 389079bd3712SFilipe Manana * 2) Because we only check that the metadata space_info has enough free bytes, 389179bd3712SFilipe Manana * we end up not allocating a new metadata chunk in that case. However if 389279bd3712SFilipe Manana * the filesystem was mounted in degraded mode, none of the existing block 389379bd3712SFilipe Manana * groups might be suitable for extent allocation due to their incompatible 389479bd3712SFilipe Manana * profile (for e.g. mounting a 2 devices filesystem, where all block groups 389579bd3712SFilipe Manana * use a RAID1 profile, in degraded mode using a single device). In this case 389679bd3712SFilipe Manana * when the task attempts to COW some extent buffer of the extent btree for 389779bd3712SFilipe Manana * example, it will trigger allocation of a new metadata block group with a 389879bd3712SFilipe Manana * suitable profile (SINGLE profile in the example of the degraded mount of 389979bd3712SFilipe Manana * the RAID1 filesystem); 390079bd3712SFilipe Manana * 390179bd3712SFilipe Manana * 3) The task has reserved enough transaction units / metadata space, but when 390279bd3712SFilipe Manana * it attempts to COW an extent buffer from the extent or device btree for 390379bd3712SFilipe Manana * example, it does not find any free extent in any metadata block group, 390479bd3712SFilipe Manana * therefore forced to try to allocate a new metadata block group. 390579bd3712SFilipe Manana * This is because some other task allocated all available extents in the 390679bd3712SFilipe Manana * meanwhile - this typically happens with tasks that don't reserve space 390779bd3712SFilipe Manana * properly, either intentionally or as a bug. 
One example where this is 390879bd3712SFilipe Manana * done intentionally is fsync, as it does not reserve any transaction units 390979bd3712SFilipe Manana * and ends up allocating a variable number of metadata extents for log 3910ecd84d54SFilipe Manana * tree extent buffers; 3911ecd84d54SFilipe Manana * 3912ecd84d54SFilipe Manana * 4) The task has reserved enough transaction units / metadata space, but right 3913ecd84d54SFilipe Manana * before it tries to allocate the last extent buffer it needs, a discard 3914ecd84d54SFilipe Manana * operation comes in and, temporarily, removes the last free space entry from 3915ecd84d54SFilipe Manana * the only metadata block group that had free space (discard starts by 3916ecd84d54SFilipe Manana * removing a free space entry from a block group, then does the discard 3917ecd84d54SFilipe Manana * operation and, once it's done, it adds back the free space entry to the 3918ecd84d54SFilipe Manana * block group). 391979bd3712SFilipe Manana * 392079bd3712SFilipe Manana * We also need this two-phase setup when adding a device to a filesystem with 392179bd3712SFilipe Manana * a seed device - we must create new metadata and system chunks without adding 392279bd3712SFilipe Manana * any of the block group items to the chunk, extent and device btrees. If we 392379bd3712SFilipe Manana * did not do it this way, we would get ENOSPC when attempting to update those 392479bd3712SFilipe Manana * btrees, since all the chunks from the seed device are read-only. 392579bd3712SFilipe Manana * 392679bd3712SFilipe Manana * Phase 1 does the updates and insertions to the chunk btree because if we had 392779bd3712SFilipe Manana * it done in phase 2 and have a thundering herd of tasks allocating chunks in 392879bd3712SFilipe Manana * parallel, we risk having too many system chunks allocated by many tasks if 392979bd3712SFilipe Manana * many tasks reach phase 1 without the previous ones completing phase 2. In the 393079bd3712SFilipe Manana * extreme case this leads to exhaustion of the system chunk array in the 393179bd3712SFilipe Manana * superblock. This is easier to trigger if using a btree node/leaf size of 64K 393279bd3712SFilipe Manana * and with RAID filesystems (so we have more device items in the chunk btree). 393379bd3712SFilipe Manana * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of 393479bd3712SFilipe Manana * the system chunk array due to concurrent allocations") provides more details. 393579bd3712SFilipe Manana * 39362bb2e00eSFilipe Manana * Allocation of system chunks does not happen through this function. A task that 39372bb2e00eSFilipe Manana * needs to update the chunk btree (the only btree that uses system chunks) must 39382bb2e00eSFilipe Manana * preallocate chunk space by calling either check_system_chunk() or 39392bb2e00eSFilipe Manana * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or 39402bb2e00eSFilipe Manana * metadata chunk or when removing a chunk, while the latter is used before doing 39412bb2e00eSFilipe Manana * a modification to the chunk btree - use cases for the latter are adding, 39422bb2e00eSFilipe Manana * removing and resizing a device as well as relocation of a system chunk. 39432bb2e00eSFilipe Manana * See the comment below for more details.
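 * A simplified call-flow sketch of the two phases for a data or
 * metadata chunk (function names as used in this file):
 *
 *   phase 1: btrfs_chunk_alloc()
 *              -> check_system_chunk()                 reserve system space
 *              -> btrfs_create_chunk()                 device extents + mapping
 *              -> btrfs_chunk_alloc_add_chunk_item()   chunk btree items
 *
 *   phase 2: btrfs_create_pending_block_groups()
 *              -> block group item into the extent btree, device extent
 *                 items into the device btree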
394479bd3712SFilipe Manana * 394579bd3712SFilipe Manana * The reservation of system space, done through check_system_chunk(), as well 394679bd3712SFilipe Manana * as all the updates and insertions into the chunk btree must be done while 394779bd3712SFilipe Manana * holding fs_info->chunk_mutex. This is important to guarantee that while COWing 394879bd3712SFilipe Manana * an extent buffer from the chunks btree we never trigger allocation of a new 394979bd3712SFilipe Manana * system chunk, which would result in a deadlock (trying to lock twice an 395079bd3712SFilipe Manana * extent buffer of the chunk btree, first time before triggering the chunk 395179bd3712SFilipe Manana * allocation and the second time during chunk allocation while attempting to 395279bd3712SFilipe Manana * update the chunks btree). The system chunk array is also updated while holding 395379bd3712SFilipe Manana * that mutex. The same logic applies to removing chunks - we must reserve system 395479bd3712SFilipe Manana * space, update the chunk btree and the system chunk array in the superblock 395579bd3712SFilipe Manana * while holding fs_info->chunk_mutex. 395679bd3712SFilipe Manana * 395779bd3712SFilipe Manana * This function, btrfs_chunk_alloc(), belongs to phase 1. 395879bd3712SFilipe Manana * 395979bd3712SFilipe Manana * If @force is CHUNK_ALLOC_FORCE: 396007730d87SJosef Bacik * - return 1 if it successfully allocates a chunk, 396107730d87SJosef Bacik * - return errors including -ENOSPC otherwise. 396279bd3712SFilipe Manana * If @force is NOT CHUNK_ALLOC_FORCE: 396307730d87SJosef Bacik * - return 0 if it doesn't need to allocate a new chunk, 396407730d87SJosef Bacik * - return 1 if it successfully allocates a chunk, 396507730d87SJosef Bacik * - return errors including -ENOSPC otherwise. 396607730d87SJosef Bacik */ 396707730d87SJosef Bacik int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, 396807730d87SJosef Bacik enum btrfs_chunk_alloc_enum force) 396907730d87SJosef Bacik { 397007730d87SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 397107730d87SJosef Bacik struct btrfs_space_info *space_info; 3972820c363bSNaohiro Aota struct btrfs_block_group *ret_bg; 397307730d87SJosef Bacik bool wait_for_alloc = false; 397407730d87SJosef Bacik bool should_alloc = false; 3975760e69c4SNaohiro Aota bool from_extent_allocation = false; 397607730d87SJosef Bacik int ret = 0; 397707730d87SJosef Bacik 3978760e69c4SNaohiro Aota if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) { 3979760e69c4SNaohiro Aota from_extent_allocation = true; 3980760e69c4SNaohiro Aota force = CHUNK_ALLOC_FORCE; 3981760e69c4SNaohiro Aota } 3982760e69c4SNaohiro Aota 398307730d87SJosef Bacik /* Don't re-enter if we're already allocating a chunk */ 398407730d87SJosef Bacik if (trans->allocating_chunk) 398507730d87SJosef Bacik return -ENOSPC; 398679bd3712SFilipe Manana /* 39872bb2e00eSFilipe Manana * Allocation of system chunks can not happen through this path, as we 39882bb2e00eSFilipe Manana * could end up in a deadlock if we are allocating a data or metadata 39892bb2e00eSFilipe Manana * chunk and there is another task modifying the chunk btree. 
39902bb2e00eSFilipe Manana * 39912bb2e00eSFilipe Manana * This is because while we are holding the chunk mutex, we will attempt 39922bb2e00eSFilipe Manana * to add the new chunk item to the chunk btree or update an existing 39932bb2e00eSFilipe Manana * device item in the chunk btree, while the other task that is modifying 39942bb2e00eSFilipe Manana * the chunk btree is attempting to COW an extent buffer while holding a 39952bb2e00eSFilipe Manana * lock on it and on its parent - if the COW operation triggers a system 39962bb2e00eSFilipe Manana * chunk allocation, then we can deadlock because we are holding the 39972bb2e00eSFilipe Manana * chunk mutex and we may need to access that extent buffer or its parent 39982bb2e00eSFilipe Manana * in order to add the chunk item or update a device item. 39992bb2e00eSFilipe Manana * 40002bb2e00eSFilipe Manana * Tasks that want to modify the chunk tree should reserve system space 40012bb2e00eSFilipe Manana * before updating the chunk btree, by calling either 40022bb2e00eSFilipe Manana * btrfs_reserve_chunk_metadata() or check_system_chunk(). 40032bb2e00eSFilipe Manana * It's possible that after a task reserves the space, it still ends up 40042bb2e00eSFilipe Manana * here - this happens in the cases described above at do_chunk_alloc(). 40052bb2e00eSFilipe Manana * The task will have to either retry or fail. 400679bd3712SFilipe Manana */ 40072bb2e00eSFilipe Manana if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 400879bd3712SFilipe Manana return -ENOSPC; 400907730d87SJosef Bacik 401007730d87SJosef Bacik space_info = btrfs_find_space_info(fs_info, flags); 401107730d87SJosef Bacik ASSERT(space_info); 401207730d87SJosef Bacik 401307730d87SJosef Bacik do { 401407730d87SJosef Bacik spin_lock(&space_info->lock); 401507730d87SJosef Bacik if (force < space_info->force_alloc) 401607730d87SJosef Bacik force = space_info->force_alloc; 401707730d87SJosef Bacik should_alloc = should_alloc_chunk(fs_info, space_info, force); 401807730d87SJosef Bacik if (space_info->full) { 401907730d87SJosef Bacik /* No more free physical space */ 402007730d87SJosef Bacik if (should_alloc) 402107730d87SJosef Bacik ret = -ENOSPC; 402207730d87SJosef Bacik else 402307730d87SJosef Bacik ret = 0; 402407730d87SJosef Bacik spin_unlock(&space_info->lock); 402507730d87SJosef Bacik return ret; 402607730d87SJosef Bacik } else if (!should_alloc) { 402707730d87SJosef Bacik spin_unlock(&space_info->lock); 402807730d87SJosef Bacik return 0; 402907730d87SJosef Bacik } else if (space_info->chunk_alloc) { 403007730d87SJosef Bacik /* 403107730d87SJosef Bacik * Someone is already allocating, so we need to block 403207730d87SJosef Bacik * until this someone is finished and then loop to 403307730d87SJosef Bacik * recheck if we should continue with our allocation 403407730d87SJosef Bacik * attempt. 
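 * (The chunk_mutex lock/unlock pair in the branch below is used purely
 *  as a barrier: it blocks us until the task currently allocating a
 *  chunk drops the mutex, after which we loop and re-evaluate whether
 *  an allocation is still needed.)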
403507730d87SJosef Bacik */ 403607730d87SJosef Bacik wait_for_alloc = true; 40371314ca78SJosef Bacik force = CHUNK_ALLOC_NO_FORCE; 403807730d87SJosef Bacik spin_unlock(&space_info->lock); 403907730d87SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 404007730d87SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 404107730d87SJosef Bacik } else { 404207730d87SJosef Bacik /* Proceed with allocation */ 404307730d87SJosef Bacik space_info->chunk_alloc = 1; 404407730d87SJosef Bacik wait_for_alloc = false; 404507730d87SJosef Bacik spin_unlock(&space_info->lock); 404607730d87SJosef Bacik } 404707730d87SJosef Bacik 404807730d87SJosef Bacik cond_resched(); 404907730d87SJosef Bacik } while (wait_for_alloc); 405007730d87SJosef Bacik 405107730d87SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 405207730d87SJosef Bacik trans->allocating_chunk = true; 405307730d87SJosef Bacik 405407730d87SJosef Bacik /* 405507730d87SJosef Bacik * If we have mixed data/metadata chunks we want to make sure we keep 405607730d87SJosef Bacik * allocating mixed chunks instead of individual chunks. 405707730d87SJosef Bacik */ 405807730d87SJosef Bacik if (btrfs_mixed_space_info(space_info)) 405907730d87SJosef Bacik flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 406007730d87SJosef Bacik 406107730d87SJosef Bacik /* 406207730d87SJosef Bacik * if we're doing a data chunk, go ahead and make sure that 406307730d87SJosef Bacik * we keep a reasonable number of metadata chunks allocated in the 406407730d87SJosef Bacik * FS as well. 406507730d87SJosef Bacik */ 406607730d87SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 406707730d87SJosef Bacik fs_info->data_chunk_allocations++; 406807730d87SJosef Bacik if (!(fs_info->data_chunk_allocations % 406907730d87SJosef Bacik fs_info->metadata_ratio)) 407007730d87SJosef Bacik force_metadata_allocation(fs_info); 407107730d87SJosef Bacik } 407207730d87SJosef Bacik 4073820c363bSNaohiro Aota ret_bg = do_chunk_alloc(trans, flags); 407407730d87SJosef Bacik trans->allocating_chunk = false; 407507730d87SJosef Bacik 4076760e69c4SNaohiro Aota if (IS_ERR(ret_bg)) { 4077820c363bSNaohiro Aota ret = PTR_ERR(ret_bg); 4078760e69c4SNaohiro Aota } else if (from_extent_allocation) { 4079760e69c4SNaohiro Aota /* 4080760e69c4SNaohiro Aota * New block group is likely to be used soon. Try to activate 4081760e69c4SNaohiro Aota * it now. Failure is OK for now. 
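 * (btrfs_zone_activate() returns success without doing anything on
 *  regular, non-zoned filesystems, so the call below only has an
 *  effect in zoned mode.)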
4082760e69c4SNaohiro Aota */ 4083760e69c4SNaohiro Aota btrfs_zone_activate(ret_bg); 4084760e69c4SNaohiro Aota } 4085760e69c4SNaohiro Aota 4086760e69c4SNaohiro Aota if (!ret) 4087820c363bSNaohiro Aota btrfs_put_block_group(ret_bg); 4088820c363bSNaohiro Aota 408907730d87SJosef Bacik spin_lock(&space_info->lock); 409007730d87SJosef Bacik if (ret < 0) { 409107730d87SJosef Bacik if (ret == -ENOSPC) 409207730d87SJosef Bacik space_info->full = 1; 409307730d87SJosef Bacik else 409407730d87SJosef Bacik goto out; 409507730d87SJosef Bacik } else { 409607730d87SJosef Bacik ret = 1; 409707730d87SJosef Bacik space_info->max_extent_size = 0; 409807730d87SJosef Bacik } 409907730d87SJosef Bacik 410007730d87SJosef Bacik space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 410107730d87SJosef Bacik out: 410207730d87SJosef Bacik space_info->chunk_alloc = 0; 410307730d87SJosef Bacik spin_unlock(&space_info->lock); 410407730d87SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 410507730d87SJosef Bacik 410607730d87SJosef Bacik return ret; 410707730d87SJosef Bacik } 410807730d87SJosef Bacik 410907730d87SJosef Bacik static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) 411007730d87SJosef Bacik { 411107730d87SJosef Bacik u64 num_dev; 411207730d87SJosef Bacik 411307730d87SJosef Bacik num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; 411407730d87SJosef Bacik if (!num_dev) 411507730d87SJosef Bacik num_dev = fs_info->fs_devices->rw_devices; 411607730d87SJosef Bacik 411707730d87SJosef Bacik return num_dev; 411807730d87SJosef Bacik } 411907730d87SJosef Bacik 41202bb2e00eSFilipe Manana static void reserve_chunk_space(struct btrfs_trans_handle *trans, 41212bb2e00eSFilipe Manana u64 bytes, 41222bb2e00eSFilipe Manana u64 type) 412307730d87SJosef Bacik { 412407730d87SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 412507730d87SJosef Bacik struct btrfs_space_info *info; 412607730d87SJosef Bacik u64 left; 412707730d87SJosef Bacik int ret = 0; 412807730d87SJosef Bacik 412907730d87SJosef Bacik /* 413007730d87SJosef Bacik * Needed because we can end up allocating a system chunk and for an 413107730d87SJosef Bacik * atomic and race free space reservation in the chunk block reserve. 413207730d87SJosef Bacik */ 413307730d87SJosef Bacik lockdep_assert_held(&fs_info->chunk_mutex); 413407730d87SJosef Bacik 413507730d87SJosef Bacik info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 413607730d87SJosef Bacik spin_lock(&info->lock); 413707730d87SJosef Bacik left = info->total_bytes - btrfs_space_info_used(info, true); 413807730d87SJosef Bacik spin_unlock(&info->lock); 413907730d87SJosef Bacik 41402bb2e00eSFilipe Manana if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 414107730d87SJosef Bacik btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", 41422bb2e00eSFilipe Manana left, bytes, type); 414307730d87SJosef Bacik btrfs_dump_space_info(fs_info, info, 0, 0); 414407730d87SJosef Bacik } 414507730d87SJosef Bacik 41462bb2e00eSFilipe Manana if (left < bytes) { 414707730d87SJosef Bacik u64 flags = btrfs_system_alloc_profile(fs_info); 414879bd3712SFilipe Manana struct btrfs_block_group *bg; 414907730d87SJosef Bacik 415007730d87SJosef Bacik /* 415107730d87SJosef Bacik * Ignore failure to create system chunk. 
We might end up not 415207730d87SJosef Bacik * needing it, as we might not need to COW all nodes/leafs from 415307730d87SJosef Bacik * the paths we visit in the chunk tree (they were already COWed 415407730d87SJosef Bacik * or created in the current transaction for example). 415507730d87SJosef Bacik */ 4156f6f39f7aSNikolay Borisov bg = btrfs_create_chunk(trans, flags); 415779bd3712SFilipe Manana if (IS_ERR(bg)) { 415879bd3712SFilipe Manana ret = PTR_ERR(bg); 41592bb2e00eSFilipe Manana } else { 416079bd3712SFilipe Manana /* 4161b6a98021SNaohiro Aota * We have a new chunk. We also need to activate it for 4162b6a98021SNaohiro Aota * zoned filesystem. 4163b6a98021SNaohiro Aota */ 4164b6a98021SNaohiro Aota ret = btrfs_zoned_activate_one_bg(fs_info, info, true); 4165b6a98021SNaohiro Aota if (ret < 0) 4166b6a98021SNaohiro Aota return; 4167b6a98021SNaohiro Aota 4168b6a98021SNaohiro Aota /* 416979bd3712SFilipe Manana * If we fail to add the chunk item here, we end up 417079bd3712SFilipe Manana * trying again at phase 2 of chunk allocation, at 417179bd3712SFilipe Manana * btrfs_create_pending_block_groups(). So ignore 41722bb2e00eSFilipe Manana * any error here. An ENOSPC here could happen, due to 41732bb2e00eSFilipe Manana * the cases described at do_chunk_alloc() - the system 41742bb2e00eSFilipe Manana * block group we just created was just turned into RO 41752bb2e00eSFilipe Manana * mode by a scrub for example, or a running discard 41762bb2e00eSFilipe Manana * temporarily removed its free space entries, etc. 417779bd3712SFilipe Manana */ 417879bd3712SFilipe Manana btrfs_chunk_alloc_add_chunk_item(trans, bg); 417979bd3712SFilipe Manana } 418007730d87SJosef Bacik } 418107730d87SJosef Bacik 418207730d87SJosef Bacik if (!ret) { 41839270501cSJosef Bacik ret = btrfs_block_rsv_add(fs_info, 418407730d87SJosef Bacik &fs_info->chunk_block_rsv, 41852bb2e00eSFilipe Manana bytes, BTRFS_RESERVE_NO_FLUSH); 41861cb3db1cSFilipe Manana if (!ret) 41872bb2e00eSFilipe Manana trans->chunk_bytes_reserved += bytes; 418807730d87SJosef Bacik } 418907730d87SJosef Bacik } 419007730d87SJosef Bacik 41912bb2e00eSFilipe Manana /* 41922bb2e00eSFilipe Manana * Reserve space in the system space for allocating or removing a chunk. 41932bb2e00eSFilipe Manana * The caller must be holding fs_info->chunk_mutex. 41942bb2e00eSFilipe Manana */ 41952bb2e00eSFilipe Manana void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) 41962bb2e00eSFilipe Manana { 41972bb2e00eSFilipe Manana struct btrfs_fs_info *fs_info = trans->fs_info; 41982bb2e00eSFilipe Manana const u64 num_devs = get_profile_num_devs(fs_info, type); 41992bb2e00eSFilipe Manana u64 bytes; 42002bb2e00eSFilipe Manana 42012bb2e00eSFilipe Manana /* num_devs device items to update and 1 chunk item to add or remove. */ 42022bb2e00eSFilipe Manana bytes = btrfs_calc_metadata_size(fs_info, num_devs) + 42032bb2e00eSFilipe Manana btrfs_calc_insert_metadata_size(fs_info, 1); 42042bb2e00eSFilipe Manana 42052bb2e00eSFilipe Manana reserve_chunk_space(trans, bytes, type); 42062bb2e00eSFilipe Manana } 42072bb2e00eSFilipe Manana 42082bb2e00eSFilipe Manana /* 42092bb2e00eSFilipe Manana * Reserve space in the system space, if needed, for doing a modification to the 42102bb2e00eSFilipe Manana * chunk btree. 42112bb2e00eSFilipe Manana * 42122bb2e00eSFilipe Manana * @trans: A transaction handle. 
42132bb2e00eSFilipe Manana * @is_item_insertion: Indicate if the modification is for inserting a new item 42142bb2e00eSFilipe Manana * in the chunk btree or if it's for the deletion or update 42152bb2e00eSFilipe Manana * of an existing item. 42162bb2e00eSFilipe Manana * 42172bb2e00eSFilipe Manana * This is used in a context where we need to update the chunk btree outside 42182bb2e00eSFilipe Manana * block group allocation and removal, to avoid a deadlock with a concurrent 42192bb2e00eSFilipe Manana * task that is allocating a metadata or data block group and therefore needs to 42202bb2e00eSFilipe Manana * update the chunk btree while holding the chunk mutex. After the update to the 42212bb2e00eSFilipe Manana * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called. 42222bb2e00eSFilipe Manana * 42232bb2e00eSFilipe Manana */ 42242bb2e00eSFilipe Manana void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans, 42252bb2e00eSFilipe Manana bool is_item_insertion) 42262bb2e00eSFilipe Manana { 42272bb2e00eSFilipe Manana struct btrfs_fs_info *fs_info = trans->fs_info; 42282bb2e00eSFilipe Manana u64 bytes; 42292bb2e00eSFilipe Manana 42302bb2e00eSFilipe Manana if (is_item_insertion) 42312bb2e00eSFilipe Manana bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 42322bb2e00eSFilipe Manana else 42332bb2e00eSFilipe Manana bytes = btrfs_calc_metadata_size(fs_info, 1); 42342bb2e00eSFilipe Manana 42352bb2e00eSFilipe Manana mutex_lock(&fs_info->chunk_mutex); 42362bb2e00eSFilipe Manana reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM); 42372bb2e00eSFilipe Manana mutex_unlock(&fs_info->chunk_mutex); 42382bb2e00eSFilipe Manana } 42392bb2e00eSFilipe Manana 42403e43c279SJosef Bacik void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 42413e43c279SJosef Bacik { 424232da5386SDavid Sterba struct btrfs_block_group *block_group; 42433e43c279SJosef Bacik 424450c31eaaSJosef Bacik block_group = btrfs_lookup_first_block_group(info, 0); 42453e43c279SJosef Bacik while (block_group) { 42463e43c279SJosef Bacik btrfs_wait_block_group_cache_done(block_group); 42473e43c279SJosef Bacik spin_lock(&block_group->lock); 424850c31eaaSJosef Bacik if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, 424950c31eaaSJosef Bacik &block_group->runtime_flags)) { 425050c31eaaSJosef Bacik struct inode *inode = block_group->inode; 42513e43c279SJosef Bacik 42523e43c279SJosef Bacik block_group->inode = NULL; 42533e43c279SJosef Bacik spin_unlock(&block_group->lock); 425450c31eaaSJosef Bacik 42553e43c279SJosef Bacik ASSERT(block_group->io_ctl.inode == NULL); 42563e43c279SJosef Bacik iput(inode); 425750c31eaaSJosef Bacik } else { 425850c31eaaSJosef Bacik spin_unlock(&block_group->lock); 425950c31eaaSJosef Bacik } 426050c31eaaSJosef Bacik block_group = btrfs_next_block_group(block_group); 42613e43c279SJosef Bacik } 42623e43c279SJosef Bacik } 42633e43c279SJosef Bacik 42643e43c279SJosef Bacik /* 42653e43c279SJosef Bacik * Must be called only after stopping all workers, since we could have block 42663e43c279SJosef Bacik * group caching kthreads running, and therefore they could race with us if we 42673e43c279SJosef Bacik * freed the block groups before stopping them. 
42683e43c279SJosef Bacik */ 42693e43c279SJosef Bacik int btrfs_free_block_groups(struct btrfs_fs_info *info) 42703e43c279SJosef Bacik { 427132da5386SDavid Sterba struct btrfs_block_group *block_group; 42723e43c279SJosef Bacik struct btrfs_space_info *space_info; 42733e43c279SJosef Bacik struct btrfs_caching_control *caching_ctl; 42743e43c279SJosef Bacik struct rb_node *n; 42753e43c279SJosef Bacik 427616b0c258SFilipe Manana write_lock(&info->block_group_cache_lock); 42773e43c279SJosef Bacik while (!list_empty(&info->caching_block_groups)) { 42783e43c279SJosef Bacik caching_ctl = list_entry(info->caching_block_groups.next, 42793e43c279SJosef Bacik struct btrfs_caching_control, list); 42803e43c279SJosef Bacik list_del(&caching_ctl->list); 42813e43c279SJosef Bacik btrfs_put_caching_control(caching_ctl); 42823e43c279SJosef Bacik } 428316b0c258SFilipe Manana write_unlock(&info->block_group_cache_lock); 42843e43c279SJosef Bacik 42853e43c279SJosef Bacik spin_lock(&info->unused_bgs_lock); 42863e43c279SJosef Bacik while (!list_empty(&info->unused_bgs)) { 42873e43c279SJosef Bacik block_group = list_first_entry(&info->unused_bgs, 428832da5386SDavid Sterba struct btrfs_block_group, 42893e43c279SJosef Bacik bg_list); 42903e43c279SJosef Bacik list_del_init(&block_group->bg_list); 42913e43c279SJosef Bacik btrfs_put_block_group(block_group); 42923e43c279SJosef Bacik } 42933e43c279SJosef Bacik 429418bb8bbfSJohannes Thumshirn while (!list_empty(&info->reclaim_bgs)) { 429518bb8bbfSJohannes Thumshirn block_group = list_first_entry(&info->reclaim_bgs, 429618bb8bbfSJohannes Thumshirn struct btrfs_block_group, 429718bb8bbfSJohannes Thumshirn bg_list); 429818bb8bbfSJohannes Thumshirn list_del_init(&block_group->bg_list); 429918bb8bbfSJohannes Thumshirn btrfs_put_block_group(block_group); 430018bb8bbfSJohannes Thumshirn } 430118bb8bbfSJohannes Thumshirn spin_unlock(&info->unused_bgs_lock); 430218bb8bbfSJohannes Thumshirn 4303afba2bc0SNaohiro Aota spin_lock(&info->zone_active_bgs_lock); 4304afba2bc0SNaohiro Aota while (!list_empty(&info->zone_active_bgs)) { 4305afba2bc0SNaohiro Aota block_group = list_first_entry(&info->zone_active_bgs, 4306afba2bc0SNaohiro Aota struct btrfs_block_group, 4307afba2bc0SNaohiro Aota active_bg_list); 4308afba2bc0SNaohiro Aota list_del_init(&block_group->active_bg_list); 4309afba2bc0SNaohiro Aota btrfs_put_block_group(block_group); 4310afba2bc0SNaohiro Aota } 4311afba2bc0SNaohiro Aota spin_unlock(&info->zone_active_bgs_lock); 4312afba2bc0SNaohiro Aota 431316b0c258SFilipe Manana write_lock(&info->block_group_cache_lock); 431408dddb29SFilipe Manana while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { 431532da5386SDavid Sterba block_group = rb_entry(n, struct btrfs_block_group, 43163e43c279SJosef Bacik cache_node); 431708dddb29SFilipe Manana rb_erase_cached(&block_group->cache_node, 43183e43c279SJosef Bacik &info->block_group_cache_tree); 43193e43c279SJosef Bacik RB_CLEAR_NODE(&block_group->cache_node); 432016b0c258SFilipe Manana write_unlock(&info->block_group_cache_lock); 43213e43c279SJosef Bacik 43223e43c279SJosef Bacik down_write(&block_group->space_info->groups_sem); 43233e43c279SJosef Bacik list_del(&block_group->list); 43243e43c279SJosef Bacik up_write(&block_group->space_info->groups_sem); 43253e43c279SJosef Bacik 43263e43c279SJosef Bacik /* 43273e43c279SJosef Bacik * We haven't cached this block group, which means we could 43283e43c279SJosef Bacik * possibly have excluded extents on this block group. 
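 * (Excluded extents are ranges, such as the superblock mirror copies,
 *  that are marked unusable while a block group waits to be cached; if
 *  caching never completed we still have to drop those markers here.)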
43293e43c279SJosef Bacik 		 */
43303e43c279SJosef Bacik 		if (block_group->cached == BTRFS_CACHE_NO ||
43313e43c279SJosef Bacik 		    block_group->cached == BTRFS_CACHE_ERROR)
43323e43c279SJosef Bacik 			btrfs_free_excluded_extents(block_group);
43333e43c279SJosef Bacik 
43343e43c279SJosef Bacik 		btrfs_remove_free_space_cache(block_group);
43353e43c279SJosef Bacik 		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
43363e43c279SJosef Bacik 		ASSERT(list_empty(&block_group->dirty_list));
43373e43c279SJosef Bacik 		ASSERT(list_empty(&block_group->io_list));
43383e43c279SJosef Bacik 		ASSERT(list_empty(&block_group->bg_list));
433948aaeebeSJosef Bacik 		ASSERT(refcount_read(&block_group->refs) == 1);
4340195a49eaSFilipe Manana 		ASSERT(block_group->swap_extents == 0);
43413e43c279SJosef Bacik 		btrfs_put_block_group(block_group);
43423e43c279SJosef Bacik 
434316b0c258SFilipe Manana 		write_lock(&info->block_group_cache_lock);
43443e43c279SJosef Bacik 	}
434516b0c258SFilipe Manana 	write_unlock(&info->block_group_cache_lock);
43463e43c279SJosef Bacik 
43473e43c279SJosef Bacik 	btrfs_release_global_block_rsv(info);
43483e43c279SJosef Bacik 
43493e43c279SJosef Bacik 	while (!list_empty(&info->space_info)) {
43503e43c279SJosef Bacik 		space_info = list_entry(info->space_info.next,
43513e43c279SJosef Bacik 					struct btrfs_space_info,
43523e43c279SJosef Bacik 					list);
43533e43c279SJosef Bacik 
43543e43c279SJosef Bacik 		/*
43553e43c279SJosef Bacik 		 * Do not hide this behind enospc_debug; this is actually
43563e43c279SJosef Bacik 		 * important and indicates a real bug if it happens.
43573e43c279SJosef Bacik 		 */
43583e43c279SJosef Bacik 		if (WARN_ON(space_info->bytes_pinned > 0 ||
43593e43c279SJosef Bacik 			    space_info->bytes_may_use > 0))
43603e43c279SJosef Bacik 			btrfs_dump_space_info(info, space_info, 0, 0);
436140cdc509SFilipe Manana 
436240cdc509SFilipe Manana 		/*
436340cdc509SFilipe Manana 		 * If there was a failure to clean up a log tree, very likely due
436440cdc509SFilipe Manana 		 * to an IO failure on a writeback attempt of one or more of its
436540cdc509SFilipe Manana 		 * extent buffers, we could not do proper (and cheap) unaccounting
436640cdc509SFilipe Manana 		 * of their reserved space, so don't warn on bytes_reserved > 0 in
436740cdc509SFilipe Manana 		 * that case.
436840cdc509SFilipe Manana 		 */
436940cdc509SFilipe Manana 		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
437040cdc509SFilipe Manana 		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
437140cdc509SFilipe Manana 			if (WARN_ON(space_info->bytes_reserved > 0))
437240cdc509SFilipe Manana 				btrfs_dump_space_info(info, space_info, 0, 0);
437340cdc509SFilipe Manana 		}
437440cdc509SFilipe Manana 
4375d611add4SFilipe Manana 		WARN_ON(space_info->reclaim_size > 0);
43763e43c279SJosef Bacik 		list_del(&space_info->list);
43773e43c279SJosef Bacik 		btrfs_sysfs_remove_space_info(space_info);
43783e43c279SJosef Bacik 	}
43793e43c279SJosef Bacik 	return 0;
43803e43c279SJosef Bacik }
4381684b752bSFilipe Manana 
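/*
 * Example: an illustrative sketch (not verbatim close_ctree() code) of the
 * ordering constraint documented above btrfs_free_block_groups(): all worker
 * threads must be stopped before the block groups are freed, so that caching
 * kthreads cannot race with the teardown.
 *
 *	btrfs_stop_all_workers(fs_info);	// no caching kthreads remain
 *	btrfs_free_block_groups(fs_info);	// now safe to free everything
 */
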
4382684b752bSFilipe Manana void btrfs_freeze_block_group(struct btrfs_block_group *cache)
4383684b752bSFilipe Manana {
4384684b752bSFilipe Manana 	atomic_inc(&cache->frozen);
4385684b752bSFilipe Manana }
4386684b752bSFilipe Manana 
4387684b752bSFilipe Manana void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
4388684b752bSFilipe Manana {
4389684b752bSFilipe Manana 	struct btrfs_fs_info *fs_info = block_group->fs_info;
4390684b752bSFilipe Manana 	struct extent_map_tree *em_tree;
4391684b752bSFilipe Manana 	struct extent_map *em;
4392684b752bSFilipe Manana 	bool cleanup;
4393684b752bSFilipe Manana 
4394684b752bSFilipe Manana 	spin_lock(&block_group->lock);
4395684b752bSFilipe Manana 	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
43963349b57fSJosef Bacik 		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
4397684b752bSFilipe Manana 	spin_unlock(&block_group->lock);
4398684b752bSFilipe Manana 
4399684b752bSFilipe Manana 	if (cleanup) {
4400684b752bSFilipe Manana 		em_tree = &fs_info->mapping_tree;
4401684b752bSFilipe Manana 		write_lock(&em_tree->lock);
4402684b752bSFilipe Manana 		em = lookup_extent_mapping(em_tree, block_group->start,
4403684b752bSFilipe Manana 					   1);
4404684b752bSFilipe Manana 		BUG_ON(!em); /* logic error, can't happen */
4405684b752bSFilipe Manana 		remove_extent_mapping(em_tree, em);
4406684b752bSFilipe Manana 		write_unlock(&em_tree->lock);
4407684b752bSFilipe Manana 
4408684b752bSFilipe Manana 		/* once for us and once for the tree */
4409684b752bSFilipe Manana 		free_extent_map(em);
4410684b752bSFilipe Manana 		free_extent_map(em);
4411684b752bSFilipe Manana 
4412684b752bSFilipe Manana 		/*
4413684b752bSFilipe Manana 		 * We may have left one free space entry, and other tasks
4414684b752bSFilipe Manana 		 * trimming this block group may each have left one entry.
4415684b752bSFilipe Manana 		 * Free them if any.
4416684b752bSFilipe Manana 		 */
4417fc80f7acSJosef Bacik 		btrfs_remove_free_space_cache(block_group);
4418684b752bSFilipe Manana 	}
4419684b752bSFilipe Manana }
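
/*
 * Example: a hedged usage sketch, not code from this file. A task that must
 * keep the block group's extent mapping alive while it works on the group,
 * even if the group is removed concurrently, brackets the work with a
 * freeze/unfreeze pair; do_work_on_block_group() is a hypothetical
 * placeholder.
 *
 *	btrfs_freeze_block_group(block_group);
 *	do_work_on_block_group(block_group);	// e.g. trimming free space
 *	btrfs_unfreeze_block_group(block_group);
 */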
4420195a49eaSFilipe Manana 
4421195a49eaSFilipe Manana bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
4422195a49eaSFilipe Manana {
4423195a49eaSFilipe Manana 	bool ret = true;
4424195a49eaSFilipe Manana 
4425195a49eaSFilipe Manana 	spin_lock(&bg->lock);
4426195a49eaSFilipe Manana 	if (bg->ro)
4427195a49eaSFilipe Manana 		ret = false;
4428195a49eaSFilipe Manana 	else
4429195a49eaSFilipe Manana 		bg->swap_extents++;
4430195a49eaSFilipe Manana 	spin_unlock(&bg->lock);
4431195a49eaSFilipe Manana 
4432195a49eaSFilipe Manana 	return ret;
4433195a49eaSFilipe Manana }
4434195a49eaSFilipe Manana 
4435195a49eaSFilipe Manana void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
4436195a49eaSFilipe Manana {
4437195a49eaSFilipe Manana 	spin_lock(&bg->lock);
4438195a49eaSFilipe Manana 	ASSERT(!bg->ro);
4439195a49eaSFilipe Manana 	ASSERT(bg->swap_extents >= amount);
4440195a49eaSFilipe Manana 	bg->swap_extents -= amount;
4441195a49eaSFilipe Manana 	spin_unlock(&bg->lock);
4442195a49eaSFilipe Manana }
4443195a49eaSFilipe Manana 
444452bb7a21SBoris Burkov enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size)
444552bb7a21SBoris Burkov {
444652bb7a21SBoris Burkov 	if (size <= SZ_128K)
444752bb7a21SBoris Burkov 		return BTRFS_BG_SZ_SMALL;
444852bb7a21SBoris Burkov 	if (size <= SZ_8M)
444952bb7a21SBoris Burkov 		return BTRFS_BG_SZ_MEDIUM;
445052bb7a21SBoris Burkov 	return BTRFS_BG_SZ_LARGE;
445152bb7a21SBoris Burkov }
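
/*
 * Example: with the thresholds above, an allocation size maps to a size
 * class as follows (illustrative, using the SZ_* constants from
 * <linux/sizes.h>):
 *
 *	btrfs_calc_block_group_size_class(SZ_64K);	// BTRFS_BG_SZ_SMALL
 *	btrfs_calc_block_group_size_class(SZ_1M);	// BTRFS_BG_SZ_MEDIUM
 *	btrfs_calc_block_group_size_class(SZ_32M);	// BTRFS_BG_SZ_LARGE
 */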
445252bb7a21SBoris Burkov 
445352bb7a21SBoris Burkov /*
445452bb7a21SBoris Burkov  * Handle a block group allocating an extent in a size class
445552bb7a21SBoris Burkov  *
445652bb7a21SBoris Burkov  * @bg:				The block group we allocated in.
445752bb7a21SBoris Burkov  * @size_class:			The size class of the allocation.
445852bb7a21SBoris Burkov  * @force_wrong_size_class:	Whether we are desperate enough to allow
445952bb7a21SBoris Burkov  *				mismatched size classes.
446052bb7a21SBoris Burkov  *
446152bb7a21SBoris Burkov  * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
446252bb7a21SBoris Burkov  * case of a race that leads to the wrong size class without
446352bb7a21SBoris Burkov  * force_wrong_size_class set.
446452bb7a21SBoris Burkov  *
446552bb7a21SBoris Burkov  * find_free_extent will skip block groups with a mismatched size class until
446652bb7a21SBoris Burkov  * it really needs to avoid ENOSPC. In that case it will set
446752bb7a21SBoris Burkov  * force_wrong_size_class. However, if a block group is newly allocated and
446852bb7a21SBoris Burkov  * doesn't yet have a size class, then it is possible for two allocations of
446952bb7a21SBoris Burkov  * different sizes to race and both try to use it. The loser is caught here and
447052bb7a21SBoris Burkov  * has to retry.
447152bb7a21SBoris Burkov  */
447252bb7a21SBoris Burkov int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
447352bb7a21SBoris Burkov 				     enum btrfs_block_group_size_class size_class,
447452bb7a21SBoris Burkov 				     bool force_wrong_size_class)
447552bb7a21SBoris Burkov {
447652bb7a21SBoris Burkov 	ASSERT(size_class != BTRFS_BG_SZ_NONE);
447752bb7a21SBoris Burkov 
447852bb7a21SBoris Burkov 	/* The new allocation is in the right size class, do nothing */
447952bb7a21SBoris Burkov 	if (bg->size_class == size_class)
448052bb7a21SBoris Burkov 		return 0;
448152bb7a21SBoris Burkov 	/*
448252bb7a21SBoris Burkov 	 * The new allocation is in a mismatched size class.
448352bb7a21SBoris Burkov 	 * This means one of two things:
448452bb7a21SBoris Burkov 	 *
448552bb7a21SBoris Burkov 	 * 1. Two tasks in find_free_extent for different size_classes raced
448652bb7a21SBoris Burkov 	 *    and hit the same empty block_group. Make the loser try again.
448752bb7a21SBoris Burkov 	 * 2. A call to find_free_extent got desperate enough to set
448852bb7a21SBoris Burkov 	 *    'force_wrong_size_class'. Don't change the size_class, but
448952bb7a21SBoris Burkov 	 *    allow the allocation.
449052bb7a21SBoris Burkov 	 */
449152bb7a21SBoris Burkov 	if (bg->size_class != BTRFS_BG_SZ_NONE) {
449252bb7a21SBoris Burkov 		if (force_wrong_size_class)
449352bb7a21SBoris Burkov 			return 0;
449452bb7a21SBoris Burkov 		return -EAGAIN;
449552bb7a21SBoris Burkov 	}
449652bb7a21SBoris Burkov 	/*
449752bb7a21SBoris Burkov 	 * The happy new block group case: the new allocation is the first
449852bb7a21SBoris Burkov 	 * one in the block_group, so we set size_class.
449952bb7a21SBoris Burkov 	 */
450052bb7a21SBoris Burkov 	bg->size_class = size_class;
450152bb7a21SBoris Burkov 
450252bb7a21SBoris Burkov 	return 0;
450352bb7a21SBoris Burkov }
4504cb0922f2SBoris Burkov 
4505cb0922f2SBoris Burkov bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg)
4506cb0922f2SBoris Burkov {
4507cb0922f2SBoris Burkov 	if (btrfs_is_zoned(bg->fs_info))
4508cb0922f2SBoris Burkov 		return false;
4509cb0922f2SBoris Burkov 	if (!btrfs_is_block_group_data_only(bg))
4510cb0922f2SBoris Burkov 		return false;
4511cb0922f2SBoris Burkov 	return true;
4512cb0922f2SBoris Burkov }
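
/*
 * Example: a hedged sketch of how an allocator can consume the two helpers
 * above; the loop shape and label are illustrative, not the actual
 * find_free_extent() code.
 *
 *	if (btrfs_block_group_should_use_size_class(bg)) {
 *		ret = btrfs_use_block_group_size_class(bg, size_class,
 *						       force_wrong_size_class);
 *		if (ret == -EAGAIN)
 *			goto next_block_group;	// lost the size class race
 *	}
 */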