// SPDX-License-Identifier: GPL-2.0

#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
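/*
 * Worked example (illustrative, not part of the original file): if the
 * extended flags contain DATA | RAID1 | RAID10 and num_devices is large
 * enough that neither profile is masked out by the devs_min check, the
 * if/else chain above keeps only the highest-priority profile, RAID10,
 * so the function returns extended_to_chunk(DATA | RAID10). With fewer
 * devices than RAID10 requires, RAID1 would win instead.
 */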
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}
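/*
 * Usage sketch (illustrative, not part of the original file): callers ask
 * for the profile of one chunk type by passing just that type bit, e.g.
 *
 *	u64 flags = btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
 *
 * which folds in the currently available metadata profile bits under the
 * profiles seqlock and then reduces them to a single allocatable profile.
 */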
void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		/*
		 * If there was a failure to clean up a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on reserved > 0 in that
		 * case.
		 */
		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
			WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache->physical_map);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;
	bool leftmost = true;

	ASSERT(block_group->length != 0);

	write_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_root.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node,
			       &info->block_group_cache_tree, leftmost);

	write_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	read_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_root.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	read_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	read_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
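/*
 * Iteration sketch (illustrative, not part of the original file): the
 * lookup/next pair hands off one block group reference per step, so a
 * full walk needs no explicit get/put unless it breaks out early, in
 * which case the caller must btrfs_put_block_group() the current group:
 *
 *	struct btrfs_block_group *bg;
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		(use bg here)
 *	}
 */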
/**
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:	The filesystem information object.
 * @bytenr:	Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increment the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
 * is responsible for calling btrfs_dec_nocow_writers() later.
 *
 * Or NULL if we cannot do a NOCOW write
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}

/**
 * Decrement the number of NOCOW writers in a block group.
 *
 * @bg: The block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it
 * wants to use it, then it should get a reference on it before calling this
 * function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
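/*
 * Usage sketch (illustrative, not part of the original file): a NOCOW
 * writer brackets ordered extent creation with the inc/dec pair. The
 * fallback helper named below is hypothetical, standing in for whatever
 * COW path the caller uses:
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, disk_bytenr);
 *	if (!bg)
 *		return fall_back_to_cow();
 *	(create the ordered extent for the NOCOW write)
 *	btrfs_dec_nocow_writers(bg);
 */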
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}
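/*
 * Pairing sketch (illustrative, not part of the original file): every
 * successful btrfs_get_caching_control() must be matched with a
 * btrfs_put_caching_control(), as the wait helpers below do:
 *
 *	caching_ctl = btrfs_get_caching_control(cache);
 *	if (caching_ctl) {
 *		(wait on caching_ctl->wait or inspect caching progress)
 *		btrfs_put_caching_control(caching_ctl);
 *	}
 */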
/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
				       struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif
/*
 * This is only called by btrfs_cache_block_group, since we could have freed
 * extents we need to check the pinned_extents for any extents that can't be
 * used yet since their free space will be released as soon as the transaction
 * commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
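/*
 * Worked example (illustrative, not part of the original file): caching
 * the range [0, 100) while the excluded_extents tree marks [30, 50) as
 * excluded. The loop above adds [0, 30) as free space and advances start
 * past the excluded extent; the trailing check then adds [50, 100), so
 * the function returns 80 bytes of free space added.
 */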
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
	extent_root = btrfs_extent_root(fs_info, last);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}
		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
				block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}
	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching. Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 2);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	cache->has_caching_ctl = 1;
	spin_unlock(&cache->lock);

	write_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	write_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (wait && caching_ctl)
		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}
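/*
 * Usage sketch (illustrative, not part of the original file): an
 * allocator path that must see the fully cached free space can ask for
 * synchronous caching, while background callers just kick it off:
 *
 *	ret = btrfs_cache_block_group(cache, true);	(wait until done)
 *	ret = btrfs_cache_block_group(cache, false);	(just start it)
 *
 * Either way, a return of 0 does not imply free space exists, only that
 * caching finished (or was started) without error.
 */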
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *   in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = btrfs_block_group_root(fs_info);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}
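/*
 * Illustrative note (not part of the original file): a block group item
 * key encodes the group's placement directly, so for a 1 GiB block group
 * starting at logical address 3221225472 the exact-match search above
 * looks up:
 *
 *	key.objectid = 3221225472;	(logical start of the group)
 *	key.type     = BTRFS_BLOCK_GROUP_ITEM_KEY;
 *	key.offset   = 1073741824;	(length of the group, 1 GiB)
 */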
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;
	write_lock(&fs_info->block_group_cache_lock);
	rb_erase_cached(&block_group->cache_node,
			&fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	write_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		write_lock(&fs_info->block_group_cache_lock);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		write_unlock(&fs_info->block_group_cache_lock);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length - block_group->zone_unusable);
		WARN_ON(block_group->space_info->bytes_zone_unusable
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
		WARN_ON(block_group->zone_is_active &&
			block_group->space_info->active_total_bytes
			< block_group->length);
	}
	block_group->space_info->total_bytes -= block_group->length;
	if (block_group->zone_is_active)
		block_group->space_info->active_total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
	block_group->space_info->bytes_zone_unusable -=
		block_group->zone_unusable;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
1098e3e0520bSJosef Bacik */ 10996b7304afSFilipe Manana remove_em = (atomic_read(&block_group->frozen) == 0); 1100e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1101e3e0520bSJosef Bacik 1102e3e0520bSJosef Bacik if (remove_em) { 1103e3e0520bSJosef Bacik struct extent_map_tree *em_tree; 1104e3e0520bSJosef Bacik 1105e3e0520bSJosef Bacik em_tree = &fs_info->mapping_tree; 1106e3e0520bSJosef Bacik write_lock(&em_tree->lock); 1107e3e0520bSJosef Bacik remove_extent_mapping(em_tree, em); 1108e3e0520bSJosef Bacik write_unlock(&em_tree->lock); 1109e3e0520bSJosef Bacik /* once for the tree */ 1110e3e0520bSJosef Bacik free_extent_map(em); 1111e3e0520bSJosef Bacik } 1112f6033c5eSXiyu Yang 11139fecd132SFilipe Manana out: 1114f6033c5eSXiyu Yang /* Once for the lookup reference */ 1115f6033c5eSXiyu Yang btrfs_put_block_group(block_group); 1116e3e0520bSJosef Bacik if (remove_rsv) 1117e3e0520bSJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 1118e3e0520bSJosef Bacik btrfs_free_path(path); 1119e3e0520bSJosef Bacik return ret; 1120e3e0520bSJosef Bacik } 1121e3e0520bSJosef Bacik 1122e3e0520bSJosef Bacik struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 1123e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info, const u64 chunk_offset) 1124e3e0520bSJosef Bacik { 1125dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 1126e3e0520bSJosef Bacik struct extent_map_tree *em_tree = &fs_info->mapping_tree; 1127e3e0520bSJosef Bacik struct extent_map *em; 1128e3e0520bSJosef Bacik struct map_lookup *map; 1129e3e0520bSJosef Bacik unsigned int num_items; 1130e3e0520bSJosef Bacik 1131e3e0520bSJosef Bacik read_lock(&em_tree->lock); 1132e3e0520bSJosef Bacik em = lookup_extent_mapping(em_tree, chunk_offset, 1); 1133e3e0520bSJosef Bacik read_unlock(&em_tree->lock); 1134e3e0520bSJosef Bacik ASSERT(em && em->start == chunk_offset); 1135e3e0520bSJosef Bacik 1136e3e0520bSJosef Bacik /* 1137e3e0520bSJosef Bacik * We need to reserve 3 + N units from the metadata space info in order 1138e3e0520bSJosef Bacik * to remove a block group (done at btrfs_remove_chunk() and at 1139e3e0520bSJosef Bacik * btrfs_remove_block_group()), which are used for: 1140e3e0520bSJosef Bacik * 1141e3e0520bSJosef Bacik * 1 unit for adding the free space inode's orphan (located in the tree 1142e3e0520bSJosef Bacik * of tree roots). 1143e3e0520bSJosef Bacik * 1 unit for deleting the block group item (located in the extent 1144e3e0520bSJosef Bacik * tree). 1145e3e0520bSJosef Bacik * 1 unit for deleting the free space item (located in tree of tree 1146e3e0520bSJosef Bacik * roots). 1147e3e0520bSJosef Bacik * N units for deleting N device extent items corresponding to each 1148e3e0520bSJosef Bacik * stripe (located in the device tree). 1149e3e0520bSJosef Bacik * 1150e3e0520bSJosef Bacik * In order to remove a block group we also need to reserve units in the 1151e3e0520bSJosef Bacik * system space info in order to update the chunk tree (update one or 1152e3e0520bSJosef Bacik * more device items and remove one chunk item), but this is done at 1153e3e0520bSJosef Bacik * btrfs_remove_chunk() through a call to check_system_chunk(). 
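 *
 * Illustrative arithmetic (editor's note, not in the original source): for
 * a chunk whose map has two stripes, the code below reserves
 * num_items = 3 + 2 = 5 metadata units before starting the transaction.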
1154e3e0520bSJosef Bacik */
1155e3e0520bSJosef Bacik map = em->map_lookup;
1156e3e0520bSJosef Bacik num_items = 3 + map->num_stripes;
1157e3e0520bSJosef Bacik free_extent_map(em);
1158e3e0520bSJosef Bacik 
1159dfe8aec4SJosef Bacik return btrfs_start_transaction_fallback_global_rsv(root, num_items);
1160e3e0520bSJosef Bacik }
1161e3e0520bSJosef Bacik 
1162e3e0520bSJosef Bacik /*
116326ce2095SJosef Bacik * Mark block group @cache read-only, so later writes won't happen to block
116426ce2095SJosef Bacik * group @cache.
116526ce2095SJosef Bacik *
116626ce2095SJosef Bacik * If @force is not set, this function will only mark the block group readonly
116726ce2095SJosef Bacik * if we have enough free space (1M) in other metadata/system block groups.
116826ce2095SJosef Bacik * If @force is set, this function will mark the block group readonly
116926ce2095SJosef Bacik * without checking free space.
117026ce2095SJosef Bacik *
117126ce2095SJosef Bacik * NOTE: This function doesn't care if other block groups can contain all the
117226ce2095SJosef Bacik * data in this block group. That check should be done by the relocation
117326ce2095SJosef Bacik * routine, not this function.
117426ce2095SJosef Bacik */
117532da5386SDavid Sterba static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
117626ce2095SJosef Bacik {
117726ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info;
117826ce2095SJosef Bacik u64 num_bytes;
117926ce2095SJosef Bacik int ret = -ENOSPC;
118026ce2095SJosef Bacik 
118126ce2095SJosef Bacik spin_lock(&sinfo->lock);
118226ce2095SJosef Bacik spin_lock(&cache->lock);
118326ce2095SJosef Bacik 
1184195a49eaSFilipe Manana if (cache->swap_extents) {
1185195a49eaSFilipe Manana ret = -ETXTBSY;
1186195a49eaSFilipe Manana goto out;
1187195a49eaSFilipe Manana }
1188195a49eaSFilipe Manana 
118926ce2095SJosef Bacik if (cache->ro) {
119026ce2095SJosef Bacik cache->ro++;
119126ce2095SJosef Bacik ret = 0;
119226ce2095SJosef Bacik goto out;
119326ce2095SJosef Bacik }
119426ce2095SJosef Bacik 
1195b3470b5dSDavid Sterba num_bytes = cache->length - cache->reserved - cache->pinned -
1196169e0da9SNaohiro Aota cache->bytes_super - cache->zone_unusable - cache->used;
119726ce2095SJosef Bacik 
119826ce2095SJosef Bacik /*
1199a30a3d20SJosef Bacik * Data never overcommits, even in mixed mode, so do just the straight
1200a30a3d20SJosef Bacik * check of left over space in how much we have allocated.
1201a30a3d20SJosef Bacik */
1202a30a3d20SJosef Bacik if (force) {
1203a30a3d20SJosef Bacik ret = 0;
1204a30a3d20SJosef Bacik } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
1205a30a3d20SJosef Bacik u64 sinfo_used = btrfs_space_info_used(sinfo, true);
1206a30a3d20SJosef Bacik 
1207a30a3d20SJosef Bacik /*
120826ce2095SJosef Bacik * Here we make sure that if we mark this bg RO, we still have
1209f8935566SJosef Bacik * enough free space as a buffer.
121026ce2095SJosef Bacik */
1211a30a3d20SJosef Bacik if (sinfo_used + num_bytes <= sinfo->total_bytes)
1212a30a3d20SJosef Bacik ret = 0;
1213a30a3d20SJosef Bacik } else {
1214a30a3d20SJosef Bacik /*
1215a30a3d20SJosef Bacik * We overcommit metadata, so we need to do the
1216a30a3d20SJosef Bacik * btrfs_can_overcommit check here, and we need to pass in
1217a30a3d20SJosef Bacik * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
1218a30a3d20SJosef Bacik * leeway to allow us to mark this block group as read only.
1219a30a3d20SJosef Bacik */
1220a30a3d20SJosef Bacik if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
1221a30a3d20SJosef Bacik BTRFS_RESERVE_NO_FLUSH))
1222a30a3d20SJosef Bacik ret = 0;
1223a30a3d20SJosef Bacik }
1224a30a3d20SJosef Bacik 
1225a30a3d20SJosef Bacik if (!ret) {
122626ce2095SJosef Bacik sinfo->bytes_readonly += num_bytes;
1227169e0da9SNaohiro Aota if (btrfs_is_zoned(cache->fs_info)) {
1228169e0da9SNaohiro Aota /* Migrate zone_unusable bytes to readonly */
1229169e0da9SNaohiro Aota sinfo->bytes_readonly += cache->zone_unusable;
1230169e0da9SNaohiro Aota sinfo->bytes_zone_unusable -= cache->zone_unusable;
1231169e0da9SNaohiro Aota cache->zone_unusable = 0;
1232169e0da9SNaohiro Aota }
123326ce2095SJosef Bacik cache->ro++;
123426ce2095SJosef Bacik list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
123526ce2095SJosef Bacik }
123626ce2095SJosef Bacik out:
123726ce2095SJosef Bacik spin_unlock(&cache->lock);
123826ce2095SJosef Bacik spin_unlock(&sinfo->lock);
123926ce2095SJosef Bacik if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
124026ce2095SJosef Bacik btrfs_info(cache->fs_info,
1241b3470b5dSDavid Sterba "unable to make block group %llu ro", cache->start);
124226ce2095SJosef Bacik btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
124326ce2095SJosef Bacik }
124426ce2095SJosef Bacik return ret;
124526ce2095SJosef Bacik }
124626ce2095SJosef Bacik 
1247fe119a6eSNikolay Borisov static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
1248fe119a6eSNikolay Borisov struct btrfs_block_group *bg)
124945bb5d6aSNikolay Borisov {
125045bb5d6aSNikolay Borisov struct btrfs_fs_info *fs_info = bg->fs_info;
1251fe119a6eSNikolay Borisov struct btrfs_transaction *prev_trans = NULL;
125245bb5d6aSNikolay Borisov const u64 start = bg->start;
125345bb5d6aSNikolay Borisov const u64 end = start + bg->length - 1;
125445bb5d6aSNikolay Borisov int ret;
125545bb5d6aSNikolay Borisov 
1256fe119a6eSNikolay Borisov spin_lock(&fs_info->trans_lock);
1257fe119a6eSNikolay Borisov if (trans->transaction->list.prev != &fs_info->trans_list) {
1258fe119a6eSNikolay Borisov prev_trans = list_last_entry(&trans->transaction->list,
1259fe119a6eSNikolay Borisov struct btrfs_transaction, list);
1260fe119a6eSNikolay Borisov refcount_inc(&prev_trans->use_count);
1261fe119a6eSNikolay Borisov }
1262fe119a6eSNikolay Borisov spin_unlock(&fs_info->trans_lock);
1263fe119a6eSNikolay Borisov 
126445bb5d6aSNikolay Borisov /*
126545bb5d6aSNikolay Borisov * Hold the unused_bg_unpin_mutex lock to avoid racing with
126645bb5d6aSNikolay Borisov * btrfs_finish_extent_commit(). If we are at transaction N, another
126745bb5d6aSNikolay Borisov * task might be running finish_extent_commit() for the previous
126845bb5d6aSNikolay Borisov * transaction N - 1, and have seen a range belonging to the block
1269fe119a6eSNikolay Borisov * group in pinned_extents before we were able to clear the whole block
1270fe119a6eSNikolay Borisov * group range from pinned_extents. This means that task can look up
1271fe119a6eSNikolay Borisov * the block group after we unpinned it from pinned_extents and removed
1272fe119a6eSNikolay Borisov * it, leading to a BUG_ON() at unpin_extent_range().
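 *
 * Editor's note: this is why the code below clears the range from the
 * previous transaction's pinned_extents (when one still exists) as well
 * as from the current transaction's, all under unused_bg_unpin_mutex.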
127345bb5d6aSNikolay Borisov */ 127445bb5d6aSNikolay Borisov mutex_lock(&fs_info->unused_bg_unpin_mutex); 1275fe119a6eSNikolay Borisov if (prev_trans) { 1276fe119a6eSNikolay Borisov ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, 127745bb5d6aSNikolay Borisov EXTENT_DIRTY); 127845bb5d6aSNikolay Borisov if (ret) 1279534cf531SFilipe Manana goto out; 1280fe119a6eSNikolay Borisov } 128145bb5d6aSNikolay Borisov 1282fe119a6eSNikolay Borisov ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, 128345bb5d6aSNikolay Borisov EXTENT_DIRTY); 1284534cf531SFilipe Manana out: 128545bb5d6aSNikolay Borisov mutex_unlock(&fs_info->unused_bg_unpin_mutex); 12865150bf19SFilipe Manana if (prev_trans) 12875150bf19SFilipe Manana btrfs_put_transaction(prev_trans); 128845bb5d6aSNikolay Borisov 1289534cf531SFilipe Manana return ret == 0; 129045bb5d6aSNikolay Borisov } 129145bb5d6aSNikolay Borisov 129226ce2095SJosef Bacik /* 1293e3e0520bSJosef Bacik * Process the unused_bgs list and remove any that don't have any allocated 1294e3e0520bSJosef Bacik * space inside of them. 1295e3e0520bSJosef Bacik */ 1296e3e0520bSJosef Bacik void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1297e3e0520bSJosef Bacik { 129832da5386SDavid Sterba struct btrfs_block_group *block_group; 1299e3e0520bSJosef Bacik struct btrfs_space_info *space_info; 1300e3e0520bSJosef Bacik struct btrfs_trans_handle *trans; 13016e80d4f8SDennis Zhou const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); 1302e3e0520bSJosef Bacik int ret = 0; 1303e3e0520bSJosef Bacik 1304e3e0520bSJosef Bacik if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1305e3e0520bSJosef Bacik return; 1306e3e0520bSJosef Bacik 1307*2f12741fSJosef Bacik if (btrfs_fs_closing(fs_info)) 1308*2f12741fSJosef Bacik return; 1309*2f12741fSJosef Bacik 1310ddfd08cbSJosef Bacik /* 1311ddfd08cbSJosef Bacik * Long running balances can keep us blocked here for eternity, so 1312ddfd08cbSJosef Bacik * simply skip deletion if we're unable to get the mutex. 1313ddfd08cbSJosef Bacik */ 1314f3372065SJohannes Thumshirn if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1315ddfd08cbSJosef Bacik return; 1316ddfd08cbSJosef Bacik 1317e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1318e3e0520bSJosef Bacik while (!list_empty(&fs_info->unused_bgs)) { 1319e3e0520bSJosef Bacik int trimming; 1320e3e0520bSJosef Bacik 1321e3e0520bSJosef Bacik block_group = list_first_entry(&fs_info->unused_bgs, 132232da5386SDavid Sterba struct btrfs_block_group, 1323e3e0520bSJosef Bacik bg_list); 1324e3e0520bSJosef Bacik list_del_init(&block_group->bg_list); 1325e3e0520bSJosef Bacik 1326e3e0520bSJosef Bacik space_info = block_group->space_info; 1327e3e0520bSJosef Bacik 1328e3e0520bSJosef Bacik if (ret || btrfs_mixed_space_info(space_info)) { 1329e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1330e3e0520bSJosef Bacik continue; 1331e3e0520bSJosef Bacik } 1332e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1333e3e0520bSJosef Bacik 1334b0643e59SDennis Zhou btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 1335b0643e59SDennis Zhou 1336e3e0520bSJosef Bacik /* Don't want to race with allocators so take the groups_sem */ 1337e3e0520bSJosef Bacik down_write(&space_info->groups_sem); 13386e80d4f8SDennis Zhou 13396e80d4f8SDennis Zhou /* 13406e80d4f8SDennis Zhou * Async discard moves the final block group discard to be prior 13416e80d4f8SDennis Zhou * to the unused_bgs code path. 
Therefore, if it's not fully 13426e80d4f8SDennis Zhou * trimmed, punt it back to the async discard lists. 13436e80d4f8SDennis Zhou */ 13446e80d4f8SDennis Zhou if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && 13456e80d4f8SDennis Zhou !btrfs_is_free_space_trimmed(block_group)) { 13466e80d4f8SDennis Zhou trace_btrfs_skip_unused_block_group(block_group); 13476e80d4f8SDennis Zhou up_write(&space_info->groups_sem); 13486e80d4f8SDennis Zhou /* Requeue if we failed because of async discard */ 13496e80d4f8SDennis Zhou btrfs_discard_queue_work(&fs_info->discard_ctl, 13506e80d4f8SDennis Zhou block_group); 13516e80d4f8SDennis Zhou goto next; 13526e80d4f8SDennis Zhou } 13536e80d4f8SDennis Zhou 1354e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1355e3e0520bSJosef Bacik if (block_group->reserved || block_group->pinned || 1356bf38be65SDavid Sterba block_group->used || block_group->ro || 1357e3e0520bSJosef Bacik list_is_singular(&block_group->list)) { 1358e3e0520bSJosef Bacik /* 1359e3e0520bSJosef Bacik * We want to bail if we made new allocations or have 1360e3e0520bSJosef Bacik * outstanding allocations in this block group. We do 1361e3e0520bSJosef Bacik * the ro check in case balance is currently acting on 1362e3e0520bSJosef Bacik * this block group. 1363e3e0520bSJosef Bacik */ 1364e3e0520bSJosef Bacik trace_btrfs_skip_unused_block_group(block_group); 1365e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1366e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1367e3e0520bSJosef Bacik goto next; 1368e3e0520bSJosef Bacik } 1369e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1370e3e0520bSJosef Bacik 1371e3e0520bSJosef Bacik /* We don't want to force the issue, only flip if it's ok. */ 1372e11c0406SJosef Bacik ret = inc_block_group_ro(block_group, 0); 1373e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1374e3e0520bSJosef Bacik if (ret < 0) { 1375e3e0520bSJosef Bacik ret = 0; 1376e3e0520bSJosef Bacik goto next; 1377e3e0520bSJosef Bacik } 1378e3e0520bSJosef Bacik 137974e91b12SNaohiro Aota ret = btrfs_zone_finish(block_group); 138074e91b12SNaohiro Aota if (ret < 0) { 138174e91b12SNaohiro Aota btrfs_dec_block_group_ro(block_group); 138274e91b12SNaohiro Aota if (ret == -EAGAIN) 138374e91b12SNaohiro Aota ret = 0; 138474e91b12SNaohiro Aota goto next; 138574e91b12SNaohiro Aota } 138674e91b12SNaohiro Aota 1387e3e0520bSJosef Bacik /* 1388e3e0520bSJosef Bacik * Want to do this before we do anything else so we can recover 1389e3e0520bSJosef Bacik * properly if we fail to join the transaction. 1390e3e0520bSJosef Bacik */ 1391e3e0520bSJosef Bacik trans = btrfs_start_trans_remove_block_group(fs_info, 1392b3470b5dSDavid Sterba block_group->start); 1393e3e0520bSJosef Bacik if (IS_ERR(trans)) { 1394e3e0520bSJosef Bacik btrfs_dec_block_group_ro(block_group); 1395e3e0520bSJosef Bacik ret = PTR_ERR(trans); 1396e3e0520bSJosef Bacik goto next; 1397e3e0520bSJosef Bacik } 1398e3e0520bSJosef Bacik 1399e3e0520bSJosef Bacik /* 1400e3e0520bSJosef Bacik * We could have pending pinned extents for this block group, 1401e3e0520bSJosef Bacik * just delete them, we don't care about them anymore. 1402e3e0520bSJosef Bacik */ 1403534cf531SFilipe Manana if (!clean_pinned_extents(trans, block_group)) { 1404534cf531SFilipe Manana btrfs_dec_block_group_ro(block_group); 1405e3e0520bSJosef Bacik goto end_trans; 1406534cf531SFilipe Manana } 1407e3e0520bSJosef Bacik 1408b0643e59SDennis Zhou /* 1409b0643e59SDennis Zhou * At this point, the block_group is read only and should fail 1410b0643e59SDennis Zhou * new allocations. 
However, btrfs_finish_extent_commit() can
1411b0643e59SDennis Zhou * cause this block_group to be placed back on the discard
1412b0643e59SDennis Zhou * lists because now the block_group isn't fully discarded.
1413b0643e59SDennis Zhou * Bail here and try again later after discarding everything.
1414b0643e59SDennis Zhou */
1415b0643e59SDennis Zhou spin_lock(&fs_info->discard_ctl.lock);
1416b0643e59SDennis Zhou if (!list_empty(&block_group->discard_list)) {
1417b0643e59SDennis Zhou spin_unlock(&fs_info->discard_ctl.lock);
1418b0643e59SDennis Zhou btrfs_dec_block_group_ro(block_group);
1419b0643e59SDennis Zhou btrfs_discard_queue_work(&fs_info->discard_ctl,
1420b0643e59SDennis Zhou block_group);
1421b0643e59SDennis Zhou goto end_trans;
1422b0643e59SDennis Zhou }
1423b0643e59SDennis Zhou spin_unlock(&fs_info->discard_ctl.lock);
1424b0643e59SDennis Zhou 
1425e3e0520bSJosef Bacik /* Reset pinned so btrfs_put_block_group doesn't complain */
1426e3e0520bSJosef Bacik spin_lock(&space_info->lock);
1427e3e0520bSJosef Bacik spin_lock(&block_group->lock);
1428e3e0520bSJosef Bacik 
1429e3e0520bSJosef Bacik btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1430e3e0520bSJosef Bacik -block_group->pinned);
1431e3e0520bSJosef Bacik space_info->bytes_readonly += block_group->pinned;
1432e3e0520bSJosef Bacik block_group->pinned = 0;
1433e3e0520bSJosef Bacik 
1434e3e0520bSJosef Bacik spin_unlock(&block_group->lock);
1435e3e0520bSJosef Bacik spin_unlock(&space_info->lock);
1436e3e0520bSJosef Bacik 
14376e80d4f8SDennis Zhou /*
14386e80d4f8SDennis Zhou * The normal path here is that an unused block group is passed in,
14396e80d4f8SDennis Zhou * and trimming is then handled in the transaction commit path.
14406e80d4f8SDennis Zhou * Async discard interposes before this to do the trimming
14416e80d4f8SDennis Zhou * before coming down the unused block group path as trimming
14426e80d4f8SDennis Zhou * will no longer be done later in the transaction commit path.
14436e80d4f8SDennis Zhou */
14446e80d4f8SDennis Zhou if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
14456e80d4f8SDennis Zhou goto flip_async;
14466e80d4f8SDennis Zhou 
1447dcba6e48SNaohiro Aota /*
1448dcba6e48SNaohiro Aota * DISCARD can flip during remount. On zoned filesystems, we
1449dcba6e48SNaohiro Aota * need to reset sequential-required zones.
1450dcba6e48SNaohiro Aota */
1451dcba6e48SNaohiro Aota trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
1452dcba6e48SNaohiro Aota btrfs_is_zoned(fs_info);
1453e3e0520bSJosef Bacik 
1454e3e0520bSJosef Bacik /* Implicit trim during transaction commit. */
1455e3e0520bSJosef Bacik if (trimming)
14566b7304afSFilipe Manana btrfs_freeze_block_group(block_group);
1457e3e0520bSJosef Bacik 
1458e3e0520bSJosef Bacik /*
1459e3e0520bSJosef Bacik * btrfs_remove_chunk() will abort the transaction if things go
1460e3e0520bSJosef Bacik * horribly wrong.
1461e3e0520bSJosef Bacik */
1462b3470b5dSDavid Sterba ret = btrfs_remove_chunk(trans, block_group->start);
1463e3e0520bSJosef Bacik 
1464e3e0520bSJosef Bacik if (ret) {
1465e3e0520bSJosef Bacik if (trimming)
14666b7304afSFilipe Manana btrfs_unfreeze_block_group(block_group);
1467e3e0520bSJosef Bacik goto end_trans;
1468e3e0520bSJosef Bacik }
1469e3e0520bSJosef Bacik 
1470e3e0520bSJosef Bacik /*
1471e3e0520bSJosef Bacik * If we're not mounted with -odiscard, we can just forget
1472e3e0520bSJosef Bacik * about this block group. Otherwise we'll need to wait
1473e3e0520bSJosef Bacik * until transaction commit to do the actual discard.
1474e3e0520bSJosef Bacik */ 1475e3e0520bSJosef Bacik if (trimming) { 1476e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1477e3e0520bSJosef Bacik /* 1478e3e0520bSJosef Bacik * A concurrent scrub might have added us to the list 1479e3e0520bSJosef Bacik * fs_info->unused_bgs, so use a list_move operation 1480e3e0520bSJosef Bacik * to add the block group to the deleted_bgs list. 1481e3e0520bSJosef Bacik */ 1482e3e0520bSJosef Bacik list_move(&block_group->bg_list, 1483e3e0520bSJosef Bacik &trans->transaction->deleted_bgs); 1484e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1485e3e0520bSJosef Bacik btrfs_get_block_group(block_group); 1486e3e0520bSJosef Bacik } 1487e3e0520bSJosef Bacik end_trans: 1488e3e0520bSJosef Bacik btrfs_end_transaction(trans); 1489e3e0520bSJosef Bacik next: 1490e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1491e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1492e3e0520bSJosef Bacik } 1493e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1494f3372065SJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 14956e80d4f8SDennis Zhou return; 14966e80d4f8SDennis Zhou 14976e80d4f8SDennis Zhou flip_async: 14986e80d4f8SDennis Zhou btrfs_end_transaction(trans); 1499f3372065SJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 15006e80d4f8SDennis Zhou btrfs_put_block_group(block_group); 15016e80d4f8SDennis Zhou btrfs_discard_punt_unused_bgs_list(fs_info); 1502e3e0520bSJosef Bacik } 1503e3e0520bSJosef Bacik 150432da5386SDavid Sterba void btrfs_mark_bg_unused(struct btrfs_block_group *bg) 1505e3e0520bSJosef Bacik { 1506e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info = bg->fs_info; 1507e3e0520bSJosef Bacik 1508e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1509e3e0520bSJosef Bacik if (list_empty(&bg->bg_list)) { 1510e3e0520bSJosef Bacik btrfs_get_block_group(bg); 1511e3e0520bSJosef Bacik trace_btrfs_add_unused_block_group(bg); 1512e3e0520bSJosef Bacik list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 1513e3e0520bSJosef Bacik } 1514e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1515e3e0520bSJosef Bacik } 15164358d963SJosef Bacik 15172ca0ec77SJohannes Thumshirn /* 15182ca0ec77SJohannes Thumshirn * We want block groups with a low number of used bytes to be in the beginning 15192ca0ec77SJohannes Thumshirn * of the list, so they will get reclaimed first. 
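 *
 * Note (editor's addition): this is the list_sort() comparator used in
 * btrfs_reclaim_bgs_work(); returning nonzero when bg1->used > bg2->used
 * makes list_sort() place bg1 after bg2, yielding ascending order of
 * used bytes.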
15202ca0ec77SJohannes Thumshirn */ 15212ca0ec77SJohannes Thumshirn static int reclaim_bgs_cmp(void *unused, const struct list_head *a, 15222ca0ec77SJohannes Thumshirn const struct list_head *b) 15232ca0ec77SJohannes Thumshirn { 15242ca0ec77SJohannes Thumshirn const struct btrfs_block_group *bg1, *bg2; 15252ca0ec77SJohannes Thumshirn 15262ca0ec77SJohannes Thumshirn bg1 = list_entry(a, struct btrfs_block_group, bg_list); 15272ca0ec77SJohannes Thumshirn bg2 = list_entry(b, struct btrfs_block_group, bg_list); 15282ca0ec77SJohannes Thumshirn 15292ca0ec77SJohannes Thumshirn return bg1->used > bg2->used; 15302ca0ec77SJohannes Thumshirn } 15312ca0ec77SJohannes Thumshirn 15323687fcb0SJohannes Thumshirn static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info) 15333687fcb0SJohannes Thumshirn { 15343687fcb0SJohannes Thumshirn if (btrfs_is_zoned(fs_info)) 15353687fcb0SJohannes Thumshirn return btrfs_zoned_should_reclaim(fs_info); 15363687fcb0SJohannes Thumshirn return true; 15373687fcb0SJohannes Thumshirn } 15383687fcb0SJohannes Thumshirn 153918bb8bbfSJohannes Thumshirn void btrfs_reclaim_bgs_work(struct work_struct *work) 154018bb8bbfSJohannes Thumshirn { 154118bb8bbfSJohannes Thumshirn struct btrfs_fs_info *fs_info = 154218bb8bbfSJohannes Thumshirn container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 154318bb8bbfSJohannes Thumshirn struct btrfs_block_group *bg; 154418bb8bbfSJohannes Thumshirn struct btrfs_space_info *space_info; 154518bb8bbfSJohannes Thumshirn 154618bb8bbfSJohannes Thumshirn if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 154718bb8bbfSJohannes Thumshirn return; 154818bb8bbfSJohannes Thumshirn 1549*2f12741fSJosef Bacik if (btrfs_fs_closing(fs_info)) 1550*2f12741fSJosef Bacik return; 1551*2f12741fSJosef Bacik 15523687fcb0SJohannes Thumshirn if (!btrfs_should_reclaim(fs_info)) 15533687fcb0SJohannes Thumshirn return; 15543687fcb0SJohannes Thumshirn 1555ca5e4ea0SNaohiro Aota sb_start_write(fs_info->sb); 1556ca5e4ea0SNaohiro Aota 1557ca5e4ea0SNaohiro Aota if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 1558ca5e4ea0SNaohiro Aota sb_end_write(fs_info->sb); 155918bb8bbfSJohannes Thumshirn return; 1560ca5e4ea0SNaohiro Aota } 156118bb8bbfSJohannes Thumshirn 15629cc0b837SJohannes Thumshirn /* 15639cc0b837SJohannes Thumshirn * Long running balances can keep us blocked here for eternity, so 15649cc0b837SJohannes Thumshirn * simply skip reclaim if we're unable to get the mutex. 15659cc0b837SJohannes Thumshirn */ 15669cc0b837SJohannes Thumshirn if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { 15679cc0b837SJohannes Thumshirn btrfs_exclop_finish(fs_info); 1568ca5e4ea0SNaohiro Aota sb_end_write(fs_info->sb); 15699cc0b837SJohannes Thumshirn return; 15709cc0b837SJohannes Thumshirn } 15719cc0b837SJohannes Thumshirn 157218bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 15732ca0ec77SJohannes Thumshirn /* 15742ca0ec77SJohannes Thumshirn * Sort happens under lock because we can't simply splice it and sort. 15752ca0ec77SJohannes Thumshirn * The block groups might still be in use and reachable via bg_list, 15762ca0ec77SJohannes Thumshirn * and their presence in the reclaim_bgs list must be preserved. 
15772ca0ec77SJohannes Thumshirn */
15782ca0ec77SJohannes Thumshirn list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
157918bb8bbfSJohannes Thumshirn while (!list_empty(&fs_info->reclaim_bgs)) {
15805f93e776SJohannes Thumshirn u64 zone_unusable;
15811cea5cf0SFilipe Manana int ret = 0;
15821cea5cf0SFilipe Manana 
158318bb8bbfSJohannes Thumshirn bg = list_first_entry(&fs_info->reclaim_bgs,
158418bb8bbfSJohannes Thumshirn struct btrfs_block_group,
158518bb8bbfSJohannes Thumshirn bg_list);
158618bb8bbfSJohannes Thumshirn list_del_init(&bg->bg_list);
158718bb8bbfSJohannes Thumshirn 
158818bb8bbfSJohannes Thumshirn space_info = bg->space_info;
158918bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock);
159018bb8bbfSJohannes Thumshirn 
159118bb8bbfSJohannes Thumshirn /* Don't race with allocators so take the groups_sem */
159218bb8bbfSJohannes Thumshirn down_write(&space_info->groups_sem);
159318bb8bbfSJohannes Thumshirn 
159418bb8bbfSJohannes Thumshirn spin_lock(&bg->lock);
159518bb8bbfSJohannes Thumshirn if (bg->reserved || bg->pinned || bg->ro) {
159618bb8bbfSJohannes Thumshirn /*
159718bb8bbfSJohannes Thumshirn * We want to bail if we made new allocations or have
159818bb8bbfSJohannes Thumshirn * outstanding allocations in this block group. We do
159918bb8bbfSJohannes Thumshirn * the ro check in case balance is currently acting on
160018bb8bbfSJohannes Thumshirn * this block group.
160118bb8bbfSJohannes Thumshirn */
160218bb8bbfSJohannes Thumshirn spin_unlock(&bg->lock);
160318bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem);
160418bb8bbfSJohannes Thumshirn goto next;
160518bb8bbfSJohannes Thumshirn }
160618bb8bbfSJohannes Thumshirn spin_unlock(&bg->lock);
160718bb8bbfSJohannes Thumshirn 
160818bb8bbfSJohannes Thumshirn /* Get out fast, in case we're unmounting the filesystem */
160918bb8bbfSJohannes Thumshirn if (btrfs_fs_closing(fs_info)) {
161018bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem);
161118bb8bbfSJohannes Thumshirn goto next;
161218bb8bbfSJohannes Thumshirn }
161318bb8bbfSJohannes Thumshirn 
16145f93e776SJohannes Thumshirn /*
16155f93e776SJohannes Thumshirn * Cache the zone_unusable value before turning the block group
16165f93e776SJohannes Thumshirn * to read only. As soon as the block group is read only its
16175f93e776SJohannes Thumshirn * zone_unusable value gets moved to the block group's read-only
16185f93e776SJohannes Thumshirn * bytes and isn't available for calculations anymore.
16195f93e776SJohannes Thumshirn */ 16205f93e776SJohannes Thumshirn zone_unusable = bg->zone_unusable; 162118bb8bbfSJohannes Thumshirn ret = inc_block_group_ro(bg, 0); 162218bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem); 162318bb8bbfSJohannes Thumshirn if (ret < 0) 162418bb8bbfSJohannes Thumshirn goto next; 162518bb8bbfSJohannes Thumshirn 16265f93e776SJohannes Thumshirn btrfs_info(fs_info, 16275f93e776SJohannes Thumshirn "reclaiming chunk %llu with %llu%% used %llu%% unusable", 16285f93e776SJohannes Thumshirn bg->start, div_u64(bg->used * 100, bg->length), 16295f93e776SJohannes Thumshirn div64_u64(zone_unusable * 100, bg->length)); 163018bb8bbfSJohannes Thumshirn trace_btrfs_reclaim_block_group(bg); 163118bb8bbfSJohannes Thumshirn ret = btrfs_relocate_chunk(fs_info, bg->start); 163274944c87SJosef Bacik if (ret) { 163374944c87SJosef Bacik btrfs_dec_block_group_ro(bg); 163418bb8bbfSJohannes Thumshirn btrfs_err(fs_info, "error relocating chunk %llu", 163518bb8bbfSJohannes Thumshirn bg->start); 163674944c87SJosef Bacik } 163718bb8bbfSJohannes Thumshirn 163818bb8bbfSJohannes Thumshirn next: 16391cea5cf0SFilipe Manana btrfs_put_block_group(bg); 1640d96b3424SFilipe Manana spin_lock(&fs_info->unused_bgs_lock); 164118bb8bbfSJohannes Thumshirn } 164218bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 164318bb8bbfSJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 164418bb8bbfSJohannes Thumshirn btrfs_exclop_finish(fs_info); 1645ca5e4ea0SNaohiro Aota sb_end_write(fs_info->sb); 164618bb8bbfSJohannes Thumshirn } 164718bb8bbfSJohannes Thumshirn 164818bb8bbfSJohannes Thumshirn void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 164918bb8bbfSJohannes Thumshirn { 165018bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 165118bb8bbfSJohannes Thumshirn if (!list_empty(&fs_info->reclaim_bgs)) 165218bb8bbfSJohannes Thumshirn queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); 165318bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 165418bb8bbfSJohannes Thumshirn } 165518bb8bbfSJohannes Thumshirn 165618bb8bbfSJohannes Thumshirn void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 165718bb8bbfSJohannes Thumshirn { 165818bb8bbfSJohannes Thumshirn struct btrfs_fs_info *fs_info = bg->fs_info; 165918bb8bbfSJohannes Thumshirn 166018bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 166118bb8bbfSJohannes Thumshirn if (list_empty(&bg->bg_list)) { 166218bb8bbfSJohannes Thumshirn btrfs_get_block_group(bg); 166318bb8bbfSJohannes Thumshirn trace_btrfs_add_reclaim_block_group(bg); 166418bb8bbfSJohannes Thumshirn list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); 166518bb8bbfSJohannes Thumshirn } 166618bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 166718bb8bbfSJohannes Thumshirn } 166818bb8bbfSJohannes Thumshirn 1669e3ba67a1SJohannes Thumshirn static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 1670e3ba67a1SJohannes Thumshirn struct btrfs_path *path) 1671e3ba67a1SJohannes Thumshirn { 1672e3ba67a1SJohannes Thumshirn struct extent_map_tree *em_tree; 1673e3ba67a1SJohannes Thumshirn struct extent_map *em; 1674e3ba67a1SJohannes Thumshirn struct btrfs_block_group_item bg; 1675e3ba67a1SJohannes Thumshirn struct extent_buffer *leaf; 1676e3ba67a1SJohannes Thumshirn int slot; 1677e3ba67a1SJohannes Thumshirn u64 flags; 1678e3ba67a1SJohannes Thumshirn int ret = 0; 1679e3ba67a1SJohannes Thumshirn 1680e3ba67a1SJohannes Thumshirn slot = path->slots[0]; 1681e3ba67a1SJohannes Thumshirn leaf = 
path->nodes[0]; 1682e3ba67a1SJohannes Thumshirn 1683e3ba67a1SJohannes Thumshirn em_tree = &fs_info->mapping_tree; 1684e3ba67a1SJohannes Thumshirn read_lock(&em_tree->lock); 1685e3ba67a1SJohannes Thumshirn em = lookup_extent_mapping(em_tree, key->objectid, key->offset); 1686e3ba67a1SJohannes Thumshirn read_unlock(&em_tree->lock); 1687e3ba67a1SJohannes Thumshirn if (!em) { 1688e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1689e3ba67a1SJohannes Thumshirn "logical %llu len %llu found bg but no related chunk", 1690e3ba67a1SJohannes Thumshirn key->objectid, key->offset); 1691e3ba67a1SJohannes Thumshirn return -ENOENT; 1692e3ba67a1SJohannes Thumshirn } 1693e3ba67a1SJohannes Thumshirn 1694e3ba67a1SJohannes Thumshirn if (em->start != key->objectid || em->len != key->offset) { 1695e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1696e3ba67a1SJohannes Thumshirn "block group %llu len %llu mismatch with chunk %llu len %llu", 1697e3ba67a1SJohannes Thumshirn key->objectid, key->offset, em->start, em->len); 1698e3ba67a1SJohannes Thumshirn ret = -EUCLEAN; 1699e3ba67a1SJohannes Thumshirn goto out_free_em; 1700e3ba67a1SJohannes Thumshirn } 1701e3ba67a1SJohannes Thumshirn 1702e3ba67a1SJohannes Thumshirn read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 1703e3ba67a1SJohannes Thumshirn sizeof(bg)); 1704e3ba67a1SJohannes Thumshirn flags = btrfs_stack_block_group_flags(&bg) & 1705e3ba67a1SJohannes Thumshirn BTRFS_BLOCK_GROUP_TYPE_MASK; 1706e3ba67a1SJohannes Thumshirn 1707e3ba67a1SJohannes Thumshirn if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 1708e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1709e3ba67a1SJohannes Thumshirn "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 1710e3ba67a1SJohannes Thumshirn key->objectid, key->offset, flags, 1711e3ba67a1SJohannes Thumshirn (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); 1712e3ba67a1SJohannes Thumshirn ret = -EUCLEAN; 1713e3ba67a1SJohannes Thumshirn } 1714e3ba67a1SJohannes Thumshirn 1715e3ba67a1SJohannes Thumshirn out_free_em: 1716e3ba67a1SJohannes Thumshirn free_extent_map(em); 1717e3ba67a1SJohannes Thumshirn return ret; 1718e3ba67a1SJohannes Thumshirn } 1719e3ba67a1SJohannes Thumshirn 17204358d963SJosef Bacik static int find_first_block_group(struct btrfs_fs_info *fs_info, 17214358d963SJosef Bacik struct btrfs_path *path, 17224358d963SJosef Bacik struct btrfs_key *key) 17234358d963SJosef Bacik { 1724dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 1725e3ba67a1SJohannes Thumshirn int ret; 17264358d963SJosef Bacik struct btrfs_key found_key; 17274358d963SJosef Bacik 172836dfbbe2SGabriel Niebler btrfs_for_each_slot(root, key, &found_key, path, ret) { 17294358d963SJosef Bacik if (found_key.objectid >= key->objectid && 17304358d963SJosef Bacik found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 173136dfbbe2SGabriel Niebler return read_bg_from_eb(fs_info, &found_key, path); 1732e3ba67a1SJohannes Thumshirn } 17334358d963SJosef Bacik } 17344358d963SJosef Bacik return ret; 17354358d963SJosef Bacik } 17364358d963SJosef Bacik 17374358d963SJosef Bacik static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 17384358d963SJosef Bacik { 17394358d963SJosef Bacik u64 extra_flags = chunk_to_extended(flags) & 17404358d963SJosef Bacik BTRFS_EXTENDED_PROFILE_MASK; 17414358d963SJosef Bacik 17424358d963SJosef Bacik write_seqlock(&fs_info->profiles_lock); 17434358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA) 17444358d963SJosef Bacik 
fs_info->avail_data_alloc_bits |= extra_flags; 17454358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_METADATA) 17464358d963SJosef Bacik fs_info->avail_metadata_alloc_bits |= extra_flags; 17474358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 17484358d963SJosef Bacik fs_info->avail_system_alloc_bits |= extra_flags; 17494358d963SJosef Bacik write_sequnlock(&fs_info->profiles_lock); 17504358d963SJosef Bacik } 17514358d963SJosef Bacik 175296a14336SNikolay Borisov /** 17539ee9b979SNikolay Borisov * Map a physical disk address to a list of logical addresses 17549ee9b979SNikolay Borisov * 17559ee9b979SNikolay Borisov * @fs_info: the filesystem 175696a14336SNikolay Borisov * @chunk_start: logical address of block group 1757138082f3SNaohiro Aota * @bdev: physical device to resolve, can be NULL to indicate any device 175896a14336SNikolay Borisov * @physical: physical address to map to logical addresses 175996a14336SNikolay Borisov * @logical: return array of logical addresses which map to @physical 176096a14336SNikolay Borisov * @naddrs: length of @logical 176196a14336SNikolay Borisov * @stripe_len: size of IO stripe for the given block group 176296a14336SNikolay Borisov * 176396a14336SNikolay Borisov * Maps a particular @physical disk address to a list of @logical addresses. 176496a14336SNikolay Borisov * Used primarily to exclude those portions of a block group that contain super 176596a14336SNikolay Borisov * block copies. 176696a14336SNikolay Borisov */ 176796a14336SNikolay Borisov int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 1768138082f3SNaohiro Aota struct block_device *bdev, u64 physical, u64 **logical, 1769138082f3SNaohiro Aota int *naddrs, int *stripe_len) 177096a14336SNikolay Borisov { 177196a14336SNikolay Borisov struct extent_map *em; 177296a14336SNikolay Borisov struct map_lookup *map; 177396a14336SNikolay Borisov u64 *buf; 177496a14336SNikolay Borisov u64 bytenr; 17751776ad17SNikolay Borisov u64 data_stripe_length; 17761776ad17SNikolay Borisov u64 io_stripe_size; 17771776ad17SNikolay Borisov int i, nr = 0; 17781776ad17SNikolay Borisov int ret = 0; 177996a14336SNikolay Borisov 178096a14336SNikolay Borisov em = btrfs_get_chunk_map(fs_info, chunk_start, 1); 178196a14336SNikolay Borisov if (IS_ERR(em)) 178296a14336SNikolay Borisov return -EIO; 178396a14336SNikolay Borisov 178496a14336SNikolay Borisov map = em->map_lookup; 17859e22b925SNikolay Borisov data_stripe_length = em->orig_block_len; 17861776ad17SNikolay Borisov io_stripe_size = map->stripe_len; 1787138082f3SNaohiro Aota chunk_start = em->start; 178896a14336SNikolay Borisov 17899e22b925SNikolay Borisov /* For RAID5/6 adjust to a full IO stripe length */ 17909e22b925SNikolay Borisov if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 17911776ad17SNikolay Borisov io_stripe_size = map->stripe_len * nr_data_stripes(map); 179296a14336SNikolay Borisov 179396a14336SNikolay Borisov buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 17941776ad17SNikolay Borisov if (!buf) { 17951776ad17SNikolay Borisov ret = -ENOMEM; 17961776ad17SNikolay Borisov goto out; 17971776ad17SNikolay Borisov } 179896a14336SNikolay Borisov 179996a14336SNikolay Borisov for (i = 0; i < map->num_stripes; i++) { 18001776ad17SNikolay Borisov bool already_inserted = false; 18011776ad17SNikolay Borisov u64 stripe_nr; 1802138082f3SNaohiro Aota u64 offset; 18031776ad17SNikolay Borisov int j; 18041776ad17SNikolay Borisov 18051776ad17SNikolay Borisov if (!in_range(physical, map->stripes[i].physical, 18061776ad17SNikolay Borisov 
data_stripe_length))
180796a14336SNikolay Borisov continue;
180896a14336SNikolay Borisov 
1809138082f3SNaohiro Aota if (bdev && map->stripes[i].dev->bdev != bdev)
1810138082f3SNaohiro Aota continue;
1811138082f3SNaohiro Aota 
181296a14336SNikolay Borisov stripe_nr = physical - map->stripes[i].physical;
1813138082f3SNaohiro Aota stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
181496a14336SNikolay Borisov 
1815ac067734SDavid Sterba if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
1816ac067734SDavid Sterba BTRFS_BLOCK_GROUP_RAID10)) {
181796a14336SNikolay Borisov stripe_nr = stripe_nr * map->num_stripes + i;
181896a14336SNikolay Borisov stripe_nr = div_u64(stripe_nr, map->sub_stripes);
181996a14336SNikolay Borisov }
182096a14336SNikolay Borisov /*
182196a14336SNikolay Borisov * The remaining case would be for RAID56, multiply by
182296a14336SNikolay Borisov * nr_data_stripes(). Alternatively, just use io_stripe_size
182396a14336SNikolay Borisov * below instead of map->stripe_len
182496a14336SNikolay Borisov */
182596a14336SNikolay Borisov 
1826138082f3SNaohiro Aota bytenr = chunk_start + stripe_nr * io_stripe_size + offset;
18271776ad17SNikolay Borisov 
18281776ad17SNikolay Borisov /* Ensure we don't add duplicate addresses */
182996a14336SNikolay Borisov for (j = 0; j < nr; j++) {
18301776ad17SNikolay Borisov if (buf[j] == bytenr) {
18311776ad17SNikolay Borisov already_inserted = true;
183296a14336SNikolay Borisov break;
183396a14336SNikolay Borisov }
183496a14336SNikolay Borisov }
18351776ad17SNikolay Borisov 
18361776ad17SNikolay Borisov if (!already_inserted)
18371776ad17SNikolay Borisov buf[nr++] = bytenr;
183896a14336SNikolay Borisov }
183996a14336SNikolay Borisov 
184096a14336SNikolay Borisov *logical = buf;
184196a14336SNikolay Borisov *naddrs = nr;
18421776ad17SNikolay Borisov *stripe_len = io_stripe_size;
18431776ad17SNikolay Borisov out:
184496a14336SNikolay Borisov free_extent_map(em);
18451776ad17SNikolay Borisov return ret;
184696a14336SNikolay Borisov }
184796a14336SNikolay Borisov 
184832da5386SDavid Sterba static int exclude_super_stripes(struct btrfs_block_group *cache)
18494358d963SJosef Bacik {
18504358d963SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info;
185112659251SNaohiro Aota const bool zoned = btrfs_is_zoned(fs_info);
18524358d963SJosef Bacik u64 bytenr;
18534358d963SJosef Bacik u64 *logical;
18544358d963SJosef Bacik int stripe_len;
18554358d963SJosef Bacik int i, nr, ret;
18564358d963SJosef Bacik 
1857b3470b5dSDavid Sterba if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
1858b3470b5dSDavid Sterba stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
18594358d963SJosef Bacik cache->bytes_super += stripe_len;
1860b3470b5dSDavid Sterba ret = btrfs_add_excluded_extent(fs_info, cache->start,
18614358d963SJosef Bacik stripe_len);
18624358d963SJosef Bacik if (ret)
18634358d963SJosef Bacik return ret;
18644358d963SJosef Bacik }
18654358d963SJosef Bacik 
18664358d963SJosef Bacik for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
18674358d963SJosef Bacik bytenr = btrfs_sb_offset(i);
1868138082f3SNaohiro Aota ret = btrfs_rmap_block(fs_info, cache->start, NULL,
18694358d963SJosef Bacik bytenr, &logical, &nr, &stripe_len);
18704358d963SJosef Bacik if (ret)
18714358d963SJosef Bacik return ret;
18724358d963SJosef Bacik 
187312659251SNaohiro Aota /* Shouldn't have super stripes in sequential zones */
187412659251SNaohiro Aota if (zoned && nr) {
187512659251SNaohiro Aota btrfs_err(fs_info,
187612659251SNaohiro Aota "zoned: block group %llu must not contain super block",
187712659251SNaohiro Aota cache->start); 187812659251SNaohiro Aota return -EUCLEAN; 187912659251SNaohiro Aota } 188012659251SNaohiro Aota 18814358d963SJosef Bacik while (nr--) { 188296f9b0f2SNikolay Borisov u64 len = min_t(u64, stripe_len, 188396f9b0f2SNikolay Borisov cache->start + cache->length - logical[nr]); 18844358d963SJosef Bacik 18854358d963SJosef Bacik cache->bytes_super += len; 188696f9b0f2SNikolay Borisov ret = btrfs_add_excluded_extent(fs_info, logical[nr], 188796f9b0f2SNikolay Borisov len); 18884358d963SJosef Bacik if (ret) { 18894358d963SJosef Bacik kfree(logical); 18904358d963SJosef Bacik return ret; 18914358d963SJosef Bacik } 18924358d963SJosef Bacik } 18934358d963SJosef Bacik 18944358d963SJosef Bacik kfree(logical); 18954358d963SJosef Bacik } 18964358d963SJosef Bacik return 0; 18974358d963SJosef Bacik } 18984358d963SJosef Bacik 189932da5386SDavid Sterba static void link_block_group(struct btrfs_block_group *cache) 19004358d963SJosef Bacik { 19014358d963SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 19024358d963SJosef Bacik int index = btrfs_bg_flags_to_raid_index(cache->flags); 19034358d963SJosef Bacik 19044358d963SJosef Bacik down_write(&space_info->groups_sem); 19054358d963SJosef Bacik list_add_tail(&cache->list, &space_info->block_groups[index]); 19064358d963SJosef Bacik up_write(&space_info->groups_sem); 19074358d963SJosef Bacik } 19084358d963SJosef Bacik 190932da5386SDavid Sterba static struct btrfs_block_group *btrfs_create_block_group_cache( 19109afc6649SQu Wenruo struct btrfs_fs_info *fs_info, u64 start) 19114358d963SJosef Bacik { 191232da5386SDavid Sterba struct btrfs_block_group *cache; 19134358d963SJosef Bacik 19144358d963SJosef Bacik cache = kzalloc(sizeof(*cache), GFP_NOFS); 19154358d963SJosef Bacik if (!cache) 19164358d963SJosef Bacik return NULL; 19174358d963SJosef Bacik 19184358d963SJosef Bacik cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 19194358d963SJosef Bacik GFP_NOFS); 19204358d963SJosef Bacik if (!cache->free_space_ctl) { 19214358d963SJosef Bacik kfree(cache); 19224358d963SJosef Bacik return NULL; 19234358d963SJosef Bacik } 19244358d963SJosef Bacik 1925b3470b5dSDavid Sterba cache->start = start; 19264358d963SJosef Bacik 19274358d963SJosef Bacik cache->fs_info = fs_info; 19284358d963SJosef Bacik cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 19294358d963SJosef Bacik 19306e80d4f8SDennis Zhou cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 19316e80d4f8SDennis Zhou 193248aaeebeSJosef Bacik refcount_set(&cache->refs, 1); 19334358d963SJosef Bacik spin_lock_init(&cache->lock); 19344358d963SJosef Bacik init_rwsem(&cache->data_rwsem); 19354358d963SJosef Bacik INIT_LIST_HEAD(&cache->list); 19364358d963SJosef Bacik INIT_LIST_HEAD(&cache->cluster_list); 19374358d963SJosef Bacik INIT_LIST_HEAD(&cache->bg_list); 19384358d963SJosef Bacik INIT_LIST_HEAD(&cache->ro_list); 1939b0643e59SDennis Zhou INIT_LIST_HEAD(&cache->discard_list); 19404358d963SJosef Bacik INIT_LIST_HEAD(&cache->dirty_list); 19414358d963SJosef Bacik INIT_LIST_HEAD(&cache->io_list); 1942afba2bc0SNaohiro Aota INIT_LIST_HEAD(&cache->active_bg_list); 1943cd79909bSJosef Bacik btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 19446b7304afSFilipe Manana atomic_set(&cache->frozen, 0); 19454358d963SJosef Bacik mutex_init(&cache->free_space_lock); 19464358d963SJosef Bacik btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); 19474358d963SJosef Bacik 19484358d963SJosef Bacik return cache; 19494358d963SJosef Bacik } 
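/*
 * Editor's note: btrfs_create_block_group_cache() above only sets up the
 * in-memory structure and its lists/locks; callers such as
 * read_one_block_group() below fill in length, used and flags from the
 * on-disk block group item.
 */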
19504358d963SJosef Bacik 19514358d963SJosef Bacik /* 19524358d963SJosef Bacik * Iterate all chunks and verify that each of them has the corresponding block 19534358d963SJosef Bacik * group 19544358d963SJosef Bacik */ 19554358d963SJosef Bacik static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 19564358d963SJosef Bacik { 19574358d963SJosef Bacik struct extent_map_tree *map_tree = &fs_info->mapping_tree; 19584358d963SJosef Bacik struct extent_map *em; 195932da5386SDavid Sterba struct btrfs_block_group *bg; 19604358d963SJosef Bacik u64 start = 0; 19614358d963SJosef Bacik int ret = 0; 19624358d963SJosef Bacik 19634358d963SJosef Bacik while (1) { 19644358d963SJosef Bacik read_lock(&map_tree->lock); 19654358d963SJosef Bacik /* 19664358d963SJosef Bacik * lookup_extent_mapping will return the first extent map 19674358d963SJosef Bacik * intersecting the range, so setting @len to 1 is enough to 19684358d963SJosef Bacik * get the first chunk. 19694358d963SJosef Bacik */ 19704358d963SJosef Bacik em = lookup_extent_mapping(map_tree, start, 1); 19714358d963SJosef Bacik read_unlock(&map_tree->lock); 19724358d963SJosef Bacik if (!em) 19734358d963SJosef Bacik break; 19744358d963SJosef Bacik 19754358d963SJosef Bacik bg = btrfs_lookup_block_group(fs_info, em->start); 19764358d963SJosef Bacik if (!bg) { 19774358d963SJosef Bacik btrfs_err(fs_info, 19784358d963SJosef Bacik "chunk start=%llu len=%llu doesn't have corresponding block group", 19794358d963SJosef Bacik em->start, em->len); 19804358d963SJosef Bacik ret = -EUCLEAN; 19814358d963SJosef Bacik free_extent_map(em); 19824358d963SJosef Bacik break; 19834358d963SJosef Bacik } 1984b3470b5dSDavid Sterba if (bg->start != em->start || bg->length != em->len || 19854358d963SJosef Bacik (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 19864358d963SJosef Bacik (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 19874358d963SJosef Bacik btrfs_err(fs_info, 19884358d963SJosef Bacik "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 19894358d963SJosef Bacik em->start, em->len, 19904358d963SJosef Bacik em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 1991b3470b5dSDavid Sterba bg->start, bg->length, 19924358d963SJosef Bacik bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 19934358d963SJosef Bacik ret = -EUCLEAN; 19944358d963SJosef Bacik free_extent_map(em); 19954358d963SJosef Bacik btrfs_put_block_group(bg); 19964358d963SJosef Bacik break; 19974358d963SJosef Bacik } 19984358d963SJosef Bacik start = em->start + em->len; 19994358d963SJosef Bacik free_extent_map(em); 20004358d963SJosef Bacik btrfs_put_block_group(bg); 20014358d963SJosef Bacik } 20024358d963SJosef Bacik return ret; 20034358d963SJosef Bacik } 20044358d963SJosef Bacik 2005ffb9e0f0SQu Wenruo static int read_one_block_group(struct btrfs_fs_info *info, 20064afd2fe8SJohannes Thumshirn struct btrfs_block_group_item *bgi, 2007d49a2ddbSQu Wenruo const struct btrfs_key *key, 2008ffb9e0f0SQu Wenruo int need_clear) 2009ffb9e0f0SQu Wenruo { 201032da5386SDavid Sterba struct btrfs_block_group *cache; 2011ffb9e0f0SQu Wenruo struct btrfs_space_info *space_info; 2012ffb9e0f0SQu Wenruo const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 2013ffb9e0f0SQu Wenruo int ret; 2014ffb9e0f0SQu Wenruo 2015d49a2ddbSQu Wenruo ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 2016ffb9e0f0SQu Wenruo 20179afc6649SQu Wenruo cache = btrfs_create_block_group_cache(info, key->objectid); 2018ffb9e0f0SQu Wenruo if (!cache) 2019ffb9e0f0SQu Wenruo return -ENOMEM; 2020ffb9e0f0SQu 
Wenruo 20214afd2fe8SJohannes Thumshirn cache->length = key->offset; 20224afd2fe8SJohannes Thumshirn cache->used = btrfs_stack_block_group_used(bgi); 20234afd2fe8SJohannes Thumshirn cache->flags = btrfs_stack_block_group_flags(bgi); 2024f7238e50SJosef Bacik cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); 20259afc6649SQu Wenruo 2026e3e39c72SMarcos Paulo de Souza set_free_space_tree_thresholds(cache); 2027e3e39c72SMarcos Paulo de Souza 2028ffb9e0f0SQu Wenruo if (need_clear) { 2029ffb9e0f0SQu Wenruo /* 2030ffb9e0f0SQu Wenruo * When we mount with old space cache, we need to 2031ffb9e0f0SQu Wenruo * set BTRFS_DC_CLEAR and set dirty flag. 2032ffb9e0f0SQu Wenruo * 2033ffb9e0f0SQu Wenruo * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 2034ffb9e0f0SQu Wenruo * truncate the old free space cache inode and 2035ffb9e0f0SQu Wenruo * setup a new one. 2036ffb9e0f0SQu Wenruo * b) Setting 'dirty flag' makes sure that we flush 2037ffb9e0f0SQu Wenruo * the new space cache info onto disk. 2038ffb9e0f0SQu Wenruo */ 2039ffb9e0f0SQu Wenruo if (btrfs_test_opt(info, SPACE_CACHE)) 2040ffb9e0f0SQu Wenruo cache->disk_cache_state = BTRFS_DC_CLEAR; 2041ffb9e0f0SQu Wenruo } 2042ffb9e0f0SQu Wenruo if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 2043ffb9e0f0SQu Wenruo (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 2044ffb9e0f0SQu Wenruo btrfs_err(info, 2045ffb9e0f0SQu Wenruo "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 2046ffb9e0f0SQu Wenruo cache->start); 2047ffb9e0f0SQu Wenruo ret = -EINVAL; 2048ffb9e0f0SQu Wenruo goto error; 2049ffb9e0f0SQu Wenruo } 2050ffb9e0f0SQu Wenruo 2051a94794d5SNaohiro Aota ret = btrfs_load_block_group_zone_info(cache, false); 205208e11a3dSNaohiro Aota if (ret) { 205308e11a3dSNaohiro Aota btrfs_err(info, "zoned: failed to load zone info of bg %llu", 205408e11a3dSNaohiro Aota cache->start); 205508e11a3dSNaohiro Aota goto error; 205608e11a3dSNaohiro Aota } 205708e11a3dSNaohiro Aota 2058ffb9e0f0SQu Wenruo /* 2059ffb9e0f0SQu Wenruo * We need to exclude the super stripes now so that the space info has 2060ffb9e0f0SQu Wenruo * super bytes accounted for, otherwise we'll think we have more space 2061ffb9e0f0SQu Wenruo * than we actually do. 2062ffb9e0f0SQu Wenruo */ 2063ffb9e0f0SQu Wenruo ret = exclude_super_stripes(cache); 2064ffb9e0f0SQu Wenruo if (ret) { 2065ffb9e0f0SQu Wenruo /* We may have excluded something, so call this just in case. */ 2066ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 2067ffb9e0f0SQu Wenruo goto error; 2068ffb9e0f0SQu Wenruo } 2069ffb9e0f0SQu Wenruo 2070ffb9e0f0SQu Wenruo /* 2071169e0da9SNaohiro Aota * For zoned filesystem, space after the allocation offset is the only 2072169e0da9SNaohiro Aota * free space for a block group. So, we don't need any caching work. 2073169e0da9SNaohiro Aota * btrfs_calc_zone_unusable() will set the amount of free space and 2074169e0da9SNaohiro Aota * zone_unusable space. 2075169e0da9SNaohiro Aota * 2076169e0da9SNaohiro Aota * For regular filesystem, check for two cases, either we are full, and 2077169e0da9SNaohiro Aota * therefore don't need to bother with the caching work since we won't 2078169e0da9SNaohiro Aota * find any space, or we are empty, and we can just add all the space 2079169e0da9SNaohiro Aota * in and be done with it. This saves us _a_lot_ of time, particularly 2080169e0da9SNaohiro Aota * in the full case. 
2081ffb9e0f0SQu Wenruo */ 2082169e0da9SNaohiro Aota if (btrfs_is_zoned(info)) { 2083169e0da9SNaohiro Aota btrfs_calc_zone_unusable(cache); 2084c46c4247SNaohiro Aota /* Should not have any excluded extents. Just in case, though. */ 2085c46c4247SNaohiro Aota btrfs_free_excluded_extents(cache); 2086169e0da9SNaohiro Aota } else if (cache->length == cache->used) { 2087ffb9e0f0SQu Wenruo cache->last_byte_to_unpin = (u64)-1; 2088ffb9e0f0SQu Wenruo cache->cached = BTRFS_CACHE_FINISHED; 2089ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 2090ffb9e0f0SQu Wenruo } else if (cache->used == 0) { 2091ffb9e0f0SQu Wenruo cache->last_byte_to_unpin = (u64)-1; 2092ffb9e0f0SQu Wenruo cache->cached = BTRFS_CACHE_FINISHED; 20939afc6649SQu Wenruo add_new_free_space(cache, cache->start, 20949afc6649SQu Wenruo cache->start + cache->length); 2095ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 2096ffb9e0f0SQu Wenruo } 2097ffb9e0f0SQu Wenruo 2098ffb9e0f0SQu Wenruo ret = btrfs_add_block_group_cache(info, cache); 2099ffb9e0f0SQu Wenruo if (ret) { 2100ffb9e0f0SQu Wenruo btrfs_remove_free_space_cache(cache); 2101ffb9e0f0SQu Wenruo goto error; 2102ffb9e0f0SQu Wenruo } 2103ffb9e0f0SQu Wenruo trace_btrfs_add_block_group(info, cache, 0); 21049afc6649SQu Wenruo btrfs_update_space_info(info, cache->flags, cache->length, 2105169e0da9SNaohiro Aota cache->used, cache->bytes_super, 21066a921de5SNaohiro Aota cache->zone_unusable, cache->zone_is_active, 21076a921de5SNaohiro Aota &space_info); 2108ffb9e0f0SQu Wenruo 2109ffb9e0f0SQu Wenruo cache->space_info = space_info; 2110ffb9e0f0SQu Wenruo 2111ffb9e0f0SQu Wenruo link_block_group(cache); 2112ffb9e0f0SQu Wenruo 2113ffb9e0f0SQu Wenruo set_avail_alloc_bits(info, cache->flags); 2114a09f23c3SAnand Jain if (btrfs_chunk_writeable(info, cache->start)) { 2115a09f23c3SAnand Jain if (cache->used == 0) { 2116ffb9e0f0SQu Wenruo ASSERT(list_empty(&cache->bg_list)); 21176e80d4f8SDennis Zhou if (btrfs_test_opt(info, DISCARD_ASYNC)) 21186e80d4f8SDennis Zhou btrfs_discard_queue_work(&info->discard_ctl, cache); 21196e80d4f8SDennis Zhou else 2120ffb9e0f0SQu Wenruo btrfs_mark_bg_unused(cache); 2121ffb9e0f0SQu Wenruo } 2122a09f23c3SAnand Jain } else { 2123a09f23c3SAnand Jain inc_block_group_ro(cache, 1); 2124a09f23c3SAnand Jain } 2125a09f23c3SAnand Jain 2126ffb9e0f0SQu Wenruo return 0; 2127ffb9e0f0SQu Wenruo error: 2128ffb9e0f0SQu Wenruo btrfs_put_block_group(cache); 2129ffb9e0f0SQu Wenruo return ret; 2130ffb9e0f0SQu Wenruo } 2131ffb9e0f0SQu Wenruo 213242437a63SJosef Bacik static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 213342437a63SJosef Bacik { 213442437a63SJosef Bacik struct extent_map_tree *em_tree = &fs_info->mapping_tree; 213542437a63SJosef Bacik struct btrfs_space_info *space_info; 213642437a63SJosef Bacik struct rb_node *node; 213742437a63SJosef Bacik int ret = 0; 213842437a63SJosef Bacik 213942437a63SJosef Bacik for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 214042437a63SJosef Bacik struct extent_map *em; 214142437a63SJosef Bacik struct map_lookup *map; 214242437a63SJosef Bacik struct btrfs_block_group *bg; 214342437a63SJosef Bacik 214442437a63SJosef Bacik em = rb_entry(node, struct extent_map, rb_node); 214542437a63SJosef Bacik map = em->map_lookup; 214642437a63SJosef Bacik bg = btrfs_create_block_group_cache(fs_info, em->start); 214742437a63SJosef Bacik if (!bg) { 214842437a63SJosef Bacik ret = -ENOMEM; 214942437a63SJosef Bacik break; 215042437a63SJosef Bacik } 215142437a63SJosef Bacik 215242437a63SJosef Bacik /* Fill dummy cache as FULL */ 
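/* Editor's note: used == length with BTRFS_CACHE_FINISHED leaves nothing to allocate. */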
215342437a63SJosef Bacik bg->length = em->len;
215442437a63SJosef Bacik bg->flags = map->type;
215542437a63SJosef Bacik bg->last_byte_to_unpin = (u64)-1;
215642437a63SJosef Bacik bg->cached = BTRFS_CACHE_FINISHED;
215742437a63SJosef Bacik bg->used = em->len;
215942437a63SJosef Bacik ret = btrfs_add_block_group_cache(fs_info, bg);
21602b29726cSQu Wenruo /*
21612b29726cSQu Wenruo * We may have some valid block group cache added already, in
21622b29726cSQu Wenruo * that case we skip to the next one.
21632b29726cSQu Wenruo */
21642b29726cSQu Wenruo if (ret == -EEXIST) {
21652b29726cSQu Wenruo ret = 0;
21662b29726cSQu Wenruo btrfs_put_block_group(bg);
21672b29726cSQu Wenruo continue;
21682b29726cSQu Wenruo }
21692b29726cSQu Wenruo 
217042437a63SJosef Bacik if (ret) {
217142437a63SJosef Bacik btrfs_remove_free_space_cache(bg);
217242437a63SJosef Bacik btrfs_put_block_group(bg);
217342437a63SJosef Bacik break;
217442437a63SJosef Bacik }
21752b29726cSQu Wenruo 
217642437a63SJosef Bacik btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
21776a921de5SNaohiro Aota 0, 0, false, &space_info);
217842437a63SJosef Bacik bg->space_info = space_info;
217942437a63SJosef Bacik link_block_group(bg);
218042437a63SJosef Bacik 
218142437a63SJosef Bacik set_avail_alloc_bits(fs_info, bg->flags);
218242437a63SJosef Bacik }
218342437a63SJosef Bacik if (!ret)
218442437a63SJosef Bacik btrfs_init_global_block_rsv(fs_info);
218542437a63SJosef Bacik return ret;
218642437a63SJosef Bacik }
218742437a63SJosef Bacik 
21884358d963SJosef Bacik int btrfs_read_block_groups(struct btrfs_fs_info *info)
21894358d963SJosef Bacik {
2190dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(info);
21914358d963SJosef Bacik struct btrfs_path *path;
21924358d963SJosef Bacik int ret;
219332da5386SDavid Sterba struct btrfs_block_group *cache;
21944358d963SJosef Bacik struct btrfs_space_info *space_info;
21954358d963SJosef Bacik struct btrfs_key key;
21964358d963SJosef Bacik int need_clear = 0;
21974358d963SJosef Bacik u64 cache_gen;
21984358d963SJosef Bacik 
2199dfe8aec4SJosef Bacik if (!root)
220042437a63SJosef Bacik return fill_dummy_bgs(info);
220142437a63SJosef Bacik 
22024358d963SJosef Bacik key.objectid = 0;
22034358d963SJosef Bacik key.offset = 0;
22044358d963SJosef Bacik key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
22054358d963SJosef Bacik path = btrfs_alloc_path();
22064358d963SJosef Bacik if (!path)
22074358d963SJosef Bacik return -ENOMEM;
22084358d963SJosef Bacik 
22094358d963SJosef Bacik cache_gen = btrfs_super_cache_generation(info->super_copy);
22104358d963SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) &&
22114358d963SJosef Bacik btrfs_super_generation(info->super_copy) != cache_gen)
22124358d963SJosef Bacik need_clear = 1;
22134358d963SJosef Bacik if (btrfs_test_opt(info, CLEAR_CACHE))
22144358d963SJosef Bacik need_clear = 1;
22154358d963SJosef Bacik 
22164358d963SJosef Bacik while (1) {
22174afd2fe8SJohannes Thumshirn struct btrfs_block_group_item bgi;
22184afd2fe8SJohannes Thumshirn struct extent_buffer *leaf;
22194afd2fe8SJohannes Thumshirn int slot;
22204afd2fe8SJohannes Thumshirn 
22214358d963SJosef Bacik ret = find_first_block_group(info, path, &key);
22224358d963SJosef Bacik if (ret > 0)
22234358d963SJosef Bacik break;
22244358d963SJosef Bacik if (ret != 0)
22254358d963SJosef Bacik goto error;
22264358d963SJosef Bacik 
22274afd2fe8SJohannes Thumshirn leaf = path->nodes[0];
22284afd2fe8SJohannes Thumshirn slot = path->slots[0];
22294afd2fe8SJohannes Thumshirn 
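/*
 * Editor's note: copy the block group item and its key out of the leaf
 * before btrfs_release_path() drops the buffer, then hand both copies to
 * read_one_block_group().
 */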
22304afd2fe8SJohannes Thumshirn read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 22314afd2fe8SJohannes Thumshirn sizeof(bgi)); 22324afd2fe8SJohannes Thumshirn 22334afd2fe8SJohannes Thumshirn btrfs_item_key_to_cpu(leaf, &key, slot); 22344afd2fe8SJohannes Thumshirn btrfs_release_path(path); 22354afd2fe8SJohannes Thumshirn ret = read_one_block_group(info, &bgi, &key, need_clear); 2236ffb9e0f0SQu Wenruo if (ret < 0) 22374358d963SJosef Bacik goto error; 2238ffb9e0f0SQu Wenruo key.objectid += key.offset; 2239ffb9e0f0SQu Wenruo key.offset = 0; 22404358d963SJosef Bacik } 22417837fa88SJosef Bacik btrfs_release_path(path); 22424358d963SJosef Bacik 224372804905SJosef Bacik list_for_each_entry(space_info, &info->space_info, list) { 224449ea112dSJosef Bacik int i; 224549ea112dSJosef Bacik 224649ea112dSJosef Bacik for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 224749ea112dSJosef Bacik if (list_empty(&space_info->block_groups[i])) 224849ea112dSJosef Bacik continue; 224949ea112dSJosef Bacik cache = list_first_entry(&space_info->block_groups[i], 225049ea112dSJosef Bacik struct btrfs_block_group, 225149ea112dSJosef Bacik list); 225249ea112dSJosef Bacik btrfs_sysfs_add_block_group_type(cache); 225349ea112dSJosef Bacik } 225449ea112dSJosef Bacik 22554358d963SJosef Bacik if (!(btrfs_get_alloc_profile(info, space_info->flags) & 22564358d963SJosef Bacik (BTRFS_BLOCK_GROUP_RAID10 | 22574358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID1_MASK | 22584358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID56_MASK | 22594358d963SJosef Bacik BTRFS_BLOCK_GROUP_DUP))) 22604358d963SJosef Bacik continue; 22614358d963SJosef Bacik /* 22624358d963SJosef Bacik * Avoid allocating from un-mirrored block group if there are 22634358d963SJosef Bacik * mirrored block groups. 22644358d963SJosef Bacik */ 22654358d963SJosef Bacik list_for_each_entry(cache, 22664358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_RAID0], 22674358d963SJosef Bacik list) 2268e11c0406SJosef Bacik inc_block_group_ro(cache, 1); 22694358d963SJosef Bacik list_for_each_entry(cache, 22704358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_SINGLE], 22714358d963SJosef Bacik list) 2272e11c0406SJosef Bacik inc_block_group_ro(cache, 1); 22734358d963SJosef Bacik } 22744358d963SJosef Bacik 22754358d963SJosef Bacik btrfs_init_global_block_rsv(info); 22764358d963SJosef Bacik ret = check_chunk_block_group_mappings(info); 22774358d963SJosef Bacik error: 22784358d963SJosef Bacik btrfs_free_path(path); 22792b29726cSQu Wenruo /* 22802b29726cSQu Wenruo * We've hit some error while reading the extent tree, and have 22812b29726cSQu Wenruo * rescue=ibadroots mount option. 22822b29726cSQu Wenruo * Try to fill the tree using dummy block groups so that the user can 22832b29726cSQu Wenruo * continue to mount and grab their data. 22842b29726cSQu Wenruo */ 22852b29726cSQu Wenruo if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) 22862b29726cSQu Wenruo ret = fill_dummy_bgs(info); 22874358d963SJosef Bacik return ret; 22884358d963SJosef Bacik } 22894358d963SJosef Bacik 229079bd3712SFilipe Manana /* 229179bd3712SFilipe Manana * This function, insert_block_group_item(), belongs to the phase 2 of chunk 229279bd3712SFilipe Manana * allocation. 229379bd3712SFilipe Manana * 229479bd3712SFilipe Manana * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 229579bd3712SFilipe Manana * phases. 
229679bd3712SFilipe Manana */ 229797f4728aSQu Wenruo static int insert_block_group_item(struct btrfs_trans_handle *trans, 229897f4728aSQu Wenruo struct btrfs_block_group *block_group) 229997f4728aSQu Wenruo { 230097f4728aSQu Wenruo struct btrfs_fs_info *fs_info = trans->fs_info; 230197f4728aSQu Wenruo struct btrfs_block_group_item bgi; 2302dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 230397f4728aSQu Wenruo struct btrfs_key key; 230497f4728aSQu Wenruo 230597f4728aSQu Wenruo spin_lock(&block_group->lock); 230697f4728aSQu Wenruo btrfs_set_stack_block_group_used(&bgi, block_group->used); 230797f4728aSQu Wenruo btrfs_set_stack_block_group_chunk_objectid(&bgi, 2308f7238e50SJosef Bacik block_group->global_root_id); 230997f4728aSQu Wenruo btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 231097f4728aSQu Wenruo key.objectid = block_group->start; 231197f4728aSQu Wenruo key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 231297f4728aSQu Wenruo key.offset = block_group->length; 231397f4728aSQu Wenruo spin_unlock(&block_group->lock); 231497f4728aSQu Wenruo 231597f4728aSQu Wenruo return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 231697f4728aSQu Wenruo } 231797f4728aSQu Wenruo 23182eadb9e7SNikolay Borisov static int insert_dev_extent(struct btrfs_trans_handle *trans, 23192eadb9e7SNikolay Borisov struct btrfs_device *device, u64 chunk_offset, 23202eadb9e7SNikolay Borisov u64 start, u64 num_bytes) 23212eadb9e7SNikolay Borisov { 23222eadb9e7SNikolay Borisov struct btrfs_fs_info *fs_info = device->fs_info; 23232eadb9e7SNikolay Borisov struct btrfs_root *root = fs_info->dev_root; 23242eadb9e7SNikolay Borisov struct btrfs_path *path; 23252eadb9e7SNikolay Borisov struct btrfs_dev_extent *extent; 23262eadb9e7SNikolay Borisov struct extent_buffer *leaf; 23272eadb9e7SNikolay Borisov struct btrfs_key key; 23282eadb9e7SNikolay Borisov int ret; 23292eadb9e7SNikolay Borisov 23302eadb9e7SNikolay Borisov WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 23312eadb9e7SNikolay Borisov WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 23322eadb9e7SNikolay Borisov path = btrfs_alloc_path(); 23332eadb9e7SNikolay Borisov if (!path) 23342eadb9e7SNikolay Borisov return -ENOMEM; 23352eadb9e7SNikolay Borisov 23362eadb9e7SNikolay Borisov key.objectid = device->devid; 23372eadb9e7SNikolay Borisov key.type = BTRFS_DEV_EXTENT_KEY; 23382eadb9e7SNikolay Borisov key.offset = start; 23392eadb9e7SNikolay Borisov ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); 23402eadb9e7SNikolay Borisov if (ret) 23412eadb9e7SNikolay Borisov goto out; 23422eadb9e7SNikolay Borisov 23432eadb9e7SNikolay Borisov leaf = path->nodes[0]; 23442eadb9e7SNikolay Borisov extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); 23452eadb9e7SNikolay Borisov btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); 23462eadb9e7SNikolay Borisov btrfs_set_dev_extent_chunk_objectid(leaf, extent, 23472eadb9e7SNikolay Borisov BTRFS_FIRST_CHUNK_TREE_OBJECTID); 23482eadb9e7SNikolay Borisov btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 23492eadb9e7SNikolay Borisov 23502eadb9e7SNikolay Borisov btrfs_set_dev_extent_length(leaf, extent, num_bytes); 23512eadb9e7SNikolay Borisov btrfs_mark_buffer_dirty(leaf); 23522eadb9e7SNikolay Borisov out: 23532eadb9e7SNikolay Borisov btrfs_free_path(path); 23542eadb9e7SNikolay Borisov return ret; 23552eadb9e7SNikolay Borisov } 23562eadb9e7SNikolay Borisov 23572eadb9e7SNikolay Borisov /* 
23582eadb9e7SNikolay Borisov * This function belongs to phase 2. 23592eadb9e7SNikolay Borisov * 23602eadb9e7SNikolay Borisov * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 23612eadb9e7SNikolay Borisov * phases. 23622eadb9e7SNikolay Borisov */ 23632eadb9e7SNikolay Borisov static int insert_dev_extents(struct btrfs_trans_handle *trans, 23642eadb9e7SNikolay Borisov u64 chunk_offset, u64 chunk_size) 23652eadb9e7SNikolay Borisov { 23662eadb9e7SNikolay Borisov struct btrfs_fs_info *fs_info = trans->fs_info; 23672eadb9e7SNikolay Borisov struct btrfs_device *device; 23682eadb9e7SNikolay Borisov struct extent_map *em; 23692eadb9e7SNikolay Borisov struct map_lookup *map; 23702eadb9e7SNikolay Borisov u64 dev_offset; 23712eadb9e7SNikolay Borisov u64 stripe_size; 23722eadb9e7SNikolay Borisov int i; 23732eadb9e7SNikolay Borisov int ret = 0; 23742eadb9e7SNikolay Borisov 23752eadb9e7SNikolay Borisov em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 23762eadb9e7SNikolay Borisov if (IS_ERR(em)) 23772eadb9e7SNikolay Borisov return PTR_ERR(em); 23782eadb9e7SNikolay Borisov 23792eadb9e7SNikolay Borisov map = em->map_lookup; 23802eadb9e7SNikolay Borisov stripe_size = em->orig_block_len; 23812eadb9e7SNikolay Borisov 23822eadb9e7SNikolay Borisov /* 23832eadb9e7SNikolay Borisov * Take the device list mutex to prevent races with the final phase of 23842eadb9e7SNikolay Borisov * a device replace operation that replaces the device object associated 23852eadb9e7SNikolay Borisov * with the map's stripes, because the device object's id can change 23862eadb9e7SNikolay Borisov * at any time during that final phase of the device replace operation 23872eadb9e7SNikolay Borisov * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 23882eadb9e7SNikolay Borisov * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 23892eadb9e7SNikolay Borisov * resulting in persisting a device extent item with such ID. 23902eadb9e7SNikolay Borisov */ 23912eadb9e7SNikolay Borisov mutex_lock(&fs_info->fs_devices->device_list_mutex); 23922eadb9e7SNikolay Borisov for (i = 0; i < map->num_stripes; i++) { 23932eadb9e7SNikolay Borisov device = map->stripes[i].dev; 23942eadb9e7SNikolay Borisov dev_offset = map->stripes[i].physical; 23952eadb9e7SNikolay Borisov 23962eadb9e7SNikolay Borisov ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, 23972eadb9e7SNikolay Borisov stripe_size); 23982eadb9e7SNikolay Borisov if (ret) 23992eadb9e7SNikolay Borisov break; 24002eadb9e7SNikolay Borisov } 24012eadb9e7SNikolay Borisov mutex_unlock(&fs_info->fs_devices->device_list_mutex); 24022eadb9e7SNikolay Borisov 24032eadb9e7SNikolay Borisov free_extent_map(em); 24042eadb9e7SNikolay Borisov return ret; 24052eadb9e7SNikolay Borisov } 24062eadb9e7SNikolay Borisov 240779bd3712SFilipe Manana /* 240879bd3712SFilipe Manana * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of 240979bd3712SFilipe Manana * chunk allocation. 241079bd3712SFilipe Manana * 241179bd3712SFilipe Manana * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 241279bd3712SFilipe Manana * phases. 
241379bd3712SFilipe Manana */ 24144358d963SJosef Bacik void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 24154358d963SJosef Bacik { 24164358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 241732da5386SDavid Sterba struct btrfs_block_group *block_group; 24184358d963SJosef Bacik int ret = 0; 24194358d963SJosef Bacik 24204358d963SJosef Bacik while (!list_empty(&trans->new_bgs)) { 242149ea112dSJosef Bacik int index; 242249ea112dSJosef Bacik 24234358d963SJosef Bacik block_group = list_first_entry(&trans->new_bgs, 242432da5386SDavid Sterba struct btrfs_block_group, 24254358d963SJosef Bacik bg_list); 24264358d963SJosef Bacik if (ret) 24274358d963SJosef Bacik goto next; 24284358d963SJosef Bacik 242949ea112dSJosef Bacik index = btrfs_bg_flags_to_raid_index(block_group->flags); 243049ea112dSJosef Bacik 243197f4728aSQu Wenruo ret = insert_block_group_item(trans, block_group); 24324358d963SJosef Bacik if (ret) 24334358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 243479bd3712SFilipe Manana if (!block_group->chunk_item_inserted) { 243579bd3712SFilipe Manana mutex_lock(&fs_info->chunk_mutex); 243679bd3712SFilipe Manana ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); 243779bd3712SFilipe Manana mutex_unlock(&fs_info->chunk_mutex); 243879bd3712SFilipe Manana if (ret) 243979bd3712SFilipe Manana btrfs_abort_transaction(trans, ret); 244079bd3712SFilipe Manana } 24412eadb9e7SNikolay Borisov ret = insert_dev_extents(trans, block_group->start, 244297f4728aSQu Wenruo block_group->length); 24434358d963SJosef Bacik if (ret) 24444358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 24454358d963SJosef Bacik add_block_group_free_space(trans, block_group); 244649ea112dSJosef Bacik 244749ea112dSJosef Bacik /* 244849ea112dSJosef Bacik * If we restriped during balance, we may have added a new raid 244949ea112dSJosef Bacik * type, so now add the sysfs entries when it is safe to do so. 245049ea112dSJosef Bacik * We don't have to worry about locking here as it's handled in 245149ea112dSJosef Bacik * btrfs_sysfs_add_block_group_type. 245249ea112dSJosef Bacik */ 245349ea112dSJosef Bacik if (block_group->space_info->block_group_kobjs[index] == NULL) 245449ea112dSJosef Bacik btrfs_sysfs_add_block_group_type(block_group); 245549ea112dSJosef Bacik 24564358d963SJosef Bacik /* Already aborted the transaction if it failed. */ 24574358d963SJosef Bacik next: 24584358d963SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 24594358d963SJosef Bacik list_del_init(&block_group->bg_list); 24604358d963SJosef Bacik } 24614358d963SJosef Bacik btrfs_trans_release_chunk_metadata(trans); 24624358d963SJosef Bacik } 24634358d963SJosef Bacik 2464f7238e50SJosef Bacik /* 2465f7238e50SJosef Bacik * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2466f7238e50SJosef Bacik * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 2467f7238e50SJosef Bacik */ 2468f7238e50SJosef Bacik static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) 2469f7238e50SJosef Bacik { 2470f7238e50SJosef Bacik u64 div = SZ_1G; 2471f7238e50SJosef Bacik u64 index; 2472f7238e50SJosef Bacik 2473f7238e50SJosef Bacik if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) 2474f7238e50SJosef Bacik return BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2475f7238e50SJosef Bacik 2476f7238e50SJosef Bacik /* If we have a smaller filesystem, index based on 128MiB.
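For example (illustrative numbers, assuming EXTENT_TREE_V2 with nr_global_roots == 4): on a 6GiB filesystem div is SZ_128M, so a block group at offset 640MiB yields 640 / 128 = 5, and the returned index is 5 % 4 = 1.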
*/ 2477f7238e50SJosef Bacik if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) 2478f7238e50SJosef Bacik div = SZ_128M; 2479f7238e50SJosef Bacik 2480f7238e50SJosef Bacik offset = div64_u64(offset, div); 2481f7238e50SJosef Bacik div64_u64_rem(offset, fs_info->nr_global_roots, &index); 2482f7238e50SJosef Bacik return index; 2483f7238e50SJosef Bacik } 2484f7238e50SJosef Bacik 248579bd3712SFilipe Manana struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, 248679bd3712SFilipe Manana u64 bytes_used, u64 type, 248779bd3712SFilipe Manana u64 chunk_offset, u64 size) 24884358d963SJosef Bacik { 24894358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 249032da5386SDavid Sterba struct btrfs_block_group *cache; 24914358d963SJosef Bacik int ret; 24924358d963SJosef Bacik 24934358d963SJosef Bacik btrfs_set_log_full_commit(trans); 24944358d963SJosef Bacik 24959afc6649SQu Wenruo cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 24964358d963SJosef Bacik if (!cache) 249779bd3712SFilipe Manana return ERR_PTR(-ENOMEM); 24984358d963SJosef Bacik 24999afc6649SQu Wenruo cache->length = size; 2500e3e39c72SMarcos Paulo de Souza set_free_space_tree_thresholds(cache); 2501bf38be65SDavid Sterba cache->used = bytes_used; 25024358d963SJosef Bacik cache->flags = type; 25034358d963SJosef Bacik cache->last_byte_to_unpin = (u64)-1; 25044358d963SJosef Bacik cache->cached = BTRFS_CACHE_FINISHED; 2505f7238e50SJosef Bacik cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2506f7238e50SJosef Bacik 2507997e3e2eSBoris Burkov if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 25084358d963SJosef Bacik cache->needs_free_space = 1; 250908e11a3dSNaohiro Aota 2510a94794d5SNaohiro Aota ret = btrfs_load_block_group_zone_info(cache, true); 251108e11a3dSNaohiro Aota if (ret) { 251208e11a3dSNaohiro Aota btrfs_put_block_group(cache); 251379bd3712SFilipe Manana return ERR_PTR(ret); 251408e11a3dSNaohiro Aota } 251508e11a3dSNaohiro Aota 25164358d963SJosef Bacik ret = exclude_super_stripes(cache); 25174358d963SJosef Bacik if (ret) { 25184358d963SJosef Bacik /* We may have excluded something, so call this just in case */ 25194358d963SJosef Bacik btrfs_free_excluded_extents(cache); 25204358d963SJosef Bacik btrfs_put_block_group(cache); 252179bd3712SFilipe Manana return ERR_PTR(ret); 25224358d963SJosef Bacik } 25234358d963SJosef Bacik 25244358d963SJosef Bacik add_new_free_space(cache, chunk_offset, chunk_offset + size); 25254358d963SJosef Bacik 25264358d963SJosef Bacik btrfs_free_excluded_extents(cache); 25274358d963SJosef Bacik 25284358d963SJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 25294358d963SJosef Bacik if (btrfs_should_fragment_free_space(cache)) { 25304358d963SJosef Bacik u64 new_bytes_used = size - bytes_used; 25314358d963SJosef Bacik 25324358d963SJosef Bacik bytes_used += new_bytes_used >> 1; 2533e11c0406SJosef Bacik fragment_free_space(cache); 25344358d963SJosef Bacik } 25354358d963SJosef Bacik #endif 25364358d963SJosef Bacik /* 25374358d963SJosef Bacik * Ensure the corresponding space_info object is created and 25384358d963SJosef Bacik * assigned to our block group. We want our bg to be added to the rbtree 25394358d963SJosef Bacik * with its ->space_info set. 
25404358d963SJosef Bacik */ 25414358d963SJosef Bacik cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 25424358d963SJosef Bacik ASSERT(cache->space_info); 25434358d963SJosef Bacik 25444358d963SJosef Bacik ret = btrfs_add_block_group_cache(fs_info, cache); 25454358d963SJosef Bacik if (ret) { 25464358d963SJosef Bacik btrfs_remove_free_space_cache(cache); 25474358d963SJosef Bacik btrfs_put_block_group(cache); 254879bd3712SFilipe Manana return ERR_PTR(ret); 25494358d963SJosef Bacik } 25504358d963SJosef Bacik 25514358d963SJosef Bacik /* 25524358d963SJosef Bacik * Now that our block group has its ->space_info set and is inserted in 25534358d963SJosef Bacik * the rbtree, update the space info's counters. 25544358d963SJosef Bacik */ 25554358d963SJosef Bacik trace_btrfs_add_block_group(fs_info, cache, 1); 25564358d963SJosef Bacik btrfs_update_space_info(fs_info, cache->flags, size, bytes_used, 255798173255SNaohiro Aota cache->bytes_super, cache->zone_unusable, 25586a921de5SNaohiro Aota cache->zone_is_active, &cache->space_info); 25594358d963SJosef Bacik btrfs_update_global_block_rsv(fs_info); 25604358d963SJosef Bacik 25614358d963SJosef Bacik link_block_group(cache); 25624358d963SJosef Bacik 25634358d963SJosef Bacik list_add_tail(&cache->bg_list, &trans->new_bgs); 25644358d963SJosef Bacik trans->delayed_ref_updates++; 25654358d963SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 25664358d963SJosef Bacik 25674358d963SJosef Bacik set_avail_alloc_bits(fs_info, type); 256879bd3712SFilipe Manana return cache; 25694358d963SJosef Bacik } 257026ce2095SJosef Bacik 2571b12de528SQu Wenruo /* 2572b12de528SQu Wenruo * Mark one block group RO; can be called several times for the same block 2573b12de528SQu Wenruo * group. 2574b12de528SQu Wenruo * 2575b12de528SQu Wenruo * @cache: the destination block group 2576b12de528SQu Wenruo * @do_chunk_alloc: whether we need to do chunk pre-allocation, this is to 2577b12de528SQu Wenruo * ensure we still have some free space after marking this 2578b12de528SQu Wenruo * block group RO. 2579b12de528SQu Wenruo */ 2580b12de528SQu Wenruo int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2581b12de528SQu Wenruo bool do_chunk_alloc) 258226ce2095SJosef Bacik { 258326ce2095SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 258426ce2095SJosef Bacik struct btrfs_trans_handle *trans; 2585dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 258626ce2095SJosef Bacik u64 alloc_flags; 258726ce2095SJosef Bacik int ret; 2588b6e9f16cSNikolay Borisov bool dirty_bg_running; 258926ce2095SJosef Bacik 25902d192fc4SQu Wenruo /* 25912d192fc4SQu Wenruo * This can only happen when we are doing read-only scrub on read-only 25922d192fc4SQu Wenruo * mount. 25932d192fc4SQu Wenruo * In that case we should not start a new transaction on read-only fs. 25942d192fc4SQu Wenruo * Thus here we skip all chunk allocations.
25952d192fc4SQu Wenruo */ 25962d192fc4SQu Wenruo if (sb_rdonly(fs_info->sb)) { 25972d192fc4SQu Wenruo mutex_lock(&fs_info->ro_block_group_mutex); 25982d192fc4SQu Wenruo ret = inc_block_group_ro(cache, 0); 25992d192fc4SQu Wenruo mutex_unlock(&fs_info->ro_block_group_mutex); 26002d192fc4SQu Wenruo return ret; 26012d192fc4SQu Wenruo } 26022d192fc4SQu Wenruo 2603b6e9f16cSNikolay Borisov do { 2604dfe8aec4SJosef Bacik trans = btrfs_join_transaction(root); 260526ce2095SJosef Bacik if (IS_ERR(trans)) 260626ce2095SJosef Bacik return PTR_ERR(trans); 260726ce2095SJosef Bacik 2608b6e9f16cSNikolay Borisov dirty_bg_running = false; 2609b6e9f16cSNikolay Borisov 261026ce2095SJosef Bacik /* 2611b6e9f16cSNikolay Borisov * We're not allowed to set block groups readonly after the dirty 2612b6e9f16cSNikolay Borisov * block group cache has started writing. If it already started, 2613b6e9f16cSNikolay Borisov * back off and let this transaction commit. 261426ce2095SJosef Bacik */ 261526ce2095SJosef Bacik mutex_lock(&fs_info->ro_block_group_mutex); 261626ce2095SJosef Bacik if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 261726ce2095SJosef Bacik u64 transid = trans->transid; 261826ce2095SJosef Bacik 261926ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 262026ce2095SJosef Bacik btrfs_end_transaction(trans); 262126ce2095SJosef Bacik 262226ce2095SJosef Bacik ret = btrfs_wait_for_commit(fs_info, transid); 262326ce2095SJosef Bacik if (ret) 262426ce2095SJosef Bacik return ret; 2625b6e9f16cSNikolay Borisov dirty_bg_running = true; 262626ce2095SJosef Bacik } 2627b6e9f16cSNikolay Borisov } while (dirty_bg_running); 262826ce2095SJosef Bacik 2629b12de528SQu Wenruo if (do_chunk_alloc) { 263026ce2095SJosef Bacik /* 2631b12de528SQu Wenruo * If we are changing raid levels, try to allocate a 2632b12de528SQu Wenruo * corresponding block group with the new raid level. 263326ce2095SJosef Bacik */ 2634349e120eSJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 263526ce2095SJosef Bacik if (alloc_flags != cache->flags) { 2636b12de528SQu Wenruo ret = btrfs_chunk_alloc(trans, alloc_flags, 2637b12de528SQu Wenruo CHUNK_ALLOC_FORCE); 263826ce2095SJosef Bacik /* 263926ce2095SJosef Bacik * ENOSPC is allowed here, we may have enough space 2640b12de528SQu Wenruo * already allocated at the new raid level to carry on 264126ce2095SJosef Bacik */ 264226ce2095SJosef Bacik if (ret == -ENOSPC) 264326ce2095SJosef Bacik ret = 0; 264426ce2095SJosef Bacik if (ret < 0) 264526ce2095SJosef Bacik goto out; 264626ce2095SJosef Bacik } 2647b12de528SQu Wenruo } 264826ce2095SJosef Bacik 2649a7a63accSJosef Bacik ret = inc_block_group_ro(cache, 0); 2650195a49eaSFilipe Manana if (!do_chunk_alloc || ret == -ETXTBSY) 2651b12de528SQu Wenruo goto unlock_out; 265226ce2095SJosef Bacik if (!ret) 265326ce2095SJosef Bacik goto out; 265426ce2095SJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 265526ce2095SJosef Bacik ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 265626ce2095SJosef Bacik if (ret < 0) 265726ce2095SJosef Bacik goto out; 2658b6a98021SNaohiro Aota /* 2659b6a98021SNaohiro Aota * We have allocated a new chunk. We also need to activate that chunk to 2660b6a98021SNaohiro Aota * grant metadata tickets for zoned filesystem. 
2661b6a98021SNaohiro Aota */ 2662b6a98021SNaohiro Aota ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); 2663b6a98021SNaohiro Aota if (ret < 0) 2664b6a98021SNaohiro Aota goto out; 2665b6a98021SNaohiro Aota 2666e11c0406SJosef Bacik ret = inc_block_group_ro(cache, 0); 2667195a49eaSFilipe Manana if (ret == -ETXTBSY) 2668195a49eaSFilipe Manana goto unlock_out; 266926ce2095SJosef Bacik out: 267026ce2095SJosef Bacik if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 2671349e120eSJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 267226ce2095SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 267326ce2095SJosef Bacik check_system_chunk(trans, alloc_flags); 267426ce2095SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 267526ce2095SJosef Bacik } 2676b12de528SQu Wenruo unlock_out: 267726ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 267826ce2095SJosef Bacik 267926ce2095SJosef Bacik btrfs_end_transaction(trans); 268026ce2095SJosef Bacik return ret; 268126ce2095SJosef Bacik } 268226ce2095SJosef Bacik 268332da5386SDavid Sterba void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 268426ce2095SJosef Bacik { 268526ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info; 268626ce2095SJosef Bacik u64 num_bytes; 268726ce2095SJosef Bacik 268826ce2095SJosef Bacik BUG_ON(!cache->ro); 268926ce2095SJosef Bacik 269026ce2095SJosef Bacik spin_lock(&sinfo->lock); 269126ce2095SJosef Bacik spin_lock(&cache->lock); 269226ce2095SJosef Bacik if (!--cache->ro) { 2693169e0da9SNaohiro Aota if (btrfs_is_zoned(cache->fs_info)) { 2694169e0da9SNaohiro Aota /* Migrate zone_unusable bytes back */ 269598173255SNaohiro Aota cache->zone_unusable = 269698173255SNaohiro Aota (cache->alloc_offset - cache->used) + 269798173255SNaohiro Aota (cache->length - cache->zone_capacity); 2698169e0da9SNaohiro Aota sinfo->bytes_zone_unusable += cache->zone_unusable; 2699169e0da9SNaohiro Aota sinfo->bytes_readonly -= cache->zone_unusable; 2700169e0da9SNaohiro Aota } 2701f9f28e5bSNaohiro Aota num_bytes = cache->length - cache->reserved - 2702f9f28e5bSNaohiro Aota cache->pinned - cache->bytes_super - 2703f9f28e5bSNaohiro Aota cache->zone_unusable - cache->used; 2704f9f28e5bSNaohiro Aota sinfo->bytes_readonly -= num_bytes; 270526ce2095SJosef Bacik list_del_init(&cache->ro_list); 270626ce2095SJosef Bacik } 270726ce2095SJosef Bacik spin_unlock(&cache->lock); 270826ce2095SJosef Bacik spin_unlock(&sinfo->lock); 270926ce2095SJosef Bacik } 271077745c05SJosef Bacik 27113be4d8efSQu Wenruo static int update_block_group_item(struct btrfs_trans_handle *trans, 271277745c05SJosef Bacik struct btrfs_path *path, 271332da5386SDavid Sterba struct btrfs_block_group *cache) 271477745c05SJosef Bacik { 271577745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 271677745c05SJosef Bacik int ret; 2717dfe8aec4SJosef Bacik struct btrfs_root *root = btrfs_block_group_root(fs_info); 271877745c05SJosef Bacik unsigned long bi; 271977745c05SJosef Bacik struct extent_buffer *leaf; 2720bf38be65SDavid Sterba struct btrfs_block_group_item bgi; 2721b3470b5dSDavid Sterba struct btrfs_key key; 272277745c05SJosef Bacik 2723b3470b5dSDavid Sterba key.objectid = cache->start; 2724b3470b5dSDavid Sterba key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2725b3470b5dSDavid Sterba key.offset = cache->length; 2726b3470b5dSDavid Sterba 27273be4d8efSQu Wenruo ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 272877745c05SJosef Bacik if (ret) { 272977745c05SJosef Bacik if (ret > 0) 273077745c05SJosef Bacik ret = 
-ENOENT; 273177745c05SJosef Bacik goto fail; 273277745c05SJosef Bacik } 273377745c05SJosef Bacik 273477745c05SJosef Bacik leaf = path->nodes[0]; 273577745c05SJosef Bacik bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 2736de0dc456SDavid Sterba btrfs_set_stack_block_group_used(&bgi, cache->used); 2737de0dc456SDavid Sterba btrfs_set_stack_block_group_chunk_objectid(&bgi, 2738f7238e50SJosef Bacik cache->global_root_id); 2739de0dc456SDavid Sterba btrfs_set_stack_block_group_flags(&bgi, cache->flags); 2740bf38be65SDavid Sterba write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 274177745c05SJosef Bacik btrfs_mark_buffer_dirty(leaf); 274277745c05SJosef Bacik fail: 274377745c05SJosef Bacik btrfs_release_path(path); 274477745c05SJosef Bacik return ret; 274577745c05SJosef Bacik 274677745c05SJosef Bacik } 274777745c05SJosef Bacik 274832da5386SDavid Sterba static int cache_save_setup(struct btrfs_block_group *block_group, 274977745c05SJosef Bacik struct btrfs_trans_handle *trans, 275077745c05SJosef Bacik struct btrfs_path *path) 275177745c05SJosef Bacik { 275277745c05SJosef Bacik struct btrfs_fs_info *fs_info = block_group->fs_info; 275377745c05SJosef Bacik struct btrfs_root *root = fs_info->tree_root; 275477745c05SJosef Bacik struct inode *inode = NULL; 275577745c05SJosef Bacik struct extent_changeset *data_reserved = NULL; 275677745c05SJosef Bacik u64 alloc_hint = 0; 275777745c05SJosef Bacik int dcs = BTRFS_DC_ERROR; 27580044ae11SQu Wenruo u64 cache_size = 0; 275977745c05SJosef Bacik int retries = 0; 276077745c05SJosef Bacik int ret = 0; 276177745c05SJosef Bacik 2762af456a2cSBoris Burkov if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 2763af456a2cSBoris Burkov return 0; 2764af456a2cSBoris Burkov 276577745c05SJosef Bacik /* 276677745c05SJosef Bacik * If this block group is smaller than 100 megs don't bother caching the 276777745c05SJosef Bacik * block group. 276877745c05SJosef Bacik */ 2769b3470b5dSDavid Sterba if (block_group->length < (100 * SZ_1M)) { 277077745c05SJosef Bacik spin_lock(&block_group->lock); 277177745c05SJosef Bacik block_group->disk_cache_state = BTRFS_DC_WRITTEN; 277277745c05SJosef Bacik spin_unlock(&block_group->lock); 277377745c05SJosef Bacik return 0; 277477745c05SJosef Bacik } 277577745c05SJosef Bacik 2776bf31f87fSDavid Sterba if (TRANS_ABORTED(trans)) 277777745c05SJosef Bacik return 0; 277877745c05SJosef Bacik again: 277977745c05SJosef Bacik inode = lookup_free_space_inode(block_group, path); 278077745c05SJosef Bacik if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 278177745c05SJosef Bacik ret = PTR_ERR(inode); 278277745c05SJosef Bacik btrfs_release_path(path); 278377745c05SJosef Bacik goto out; 278477745c05SJosef Bacik } 278577745c05SJosef Bacik 278677745c05SJosef Bacik if (IS_ERR(inode)) { 278777745c05SJosef Bacik BUG_ON(retries); 278877745c05SJosef Bacik retries++; 278977745c05SJosef Bacik 279077745c05SJosef Bacik if (block_group->ro) 279177745c05SJosef Bacik goto out_free; 279277745c05SJosef Bacik 279377745c05SJosef Bacik ret = create_free_space_inode(trans, block_group, path); 279477745c05SJosef Bacik if (ret) 279577745c05SJosef Bacik goto out_free; 279677745c05SJosef Bacik goto again; 279777745c05SJosef Bacik } 279877745c05SJosef Bacik 279977745c05SJosef Bacik /* 280077745c05SJosef Bacik * We want to set the generation to 0, that way if anything goes wrong 280177745c05SJosef Bacik * from here on out we know not to trust this cache when we load up next 280277745c05SJosef Bacik * time. 
280377745c05SJosef Bacik */ 280477745c05SJosef Bacik BTRFS_I(inode)->generation = 0; 28059a56fcd1SNikolay Borisov ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 280677745c05SJosef Bacik if (ret) { 280777745c05SJosef Bacik /* 280877745c05SJosef Bacik * So theoretically we could recover from this: simply set the 280977745c05SJosef Bacik * super cache generation to 0 so we know to invalidate the 281077745c05SJosef Bacik * cache, but then we'd have to keep track of the block groups 281177745c05SJosef Bacik * that fail this way so we know we _have_ to reset this cache 281277745c05SJosef Bacik * before the next commit or risk reading stale cache. So to 281377745c05SJosef Bacik * limit our exposure to horrible edge cases let's just abort the 281477745c05SJosef Bacik * transaction; this only happens in really bad situations 281577745c05SJosef Bacik * anyway. 281677745c05SJosef Bacik */ 281777745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 281877745c05SJosef Bacik goto out_put; 281977745c05SJosef Bacik } 282077745c05SJosef Bacik WARN_ON(ret); 282177745c05SJosef Bacik 282277745c05SJosef Bacik /* We've already set up this transaction, go ahead and exit */ 282377745c05SJosef Bacik if (block_group->cache_generation == trans->transid && 282477745c05SJosef Bacik i_size_read(inode)) { 282577745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 282677745c05SJosef Bacik goto out_put; 282777745c05SJosef Bacik } 282877745c05SJosef Bacik 282977745c05SJosef Bacik if (i_size_read(inode) > 0) { 283077745c05SJosef Bacik ret = btrfs_check_trunc_cache_free_space(fs_info, 283177745c05SJosef Bacik &fs_info->global_block_rsv); 283277745c05SJosef Bacik if (ret) 283377745c05SJosef Bacik goto out_put; 283477745c05SJosef Bacik 283577745c05SJosef Bacik ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 283677745c05SJosef Bacik if (ret) 283777745c05SJosef Bacik goto out_put; 283877745c05SJosef Bacik } 283977745c05SJosef Bacik 284077745c05SJosef Bacik spin_lock(&block_group->lock); 284177745c05SJosef Bacik if (block_group->cached != BTRFS_CACHE_FINISHED || 284277745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) { 284377745c05SJosef Bacik /* 284477745c05SJosef Bacik * don't bother trying to write stuff out _if_ 284577745c05SJosef Bacik * a) we're not cached, 284677745c05SJosef Bacik * b) we're using the nospace_cache mount option, 284777745c05SJosef Bacik * c) we're using v2 space_cache (FREE_SPACE_TREE). 284877745c05SJosef Bacik */ 284977745c05SJosef Bacik dcs = BTRFS_DC_WRITTEN; 285077745c05SJosef Bacik spin_unlock(&block_group->lock); 285177745c05SJosef Bacik goto out_put; 285277745c05SJosef Bacik } 285377745c05SJosef Bacik spin_unlock(&block_group->lock); 285477745c05SJosef Bacik 285577745c05SJosef Bacik /* 285677745c05SJosef Bacik * We hit an ENOSPC when setting up the cache in this transaction; just 285777745c05SJosef Bacik * skip doing the setup, as we've already cleared the cache so we're safe. 285877745c05SJosef Bacik */ 285977745c05SJosef Bacik if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 286077745c05SJosef Bacik ret = -ENOSPC; 286177745c05SJosef Bacik goto out_put; 286277745c05SJosef Bacik } 286377745c05SJosef Bacik 286477745c05SJosef Bacik /* 286577745c05SJosef Bacik * Try to preallocate enough space based on how big the block group is. 286677745c05SJosef Bacik * Keep in mind this has to include any pinned space which could end up 286777745c05SJosef Bacik * taking up quite a bit since it's not folded into the other space 286877745c05SJosef Bacik * cache.
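As a worked example (illustrative numbers): a 1GiB block group on a filesystem with a 4KiB sectorsize preallocates (1GiB / 256MiB) * 16 * 4KiB = 256KiB for the free space cache inode.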
286977745c05SJosef Bacik */ 28700044ae11SQu Wenruo cache_size = div_u64(block_group->length, SZ_256M); 28710044ae11SQu Wenruo if (!cache_size) 28720044ae11SQu Wenruo cache_size = 1; 287377745c05SJosef Bacik 28740044ae11SQu Wenruo cache_size *= 16; 28750044ae11SQu Wenruo cache_size *= fs_info->sectorsize; 287677745c05SJosef Bacik 287736ea6f3eSNikolay Borisov ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 28780044ae11SQu Wenruo cache_size); 287977745c05SJosef Bacik if (ret) 288077745c05SJosef Bacik goto out_put; 288177745c05SJosef Bacik 28820044ae11SQu Wenruo ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, 28830044ae11SQu Wenruo cache_size, cache_size, 288477745c05SJosef Bacik &alloc_hint); 288577745c05SJosef Bacik /* 288677745c05SJosef Bacik * Our cache requires contiguous chunks so that we don't modify a bunch 288777745c05SJosef Bacik * of metadata or split extents when writing the cache out, which means 288877745c05SJosef Bacik * we can enospc if we are heavily fragmented in addition to just normal 288977745c05SJosef Bacik * out of space conditions. So if we hit this just skip setting up any 289077745c05SJosef Bacik * other block groups for this transaction, maybe we'll unpin enough 289177745c05SJosef Bacik * space the next time around. 289277745c05SJosef Bacik */ 289377745c05SJosef Bacik if (!ret) 289477745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 289577745c05SJosef Bacik else if (ret == -ENOSPC) 289677745c05SJosef Bacik set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 289777745c05SJosef Bacik 289877745c05SJosef Bacik out_put: 289977745c05SJosef Bacik iput(inode); 290077745c05SJosef Bacik out_free: 290177745c05SJosef Bacik btrfs_release_path(path); 290277745c05SJosef Bacik out: 290377745c05SJosef Bacik spin_lock(&block_group->lock); 290477745c05SJosef Bacik if (!ret && dcs == BTRFS_DC_SETUP) 290577745c05SJosef Bacik block_group->cache_generation = trans->transid; 290677745c05SJosef Bacik block_group->disk_cache_state = dcs; 290777745c05SJosef Bacik spin_unlock(&block_group->lock); 290877745c05SJosef Bacik 290977745c05SJosef Bacik extent_changeset_free(data_reserved); 291077745c05SJosef Bacik return ret; 291177745c05SJosef Bacik } 291277745c05SJosef Bacik 291377745c05SJosef Bacik int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 291477745c05SJosef Bacik { 291577745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 291632da5386SDavid Sterba struct btrfs_block_group *cache, *tmp; 291777745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 291877745c05SJosef Bacik struct btrfs_path *path; 291977745c05SJosef Bacik 292077745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs) || 292177745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) 292277745c05SJosef Bacik return 0; 292377745c05SJosef Bacik 292477745c05SJosef Bacik path = btrfs_alloc_path(); 292577745c05SJosef Bacik if (!path) 292677745c05SJosef Bacik return -ENOMEM; 292777745c05SJosef Bacik 292877745c05SJosef Bacik /* Could add new block groups, use _safe just in case */ 292977745c05SJosef Bacik list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 293077745c05SJosef Bacik dirty_list) { 293177745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_CLEAR) 293277745c05SJosef Bacik cache_save_setup(cache, trans, path); 293377745c05SJosef Bacik } 293477745c05SJosef Bacik 293577745c05SJosef Bacik btrfs_free_path(path); 293677745c05SJosef Bacik return 0; 293777745c05SJosef Bacik } 293877745c05SJosef Bacik 293977745c05SJosef 
Bacik /* 294077745c05SJosef Bacik * Transaction commit does final block group cache writeback during a critical 294177745c05SJosef Bacik * section where nothing is allowed to change the FS. This is required in 294277745c05SJosef Bacik * order for the cache to actually match the block group, but can introduce a 294377745c05SJosef Bacik * lot of latency into the commit. 294477745c05SJosef Bacik * 294577745c05SJosef Bacik * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 294677745c05SJosef Bacik * There's a chance we'll have to redo some of it if the block group changes 294777745c05SJosef Bacik * again during the commit, but it greatly reduces the commit latency by 294877745c05SJosef Bacik * getting rid of the easy block groups while we're still allowing others to 294977745c05SJosef Bacik * join the commit. 295077745c05SJosef Bacik */ 295177745c05SJosef Bacik int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 295277745c05SJosef Bacik { 295377745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 295432da5386SDavid Sterba struct btrfs_block_group *cache; 295577745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 295677745c05SJosef Bacik int ret = 0; 295777745c05SJosef Bacik int should_put; 295877745c05SJosef Bacik struct btrfs_path *path = NULL; 295977745c05SJosef Bacik LIST_HEAD(dirty); 296077745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 296177745c05SJosef Bacik int loops = 0; 296277745c05SJosef Bacik 296377745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 296477745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs)) { 296577745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 296677745c05SJosef Bacik return 0; 296777745c05SJosef Bacik } 296877745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 296977745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 297077745c05SJosef Bacik 297177745c05SJosef Bacik again: 297277745c05SJosef Bacik /* Make sure all the block groups on our dirty list actually exist */ 297377745c05SJosef Bacik btrfs_create_pending_block_groups(trans); 297477745c05SJosef Bacik 297577745c05SJosef Bacik if (!path) { 297677745c05SJosef Bacik path = btrfs_alloc_path(); 2977938fcbfbSJosef Bacik if (!path) { 2978938fcbfbSJosef Bacik ret = -ENOMEM; 2979938fcbfbSJosef Bacik goto out; 2980938fcbfbSJosef Bacik } 298177745c05SJosef Bacik } 298277745c05SJosef Bacik 298377745c05SJosef Bacik /* 298477745c05SJosef Bacik * cache_write_mutex is here only to save us from balance or automatic 298577745c05SJosef Bacik * removal of empty block groups deleting this block group while we are 298677745c05SJosef Bacik * writing out the cache 298777745c05SJosef Bacik */ 298877745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 298977745c05SJosef Bacik while (!list_empty(&dirty)) { 299077745c05SJosef Bacik bool drop_reserve = true; 299177745c05SJosef Bacik 299232da5386SDavid Sterba cache = list_first_entry(&dirty, struct btrfs_block_group, 299377745c05SJosef Bacik dirty_list); 299477745c05SJosef Bacik /* 299577745c05SJosef Bacik * This can happen if something re-dirties a block group that 299677745c05SJosef Bacik * is already under IO. 
Just wait for it to finish and then do 299777745c05SJosef Bacik * it all again 299877745c05SJosef Bacik */ 299977745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 300077745c05SJosef Bacik list_del_init(&cache->io_list); 300177745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 300277745c05SJosef Bacik btrfs_put_block_group(cache); 300377745c05SJosef Bacik } 300477745c05SJosef Bacik 300577745c05SJosef Bacik 300677745c05SJosef Bacik /* 300777745c05SJosef Bacik * btrfs_wait_cache_io uses the cache->dirty_list to decide if 300877745c05SJosef Bacik * it should update the cache_state. Don't delete until after 300977745c05SJosef Bacik * we wait. 301077745c05SJosef Bacik * 301177745c05SJosef Bacik * Since we're not running in the commit critical section 301277745c05SJosef Bacik * we need the dirty_bgs_lock to protect from update_block_group 301377745c05SJosef Bacik */ 301477745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 301577745c05SJosef Bacik list_del_init(&cache->dirty_list); 301677745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 301777745c05SJosef Bacik 301877745c05SJosef Bacik should_put = 1; 301977745c05SJosef Bacik 302077745c05SJosef Bacik cache_save_setup(cache, trans, path); 302177745c05SJosef Bacik 302277745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_SETUP) { 302377745c05SJosef Bacik cache->io_ctl.inode = NULL; 302477745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 302577745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 302677745c05SJosef Bacik should_put = 0; 302777745c05SJosef Bacik 302877745c05SJosef Bacik /* 302977745c05SJosef Bacik * The cache_write_mutex is protecting the 303077745c05SJosef Bacik * io_list, also refer to the definition of 303177745c05SJosef Bacik * btrfs_transaction::io_bgs for more details 303277745c05SJosef Bacik */ 303377745c05SJosef Bacik list_add_tail(&cache->io_list, io); 303477745c05SJosef Bacik } else { 303577745c05SJosef Bacik /* 303677745c05SJosef Bacik * If we failed to write the cache, the 303777745c05SJosef Bacik * generation will be bad and life goes on 303877745c05SJosef Bacik */ 303977745c05SJosef Bacik ret = 0; 304077745c05SJosef Bacik } 304177745c05SJosef Bacik } 304277745c05SJosef Bacik if (!ret) { 30433be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 304477745c05SJosef Bacik /* 304577745c05SJosef Bacik * Our block group might still be attached to the list 304677745c05SJosef Bacik * of new block groups in the transaction handle of some 304777745c05SJosef Bacik * other task (struct btrfs_trans_handle->new_bgs). This 304877745c05SJosef Bacik * means its block group item isn't yet in the extent 304977745c05SJosef Bacik * tree. If this happens ignore the error, as we will 305077745c05SJosef Bacik * try again later in the critical section of the 305177745c05SJosef Bacik * transaction commit. 
305277745c05SJosef Bacik */ 305377745c05SJosef Bacik if (ret == -ENOENT) { 305477745c05SJosef Bacik ret = 0; 305577745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 305677745c05SJosef Bacik if (list_empty(&cache->dirty_list)) { 305777745c05SJosef Bacik list_add_tail(&cache->dirty_list, 305877745c05SJosef Bacik &cur_trans->dirty_bgs); 305977745c05SJosef Bacik btrfs_get_block_group(cache); 306077745c05SJosef Bacik drop_reserve = false; 306177745c05SJosef Bacik } 306277745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 306377745c05SJosef Bacik } else if (ret) { 306477745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 306577745c05SJosef Bacik } 306677745c05SJosef Bacik } 306777745c05SJosef Bacik 306877745c05SJosef Bacik /* If it's not on the io list, we need to put the block group */ 306977745c05SJosef Bacik if (should_put) 307077745c05SJosef Bacik btrfs_put_block_group(cache); 307177745c05SJosef Bacik if (drop_reserve) 307277745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 307377745c05SJosef Bacik /* 307477745c05SJosef Bacik * Avoid blocking other tasks for too long. It might even save 307577745c05SJosef Bacik * us from writing caches for block groups that are going to be 307677745c05SJosef Bacik * removed. 307777745c05SJosef Bacik */ 307877745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 3079938fcbfbSJosef Bacik if (ret) 3080938fcbfbSJosef Bacik goto out; 308177745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 308277745c05SJosef Bacik } 308377745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 308477745c05SJosef Bacik 308577745c05SJosef Bacik /* 308677745c05SJosef Bacik * Go through delayed refs for all the stuff we've just kicked off 308777745c05SJosef Bacik * and then loop back (just once) 308877745c05SJosef Bacik */ 308934d1eb0eSJosef Bacik if (!ret) 309077745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 0); 309177745c05SJosef Bacik if (!ret && loops == 0) { 309277745c05SJosef Bacik loops++; 309377745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 309477745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 309577745c05SJosef Bacik /* 309677745c05SJosef Bacik * dirty_bgs_lock protects us from concurrent block group 309777745c05SJosef Bacik * deletes too (not just cache_write_mutex). 
309877745c05SJosef Bacik */ 309977745c05SJosef Bacik if (!list_empty(&dirty)) { 310077745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 310177745c05SJosef Bacik goto again; 310277745c05SJosef Bacik } 310377745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 3104938fcbfbSJosef Bacik } 3105938fcbfbSJosef Bacik out: 3106938fcbfbSJosef Bacik if (ret < 0) { 3107938fcbfbSJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 3108938fcbfbSJosef Bacik list_splice_init(&dirty, &cur_trans->dirty_bgs); 3109938fcbfbSJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 311077745c05SJosef Bacik btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 311177745c05SJosef Bacik } 311277745c05SJosef Bacik 311377745c05SJosef Bacik btrfs_free_path(path); 311477745c05SJosef Bacik return ret; 311577745c05SJosef Bacik } 311677745c05SJosef Bacik 311777745c05SJosef Bacik int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 311877745c05SJosef Bacik { 311977745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 312032da5386SDavid Sterba struct btrfs_block_group *cache; 312177745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 312277745c05SJosef Bacik int ret = 0; 312377745c05SJosef Bacik int should_put; 312477745c05SJosef Bacik struct btrfs_path *path; 312577745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 312677745c05SJosef Bacik 312777745c05SJosef Bacik path = btrfs_alloc_path(); 312877745c05SJosef Bacik if (!path) 312977745c05SJosef Bacik return -ENOMEM; 313077745c05SJosef Bacik 313177745c05SJosef Bacik /* 313277745c05SJosef Bacik * Even though we are in the critical section of the transaction commit, 313377745c05SJosef Bacik * we can still have concurrent tasks adding elements to this 313477745c05SJosef Bacik * transaction's list of dirty block groups. These tasks correspond to 313577745c05SJosef Bacik * endio free space workers started when writeback finishes for a 313677745c05SJosef Bacik * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 313777745c05SJosef Bacik * allocate new block groups as a result of COWing nodes of the root 313877745c05SJosef Bacik * tree when updating the free space inode. The writeback for the space 313977745c05SJosef Bacik * caches is triggered by an earlier call to 314077745c05SJosef Bacik * btrfs_start_dirty_block_groups() and iterations of the following 314177745c05SJosef Bacik * loop. 314277745c05SJosef Bacik * Also we want to do the cache_save_setup first and then run the 314377745c05SJosef Bacik * delayed refs to make sure we have the best chance at doing this all 314477745c05SJosef Bacik * in one shot. 314577745c05SJosef Bacik */ 314677745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 314777745c05SJosef Bacik while (!list_empty(&cur_trans->dirty_bgs)) { 314877745c05SJosef Bacik cache = list_first_entry(&cur_trans->dirty_bgs, 314932da5386SDavid Sterba struct btrfs_block_group, 315077745c05SJosef Bacik dirty_list); 315177745c05SJosef Bacik 315277745c05SJosef Bacik /* 315377745c05SJosef Bacik * This can happen if cache_save_setup re-dirties a block group 315477745c05SJosef Bacik * that is already under IO. 
Just wait for it to finish and 315577745c05SJosef Bacik * then do it all again 315677745c05SJosef Bacik */ 315777745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 315877745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 315977745c05SJosef Bacik list_del_init(&cache->io_list); 316077745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 316177745c05SJosef Bacik btrfs_put_block_group(cache); 316277745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 316377745c05SJosef Bacik } 316477745c05SJosef Bacik 316577745c05SJosef Bacik /* 316677745c05SJosef Bacik * Don't remove from the dirty list until after we've waited on 316777745c05SJosef Bacik * any pending IO 316877745c05SJosef Bacik */ 316977745c05SJosef Bacik list_del_init(&cache->dirty_list); 317077745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 317177745c05SJosef Bacik should_put = 1; 317277745c05SJosef Bacik 317377745c05SJosef Bacik cache_save_setup(cache, trans, path); 317477745c05SJosef Bacik 317577745c05SJosef Bacik if (!ret) 317677745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 317777745c05SJosef Bacik (unsigned long) -1); 317877745c05SJosef Bacik 317977745c05SJosef Bacik if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 318077745c05SJosef Bacik cache->io_ctl.inode = NULL; 318177745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 318277745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 318377745c05SJosef Bacik should_put = 0; 318477745c05SJosef Bacik list_add_tail(&cache->io_list, io); 318577745c05SJosef Bacik } else { 318677745c05SJosef Bacik /* 318777745c05SJosef Bacik * If we failed to write the cache, the 318877745c05SJosef Bacik * generation will be bad and life goes on 318977745c05SJosef Bacik */ 319077745c05SJosef Bacik ret = 0; 319177745c05SJosef Bacik } 319277745c05SJosef Bacik } 319377745c05SJosef Bacik if (!ret) { 31943be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 319577745c05SJosef Bacik /* 319677745c05SJosef Bacik * One of the free space endio workers might have 319777745c05SJosef Bacik * created a new block group while updating a free space 319877745c05SJosef Bacik * cache's inode (at inode.c:btrfs_finish_ordered_io()) 319977745c05SJosef Bacik * and hasn't released its transaction handle yet, in 320077745c05SJosef Bacik * which case the new block group is still attached to 320177745c05SJosef Bacik * its transaction handle and its creation has not 320277745c05SJosef Bacik * finished yet (no block group item in the extent tree 320377745c05SJosef Bacik * yet, etc). If this is the case, wait for all free 320477745c05SJosef Bacik * space endio workers to finish and retry. This is a 3205260db43cSRandy Dunlap * very rare case so no need for a more efficient and 320677745c05SJosef Bacik * complex approach. 
320777745c05SJosef Bacik */ 320877745c05SJosef Bacik if (ret == -ENOENT) { 320977745c05SJosef Bacik wait_event(cur_trans->writer_wait, 321077745c05SJosef Bacik atomic_read(&cur_trans->num_writers) == 1); 32113be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 321277745c05SJosef Bacik } 321377745c05SJosef Bacik if (ret) 321477745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 321577745c05SJosef Bacik } 321677745c05SJosef Bacik 321777745c05SJosef Bacik /* If it's not on the io list, we need to put the block group */ 321877745c05SJosef Bacik if (should_put) 321977745c05SJosef Bacik btrfs_put_block_group(cache); 322077745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 322177745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 322277745c05SJosef Bacik } 322377745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 322477745c05SJosef Bacik 322577745c05SJosef Bacik /* 322677745c05SJosef Bacik * Refer to the definition of the io_bgs member for details on why it's 322777745c05SJosef Bacik * safe to use it without any locking 322877745c05SJosef Bacik */ 322977745c05SJosef Bacik while (!list_empty(io)) { 323032da5386SDavid Sterba cache = list_first_entry(io, struct btrfs_block_group, 323177745c05SJosef Bacik io_list); 323277745c05SJosef Bacik list_del_init(&cache->io_list); 323377745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 323477745c05SJosef Bacik btrfs_put_block_group(cache); 323577745c05SJosef Bacik } 323677745c05SJosef Bacik 323777745c05SJosef Bacik btrfs_free_path(path); 323877745c05SJosef Bacik return ret; 323977745c05SJosef Bacik } 3240606d1bf1SJosef Bacik 3241ac2f1e63SJosef Bacik static inline bool should_reclaim_block_group(struct btrfs_block_group *bg, 3242ac2f1e63SJosef Bacik u64 bytes_freed) 3243ac2f1e63SJosef Bacik { 3244ac2f1e63SJosef Bacik const struct btrfs_space_info *space_info = bg->space_info; 3245ac2f1e63SJosef Bacik const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold); 3246ac2f1e63SJosef Bacik const u64 new_val = bg->used; 3247ac2f1e63SJosef Bacik const u64 old_val = new_val + bytes_freed; 3248ac2f1e63SJosef Bacik u64 thresh; 3249ac2f1e63SJosef Bacik 3250ac2f1e63SJosef Bacik if (reclaim_thresh == 0) 3251ac2f1e63SJosef Bacik return false; 3252ac2f1e63SJosef Bacik 3253ac2f1e63SJosef Bacik thresh = div_factor_fine(bg->length, reclaim_thresh); 3254ac2f1e63SJosef Bacik 3255ac2f1e63SJosef Bacik /* 3256ac2f1e63SJosef Bacik * If we were below the threshold before, don't reclaim; we are likely a 3257ac2f1e63SJosef Bacik * brand new block group and we don't want to relocate new block groups.
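As a worked example (illustrative numbers): with a 1GiB block group and bg_reclaim_threshold == 75, thresh is 768MiB; a free that takes the used bytes from 800MiB down to 700MiB crosses the threshold and the group is marked for reclaim, while one going from 700MiB to 600MiB started below it and is not.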
3258ac2f1e63SJosef Bacik */ 3259ac2f1e63SJosef Bacik if (old_val < thresh) 3260ac2f1e63SJosef Bacik return false; 3261ac2f1e63SJosef Bacik if (new_val >= thresh) 3262ac2f1e63SJosef Bacik return false; 3263ac2f1e63SJosef Bacik return true; 3264ac2f1e63SJosef Bacik } 3265ac2f1e63SJosef Bacik 3266606d1bf1SJosef Bacik int btrfs_update_block_group(struct btrfs_trans_handle *trans, 326711b66fa6SAnand Jain u64 bytenr, u64 num_bytes, bool alloc) 3268606d1bf1SJosef Bacik { 3269606d1bf1SJosef Bacik struct btrfs_fs_info *info = trans->fs_info; 327032da5386SDavid Sterba struct btrfs_block_group *cache = NULL; 3271606d1bf1SJosef Bacik u64 total = num_bytes; 3272606d1bf1SJosef Bacik u64 old_val; 3273606d1bf1SJosef Bacik u64 byte_in_group; 3274606d1bf1SJosef Bacik int factor; 3275606d1bf1SJosef Bacik int ret = 0; 3276606d1bf1SJosef Bacik 3277606d1bf1SJosef Bacik /* Block accounting for super block */ 3278606d1bf1SJosef Bacik spin_lock(&info->delalloc_root_lock); 3279606d1bf1SJosef Bacik old_val = btrfs_super_bytes_used(info->super_copy); 3280606d1bf1SJosef Bacik if (alloc) 3281606d1bf1SJosef Bacik old_val += num_bytes; 3282606d1bf1SJosef Bacik else 3283606d1bf1SJosef Bacik old_val -= num_bytes; 3284606d1bf1SJosef Bacik btrfs_set_super_bytes_used(info->super_copy, old_val); 3285606d1bf1SJosef Bacik spin_unlock(&info->delalloc_root_lock); 3286606d1bf1SJosef Bacik 3287606d1bf1SJosef Bacik while (total) { 3288ac2f1e63SJosef Bacik bool reclaim; 3289ac2f1e63SJosef Bacik 3290606d1bf1SJosef Bacik cache = btrfs_lookup_block_group(info, bytenr); 3291606d1bf1SJosef Bacik if (!cache) { 3292606d1bf1SJosef Bacik ret = -ENOENT; 3293606d1bf1SJosef Bacik break; 3294606d1bf1SJosef Bacik } 3295606d1bf1SJosef Bacik factor = btrfs_bg_type_to_factor(cache->flags); 3296606d1bf1SJosef Bacik 3297606d1bf1SJosef Bacik /* 3298606d1bf1SJosef Bacik * If this block group has free space cache written out, we 3299606d1bf1SJosef Bacik * need to make sure to load it if we are removing space. This 3300606d1bf1SJosef Bacik * is because we need the unpinning stage to actually add the 3301606d1bf1SJosef Bacik * space back to the block group, otherwise we will leak space. 
3302606d1bf1SJosef Bacik */ 330332da5386SDavid Sterba if (!alloc && !btrfs_block_group_done(cache)) 3304ced8ecf0SOmar Sandoval btrfs_cache_block_group(cache, true); 3305606d1bf1SJosef Bacik 3306b3470b5dSDavid Sterba byte_in_group = bytenr - cache->start; 3307b3470b5dSDavid Sterba WARN_ON(byte_in_group > cache->length); 3308606d1bf1SJosef Bacik 3309606d1bf1SJosef Bacik spin_lock(&cache->space_info->lock); 3310606d1bf1SJosef Bacik spin_lock(&cache->lock); 3311606d1bf1SJosef Bacik 3312606d1bf1SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 3313606d1bf1SJosef Bacik cache->disk_cache_state < BTRFS_DC_CLEAR) 3314606d1bf1SJosef Bacik cache->disk_cache_state = BTRFS_DC_CLEAR; 3315606d1bf1SJosef Bacik 3316bf38be65SDavid Sterba old_val = cache->used; 3317b3470b5dSDavid Sterba num_bytes = min(total, cache->length - byte_in_group); 3318606d1bf1SJosef Bacik if (alloc) { 3319606d1bf1SJosef Bacik old_val += num_bytes; 3320bf38be65SDavid Sterba cache->used = old_val; 3321606d1bf1SJosef Bacik cache->reserved -= num_bytes; 3322606d1bf1SJosef Bacik cache->space_info->bytes_reserved -= num_bytes; 3323606d1bf1SJosef Bacik cache->space_info->bytes_used += num_bytes; 3324606d1bf1SJosef Bacik cache->space_info->disk_used += num_bytes * factor; 3325606d1bf1SJosef Bacik spin_unlock(&cache->lock); 3326606d1bf1SJosef Bacik spin_unlock(&cache->space_info->lock); 3327606d1bf1SJosef Bacik } else { 3328606d1bf1SJosef Bacik old_val -= num_bytes; 3329bf38be65SDavid Sterba cache->used = old_val; 3330606d1bf1SJosef Bacik cache->pinned += num_bytes; 3331606d1bf1SJosef Bacik btrfs_space_info_update_bytes_pinned(info, 3332606d1bf1SJosef Bacik cache->space_info, num_bytes); 3333606d1bf1SJosef Bacik cache->space_info->bytes_used -= num_bytes; 3334606d1bf1SJosef Bacik cache->space_info->disk_used -= num_bytes * factor; 3335ac2f1e63SJosef Bacik 3336ac2f1e63SJosef Bacik reclaim = should_reclaim_block_group(cache, num_bytes); 3337606d1bf1SJosef Bacik spin_unlock(&cache->lock); 3338606d1bf1SJosef Bacik spin_unlock(&cache->space_info->lock); 3339606d1bf1SJosef Bacik 3340fe119a6eSNikolay Borisov set_extent_dirty(&trans->transaction->pinned_extents, 3341606d1bf1SJosef Bacik bytenr, bytenr + num_bytes - 1, 3342606d1bf1SJosef Bacik GFP_NOFS | __GFP_NOFAIL); 3343606d1bf1SJosef Bacik } 3344606d1bf1SJosef Bacik 3345606d1bf1SJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 3346606d1bf1SJosef Bacik if (list_empty(&cache->dirty_list)) { 3347606d1bf1SJosef Bacik list_add_tail(&cache->dirty_list, 3348606d1bf1SJosef Bacik &trans->transaction->dirty_bgs); 3349606d1bf1SJosef Bacik trans->delayed_ref_updates++; 3350606d1bf1SJosef Bacik btrfs_get_block_group(cache); 3351606d1bf1SJosef Bacik } 3352606d1bf1SJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 3353606d1bf1SJosef Bacik 3354606d1bf1SJosef Bacik /* 3355606d1bf1SJosef Bacik * No longer have used bytes in this block group, queue it for 3356606d1bf1SJosef Bacik * deletion. We do this after adding the block group to the 3357606d1bf1SJosef Bacik * dirty list to avoid races between cleaner kthread and space 3358606d1bf1SJosef Bacik * cache writeout. 
3359606d1bf1SJosef Bacik 		 */
33606e80d4f8SDennis Zhou 		if (!alloc && old_val == 0) {
33616e80d4f8SDennis Zhou 			if (!btrfs_test_opt(info, DISCARD_ASYNC))
3362606d1bf1SJosef Bacik 				btrfs_mark_bg_unused(cache);
3363ac2f1e63SJosef Bacik 		} else if (!alloc && reclaim) {
3364ac2f1e63SJosef Bacik 			btrfs_mark_bg_to_reclaim(cache);
33656e80d4f8SDennis Zhou 		}
3366606d1bf1SJosef Bacik 
3367606d1bf1SJosef Bacik 		btrfs_put_block_group(cache);
3368606d1bf1SJosef Bacik 		total -= num_bytes;
3369606d1bf1SJosef Bacik 		bytenr += num_bytes;
3370606d1bf1SJosef Bacik 	}
3371606d1bf1SJosef Bacik 
3372606d1bf1SJosef Bacik 	/* Modified block groups are accounted for in the delayed_refs_rsv. */
3373606d1bf1SJosef Bacik 	btrfs_update_delayed_refs_rsv(trans);
3374606d1bf1SJosef Bacik 	return ret;
3375606d1bf1SJosef Bacik }
3376606d1bf1SJosef Bacik 
3377606d1bf1SJosef Bacik /**
3378606d1bf1SJosef Bacik  * btrfs_add_reserved_bytes - update the block_group and space info counters
3379606d1bf1SJosef Bacik  * @cache:	The cache we are manipulating
3380606d1bf1SJosef Bacik  * @ram_bytes:	The number of bytes of file content; this will be the same
3381606d1bf1SJosef Bacik  *		as @num_bytes except for the compress path.
3382606d1bf1SJosef Bacik  * @num_bytes:	The number of bytes in question
3383606d1bf1SJosef Bacik  * @delalloc:	The blocks are allocated for the delalloc write
3384606d1bf1SJosef Bacik  *
3385606d1bf1SJosef Bacik  * This is called by the allocator when it reserves space. If this is a
3386606d1bf1SJosef Bacik  * reservation and the block group has become read only we cannot make the
3387606d1bf1SJosef Bacik  * reservation and return -EAGAIN, otherwise this function always succeeds.
3388606d1bf1SJosef Bacik  */
338932da5386SDavid Sterba int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3390606d1bf1SJosef Bacik 			     u64 ram_bytes, u64 num_bytes, int delalloc)
3391606d1bf1SJosef Bacik {
3392606d1bf1SJosef Bacik 	struct btrfs_space_info *space_info = cache->space_info;
3393606d1bf1SJosef Bacik 	int ret = 0;
3394606d1bf1SJosef Bacik 
3395606d1bf1SJosef Bacik 	spin_lock(&space_info->lock);
3396606d1bf1SJosef Bacik 	spin_lock(&cache->lock);
3397606d1bf1SJosef Bacik 	if (cache->ro) {
3398606d1bf1SJosef Bacik 		ret = -EAGAIN;
3399606d1bf1SJosef Bacik 	} else {
3400606d1bf1SJosef Bacik 		cache->reserved += num_bytes;
3401606d1bf1SJosef Bacik 		space_info->bytes_reserved += num_bytes;
3402a43c3835SJosef Bacik 		trace_btrfs_space_reservation(cache->fs_info, "space_info",
3403a43c3835SJosef Bacik 					      space_info->flags, num_bytes, 1);
3404606d1bf1SJosef Bacik 		btrfs_space_info_update_bytes_may_use(cache->fs_info,
3405606d1bf1SJosef Bacik 						      space_info, -ram_bytes);
3406606d1bf1SJosef Bacik 		if (delalloc)
3407606d1bf1SJosef Bacik 			cache->delalloc_bytes += num_bytes;
340899ffb43eSJosef Bacik 
340999ffb43eSJosef Bacik 		/*
341099ffb43eSJosef Bacik 		 * Compression can use less space than we reserved, so wake
341199ffb43eSJosef Bacik 		 * tickets if that happens
341299ffb43eSJosef Bacik 		 */
341399ffb43eSJosef Bacik 		if (num_bytes < ram_bytes)
341499ffb43eSJosef Bacik 			btrfs_try_granting_tickets(cache->fs_info, space_info);
3415606d1bf1SJosef Bacik 	}
3416606d1bf1SJosef Bacik 	spin_unlock(&cache->lock);
3417606d1bf1SJosef Bacik 	spin_unlock(&space_info->lock);
3418606d1bf1SJosef Bacik 	return ret;
3419606d1bf1SJosef Bacik }
3420606d1bf1SJosef Bacik 
3421606d1bf1SJosef Bacik /**
3422606d1bf1SJosef Bacik  * btrfs_free_reserved_bytes - update the block_group and space info counters
3423606d1bf1SJosef Bacik  * @cache:	The cache we are manipulating
3424606d1bf1SJosef Bacik  * @num_bytes:	The number of bytes in question
3425606d1bf1SJosef Bacik  * @delalloc:	The blocks are allocated for the delalloc write
3426606d1bf1SJosef Bacik  *
3427606d1bf1SJosef Bacik  * This is called by somebody who is freeing space that was never actually used
3428606d1bf1SJosef Bacik  * on disk. For example if you reserve some space for a new leaf in transaction
3429606d1bf1SJosef Bacik  * A and before transaction A commits you free that leaf, you call this to
3430606d1bf1SJosef Bacik  * drop the reservation.
3431606d1bf1SJosef Bacik  */
343232da5386SDavid Sterba void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
3433606d1bf1SJosef Bacik 			       u64 num_bytes, int delalloc)
3434606d1bf1SJosef Bacik {
3435606d1bf1SJosef Bacik 	struct btrfs_space_info *space_info = cache->space_info;
3436606d1bf1SJosef Bacik 
3437606d1bf1SJosef Bacik 	spin_lock(&space_info->lock);
3438606d1bf1SJosef Bacik 	spin_lock(&cache->lock);
3439606d1bf1SJosef Bacik 	if (cache->ro)
3440606d1bf1SJosef Bacik 		space_info->bytes_readonly += num_bytes;
3441606d1bf1SJosef Bacik 	cache->reserved -= num_bytes;
3442606d1bf1SJosef Bacik 	space_info->bytes_reserved -= num_bytes;
3443606d1bf1SJosef Bacik 	space_info->max_extent_size = 0;
3444606d1bf1SJosef Bacik 
3445606d1bf1SJosef Bacik 	if (delalloc)
3446606d1bf1SJosef Bacik 		cache->delalloc_bytes -= num_bytes;
3447606d1bf1SJosef Bacik 	spin_unlock(&cache->lock);
34483308234aSJosef Bacik 
34493308234aSJosef Bacik 	btrfs_try_granting_tickets(cache->fs_info, space_info);
3450606d1bf1SJosef Bacik 	spin_unlock(&space_info->lock);
3451606d1bf1SJosef Bacik }
345207730d87SJosef Bacik 
345307730d87SJosef Bacik static void force_metadata_allocation(struct btrfs_fs_info *info)
345407730d87SJosef Bacik {
345507730d87SJosef Bacik 	struct list_head *head = &info->space_info;
345607730d87SJosef Bacik 	struct btrfs_space_info *found;
345707730d87SJosef Bacik 
345872804905SJosef Bacik 	list_for_each_entry(found, head, list) {
345907730d87SJosef Bacik 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
346007730d87SJosef Bacik 			found->force_alloc = CHUNK_ALLOC_FORCE;
346107730d87SJosef Bacik 	}
346207730d87SJosef Bacik }
346307730d87SJosef Bacik 
346407730d87SJosef Bacik static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
346507730d87SJosef Bacik 			      struct btrfs_space_info *sinfo, int force)
346607730d87SJosef Bacik {
346707730d87SJosef Bacik 	u64 bytes_used = btrfs_space_info_used(sinfo, false);
346807730d87SJosef Bacik 	u64 thresh;
346907730d87SJosef Bacik 
347007730d87SJosef Bacik 	if (force == CHUNK_ALLOC_FORCE)
347107730d87SJosef Bacik 		return 1;
347207730d87SJosef Bacik 
347307730d87SJosef Bacik 	/*
347407730d87SJosef Bacik 	 * In limited mode, we want to have some free space up to
347507730d87SJosef Bacik 	 * about 1% of the FS size.
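	 * For example, assuming a 1 TiB filesystem: thresh below becomes
	 * max(SZ_64M, 1% of 1 TiB) = roughly 10 GiB, so in CHUNK_ALLOC_LIMITED
	 * mode we allocate a new chunk once this space_info has less than
	 * about 10 GiB free (div_factor_fine(x, 1) computes x * 1 / 100,
	 * i.e. 1%).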
347607730d87SJosef Bacik 	 */
347707730d87SJosef Bacik 	if (force == CHUNK_ALLOC_LIMITED) {
347807730d87SJosef Bacik 		thresh = btrfs_super_total_bytes(fs_info->super_copy);
347907730d87SJosef Bacik 		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
348007730d87SJosef Bacik 
348107730d87SJosef Bacik 		if (sinfo->total_bytes - bytes_used < thresh)
348207730d87SJosef Bacik 			return 1;
348307730d87SJosef Bacik 	}
348407730d87SJosef Bacik 
348507730d87SJosef Bacik 	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
348607730d87SJosef Bacik 		return 0;
348707730d87SJosef Bacik 	return 1;
348807730d87SJosef Bacik }
348907730d87SJosef Bacik 
349007730d87SJosef Bacik int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
349107730d87SJosef Bacik {
349207730d87SJosef Bacik 	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
349307730d87SJosef Bacik 
349407730d87SJosef Bacik 	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
349507730d87SJosef Bacik }
349607730d87SJosef Bacik 
3497820c363bSNaohiro Aota static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
349879bd3712SFilipe Manana {
349979bd3712SFilipe Manana 	struct btrfs_block_group *bg;
350079bd3712SFilipe Manana 	int ret;
350179bd3712SFilipe Manana 
350207730d87SJosef Bacik 	/*
350379bd3712SFilipe Manana 	 * Check if we have enough space in the system space info because we
350479bd3712SFilipe Manana 	 * will need to update device items in the chunk btree and insert a new
350579bd3712SFilipe Manana 	 * chunk item in the chunk btree as well. This will allocate a new
350679bd3712SFilipe Manana 	 * system block group if needed.
350779bd3712SFilipe Manana 	 */
350879bd3712SFilipe Manana 	check_system_chunk(trans, flags);
350979bd3712SFilipe Manana 
3510f6f39f7aSNikolay Borisov 	bg = btrfs_create_chunk(trans, flags);
351179bd3712SFilipe Manana 	if (IS_ERR(bg)) {
351279bd3712SFilipe Manana 		ret = PTR_ERR(bg);
351379bd3712SFilipe Manana 		goto out;
351479bd3712SFilipe Manana 	}
351579bd3712SFilipe Manana 
351679bd3712SFilipe Manana 	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
351779bd3712SFilipe Manana 	/*
351879bd3712SFilipe Manana 	 * Normally we are not expected to fail with -ENOSPC here, since we have
351979bd3712SFilipe Manana 	 * previously reserved space in the system space_info and allocated one
3520ecd84d54SFilipe Manana 	 * new system chunk if necessary. However there are three exceptions:
352179bd3712SFilipe Manana 	 *
352279bd3712SFilipe Manana 	 * 1) We may have enough free space in the system space_info but all the
352379bd3712SFilipe Manana 	 *    existing system block groups have a profile which cannot be used
352479bd3712SFilipe Manana 	 *    for extent allocation.
352579bd3712SFilipe Manana 	 *
352679bd3712SFilipe Manana 	 *    This happens when mounting in degraded mode. For example we have a
352779bd3712SFilipe Manana 	 *    RAID1 filesystem with 2 devices, lose one device and mount the fs
352879bd3712SFilipe Manana 	 *    using the other device in degraded mode. If we then allocate a chunk,
352979bd3712SFilipe Manana 	 *    we may have enough free space in the existing system space_info, but
353079bd3712SFilipe Manana 	 *    none of the block groups can be used for extent allocation since they
353179bd3712SFilipe Manana 	 *    have a RAID1 profile, and because we are in degraded mode with a
353279bd3712SFilipe Manana 	 *    single device, we are forced to allocate a new system chunk with a
353379bd3712SFilipe Manana 	 *    SINGLE profile. Making check_system_chunk() iterate over all system
353479bd3712SFilipe Manana 	 *    block groups and check if they have a usable profile and enough space
353579bd3712SFilipe Manana 	 *    can be slow on very large filesystems, so we tolerate the -ENOSPC and
353679bd3712SFilipe Manana 	 *    try again after forcing allocation of a new system chunk. Like this
353779bd3712SFilipe Manana 	 *    we avoid paying the cost of that search in normal circumstances, when
353879bd3712SFilipe Manana 	 *    we were not mounted in degraded mode;
353979bd3712SFilipe Manana 	 *
354079bd3712SFilipe Manana 	 * 2) We had enough free space in the system space_info, and one suitable
354179bd3712SFilipe Manana 	 *    block group to allocate from when we called check_system_chunk()
354279bd3712SFilipe Manana 	 *    above. However right after we called it, the only system block group
354379bd3712SFilipe Manana 	 *    with enough free space got turned into RO mode by a running scrub,
354479bd3712SFilipe Manana 	 *    and in this case we have to allocate a new one and retry. We only
354579bd3712SFilipe Manana 	 *    need to do this allocation and retry once, since we have a transaction
3546ecd84d54SFilipe Manana 	 *    handle and scrub uses the commit root to search for block groups;
3547ecd84d54SFilipe Manana 	 *
3548ecd84d54SFilipe Manana 	 * 3) We had one system block group with enough free space when we called
3549ecd84d54SFilipe Manana 	 *    check_system_chunk(), but after that, right before we tried to
3550ecd84d54SFilipe Manana 	 *    allocate the last extent buffer we needed, a discard operation came
3551ecd84d54SFilipe Manana 	 *    in and it temporarily removed the last free space entry from the
3552ecd84d54SFilipe Manana 	 *    block group (discard removes a free space entry, discards it, and
3553ecd84d54SFilipe Manana 	 *    then adds back the entry to the block group cache).
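	 *
	 * In all three cases the -ENOSPC is handled by the retry logic just
	 * below: force the allocation of a new system chunk, add its chunk
	 * item, and then retry adding the chunk item of the block group we
	 * first created.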
355479bd3712SFilipe Manana 	 */
355579bd3712SFilipe Manana 	if (ret == -ENOSPC) {
355679bd3712SFilipe Manana 		const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
355779bd3712SFilipe Manana 		struct btrfs_block_group *sys_bg;
355879bd3712SFilipe Manana 
3559f6f39f7aSNikolay Borisov 		sys_bg = btrfs_create_chunk(trans, sys_flags);
356079bd3712SFilipe Manana 		if (IS_ERR(sys_bg)) {
356179bd3712SFilipe Manana 			ret = PTR_ERR(sys_bg);
356279bd3712SFilipe Manana 			btrfs_abort_transaction(trans, ret);
356379bd3712SFilipe Manana 			goto out;
356479bd3712SFilipe Manana 		}
356579bd3712SFilipe Manana 
356679bd3712SFilipe Manana 		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
356779bd3712SFilipe Manana 		if (ret) {
356879bd3712SFilipe Manana 			btrfs_abort_transaction(trans, ret);
356979bd3712SFilipe Manana 			goto out;
357079bd3712SFilipe Manana 		}
357179bd3712SFilipe Manana 
357279bd3712SFilipe Manana 		ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
357379bd3712SFilipe Manana 		if (ret) {
357479bd3712SFilipe Manana 			btrfs_abort_transaction(trans, ret);
357579bd3712SFilipe Manana 			goto out;
357679bd3712SFilipe Manana 		}
357779bd3712SFilipe Manana 	} else if (ret) {
357879bd3712SFilipe Manana 		btrfs_abort_transaction(trans, ret);
357979bd3712SFilipe Manana 		goto out;
358079bd3712SFilipe Manana 	}
358179bd3712SFilipe Manana out:
358279bd3712SFilipe Manana 	btrfs_trans_release_chunk_metadata(trans);
358379bd3712SFilipe Manana 
3584820c363bSNaohiro Aota 	if (ret)
3585820c363bSNaohiro Aota 		return ERR_PTR(ret);
3586820c363bSNaohiro Aota 
3587820c363bSNaohiro Aota 	btrfs_get_block_group(bg);
3588820c363bSNaohiro Aota 	return bg;
358979bd3712SFilipe Manana }
359079bd3712SFilipe Manana 
359179bd3712SFilipe Manana /*
359279bd3712SFilipe Manana  * Chunk allocation is done in 2 phases:
359379bd3712SFilipe Manana  *
359479bd3712SFilipe Manana  * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
359579bd3712SFilipe Manana  *    the chunk, the chunk mapping, create its block group and add the items
359679bd3712SFilipe Manana  *    that belong in the chunk btree to it - more specifically, we need to
359779bd3712SFilipe Manana  *    update device items in the chunk btree and add a new chunk item to it.
359879bd3712SFilipe Manana  *
359979bd3712SFilipe Manana  * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
360079bd3712SFilipe Manana  *    group item to the extent btree and the device extent items to the devices
360179bd3712SFilipe Manana  *    btree.
360279bd3712SFilipe Manana  *
360379bd3712SFilipe Manana  * This is done to prevent deadlocks. For example when COWing a node from the
360479bd3712SFilipe Manana  * extent btree we are holding a write lock on the node's parent and if we
360579bd3712SFilipe Manana  * trigger chunk allocation and attempt to insert the new block group item
360679bd3712SFilipe Manana  * in the extent btree right away, we could deadlock because the path for the
360779bd3712SFilipe Manana  * insertion can include that parent node. At first glance it seems impossible
360879bd3712SFilipe Manana  * to trigger chunk allocation after starting a transaction since tasks should
360979bd3712SFilipe Manana  * reserve enough transaction units (metadata space), however while that is true
361079bd3712SFilipe Manana  * most of the time, chunk allocation may still be triggered for several reasons:
361179bd3712SFilipe Manana  *
361279bd3712SFilipe Manana  * 1) When reserving metadata, we check if there is enough free space in the
361379bd3712SFilipe Manana  *    metadata space_info and therefore don't trigger allocation of a new chunk.
361479bd3712SFilipe Manana  *    However later when the task actually tries to COW an extent buffer from
361579bd3712SFilipe Manana  *    the extent btree or from the device btree for example, it is forced to
361679bd3712SFilipe Manana  *    allocate a new block group (chunk) because the only one that had enough
361779bd3712SFilipe Manana  *    free space was just turned to RO mode by a running scrub for example (or
361879bd3712SFilipe Manana  *    device replace, block group reclaim thread, etc), so we cannot use it
361979bd3712SFilipe Manana  *    for allocating an extent and end up being forced to allocate a new one;
362079bd3712SFilipe Manana  *
362179bd3712SFilipe Manana  * 2) Because we only check that the metadata space_info has enough free bytes,
362279bd3712SFilipe Manana  *    we end up not allocating a new metadata chunk in that case. However if
362379bd3712SFilipe Manana  *    the filesystem was mounted in degraded mode, none of the existing block
362479bd3712SFilipe Manana  *    groups might be suitable for extent allocation due to their incompatible
362579bd3712SFilipe Manana  *    profile (e.g. mounting a filesystem with 2 devices, where all block groups
362679bd3712SFilipe Manana  *    use a RAID1 profile, in degraded mode using a single device). In this case
362779bd3712SFilipe Manana  *    when the task attempts to COW some extent buffer of the extent btree for
362879bd3712SFilipe Manana  *    example, it will trigger allocation of a new metadata block group with a
362979bd3712SFilipe Manana  *    suitable profile (SINGLE profile in the example of the degraded mount of
363079bd3712SFilipe Manana  *    the RAID1 filesystem);
363179bd3712SFilipe Manana  *
363279bd3712SFilipe Manana  * 3) The task has reserved enough transaction units / metadata space, but when
363379bd3712SFilipe Manana  *    it attempts to COW an extent buffer from the extent or device btree for
363479bd3712SFilipe Manana  *    example, it does not find any free extent in any metadata block group,
363579bd3712SFilipe Manana  *    therefore it is forced to try to allocate a new metadata block group.
363679bd3712SFilipe Manana  *    This is because some other task allocated all available extents in the
363779bd3712SFilipe Manana  *    meanwhile - this typically happens with tasks that don't reserve space
363879bd3712SFilipe Manana  *    properly, either intentionally or as a bug. One example where this is
363979bd3712SFilipe Manana  *    done intentionally is fsync, as it does not reserve any transaction units
364079bd3712SFilipe Manana  *    and ends up allocating a variable number of metadata extents for log
3641ecd84d54SFilipe Manana  *    tree extent buffers;
3642ecd84d54SFilipe Manana  *
3643ecd84d54SFilipe Manana  * 4) The task has reserved enough transaction units / metadata space, but right
3644ecd84d54SFilipe Manana  *    before it tries to allocate the last extent buffer it needs, a discard
3645ecd84d54SFilipe Manana  *    operation comes in and, temporarily, removes the last free space entry from
3646ecd84d54SFilipe Manana  *    the only metadata block group that had free space (discard starts by
3647ecd84d54SFilipe Manana  *    removing a free space entry from a block group, then does the discard
3648ecd84d54SFilipe Manana  *    operation and, once it's done, it adds back the free space entry to the
3649ecd84d54SFilipe Manana  *    block group).
365079bd3712SFilipe Manana  *
365179bd3712SFilipe Manana  * We also need this 2-phase setup when adding a device to a filesystem with
365279bd3712SFilipe Manana  * a seed device - we must create new metadata and system chunks without adding
365379bd3712SFilipe Manana  * any of the block group items to the chunk, extent and device btrees. If we
365479bd3712SFilipe Manana  * did not do it this way, we would get ENOSPC when attempting to update those
365579bd3712SFilipe Manana  * btrees, since all the chunks from the seed device are read-only.
365679bd3712SFilipe Manana  *
365779bd3712SFilipe Manana  * Phase 1 does the updates and insertions to the chunk btree because if we had
365879bd3712SFilipe Manana  * it done in phase 2 and have a thundering herd of tasks allocating chunks in
365979bd3712SFilipe Manana  * parallel, we risk having too many system chunks allocated by many tasks if
366079bd3712SFilipe Manana  * many tasks reach phase 1 without the previous ones completing phase 2. In the
366179bd3712SFilipe Manana  * extreme case this leads to exhaustion of the system chunk array in the
366279bd3712SFilipe Manana  * superblock. This is easier to trigger if using a btree node/leaf size of 64K
366379bd3712SFilipe Manana  * and with RAID filesystems (so we have more device items in the chunk btree).
366479bd3712SFilipe Manana  * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
366579bd3712SFilipe Manana  * the system chunk array due to concurrent allocations") provides more details.
366679bd3712SFilipe Manana  *
36672bb2e00eSFilipe Manana  * Allocation of system chunks does not happen through this function. A task that
36682bb2e00eSFilipe Manana  * needs to update the chunk btree (the only btree that uses system chunks), must
36692bb2e00eSFilipe Manana  * preallocate chunk space by calling either check_system_chunk() or
36702bb2e00eSFilipe Manana  * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
36712bb2e00eSFilipe Manana  * metadata chunk or when removing a chunk, while the latter is used before doing
36722bb2e00eSFilipe Manana  * a modification to the chunk btree - use cases for the latter are adding,
36732bb2e00eSFilipe Manana  * removing and resizing a device as well as relocation of a system chunk.
36742bb2e00eSFilipe Manana  * See the comment below for more details.
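 *
 * As a rough sketch (not exhaustive), the two phases for a data or metadata
 * chunk look like this, with the names used in this file:
 *
 *   btrfs_chunk_alloc()                      phase 1
 *     do_chunk_alloc()
 *       check_system_chunk()                 reserve system space
 *       btrfs_create_chunk()                 device extents + chunk mapping
 *       btrfs_chunk_alloc_add_chunk_item()   chunk btree updates
 *   ...
 *   btrfs_create_pending_block_groups()      phase 2, run when the transaction
 *                                            handle is ended or the
 *                                            transaction is committed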
367579bd3712SFilipe Manana  *
367679bd3712SFilipe Manana  * The reservation of system space, done through check_system_chunk(), as well
367779bd3712SFilipe Manana  * as all the updates and insertions into the chunk btree must be done while
367879bd3712SFilipe Manana  * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
367979bd3712SFilipe Manana  * an extent buffer from the chunk btree we never trigger allocation of a new
368079bd3712SFilipe Manana  * system chunk, which would result in a deadlock (trying to lock twice an
368179bd3712SFilipe Manana  * extent buffer of the chunk btree, first time before triggering the chunk
368279bd3712SFilipe Manana  * allocation and the second time during chunk allocation while attempting to
368379bd3712SFilipe Manana  * update the chunk btree). The system chunk array is also updated while holding
368479bd3712SFilipe Manana  * that mutex. The same logic applies to removing chunks - we must reserve system
368579bd3712SFilipe Manana  * space, update the chunk btree and the system chunk array in the superblock
368679bd3712SFilipe Manana  * while holding fs_info->chunk_mutex.
368779bd3712SFilipe Manana  *
368879bd3712SFilipe Manana  * This function, btrfs_chunk_alloc(), belongs to phase 1.
368979bd3712SFilipe Manana  *
369079bd3712SFilipe Manana  * If @force is CHUNK_ALLOC_FORCE:
369107730d87SJosef Bacik  *    - return 1 if it successfully allocates a chunk,
369207730d87SJosef Bacik  *    - return errors including -ENOSPC otherwise.
369379bd3712SFilipe Manana  * If @force is NOT CHUNK_ALLOC_FORCE:
369407730d87SJosef Bacik  *    - return 0 if it doesn't need to allocate a new chunk,
369507730d87SJosef Bacik  *    - return 1 if it successfully allocates a chunk,
369607730d87SJosef Bacik  *    - return errors including -ENOSPC otherwise.
369707730d87SJosef Bacik  */
369807730d87SJosef Bacik int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
369907730d87SJosef Bacik 		      enum btrfs_chunk_alloc_enum force)
370007730d87SJosef Bacik {
370107730d87SJosef Bacik 	struct btrfs_fs_info *fs_info = trans->fs_info;
370207730d87SJosef Bacik 	struct btrfs_space_info *space_info;
3703820c363bSNaohiro Aota 	struct btrfs_block_group *ret_bg;
370407730d87SJosef Bacik 	bool wait_for_alloc = false;
370507730d87SJosef Bacik 	bool should_alloc = false;
3706760e69c4SNaohiro Aota 	bool from_extent_allocation = false;
370707730d87SJosef Bacik 	int ret = 0;
370807730d87SJosef Bacik 
3709760e69c4SNaohiro Aota 	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
3710760e69c4SNaohiro Aota 		from_extent_allocation = true;
3711760e69c4SNaohiro Aota 		force = CHUNK_ALLOC_FORCE;
3712760e69c4SNaohiro Aota 	}
3713760e69c4SNaohiro Aota 
371407730d87SJosef Bacik 	/* Don't re-enter if we're already allocating a chunk */
371507730d87SJosef Bacik 	if (trans->allocating_chunk)
371607730d87SJosef Bacik 		return -ENOSPC;
371779bd3712SFilipe Manana 	/*
37182bb2e00eSFilipe Manana 	 * Allocation of system chunks cannot happen through this path, as we
37192bb2e00eSFilipe Manana 	 * could end up in a deadlock if we are allocating a data or metadata
37202bb2e00eSFilipe Manana 	 * chunk and there is another task modifying the chunk btree.
37212bb2e00eSFilipe Manana 	 *
37222bb2e00eSFilipe Manana 	 * This is because, while holding the chunk mutex, we will attempt to
37232bb2e00eSFilipe Manana 	 * add the new chunk item to the chunk btree or update an existing
37242bb2e00eSFilipe Manana 	 * device item in the chunk btree, while the other task modifying the
37252bb2e00eSFilipe Manana 	 * chunk btree is attempting to COW an extent buffer on which it holds
37262bb2e00eSFilipe Manana 	 * a lock, as well as on its parent - if the COW operation triggers a
37272bb2e00eSFilipe Manana 	 * system chunk allocation, then we can deadlock because we are holding
37282bb2e00eSFilipe Manana 	 * the chunk mutex and we may need to access that extent buffer or its
37292bb2e00eSFilipe Manana 	 * parent in order to add the chunk item or update a device item.
37302bb2e00eSFilipe Manana 	 *
37312bb2e00eSFilipe Manana 	 * Tasks that want to modify the chunk tree should reserve system space
37322bb2e00eSFilipe Manana 	 * before updating the chunk btree, by calling either
37332bb2e00eSFilipe Manana 	 * btrfs_reserve_chunk_metadata() or check_system_chunk().
37342bb2e00eSFilipe Manana 	 * It's possible that after a task reserves the space, it still ends up
37352bb2e00eSFilipe Manana 	 * here - this happens in the cases described above at do_chunk_alloc().
37362bb2e00eSFilipe Manana 	 * The task will have to either retry or fail.
373779bd3712SFilipe Manana 	 */
37382bb2e00eSFilipe Manana 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
373979bd3712SFilipe Manana 		return -ENOSPC;
374007730d87SJosef Bacik 
374107730d87SJosef Bacik 	space_info = btrfs_find_space_info(fs_info, flags);
374207730d87SJosef Bacik 	ASSERT(space_info);
374307730d87SJosef Bacik 
374407730d87SJosef Bacik 	do {
374507730d87SJosef Bacik 		spin_lock(&space_info->lock);
374607730d87SJosef Bacik 		if (force < space_info->force_alloc)
374707730d87SJosef Bacik 			force = space_info->force_alloc;
374807730d87SJosef Bacik 		should_alloc = should_alloc_chunk(fs_info, space_info, force);
374907730d87SJosef Bacik 		if (space_info->full) {
375007730d87SJosef Bacik 			/* No more free physical space */
375107730d87SJosef Bacik 			if (should_alloc)
375207730d87SJosef Bacik 				ret = -ENOSPC;
375307730d87SJosef Bacik 			else
375407730d87SJosef Bacik 				ret = 0;
375507730d87SJosef Bacik 			spin_unlock(&space_info->lock);
375607730d87SJosef Bacik 			return ret;
375707730d87SJosef Bacik 		} else if (!should_alloc) {
375807730d87SJosef Bacik 			spin_unlock(&space_info->lock);
375907730d87SJosef Bacik 			return 0;
376007730d87SJosef Bacik 		} else if (space_info->chunk_alloc) {
376107730d87SJosef Bacik 			/*
376207730d87SJosef Bacik 			 * Someone is already allocating, so we need to block
376307730d87SJosef Bacik 			 * until this someone is finished and then loop to
376407730d87SJosef Bacik 			 * recheck if we should continue with our allocation
376507730d87SJosef Bacik 			 * attempt.
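			 * (The mutex_lock()/mutex_unlock() pair on
			 * fs_info->chunk_mutex below implements this blocking:
			 * we wait for the current holder to release the mutex
			 * and then loop to recheck.)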
376607730d87SJosef Bacik 			 */
376707730d87SJosef Bacik 			wait_for_alloc = true;
37681314ca78SJosef Bacik 			force = CHUNK_ALLOC_NO_FORCE;
376907730d87SJosef Bacik 			spin_unlock(&space_info->lock);
377007730d87SJosef Bacik 			mutex_lock(&fs_info->chunk_mutex);
377107730d87SJosef Bacik 			mutex_unlock(&fs_info->chunk_mutex);
377207730d87SJosef Bacik 		} else {
377307730d87SJosef Bacik 			/* Proceed with allocation */
377407730d87SJosef Bacik 			space_info->chunk_alloc = 1;
377507730d87SJosef Bacik 			wait_for_alloc = false;
377607730d87SJosef Bacik 			spin_unlock(&space_info->lock);
377707730d87SJosef Bacik 		}
377807730d87SJosef Bacik 
377907730d87SJosef Bacik 		cond_resched();
378007730d87SJosef Bacik 	} while (wait_for_alloc);
378107730d87SJosef Bacik 
378207730d87SJosef Bacik 	mutex_lock(&fs_info->chunk_mutex);
378307730d87SJosef Bacik 	trans->allocating_chunk = true;
378407730d87SJosef Bacik 
378507730d87SJosef Bacik 	/*
378607730d87SJosef Bacik 	 * If we have mixed data/metadata chunks we want to make sure we keep
378707730d87SJosef Bacik 	 * allocating mixed chunks instead of individual chunks.
378807730d87SJosef Bacik 	 */
378907730d87SJosef Bacik 	if (btrfs_mixed_space_info(space_info))
379007730d87SJosef Bacik 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
379107730d87SJosef Bacik 
379207730d87SJosef Bacik 	/*
379307730d87SJosef Bacik 	 * If we're doing a data chunk, go ahead and make sure that
379407730d87SJosef Bacik 	 * we keep a reasonable number of metadata chunks allocated in the
379507730d87SJosef Bacik 	 * FS as well.
379607730d87SJosef Bacik 	 */
379707730d87SJosef Bacik 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
379807730d87SJosef Bacik 		fs_info->data_chunk_allocations++;
379907730d87SJosef Bacik 		if (!(fs_info->data_chunk_allocations %
380007730d87SJosef Bacik 		      fs_info->metadata_ratio))
380107730d87SJosef Bacik 			force_metadata_allocation(fs_info);
380207730d87SJosef Bacik 	}
380307730d87SJosef Bacik 
3804820c363bSNaohiro Aota 	ret_bg = do_chunk_alloc(trans, flags);
380507730d87SJosef Bacik 	trans->allocating_chunk = false;
380607730d87SJosef Bacik 
3807760e69c4SNaohiro Aota 	if (IS_ERR(ret_bg)) {
3808820c363bSNaohiro Aota 		ret = PTR_ERR(ret_bg);
3809760e69c4SNaohiro Aota 	} else if (from_extent_allocation) {
3810760e69c4SNaohiro Aota 		/*
3811760e69c4SNaohiro Aota 		 * New block group is likely to be used soon. Try to activate
3812760e69c4SNaohiro Aota 		 * it now. Failure is OK for now.
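		 * (Activation is only meaningful on zoned filesystems;
		 * btrfs_zone_activate() is expected to simply return true on
		 * regular, non-zoned ones.)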
3813760e69c4SNaohiro Aota 		 */
3814760e69c4SNaohiro Aota 		btrfs_zone_activate(ret_bg);
3815760e69c4SNaohiro Aota 	}
3816760e69c4SNaohiro Aota 
3817760e69c4SNaohiro Aota 	if (!ret)
3818820c363bSNaohiro Aota 		btrfs_put_block_group(ret_bg);
3819820c363bSNaohiro Aota 
382007730d87SJosef Bacik 	spin_lock(&space_info->lock);
382107730d87SJosef Bacik 	if (ret < 0) {
382207730d87SJosef Bacik 		if (ret == -ENOSPC)
382307730d87SJosef Bacik 			space_info->full = 1;
382407730d87SJosef Bacik 		else
382507730d87SJosef Bacik 			goto out;
382607730d87SJosef Bacik 	} else {
382707730d87SJosef Bacik 		ret = 1;
382807730d87SJosef Bacik 		space_info->max_extent_size = 0;
382907730d87SJosef Bacik 	}
383007730d87SJosef Bacik 
383107730d87SJosef Bacik 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
383207730d87SJosef Bacik out:
383307730d87SJosef Bacik 	space_info->chunk_alloc = 0;
383407730d87SJosef Bacik 	spin_unlock(&space_info->lock);
383507730d87SJosef Bacik 	mutex_unlock(&fs_info->chunk_mutex);
383607730d87SJosef Bacik 
383707730d87SJosef Bacik 	return ret;
383807730d87SJosef Bacik }
383907730d87SJosef Bacik 
384007730d87SJosef Bacik static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
384107730d87SJosef Bacik {
384207730d87SJosef Bacik 	u64 num_dev;
384307730d87SJosef Bacik 
384407730d87SJosef Bacik 	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
384507730d87SJosef Bacik 	if (!num_dev)
384607730d87SJosef Bacik 		num_dev = fs_info->fs_devices->rw_devices;
384707730d87SJosef Bacik 
384807730d87SJosef Bacik 	return num_dev;
384907730d87SJosef Bacik }
385007730d87SJosef Bacik 
38512bb2e00eSFilipe Manana static void reserve_chunk_space(struct btrfs_trans_handle *trans,
38522bb2e00eSFilipe Manana 				u64 bytes,
38532bb2e00eSFilipe Manana 				u64 type)
385407730d87SJosef Bacik {
385507730d87SJosef Bacik 	struct btrfs_fs_info *fs_info = trans->fs_info;
385607730d87SJosef Bacik 	struct btrfs_space_info *info;
385707730d87SJosef Bacik 	u64 left;
385807730d87SJosef Bacik 	int ret = 0;
385907730d87SJosef Bacik 
386007730d87SJosef Bacik 	/*
386107730d87SJosef Bacik 	 * Needed because we can end up allocating a system chunk and need an
386207730d87SJosef Bacik 	 * atomic and race-free space reservation in the chunk block reserve.
386307730d87SJosef Bacik 	 */
386407730d87SJosef Bacik 	lockdep_assert_held(&fs_info->chunk_mutex);
386507730d87SJosef Bacik 
386607730d87SJosef Bacik 	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
386707730d87SJosef Bacik 	spin_lock(&info->lock);
386807730d87SJosef Bacik 	left = info->total_bytes - btrfs_space_info_used(info, true);
386907730d87SJosef Bacik 	spin_unlock(&info->lock);
387007730d87SJosef Bacik 
38712bb2e00eSFilipe Manana 	if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
387207730d87SJosef Bacik 		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
38732bb2e00eSFilipe Manana 			   left, bytes, type);
387407730d87SJosef Bacik 		btrfs_dump_space_info(fs_info, info, 0, 0);
387507730d87SJosef Bacik 	}
387607730d87SJosef Bacik 
38772bb2e00eSFilipe Manana 	if (left < bytes) {
387807730d87SJosef Bacik 		u64 flags = btrfs_system_alloc_profile(fs_info);
387979bd3712SFilipe Manana 		struct btrfs_block_group *bg;
388007730d87SJosef Bacik 
388107730d87SJosef Bacik 		/*
388207730d87SJosef Bacik 		 * Ignore failure to create system chunk. We might end up not
388307730d87SJosef Bacik 		 * needing it, as we might not need to COW all nodes/leafs from
388407730d87SJosef Bacik 		 * the paths we visit in the chunk tree (they were already COWed
388507730d87SJosef Bacik 		 * or created in the current transaction for example).
388607730d87SJosef Bacik 		 */
3887f6f39f7aSNikolay Borisov 		bg = btrfs_create_chunk(trans, flags);
388879bd3712SFilipe Manana 		if (IS_ERR(bg)) {
388979bd3712SFilipe Manana 			ret = PTR_ERR(bg);
38902bb2e00eSFilipe Manana 		} else {
389179bd3712SFilipe Manana 			/*
3892b6a98021SNaohiro Aota 			 * We have a new chunk. We also need to activate it for
3893b6a98021SNaohiro Aota 			 * a zoned filesystem.
3894b6a98021SNaohiro Aota 			 */
3895b6a98021SNaohiro Aota 			ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
3896b6a98021SNaohiro Aota 			if (ret < 0)
3897b6a98021SNaohiro Aota 				return;
3898b6a98021SNaohiro Aota 
3899b6a98021SNaohiro Aota 			/*
390079bd3712SFilipe Manana 			 * If we fail to add the chunk item here, we end up
390179bd3712SFilipe Manana 			 * trying again at phase 2 of chunk allocation, at
390279bd3712SFilipe Manana 			 * btrfs_create_pending_block_groups(). So ignore
39032bb2e00eSFilipe Manana 			 * any error here. An ENOSPC here could happen, due to
39042bb2e00eSFilipe Manana 			 * the cases described at do_chunk_alloc() - the system
39052bb2e00eSFilipe Manana 			 * block group we just created was just turned into RO
39062bb2e00eSFilipe Manana 			 * mode by a scrub for example, or a running discard
39072bb2e00eSFilipe Manana 			 * temporarily removed its free space entries, etc.
390879bd3712SFilipe Manana 			 */
390979bd3712SFilipe Manana 			btrfs_chunk_alloc_add_chunk_item(trans, bg);
391079bd3712SFilipe Manana 		}
391107730d87SJosef Bacik 	}
391207730d87SJosef Bacik 
391307730d87SJosef Bacik 	if (!ret) {
39149270501cSJosef Bacik 		ret = btrfs_block_rsv_add(fs_info,
391507730d87SJosef Bacik 					  &fs_info->chunk_block_rsv,
39162bb2e00eSFilipe Manana 					  bytes, BTRFS_RESERVE_NO_FLUSH);
39171cb3db1cSFilipe Manana 		if (!ret)
39182bb2e00eSFilipe Manana 			trans->chunk_bytes_reserved += bytes;
391907730d87SJosef Bacik 	}
392007730d87SJosef Bacik }
392107730d87SJosef Bacik 
39222bb2e00eSFilipe Manana /*
39232bb2e00eSFilipe Manana  * Reserve space in the system space for allocating or removing a chunk.
39242bb2e00eSFilipe Manana  * The caller must be holding fs_info->chunk_mutex.
39252bb2e00eSFilipe Manana  */
39262bb2e00eSFilipe Manana void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
39272bb2e00eSFilipe Manana {
39282bb2e00eSFilipe Manana 	struct btrfs_fs_info *fs_info = trans->fs_info;
39292bb2e00eSFilipe Manana 	const u64 num_devs = get_profile_num_devs(fs_info, type);
39302bb2e00eSFilipe Manana 	u64 bytes;
39312bb2e00eSFilipe Manana 
39322bb2e00eSFilipe Manana 	/* num_devs device items to update and 1 chunk item to add or remove. */
39332bb2e00eSFilipe Manana 	bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
39342bb2e00eSFilipe Manana 		btrfs_calc_insert_metadata_size(fs_info, 1);
39352bb2e00eSFilipe Manana 
39362bb2e00eSFilipe Manana 	reserve_chunk_space(trans, bytes, type);
39372bb2e00eSFilipe Manana }
39382bb2e00eSFilipe Manana 
39392bb2e00eSFilipe Manana /*
39402bb2e00eSFilipe Manana  * Reserve space in the system space, if needed, for doing a modification to the
39412bb2e00eSFilipe Manana  * chunk btree.
39422bb2e00eSFilipe Manana  *
39432bb2e00eSFilipe Manana  * @trans:		A transaction handle.
39442bb2e00eSFilipe Manana  * @is_item_insertion:	Indicate if the modification is for inserting a new item
39452bb2e00eSFilipe Manana  *			in the chunk btree or if it's for the deletion or update
39462bb2e00eSFilipe Manana  *			of an existing item.
39472bb2e00eSFilipe Manana  *
39482bb2e00eSFilipe Manana  * This is used in a context where we need to update the chunk btree outside
39492bb2e00eSFilipe Manana  * block group allocation and removal, to avoid a deadlock with a concurrent
39502bb2e00eSFilipe Manana  * task that is allocating a metadata or data block group and therefore needs to
39512bb2e00eSFilipe Manana  * update the chunk btree while holding the chunk mutex. After the update to the
39522bb2e00eSFilipe Manana  * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called.
39542bb2e00eSFilipe Manana  */
39552bb2e00eSFilipe Manana void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
39562bb2e00eSFilipe Manana 				  bool is_item_insertion)
39572bb2e00eSFilipe Manana {
39582bb2e00eSFilipe Manana 	struct btrfs_fs_info *fs_info = trans->fs_info;
39592bb2e00eSFilipe Manana 	u64 bytes;
39602bb2e00eSFilipe Manana 
39612bb2e00eSFilipe Manana 	if (is_item_insertion)
39622bb2e00eSFilipe Manana 		bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
39632bb2e00eSFilipe Manana 	else
39642bb2e00eSFilipe Manana 		bytes = btrfs_calc_metadata_size(fs_info, 1);
39652bb2e00eSFilipe Manana 
39662bb2e00eSFilipe Manana 	mutex_lock(&fs_info->chunk_mutex);
39672bb2e00eSFilipe Manana 	reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
39682bb2e00eSFilipe Manana 	mutex_unlock(&fs_info->chunk_mutex);
39692bb2e00eSFilipe Manana }
39702bb2e00eSFilipe Manana 
39713e43c279SJosef Bacik void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
39723e43c279SJosef Bacik {
397332da5386SDavid Sterba 	struct btrfs_block_group *block_group;
39743e43c279SJosef Bacik 	u64 last = 0;
39753e43c279SJosef Bacik 
39763e43c279SJosef Bacik 	while (1) {
39773e43c279SJosef Bacik 		struct inode *inode;
39783e43c279SJosef Bacik 
39793e43c279SJosef Bacik 		block_group = btrfs_lookup_first_block_group(info, last);
39803e43c279SJosef Bacik 		while (block_group) {
39813e43c279SJosef Bacik 			btrfs_wait_block_group_cache_done(block_group);
39823e43c279SJosef Bacik 			spin_lock(&block_group->lock);
39833e43c279SJosef Bacik 			if (block_group->iref)
39843e43c279SJosef Bacik 				break;
39853e43c279SJosef Bacik 			spin_unlock(&block_group->lock);
39863e43c279SJosef Bacik 			block_group = btrfs_next_block_group(block_group);
39873e43c279SJosef Bacik 		}
39883e43c279SJosef Bacik 		if (!block_group) {
39893e43c279SJosef Bacik 			if (last == 0)
39903e43c279SJosef Bacik 				break;
39913e43c279SJosef Bacik 			last = 0;
39923e43c279SJosef Bacik 			continue;
39933e43c279SJosef Bacik 		}
39943e43c279SJosef Bacik 
39953e43c279SJosef Bacik 		inode = block_group->inode;
39963e43c279SJosef Bacik 		block_group->iref = 0;
39973e43c279SJosef Bacik 		block_group->inode = NULL;
39983e43c279SJosef Bacik 		spin_unlock(&block_group->lock);
39993e43c279SJosef Bacik 		ASSERT(block_group->io_ctl.inode == NULL);
40003e43c279SJosef Bacik 		iput(inode);
4001b3470b5dSDavid Sterba 		last = block_group->start + block_group->length;
40023e43c279SJosef Bacik 		btrfs_put_block_group(block_group);
40033e43c279SJosef Bacik 	}
40043e43c279SJosef Bacik }
40053e43c279SJosef Bacik 
40063e43c279SJosef Bacik /*
40073e43c279SJosef Bacik  * Must be called only after stopping all workers, since we could have block
40083e43c279SJosef Bacik  * group caching kthreads running, and therefore they could race with us if we
40093e43c279SJosef Bacik  * freed the block groups before stopping them.
40103e43c279SJosef Bacik  */
40113e43c279SJosef Bacik int btrfs_free_block_groups(struct btrfs_fs_info *info)
40123e43c279SJosef Bacik {
401332da5386SDavid Sterba 	struct btrfs_block_group *block_group;
40143e43c279SJosef Bacik 	struct btrfs_space_info *space_info;
40153e43c279SJosef Bacik 	struct btrfs_caching_control *caching_ctl;
40163e43c279SJosef Bacik 	struct rb_node *n;
40173e43c279SJosef Bacik 
401816b0c258SFilipe Manana 	write_lock(&info->block_group_cache_lock);
40193e43c279SJosef Bacik 	while (!list_empty(&info->caching_block_groups)) {
40203e43c279SJosef Bacik 		caching_ctl = list_entry(info->caching_block_groups.next,
40213e43c279SJosef Bacik 					 struct btrfs_caching_control, list);
40223e43c279SJosef Bacik 		list_del(&caching_ctl->list);
40233e43c279SJosef Bacik 		btrfs_put_caching_control(caching_ctl);
40243e43c279SJosef Bacik 	}
402516b0c258SFilipe Manana 	write_unlock(&info->block_group_cache_lock);
40263e43c279SJosef Bacik 
40273e43c279SJosef Bacik 	spin_lock(&info->unused_bgs_lock);
40283e43c279SJosef Bacik 	while (!list_empty(&info->unused_bgs)) {
40293e43c279SJosef Bacik 		block_group = list_first_entry(&info->unused_bgs,
403032da5386SDavid Sterba 					       struct btrfs_block_group,
40313e43c279SJosef Bacik 					       bg_list);
40323e43c279SJosef Bacik 		list_del_init(&block_group->bg_list);
40333e43c279SJosef Bacik 		btrfs_put_block_group(block_group);
40343e43c279SJosef Bacik 	}
40353e43c279SJosef Bacik 
403618bb8bbfSJohannes Thumshirn 	while (!list_empty(&info->reclaim_bgs)) {
403718bb8bbfSJohannes Thumshirn 		block_group = list_first_entry(&info->reclaim_bgs,
403818bb8bbfSJohannes Thumshirn 					       struct btrfs_block_group,
403918bb8bbfSJohannes Thumshirn 					       bg_list);
404018bb8bbfSJohannes Thumshirn 		list_del_init(&block_group->bg_list);
404118bb8bbfSJohannes Thumshirn 		btrfs_put_block_group(block_group);
404218bb8bbfSJohannes Thumshirn 	}
404318bb8bbfSJohannes Thumshirn 	spin_unlock(&info->unused_bgs_lock);
404418bb8bbfSJohannes Thumshirn 
4045afba2bc0SNaohiro Aota 	spin_lock(&info->zone_active_bgs_lock);
4046afba2bc0SNaohiro Aota 	while (!list_empty(&info->zone_active_bgs)) {
4047afba2bc0SNaohiro Aota 		block_group = list_first_entry(&info->zone_active_bgs,
4048afba2bc0SNaohiro Aota 					       struct btrfs_block_group,
4049afba2bc0SNaohiro Aota 					       active_bg_list);
4050afba2bc0SNaohiro Aota 		list_del_init(&block_group->active_bg_list);
4051afba2bc0SNaohiro Aota 		btrfs_put_block_group(block_group);
4052afba2bc0SNaohiro Aota 	}
4053afba2bc0SNaohiro Aota 	spin_unlock(&info->zone_active_bgs_lock);
4054afba2bc0SNaohiro Aota 
405516b0c258SFilipe Manana 	write_lock(&info->block_group_cache_lock);
405608dddb29SFilipe Manana 	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
405732da5386SDavid Sterba 		block_group = rb_entry(n, struct btrfs_block_group,
40583e43c279SJosef Bacik 				       cache_node);
405908dddb29SFilipe Manana 		rb_erase_cached(&block_group->cache_node,
40603e43c279SJosef Bacik 				&info->block_group_cache_tree);
40613e43c279SJosef Bacik 		RB_CLEAR_NODE(&block_group->cache_node);
406216b0c258SFilipe Manana 		write_unlock(&info->block_group_cache_lock);
40633e43c279SJosef Bacik 
40643e43c279SJosef Bacik 		down_write(&block_group->space_info->groups_sem);
40653e43c279SJosef Bacik 		list_del(&block_group->list);
40663e43c279SJosef Bacik 		up_write(&block_group->space_info->groups_sem);
40673e43c279SJosef Bacik 
40683e43c279SJosef Bacik 		/*
40693e43c279SJosef Bacik 		 * We haven't cached this block group, which means we could
40703e43c279SJosef Bacik 		 * possibly have excluded extents on this block group.
40713e43c279SJosef Bacik 		 */
40723e43c279SJosef Bacik 		if (block_group->cached == BTRFS_CACHE_NO ||
40733e43c279SJosef Bacik 		    block_group->cached == BTRFS_CACHE_ERROR)
40743e43c279SJosef Bacik 			btrfs_free_excluded_extents(block_group);
40753e43c279SJosef Bacik 
40763e43c279SJosef Bacik 		btrfs_remove_free_space_cache(block_group);
40773e43c279SJosef Bacik 		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
40783e43c279SJosef Bacik 		ASSERT(list_empty(&block_group->dirty_list));
40793e43c279SJosef Bacik 		ASSERT(list_empty(&block_group->io_list));
40803e43c279SJosef Bacik 		ASSERT(list_empty(&block_group->bg_list));
408148aaeebeSJosef Bacik 		ASSERT(refcount_read(&block_group->refs) == 1);
4082195a49eaSFilipe Manana 		ASSERT(block_group->swap_extents == 0);
40833e43c279SJosef Bacik 		btrfs_put_block_group(block_group);
40843e43c279SJosef Bacik 
408516b0c258SFilipe Manana 		write_lock(&info->block_group_cache_lock);
40863e43c279SJosef Bacik 	}
408716b0c258SFilipe Manana 	write_unlock(&info->block_group_cache_lock);
40883e43c279SJosef Bacik 
40893e43c279SJosef Bacik 	btrfs_release_global_block_rsv(info);
40903e43c279SJosef Bacik 
40913e43c279SJosef Bacik 	while (!list_empty(&info->space_info)) {
40923e43c279SJosef Bacik 		space_info = list_entry(info->space_info.next,
40933e43c279SJosef Bacik 					struct btrfs_space_info,
40943e43c279SJosef Bacik 					list);
40953e43c279SJosef Bacik 
40963e43c279SJosef Bacik 		/*
40973e43c279SJosef Bacik 		 * Do not hide this behind enospc_debug, this is actually
40983e43c279SJosef Bacik 		 * important and indicates a real bug if this happens.
40993e43c279SJosef Bacik 		 */
41003e43c279SJosef Bacik 		if (WARN_ON(space_info->bytes_pinned > 0 ||
41013e43c279SJosef Bacik 			    space_info->bytes_may_use > 0))
41023e43c279SJosef Bacik 			btrfs_dump_space_info(info, space_info, 0, 0);
410340cdc509SFilipe Manana 
410440cdc509SFilipe Manana 		/*
410540cdc509SFilipe Manana 		 * If there was a failure to clean up a log tree, very likely due
410640cdc509SFilipe Manana 		 * to an IO failure on a writeback attempt of one or more of its
410740cdc509SFilipe Manana 		 * extent buffers, we could not do proper (and cheap) unaccounting
410840cdc509SFilipe Manana 		 * of their reserved space, so don't warn on bytes_reserved > 0 in
410940cdc509SFilipe Manana 		 * that case.
411040cdc509SFilipe Manana 		 */
411140cdc509SFilipe Manana 		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
411240cdc509SFilipe Manana 		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
411340cdc509SFilipe Manana 			if (WARN_ON(space_info->bytes_reserved > 0))
411440cdc509SFilipe Manana 				btrfs_dump_space_info(info, space_info, 0, 0);
411540cdc509SFilipe Manana 		}
411640cdc509SFilipe Manana 
4117d611add4SFilipe Manana 		WARN_ON(space_info->reclaim_size > 0);
41183e43c279SJosef Bacik 		list_del(&space_info->list);
41193e43c279SJosef Bacik 		btrfs_sysfs_remove_space_info(space_info);
41203e43c279SJosef Bacik 	}
41213e43c279SJosef Bacik 	return 0;
41223e43c279SJosef Bacik }
4123684b752bSFilipe Manana 
4124684b752bSFilipe Manana void btrfs_freeze_block_group(struct btrfs_block_group *cache)
4125684b752bSFilipe Manana {
4126684b752bSFilipe Manana 	atomic_inc(&cache->frozen);
4127684b752bSFilipe Manana }
4128684b752bSFilipe Manana 
4129684b752bSFilipe Manana void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
4130684b752bSFilipe Manana {
4131684b752bSFilipe Manana 	struct btrfs_fs_info *fs_info = block_group->fs_info;
4132684b752bSFilipe Manana 	struct extent_map_tree *em_tree;
4133684b752bSFilipe Manana 	struct extent_map *em;
4134684b752bSFilipe Manana 	bool cleanup;
4135684b752bSFilipe Manana 
4136684b752bSFilipe Manana 	spin_lock(&block_group->lock);
4137684b752bSFilipe Manana 	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
4138684b752bSFilipe Manana 		   block_group->removed);
4139684b752bSFilipe Manana 	spin_unlock(&block_group->lock);
4140684b752bSFilipe Manana 
4141684b752bSFilipe Manana 	if (cleanup) {
4142684b752bSFilipe Manana 		em_tree = &fs_info->mapping_tree;
4143684b752bSFilipe Manana 		write_lock(&em_tree->lock);
4144684b752bSFilipe Manana 		em = lookup_extent_mapping(em_tree, block_group->start,
4145684b752bSFilipe Manana 					   1);
4146684b752bSFilipe Manana 		BUG_ON(!em); /* logic error, can't happen */
4147684b752bSFilipe Manana 		remove_extent_mapping(em_tree, em);
4148684b752bSFilipe Manana 		write_unlock(&em_tree->lock);
4149684b752bSFilipe Manana 
4150684b752bSFilipe Manana 		/* once for us and once for the tree */
4151684b752bSFilipe Manana 		free_extent_map(em);
4152684b752bSFilipe Manana 		free_extent_map(em);
4153684b752bSFilipe Manana 
4154684b752bSFilipe Manana 		/*
4155684b752bSFilipe Manana 		 * We may have left one free space entry, and other tasks
4156684b752bSFilipe Manana 		 * trimming this block group may have left one entry each.
4157684b752bSFilipe Manana 		 * Free them if any.
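		 * (We only get here when the last frozen reference was
		 * dropped on an already removed block group, per the
		 * atomic_dec_and_test() above.)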
4158684b752bSFilipe Manana 		 */
4159684b752bSFilipe Manana 		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
4160684b752bSFilipe Manana 	}
4161684b752bSFilipe Manana }
4162195a49eaSFilipe Manana 
4163195a49eaSFilipe Manana bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
4164195a49eaSFilipe Manana {
4165195a49eaSFilipe Manana 	bool ret = true;
4166195a49eaSFilipe Manana 
4167195a49eaSFilipe Manana 	spin_lock(&bg->lock);
4168195a49eaSFilipe Manana 	if (bg->ro)
4169195a49eaSFilipe Manana 		ret = false;
4170195a49eaSFilipe Manana 	else
4171195a49eaSFilipe Manana 		bg->swap_extents++;
4172195a49eaSFilipe Manana 	spin_unlock(&bg->lock);
4173195a49eaSFilipe Manana 
4174195a49eaSFilipe Manana 	return ret;
4175195a49eaSFilipe Manana }
4176195a49eaSFilipe Manana 
4177195a49eaSFilipe Manana void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
4178195a49eaSFilipe Manana {
4179195a49eaSFilipe Manana 	spin_lock(&bg->lock);
4180195a49eaSFilipe Manana 	ASSERT(!bg->ro);
4181195a49eaSFilipe Manana 	ASSERT(bg->swap_extents >= amount);
4182195a49eaSFilipe Manana 	bg->swap_extents -= amount;
4183195a49eaSFilipe Manana 	spin_unlock(&bg->lock);
4184195a49eaSFilipe Manana }
4185