// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
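	/*
	 * The structure is freed only when the last reference is dropped. At
	 * that point nothing may still be pinned or reserved in this block
	 * group, it must not sit on the discard list and no full stripe locks
	 * may be held, otherwise the checks below warn.
	 */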
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If this is not empty, someone is still holding the mutex of
		 * a full_stripe_lock, which can only be released by its
		 * holder, and releasing it after we free the block group here
		 * would be a use-after-free.
		 *
		 * There is no better way to resolve this than to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	ASSERT(block_group->length != 0);

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

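/*
 * Iterator helper: given a block group that the caller holds a reference on,
 * drop that reference and return the next block group in the cache tree, with
 * a new reference taken, or NULL when the end is reached. If the given block
 * group was removed from the tree in the meantime, fall back to a full lookup
 * starting right after its end.
 */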
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

static bool space_cache_v1_done(struct btrfs_block_group *cache)
{
	bool ret;

	spin_lock(&cache->lock);
	ret = cache->cached != BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	return ret;
}

void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
					struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, space_cache_v1_done(cache));
}

#ifdef CONFIG_BTRFS_DEBUG
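/*
 * Debug helper: when free space fragmentation is requested via the debug
 * mount options, deliberately remove every other chunk of free space from a
 * freshly cached block group so that the allocator paths that deal with
 * fragmented space get exercised.
 */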
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group. Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't be
 * used yet since their free space will be released as soon as the transaction
 * commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
				block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching.  Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

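	/*
	 * Set up the caching control before it is published. The refcount
	 * starts at 2: one reference ends up owned by the block group via
	 * cache->caching_ctl and one is ours, dropped at the 'out' label
	 * below.
	 */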
	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 2);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		cache->cached = BTRFS_CACHE_FAST;
	else
		cache->cached = BTRFS_CACHE_STARTED;
	cache->has_caching_ctl = 1;
	spin_unlock(&cache->lock);

	spin_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	spin_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (load_cache_only && caching_ctl)
		btrfs_wait_space_cache_v1_finished(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *   in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void
clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = fs_info->extent_root;
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}

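/*
 * Remove a block group from the filesystem: drop its free space cache and
 * inode, unlink it from the block group cache tree and its space_info lists,
 * wait for any in-progress caching, adjust the space_info counters, delete
 * its free space tree entries and its block group item, and finally drop the
 * chunk's extent map unless the block group is still frozen by a trimming or
 * scrub task.
 */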
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	btrfs_clear_treelog_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

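	/*
	 * With the cache write-out finished and the inode reference taken
	 * above, the free space inode for this block group can now be
	 * removed.
	 */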
	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	if (fs_info->first_logical_byte == block_group->start)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		spin_lock(&fs_info->block_group_cache_lock);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		spin_unlock(&fs_info->block_group_cache_lock);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length - block_group->zone_unusable);
		WARN_ON(block_group->space_info->bytes_zone_unusable
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
			(block_group->length - block_group->zone_unusable);
	block_group->space_info->bytes_zone_unusable -=
			block_group->zone_unusable;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent a race between tasks
	 * that freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task could end up
	 * removing the block group's extent map before the task calling this
	 * function deletes the block group item from the extent tree,
	 * allowing for another task to attempt to create another block group
	 * with the same item key (and failing with -EEXIST and a transaction
	 * abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
1134e3e0520bSJosef Bacik */ 1135e3e0520bSJosef Bacik map = em->map_lookup; 1136e3e0520bSJosef Bacik num_items = 3 + map->num_stripes; 1137e3e0520bSJosef Bacik free_extent_map(em); 1138e3e0520bSJosef Bacik 1139e3e0520bSJosef Bacik return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, 11407f9fe614SJosef Bacik num_items); 1141e3e0520bSJosef Bacik } 1142e3e0520bSJosef Bacik 1143e3e0520bSJosef Bacik /* 114426ce2095SJosef Bacik * Mark block group @cache read-only, so later writes won't happen to block 114526ce2095SJosef Bacik * group @cache. 114626ce2095SJosef Bacik * 114726ce2095SJosef Bacik * If @force is not set, this function will only mark the block group readonly 114826ce2095SJosef Bacik * if we have enough free space (1M) in other metadata/system block groups. 114926ce2095SJosef Bacik * If @force is set, this function will mark the block group readonly 115026ce2095SJosef Bacik * without checking free space. 115126ce2095SJosef Bacik * 115226ce2095SJosef Bacik * NOTE: This function doesn't care if other block groups can contain all the 115326ce2095SJosef Bacik * data in this block group. That check should be done by the relocation routine, 115426ce2095SJosef Bacik * not this function. 115526ce2095SJosef Bacik */ 115632da5386SDavid Sterba static int inc_block_group_ro(struct btrfs_block_group *cache, int force) 115726ce2095SJosef Bacik { 115826ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info; 115926ce2095SJosef Bacik u64 num_bytes; 116026ce2095SJosef Bacik int ret = -ENOSPC; 116126ce2095SJosef Bacik 116226ce2095SJosef Bacik spin_lock(&sinfo->lock); 116326ce2095SJosef Bacik spin_lock(&cache->lock); 116426ce2095SJosef Bacik 1165195a49eaSFilipe Manana if (cache->swap_extents) { 1166195a49eaSFilipe Manana ret = -ETXTBSY; 1167195a49eaSFilipe Manana goto out; 1168195a49eaSFilipe Manana } 1169195a49eaSFilipe Manana 117026ce2095SJosef Bacik if (cache->ro) { 117126ce2095SJosef Bacik cache->ro++; 117226ce2095SJosef Bacik ret = 0; 117326ce2095SJosef Bacik goto out; 117426ce2095SJosef Bacik } 117526ce2095SJosef Bacik 1176b3470b5dSDavid Sterba num_bytes = cache->length - cache->reserved - cache->pinned - 1177169e0da9SNaohiro Aota cache->bytes_super - cache->zone_unusable - cache->used; 117826ce2095SJosef Bacik 117926ce2095SJosef Bacik /* 1180a30a3d20SJosef Bacik * Data never overcommits, even in mixed mode, so do just the straight 1181a30a3d20SJosef Bacik * check of left over space in how much we have allocated. 1182a30a3d20SJosef Bacik */ 1183a30a3d20SJosef Bacik if (force) { 1184a30a3d20SJosef Bacik ret = 0; 1185a30a3d20SJosef Bacik } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { 1186a30a3d20SJosef Bacik u64 sinfo_used = btrfs_space_info_used(sinfo, true); 1187a30a3d20SJosef Bacik 1188a30a3d20SJosef Bacik /* 118926ce2095SJosef Bacik * Here we make sure if we mark this bg RO, we still have enough 1190f8935566SJosef Bacik * free space as buffer. 119126ce2095SJosef Bacik */ 1192a30a3d20SJosef Bacik if (sinfo_used + num_bytes <= sinfo->total_bytes) 1193a30a3d20SJosef Bacik ret = 0; 1194a30a3d20SJosef Bacik } else { 1195a30a3d20SJosef Bacik /* 1196a30a3d20SJosef Bacik * We overcommit metadata, so we need to do the 1197a30a3d20SJosef Bacik * btrfs_can_overcommit check here, and we need to pass in 1198a30a3d20SJosef Bacik * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of 1199a30a3d20SJosef Bacik * leeway to allow us to mark this block group as read only.
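 *
 * In other words, for metadata/system groups we only require that the
 * space_info can still overcommit by num_bytes (the yet unused portion
 * of this group), instead of the strict total_bytes check used for data
 * above.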
1200a30a3d20SJosef Bacik */ 1201a30a3d20SJosef Bacik if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, 1202a30a3d20SJosef Bacik BTRFS_RESERVE_NO_FLUSH)) 1203a30a3d20SJosef Bacik ret = 0; 1204a30a3d20SJosef Bacik } 1205a30a3d20SJosef Bacik 1206a30a3d20SJosef Bacik if (!ret) { 120726ce2095SJosef Bacik sinfo->bytes_readonly += num_bytes; 1208169e0da9SNaohiro Aota if (btrfs_is_zoned(cache->fs_info)) { 1209169e0da9SNaohiro Aota /* Migrate zone_unusable bytes to readonly */ 1210169e0da9SNaohiro Aota sinfo->bytes_readonly += cache->zone_unusable; 1211169e0da9SNaohiro Aota sinfo->bytes_zone_unusable -= cache->zone_unusable; 1212169e0da9SNaohiro Aota cache->zone_unusable = 0; 1213169e0da9SNaohiro Aota } 121426ce2095SJosef Bacik cache->ro++; 121526ce2095SJosef Bacik list_add_tail(&cache->ro_list, &sinfo->ro_bgs); 121626ce2095SJosef Bacik } 121726ce2095SJosef Bacik out: 121826ce2095SJosef Bacik spin_unlock(&cache->lock); 121926ce2095SJosef Bacik spin_unlock(&sinfo->lock); 122026ce2095SJosef Bacik if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { 122126ce2095SJosef Bacik btrfs_info(cache->fs_info, 1222b3470b5dSDavid Sterba "unable to make block group %llu ro", cache->start); 122326ce2095SJosef Bacik btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); 122426ce2095SJosef Bacik } 122526ce2095SJosef Bacik return ret; 122626ce2095SJosef Bacik } 122726ce2095SJosef Bacik 1228fe119a6eSNikolay Borisov static bool clean_pinned_extents(struct btrfs_trans_handle *trans, 1229fe119a6eSNikolay Borisov struct btrfs_block_group *bg) 123045bb5d6aSNikolay Borisov { 123145bb5d6aSNikolay Borisov struct btrfs_fs_info *fs_info = bg->fs_info; 1232fe119a6eSNikolay Borisov struct btrfs_transaction *prev_trans = NULL; 123345bb5d6aSNikolay Borisov const u64 start = bg->start; 123445bb5d6aSNikolay Borisov const u64 end = start + bg->length - 1; 123545bb5d6aSNikolay Borisov int ret; 123645bb5d6aSNikolay Borisov 1237fe119a6eSNikolay Borisov spin_lock(&fs_info->trans_lock); 1238fe119a6eSNikolay Borisov if (trans->transaction->list.prev != &fs_info->trans_list) { 1239fe119a6eSNikolay Borisov prev_trans = list_last_entry(&trans->transaction->list, 1240fe119a6eSNikolay Borisov struct btrfs_transaction, list); 1241fe119a6eSNikolay Borisov refcount_inc(&prev_trans->use_count); 1242fe119a6eSNikolay Borisov } 1243fe119a6eSNikolay Borisov spin_unlock(&fs_info->trans_lock); 1244fe119a6eSNikolay Borisov 124545bb5d6aSNikolay Borisov /* 124645bb5d6aSNikolay Borisov * Hold the unused_bg_unpin_mutex lock to avoid racing with 124745bb5d6aSNikolay Borisov * btrfs_finish_extent_commit(). If we are at transaction N, another 124845bb5d6aSNikolay Borisov * task might be running finish_extent_commit() for the previous 124945bb5d6aSNikolay Borisov * transaction N - 1, and have seen a range belonging to the block 1250fe119a6eSNikolay Borisov * group in pinned_extents before we were able to clear the whole block 1251fe119a6eSNikolay Borisov * group range from pinned_extents. This means that task can lookup for 1252fe119a6eSNikolay Borisov * the block group after we unpinned it from pinned_extents and removed 1253fe119a6eSNikolay Borisov * it, leading to a BUG_ON() at unpin_extent_range(). 
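 *
 * Holding unused_bg_unpin_mutex across both clear_extent_bits() calls
 * below closes that window for the previous as well as the current
 * transaction.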
125445bb5d6aSNikolay Borisov */ 125545bb5d6aSNikolay Borisov mutex_lock(&fs_info->unused_bg_unpin_mutex); 1256fe119a6eSNikolay Borisov if (prev_trans) { 1257fe119a6eSNikolay Borisov ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, 125845bb5d6aSNikolay Borisov EXTENT_DIRTY); 125945bb5d6aSNikolay Borisov if (ret) 1260534cf531SFilipe Manana goto out; 1261fe119a6eSNikolay Borisov } 126245bb5d6aSNikolay Borisov 1263fe119a6eSNikolay Borisov ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, 126445bb5d6aSNikolay Borisov EXTENT_DIRTY); 1265534cf531SFilipe Manana out: 126645bb5d6aSNikolay Borisov mutex_unlock(&fs_info->unused_bg_unpin_mutex); 12675150bf19SFilipe Manana if (prev_trans) 12685150bf19SFilipe Manana btrfs_put_transaction(prev_trans); 126945bb5d6aSNikolay Borisov 1270534cf531SFilipe Manana return ret == 0; 127145bb5d6aSNikolay Borisov } 127245bb5d6aSNikolay Borisov 127326ce2095SJosef Bacik /* 1274e3e0520bSJosef Bacik * Process the unused_bgs list and remove any that don't have any allocated 1275e3e0520bSJosef Bacik * space inside of them. 1276e3e0520bSJosef Bacik */ 1277e3e0520bSJosef Bacik void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1278e3e0520bSJosef Bacik { 127932da5386SDavid Sterba struct btrfs_block_group *block_group; 1280e3e0520bSJosef Bacik struct btrfs_space_info *space_info; 1281e3e0520bSJosef Bacik struct btrfs_trans_handle *trans; 12826e80d4f8SDennis Zhou const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); 1283e3e0520bSJosef Bacik int ret = 0; 1284e3e0520bSJosef Bacik 1285e3e0520bSJosef Bacik if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1286e3e0520bSJosef Bacik return; 1287e3e0520bSJosef Bacik 1288ddfd08cbSJosef Bacik /* 1289ddfd08cbSJosef Bacik * Long running balances can keep us blocked here for eternity, so 1290ddfd08cbSJosef Bacik * simply skip deletion if we're unable to get the mutex. 1291ddfd08cbSJosef Bacik */ 1292f3372065SJohannes Thumshirn if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1293ddfd08cbSJosef Bacik return; 1294ddfd08cbSJosef Bacik 1295e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1296e3e0520bSJosef Bacik while (!list_empty(&fs_info->unused_bgs)) { 1297e3e0520bSJosef Bacik int trimming; 1298e3e0520bSJosef Bacik 1299e3e0520bSJosef Bacik block_group = list_first_entry(&fs_info->unused_bgs, 130032da5386SDavid Sterba struct btrfs_block_group, 1301e3e0520bSJosef Bacik bg_list); 1302e3e0520bSJosef Bacik list_del_init(&block_group->bg_list); 1303e3e0520bSJosef Bacik 1304e3e0520bSJosef Bacik space_info = block_group->space_info; 1305e3e0520bSJosef Bacik 1306e3e0520bSJosef Bacik if (ret || btrfs_mixed_space_info(space_info)) { 1307e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1308e3e0520bSJosef Bacik continue; 1309e3e0520bSJosef Bacik } 1310e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1311e3e0520bSJosef Bacik 1312b0643e59SDennis Zhou btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 1313b0643e59SDennis Zhou 1314e3e0520bSJosef Bacik /* Don't want to race with allocators so take the groups_sem */ 1315e3e0520bSJosef Bacik down_write(&space_info->groups_sem); 13166e80d4f8SDennis Zhou 13176e80d4f8SDennis Zhou /* 13186e80d4f8SDennis Zhou * Async discard moves the final block group discard to be prior 13196e80d4f8SDennis Zhou * to the unused_bgs code path. Therefore, if it's not fully 13206e80d4f8SDennis Zhou * trimmed, punt it back to the async discard lists. 
13216e80d4f8SDennis Zhou */ 13226e80d4f8SDennis Zhou if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && 13236e80d4f8SDennis Zhou !btrfs_is_free_space_trimmed(block_group)) { 13246e80d4f8SDennis Zhou trace_btrfs_skip_unused_block_group(block_group); 13256e80d4f8SDennis Zhou up_write(&space_info->groups_sem); 13266e80d4f8SDennis Zhou /* Requeue if we failed because of async discard */ 13276e80d4f8SDennis Zhou btrfs_discard_queue_work(&fs_info->discard_ctl, 13286e80d4f8SDennis Zhou block_group); 13296e80d4f8SDennis Zhou goto next; 13306e80d4f8SDennis Zhou } 13316e80d4f8SDennis Zhou 1332e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1333e3e0520bSJosef Bacik if (block_group->reserved || block_group->pinned || 1334bf38be65SDavid Sterba block_group->used || block_group->ro || 1335e3e0520bSJosef Bacik list_is_singular(&block_group->list)) { 1336e3e0520bSJosef Bacik /* 1337e3e0520bSJosef Bacik * We want to bail if we made new allocations or have 1338e3e0520bSJosef Bacik * outstanding allocations in this block group. We do 1339e3e0520bSJosef Bacik * the ro check in case balance is currently acting on 1340e3e0520bSJosef Bacik * this block group. 1341e3e0520bSJosef Bacik */ 1342e3e0520bSJosef Bacik trace_btrfs_skip_unused_block_group(block_group); 1343e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1344e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1345e3e0520bSJosef Bacik goto next; 1346e3e0520bSJosef Bacik } 1347e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1348e3e0520bSJosef Bacik 1349e3e0520bSJosef Bacik /* We don't want to force the issue, only flip if it's ok. */ 1350e11c0406SJosef Bacik ret = inc_block_group_ro(block_group, 0); 1351e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1352e3e0520bSJosef Bacik if (ret < 0) { 1353e3e0520bSJosef Bacik ret = 0; 1354e3e0520bSJosef Bacik goto next; 1355e3e0520bSJosef Bacik } 1356e3e0520bSJosef Bacik 1357e3e0520bSJosef Bacik /* 1358e3e0520bSJosef Bacik * Want to do this before we do anything else so we can recover 1359e3e0520bSJosef Bacik * properly if we fail to join the transaction. 1360e3e0520bSJosef Bacik */ 1361e3e0520bSJosef Bacik trans = btrfs_start_trans_remove_block_group(fs_info, 1362b3470b5dSDavid Sterba block_group->start); 1363e3e0520bSJosef Bacik if (IS_ERR(trans)) { 1364e3e0520bSJosef Bacik btrfs_dec_block_group_ro(block_group); 1365e3e0520bSJosef Bacik ret = PTR_ERR(trans); 1366e3e0520bSJosef Bacik goto next; 1367e3e0520bSJosef Bacik } 1368e3e0520bSJosef Bacik 1369e3e0520bSJosef Bacik /* 1370e3e0520bSJosef Bacik * We could have pending pinned extents for this block group, 1371e3e0520bSJosef Bacik * just delete them, we don't care about them anymore. 1372e3e0520bSJosef Bacik */ 1373534cf531SFilipe Manana if (!clean_pinned_extents(trans, block_group)) { 1374534cf531SFilipe Manana btrfs_dec_block_group_ro(block_group); 1375e3e0520bSJosef Bacik goto end_trans; 1376534cf531SFilipe Manana } 1377e3e0520bSJosef Bacik 1378b0643e59SDennis Zhou /* 1379b0643e59SDennis Zhou * At this point, the block_group is read only and should fail 1380b0643e59SDennis Zhou * new allocations. However, btrfs_finish_extent_commit() can 1381b0643e59SDennis Zhou * cause this block_group to be placed back on the discard 1382b0643e59SDennis Zhou * lists because now the block_group isn't fully discarded. 1383b0643e59SDennis Zhou * Bail here and try again later after discarding everything. 
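 *
 * That is what the DISCARD_ASYNC check below does: requeue the group
 * via btrfs_discard_queue_work() and skip deleting it for now.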
1384b0643e59SDennis Zhou */ 1385b0643e59SDennis Zhou spin_lock(&fs_info->discard_ctl.lock); 1386b0643e59SDennis Zhou if (!list_empty(&block_group->discard_list)) { 1387b0643e59SDennis Zhou spin_unlock(&fs_info->discard_ctl.lock); 1388b0643e59SDennis Zhou btrfs_dec_block_group_ro(block_group); 1389b0643e59SDennis Zhou btrfs_discard_queue_work(&fs_info->discard_ctl, 1390b0643e59SDennis Zhou block_group); 1391b0643e59SDennis Zhou goto end_trans; 1392b0643e59SDennis Zhou } 1393b0643e59SDennis Zhou spin_unlock(&fs_info->discard_ctl.lock); 1394b0643e59SDennis Zhou 1395e3e0520bSJosef Bacik /* Reset pinned so btrfs_put_block_group doesn't complain */ 1396e3e0520bSJosef Bacik spin_lock(&space_info->lock); 1397e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1398e3e0520bSJosef Bacik 1399e3e0520bSJosef Bacik btrfs_space_info_update_bytes_pinned(fs_info, space_info, 1400e3e0520bSJosef Bacik -block_group->pinned); 1401e3e0520bSJosef Bacik space_info->bytes_readonly += block_group->pinned; 14022187374fSJosef Bacik __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned); 1403e3e0520bSJosef Bacik block_group->pinned = 0; 1404e3e0520bSJosef Bacik 1405e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1406e3e0520bSJosef Bacik spin_unlock(&space_info->lock); 1407e3e0520bSJosef Bacik 14086e80d4f8SDennis Zhou /* 14096e80d4f8SDennis Zhou * The normal path here is an unused block group is passed here, 14106e80d4f8SDennis Zhou * then trimming is handled in the transaction commit path. 14116e80d4f8SDennis Zhou * Async discard interposes before this to do the trimming 14126e80d4f8SDennis Zhou * before coming down the unused block group path as trimming 14136e80d4f8SDennis Zhou * will no longer be done later in the transaction commit path. 14146e80d4f8SDennis Zhou */ 14156e80d4f8SDennis Zhou if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) 14166e80d4f8SDennis Zhou goto flip_async; 14176e80d4f8SDennis Zhou 1418dcba6e48SNaohiro Aota /* 1419dcba6e48SNaohiro Aota * DISCARD can flip during remount. On zoned filesystems, we 1420dcba6e48SNaohiro Aota * need to reset sequential-required zones. 1421dcba6e48SNaohiro Aota */ 1422dcba6e48SNaohiro Aota trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) || 1423dcba6e48SNaohiro Aota btrfs_is_zoned(fs_info); 1424e3e0520bSJosef Bacik 1425e3e0520bSJosef Bacik /* Implicit trim during transaction commit. */ 1426e3e0520bSJosef Bacik if (trimming) 14276b7304afSFilipe Manana btrfs_freeze_block_group(block_group); 1428e3e0520bSJosef Bacik 1429e3e0520bSJosef Bacik /* 1430e3e0520bSJosef Bacik * Btrfs_remove_chunk will abort the transaction if things go 1431e3e0520bSJosef Bacik * horribly wrong. 1432e3e0520bSJosef Bacik */ 1433b3470b5dSDavid Sterba ret = btrfs_remove_chunk(trans, block_group->start); 1434e3e0520bSJosef Bacik 1435e3e0520bSJosef Bacik if (ret) { 1436e3e0520bSJosef Bacik if (trimming) 14376b7304afSFilipe Manana btrfs_unfreeze_block_group(block_group); 1438e3e0520bSJosef Bacik goto end_trans; 1439e3e0520bSJosef Bacik } 1440e3e0520bSJosef Bacik 1441e3e0520bSJosef Bacik /* 1442e3e0520bSJosef Bacik * If we're not mounted with -odiscard, we can just forget 1443e3e0520bSJosef Bacik * about this block group. Otherwise we'll need to wait 1444e3e0520bSJosef Bacik * until transaction commit to do the actual discard. 
1445e3e0520bSJosef Bacik */ 1446e3e0520bSJosef Bacik if (trimming) { 1447e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1448e3e0520bSJosef Bacik /* 1449e3e0520bSJosef Bacik * A concurrent scrub might have added us to the list 1450e3e0520bSJosef Bacik * fs_info->unused_bgs, so use a list_move operation 1451e3e0520bSJosef Bacik * to add the block group to the deleted_bgs list. 1452e3e0520bSJosef Bacik */ 1453e3e0520bSJosef Bacik list_move(&block_group->bg_list, 1454e3e0520bSJosef Bacik &trans->transaction->deleted_bgs); 1455e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1456e3e0520bSJosef Bacik btrfs_get_block_group(block_group); 1457e3e0520bSJosef Bacik } 1458e3e0520bSJosef Bacik end_trans: 1459e3e0520bSJosef Bacik btrfs_end_transaction(trans); 1460e3e0520bSJosef Bacik next: 1461e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1462e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1463e3e0520bSJosef Bacik } 1464e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1465f3372065SJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 14666e80d4f8SDennis Zhou return; 14676e80d4f8SDennis Zhou 14686e80d4f8SDennis Zhou flip_async: 14696e80d4f8SDennis Zhou btrfs_end_transaction(trans); 1470f3372065SJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 14716e80d4f8SDennis Zhou btrfs_put_block_group(block_group); 14726e80d4f8SDennis Zhou btrfs_discard_punt_unused_bgs_list(fs_info); 1473e3e0520bSJosef Bacik } 1474e3e0520bSJosef Bacik 147532da5386SDavid Sterba void btrfs_mark_bg_unused(struct btrfs_block_group *bg) 1476e3e0520bSJosef Bacik { 1477e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info = bg->fs_info; 1478e3e0520bSJosef Bacik 1479e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1480e3e0520bSJosef Bacik if (list_empty(&bg->bg_list)) { 1481e3e0520bSJosef Bacik btrfs_get_block_group(bg); 1482e3e0520bSJosef Bacik trace_btrfs_add_unused_block_group(bg); 1483e3e0520bSJosef Bacik list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 1484e3e0520bSJosef Bacik } 1485e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1486e3e0520bSJosef Bacik } 14874358d963SJosef Bacik 148818bb8bbfSJohannes Thumshirn void btrfs_reclaim_bgs_work(struct work_struct *work) 148918bb8bbfSJohannes Thumshirn { 149018bb8bbfSJohannes Thumshirn struct btrfs_fs_info *fs_info = 149118bb8bbfSJohannes Thumshirn container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 149218bb8bbfSJohannes Thumshirn struct btrfs_block_group *bg; 149318bb8bbfSJohannes Thumshirn struct btrfs_space_info *space_info; 149418bb8bbfSJohannes Thumshirn int ret; 149518bb8bbfSJohannes Thumshirn 149618bb8bbfSJohannes Thumshirn if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 149718bb8bbfSJohannes Thumshirn return; 149818bb8bbfSJohannes Thumshirn 149918bb8bbfSJohannes Thumshirn if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 150018bb8bbfSJohannes Thumshirn return; 150118bb8bbfSJohannes Thumshirn 150218bb8bbfSJohannes Thumshirn mutex_lock(&fs_info->reclaim_bgs_lock); 150318bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 150418bb8bbfSJohannes Thumshirn while (!list_empty(&fs_info->reclaim_bgs)) { 150518bb8bbfSJohannes Thumshirn bg = list_first_entry(&fs_info->reclaim_bgs, 150618bb8bbfSJohannes Thumshirn struct btrfs_block_group, 150718bb8bbfSJohannes Thumshirn bg_list); 150818bb8bbfSJohannes Thumshirn list_del_init(&bg->bg_list); 150918bb8bbfSJohannes Thumshirn 151018bb8bbfSJohannes Thumshirn space_info = bg->space_info; 151118bb8bbfSJohannes Thumshirn 
spin_unlock(&fs_info->unused_bgs_lock); 151218bb8bbfSJohannes Thumshirn 151318bb8bbfSJohannes Thumshirn /* Don't race with allocators so take the groups_sem */ 151418bb8bbfSJohannes Thumshirn down_write(&space_info->groups_sem); 151518bb8bbfSJohannes Thumshirn 151618bb8bbfSJohannes Thumshirn spin_lock(&bg->lock); 151718bb8bbfSJohannes Thumshirn if (bg->reserved || bg->pinned || bg->ro) { 151818bb8bbfSJohannes Thumshirn /* 151918bb8bbfSJohannes Thumshirn * We want to bail if we made new allocations or have 152018bb8bbfSJohannes Thumshirn * outstanding allocations in this block group. We do 152118bb8bbfSJohannes Thumshirn * the ro check in case balance is currently acting on 152218bb8bbfSJohannes Thumshirn * this block group. 152318bb8bbfSJohannes Thumshirn */ 152418bb8bbfSJohannes Thumshirn spin_unlock(&bg->lock); 152518bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem); 152618bb8bbfSJohannes Thumshirn goto next; 152718bb8bbfSJohannes Thumshirn } 152818bb8bbfSJohannes Thumshirn spin_unlock(&bg->lock); 152918bb8bbfSJohannes Thumshirn 153018bb8bbfSJohannes Thumshirn /* Get out fast, in case we're unmounting the filesystem */ 153118bb8bbfSJohannes Thumshirn if (btrfs_fs_closing(fs_info)) { 153218bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem); 153318bb8bbfSJohannes Thumshirn goto next; 153418bb8bbfSJohannes Thumshirn } 153518bb8bbfSJohannes Thumshirn 153618bb8bbfSJohannes Thumshirn ret = inc_block_group_ro(bg, 0); 153718bb8bbfSJohannes Thumshirn up_write(&space_info->groups_sem); 153818bb8bbfSJohannes Thumshirn if (ret < 0) 153918bb8bbfSJohannes Thumshirn goto next; 154018bb8bbfSJohannes Thumshirn 154118bb8bbfSJohannes Thumshirn btrfs_info(fs_info, "reclaiming chunk %llu with %llu%% used", 154218bb8bbfSJohannes Thumshirn bg->start, div_u64(bg->used * 100, bg->length)); 154318bb8bbfSJohannes Thumshirn trace_btrfs_reclaim_block_group(bg); 154418bb8bbfSJohannes Thumshirn ret = btrfs_relocate_chunk(fs_info, bg->start); 154518bb8bbfSJohannes Thumshirn if (ret) 154618bb8bbfSJohannes Thumshirn btrfs_err(fs_info, "error relocating chunk %llu", 154718bb8bbfSJohannes Thumshirn bg->start); 154818bb8bbfSJohannes Thumshirn 154918bb8bbfSJohannes Thumshirn next: 155018bb8bbfSJohannes Thumshirn btrfs_put_block_group(bg); 155118bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 155218bb8bbfSJohannes Thumshirn } 155318bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 155418bb8bbfSJohannes Thumshirn mutex_unlock(&fs_info->reclaim_bgs_lock); 155518bb8bbfSJohannes Thumshirn btrfs_exclop_finish(fs_info); 155618bb8bbfSJohannes Thumshirn } 155718bb8bbfSJohannes Thumshirn 155818bb8bbfSJohannes Thumshirn void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 155918bb8bbfSJohannes Thumshirn { 156018bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 156118bb8bbfSJohannes Thumshirn if (!list_empty(&fs_info->reclaim_bgs)) 156218bb8bbfSJohannes Thumshirn queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); 156318bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 156418bb8bbfSJohannes Thumshirn } 156518bb8bbfSJohannes Thumshirn 156618bb8bbfSJohannes Thumshirn void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 156718bb8bbfSJohannes Thumshirn { 156818bb8bbfSJohannes Thumshirn struct btrfs_fs_info *fs_info = bg->fs_info; 156918bb8bbfSJohannes Thumshirn 157018bb8bbfSJohannes Thumshirn spin_lock(&fs_info->unused_bgs_lock); 157118bb8bbfSJohannes Thumshirn if (list_empty(&bg->bg_list)) { 157218bb8bbfSJohannes Thumshirn 
btrfs_get_block_group(bg); 157318bb8bbfSJohannes Thumshirn trace_btrfs_add_reclaim_block_group(bg); 157418bb8bbfSJohannes Thumshirn list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); 157518bb8bbfSJohannes Thumshirn } 157618bb8bbfSJohannes Thumshirn spin_unlock(&fs_info->unused_bgs_lock); 157718bb8bbfSJohannes Thumshirn } 157818bb8bbfSJohannes Thumshirn 1579e3ba67a1SJohannes Thumshirn static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 1580e3ba67a1SJohannes Thumshirn struct btrfs_path *path) 1581e3ba67a1SJohannes Thumshirn { 1582e3ba67a1SJohannes Thumshirn struct extent_map_tree *em_tree; 1583e3ba67a1SJohannes Thumshirn struct extent_map *em; 1584e3ba67a1SJohannes Thumshirn struct btrfs_block_group_item bg; 1585e3ba67a1SJohannes Thumshirn struct extent_buffer *leaf; 1586e3ba67a1SJohannes Thumshirn int slot; 1587e3ba67a1SJohannes Thumshirn u64 flags; 1588e3ba67a1SJohannes Thumshirn int ret = 0; 1589e3ba67a1SJohannes Thumshirn 1590e3ba67a1SJohannes Thumshirn slot = path->slots[0]; 1591e3ba67a1SJohannes Thumshirn leaf = path->nodes[0]; 1592e3ba67a1SJohannes Thumshirn 1593e3ba67a1SJohannes Thumshirn em_tree = &fs_info->mapping_tree; 1594e3ba67a1SJohannes Thumshirn read_lock(&em_tree->lock); 1595e3ba67a1SJohannes Thumshirn em = lookup_extent_mapping(em_tree, key->objectid, key->offset); 1596e3ba67a1SJohannes Thumshirn read_unlock(&em_tree->lock); 1597e3ba67a1SJohannes Thumshirn if (!em) { 1598e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1599e3ba67a1SJohannes Thumshirn "logical %llu len %llu found bg but no related chunk", 1600e3ba67a1SJohannes Thumshirn key->objectid, key->offset); 1601e3ba67a1SJohannes Thumshirn return -ENOENT; 1602e3ba67a1SJohannes Thumshirn } 1603e3ba67a1SJohannes Thumshirn 1604e3ba67a1SJohannes Thumshirn if (em->start != key->objectid || em->len != key->offset) { 1605e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1606e3ba67a1SJohannes Thumshirn "block group %llu len %llu mismatch with chunk %llu len %llu", 1607e3ba67a1SJohannes Thumshirn key->objectid, key->offset, em->start, em->len); 1608e3ba67a1SJohannes Thumshirn ret = -EUCLEAN; 1609e3ba67a1SJohannes Thumshirn goto out_free_em; 1610e3ba67a1SJohannes Thumshirn } 1611e3ba67a1SJohannes Thumshirn 1612e3ba67a1SJohannes Thumshirn read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 1613e3ba67a1SJohannes Thumshirn sizeof(bg)); 1614e3ba67a1SJohannes Thumshirn flags = btrfs_stack_block_group_flags(&bg) & 1615e3ba67a1SJohannes Thumshirn BTRFS_BLOCK_GROUP_TYPE_MASK; 1616e3ba67a1SJohannes Thumshirn 1617e3ba67a1SJohannes Thumshirn if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 1618e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1619e3ba67a1SJohannes Thumshirn "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 1620e3ba67a1SJohannes Thumshirn key->objectid, key->offset, flags, 1621e3ba67a1SJohannes Thumshirn (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); 1622e3ba67a1SJohannes Thumshirn ret = -EUCLEAN; 1623e3ba67a1SJohannes Thumshirn } 1624e3ba67a1SJohannes Thumshirn 1625e3ba67a1SJohannes Thumshirn out_free_em: 1626e3ba67a1SJohannes Thumshirn free_extent_map(em); 1627e3ba67a1SJohannes Thumshirn return ret; 1628e3ba67a1SJohannes Thumshirn } 1629e3ba67a1SJohannes Thumshirn 16304358d963SJosef Bacik static int find_first_block_group(struct btrfs_fs_info *fs_info, 16314358d963SJosef Bacik struct btrfs_path *path, 16324358d963SJosef Bacik struct btrfs_key *key) 16334358d963SJosef Bacik { 16344358d963SJosef Bacik struct btrfs_root *root = 
fs_info->extent_root; 1635e3ba67a1SJohannes Thumshirn int ret; 16364358d963SJosef Bacik struct btrfs_key found_key; 16374358d963SJosef Bacik struct extent_buffer *leaf; 16384358d963SJosef Bacik int slot; 16394358d963SJosef Bacik 16404358d963SJosef Bacik ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 16414358d963SJosef Bacik if (ret < 0) 1642e3ba67a1SJohannes Thumshirn return ret; 16434358d963SJosef Bacik 16444358d963SJosef Bacik while (1) { 16454358d963SJosef Bacik slot = path->slots[0]; 16464358d963SJosef Bacik leaf = path->nodes[0]; 16474358d963SJosef Bacik if (slot >= btrfs_header_nritems(leaf)) { 16484358d963SJosef Bacik ret = btrfs_next_leaf(root, path); 16494358d963SJosef Bacik if (ret == 0) 16504358d963SJosef Bacik continue; 16514358d963SJosef Bacik if (ret < 0) 16524358d963SJosef Bacik goto out; 16534358d963SJosef Bacik break; 16544358d963SJosef Bacik } 16554358d963SJosef Bacik btrfs_item_key_to_cpu(leaf, &found_key, slot); 16564358d963SJosef Bacik 16574358d963SJosef Bacik if (found_key.objectid >= key->objectid && 16584358d963SJosef Bacik found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 1659e3ba67a1SJohannes Thumshirn ret = read_bg_from_eb(fs_info, &found_key, path); 1660e3ba67a1SJohannes Thumshirn break; 1661e3ba67a1SJohannes Thumshirn } 16624358d963SJosef Bacik 16634358d963SJosef Bacik path->slots[0]++; 16644358d963SJosef Bacik } 16654358d963SJosef Bacik out: 16664358d963SJosef Bacik return ret; 16674358d963SJosef Bacik } 16684358d963SJosef Bacik 16694358d963SJosef Bacik static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 16704358d963SJosef Bacik { 16714358d963SJosef Bacik u64 extra_flags = chunk_to_extended(flags) & 16724358d963SJosef Bacik BTRFS_EXTENDED_PROFILE_MASK; 16734358d963SJosef Bacik 16744358d963SJosef Bacik write_seqlock(&fs_info->profiles_lock); 16754358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA) 16764358d963SJosef Bacik fs_info->avail_data_alloc_bits |= extra_flags; 16774358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_METADATA) 16784358d963SJosef Bacik fs_info->avail_metadata_alloc_bits |= extra_flags; 16794358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 16804358d963SJosef Bacik fs_info->avail_system_alloc_bits |= extra_flags; 16814358d963SJosef Bacik write_sequnlock(&fs_info->profiles_lock); 16824358d963SJosef Bacik } 16834358d963SJosef Bacik 168496a14336SNikolay Borisov /** 16859ee9b979SNikolay Borisov * Map a physical disk address to a list of logical addresses 16869ee9b979SNikolay Borisov * 16879ee9b979SNikolay Borisov * @fs_info: the filesystem 168896a14336SNikolay Borisov * @chunk_start: logical address of block group 1689138082f3SNaohiro Aota * @bdev: physical device to resolve, can be NULL to indicate any device 169096a14336SNikolay Borisov * @physical: physical address to map to logical addresses 169196a14336SNikolay Borisov * @logical: return array of logical addresses which map to @physical 169296a14336SNikolay Borisov * @naddrs: length of @logical 169396a14336SNikolay Borisov * @stripe_len: size of IO stripe for the given block group 169496a14336SNikolay Borisov * 169596a14336SNikolay Borisov * Maps a particular @physical disk address to a list of @logical addresses. 169696a14336SNikolay Borisov * Used primarily to exclude those portions of a block group that contain super 169796a14336SNikolay Borisov * block copies. 
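 *
 * Return: 0 on success, in which case *logical points to a kcalloc()'d
 * array that the caller is expected to kfree(); -EIO if no chunk map can
 * be found for @chunk_start; -ENOMEM if the logical address array cannot
 * be allocated.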
169896a14336SNikolay Borisov */ 169996a14336SNikolay Borisov int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 1700138082f3SNaohiro Aota struct block_device *bdev, u64 physical, u64 **logical, 1701138082f3SNaohiro Aota int *naddrs, int *stripe_len) 170296a14336SNikolay Borisov { 170396a14336SNikolay Borisov struct extent_map *em; 170496a14336SNikolay Borisov struct map_lookup *map; 170596a14336SNikolay Borisov u64 *buf; 170696a14336SNikolay Borisov u64 bytenr; 17071776ad17SNikolay Borisov u64 data_stripe_length; 17081776ad17SNikolay Borisov u64 io_stripe_size; 17091776ad17SNikolay Borisov int i, nr = 0; 17101776ad17SNikolay Borisov int ret = 0; 171196a14336SNikolay Borisov 171296a14336SNikolay Borisov em = btrfs_get_chunk_map(fs_info, chunk_start, 1); 171396a14336SNikolay Borisov if (IS_ERR(em)) 171496a14336SNikolay Borisov return -EIO; 171596a14336SNikolay Borisov 171696a14336SNikolay Borisov map = em->map_lookup; 17179e22b925SNikolay Borisov data_stripe_length = em->orig_block_len; 17181776ad17SNikolay Borisov io_stripe_size = map->stripe_len; 1719138082f3SNaohiro Aota chunk_start = em->start; 172096a14336SNikolay Borisov 17219e22b925SNikolay Borisov /* For RAID5/6 adjust to a full IO stripe length */ 17229e22b925SNikolay Borisov if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 17231776ad17SNikolay Borisov io_stripe_size = map->stripe_len * nr_data_stripes(map); 172496a14336SNikolay Borisov 172596a14336SNikolay Borisov buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 17261776ad17SNikolay Borisov if (!buf) { 17271776ad17SNikolay Borisov ret = -ENOMEM; 17281776ad17SNikolay Borisov goto out; 17291776ad17SNikolay Borisov } 173096a14336SNikolay Borisov 173196a14336SNikolay Borisov for (i = 0; i < map->num_stripes; i++) { 17321776ad17SNikolay Borisov bool already_inserted = false; 17331776ad17SNikolay Borisov u64 stripe_nr; 1734138082f3SNaohiro Aota u64 offset; 17351776ad17SNikolay Borisov int j; 17361776ad17SNikolay Borisov 17371776ad17SNikolay Borisov if (!in_range(physical, map->stripes[i].physical, 17381776ad17SNikolay Borisov data_stripe_length)) 173996a14336SNikolay Borisov continue; 174096a14336SNikolay Borisov 1741138082f3SNaohiro Aota if (bdev && map->stripes[i].dev->bdev != bdev) 1742138082f3SNaohiro Aota continue; 1743138082f3SNaohiro Aota 174496a14336SNikolay Borisov stripe_nr = physical - map->stripes[i].physical; 1745138082f3SNaohiro Aota stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset); 174696a14336SNikolay Borisov 174796a14336SNikolay Borisov if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 174896a14336SNikolay Borisov stripe_nr = stripe_nr * map->num_stripes + i; 174996a14336SNikolay Borisov stripe_nr = div_u64(stripe_nr, map->sub_stripes); 175096a14336SNikolay Borisov } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 175196a14336SNikolay Borisov stripe_nr = stripe_nr * map->num_stripes + i; 175296a14336SNikolay Borisov } 175396a14336SNikolay Borisov /* 175496a14336SNikolay Borisov * The remaining case would be for RAID56, multiply by 175596a14336SNikolay Borisov * nr_data_stripes(). 
Alternatively, just use rmap_len below 175696a14336SNikolay Borisov * instead of map->stripe_len 175796a14336SNikolay Borisov */ 175896a14336SNikolay Borisov 1759138082f3SNaohiro Aota bytenr = chunk_start + stripe_nr * io_stripe_size + offset; 17601776ad17SNikolay Borisov 17611776ad17SNikolay Borisov /* Ensure we don't add duplicate addresses */ 176296a14336SNikolay Borisov for (j = 0; j < nr; j++) { 17631776ad17SNikolay Borisov if (buf[j] == bytenr) { 17641776ad17SNikolay Borisov already_inserted = true; 176596a14336SNikolay Borisov break; 176696a14336SNikolay Borisov } 176796a14336SNikolay Borisov } 17681776ad17SNikolay Borisov 17691776ad17SNikolay Borisov if (!already_inserted) 17701776ad17SNikolay Borisov buf[nr++] = bytenr; 177196a14336SNikolay Borisov } 177296a14336SNikolay Borisov 177396a14336SNikolay Borisov *logical = buf; 177496a14336SNikolay Borisov *naddrs = nr; 17751776ad17SNikolay Borisov *stripe_len = io_stripe_size; 17761776ad17SNikolay Borisov out: 177796a14336SNikolay Borisov free_extent_map(em); 17781776ad17SNikolay Borisov return ret; 177996a14336SNikolay Borisov } 178096a14336SNikolay Borisov 178132da5386SDavid Sterba static int exclude_super_stripes(struct btrfs_block_group *cache) 17824358d963SJosef Bacik { 17834358d963SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 178412659251SNaohiro Aota const bool zoned = btrfs_is_zoned(fs_info); 17854358d963SJosef Bacik u64 bytenr; 17864358d963SJosef Bacik u64 *logical; 17874358d963SJosef Bacik int stripe_len; 17884358d963SJosef Bacik int i, nr, ret; 17894358d963SJosef Bacik 1790b3470b5dSDavid Sterba if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 1791b3470b5dSDavid Sterba stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 17924358d963SJosef Bacik cache->bytes_super += stripe_len; 1793b3470b5dSDavid Sterba ret = btrfs_add_excluded_extent(fs_info, cache->start, 17944358d963SJosef Bacik stripe_len); 17954358d963SJosef Bacik if (ret) 17964358d963SJosef Bacik return ret; 17974358d963SJosef Bacik } 17984358d963SJosef Bacik 17994358d963SJosef Bacik for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 18004358d963SJosef Bacik bytenr = btrfs_sb_offset(i); 1801138082f3SNaohiro Aota ret = btrfs_rmap_block(fs_info, cache->start, NULL, 18024358d963SJosef Bacik bytenr, &logical, &nr, &stripe_len); 18034358d963SJosef Bacik if (ret) 18044358d963SJosef Bacik return ret; 18054358d963SJosef Bacik 180612659251SNaohiro Aota /* Shouldn't have super stripes in sequential zones */ 180712659251SNaohiro Aota if (zoned && nr) { 180812659251SNaohiro Aota btrfs_err(fs_info, 180912659251SNaohiro Aota "zoned: block group %llu must not contain super block", 181012659251SNaohiro Aota cache->start); 181112659251SNaohiro Aota return -EUCLEAN; 181212659251SNaohiro Aota } 181312659251SNaohiro Aota 18144358d963SJosef Bacik while (nr--) { 181596f9b0f2SNikolay Borisov u64 len = min_t(u64, stripe_len, 181696f9b0f2SNikolay Borisov cache->start + cache->length - logical[nr]); 18174358d963SJosef Bacik 18184358d963SJosef Bacik cache->bytes_super += len; 181996f9b0f2SNikolay Borisov ret = btrfs_add_excluded_extent(fs_info, logical[nr], 182096f9b0f2SNikolay Borisov len); 18214358d963SJosef Bacik if (ret) { 18224358d963SJosef Bacik kfree(logical); 18234358d963SJosef Bacik return ret; 18244358d963SJosef Bacik } 18254358d963SJosef Bacik } 18264358d963SJosef Bacik 18274358d963SJosef Bacik kfree(logical); 18284358d963SJosef Bacik } 18294358d963SJosef Bacik return 0; 18304358d963SJosef Bacik } 18314358d963SJosef Bacik 183232da5386SDavid Sterba static void 
link_block_group(struct btrfs_block_group *cache) 18334358d963SJosef Bacik { 18344358d963SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 18354358d963SJosef Bacik int index = btrfs_bg_flags_to_raid_index(cache->flags); 18364358d963SJosef Bacik 18374358d963SJosef Bacik down_write(&space_info->groups_sem); 18384358d963SJosef Bacik list_add_tail(&cache->list, &space_info->block_groups[index]); 18394358d963SJosef Bacik up_write(&space_info->groups_sem); 18404358d963SJosef Bacik } 18414358d963SJosef Bacik 184232da5386SDavid Sterba static struct btrfs_block_group *btrfs_create_block_group_cache( 18439afc6649SQu Wenruo struct btrfs_fs_info *fs_info, u64 start) 18444358d963SJosef Bacik { 184532da5386SDavid Sterba struct btrfs_block_group *cache; 18464358d963SJosef Bacik 18474358d963SJosef Bacik cache = kzalloc(sizeof(*cache), GFP_NOFS); 18484358d963SJosef Bacik if (!cache) 18494358d963SJosef Bacik return NULL; 18504358d963SJosef Bacik 18514358d963SJosef Bacik cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 18524358d963SJosef Bacik GFP_NOFS); 18534358d963SJosef Bacik if (!cache->free_space_ctl) { 18544358d963SJosef Bacik kfree(cache); 18554358d963SJosef Bacik return NULL; 18564358d963SJosef Bacik } 18574358d963SJosef Bacik 1858b3470b5dSDavid Sterba cache->start = start; 18594358d963SJosef Bacik 18604358d963SJosef Bacik cache->fs_info = fs_info; 18614358d963SJosef Bacik cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 18624358d963SJosef Bacik 18636e80d4f8SDennis Zhou cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 18646e80d4f8SDennis Zhou 186548aaeebeSJosef Bacik refcount_set(&cache->refs, 1); 18664358d963SJosef Bacik spin_lock_init(&cache->lock); 18674358d963SJosef Bacik init_rwsem(&cache->data_rwsem); 18684358d963SJosef Bacik INIT_LIST_HEAD(&cache->list); 18694358d963SJosef Bacik INIT_LIST_HEAD(&cache->cluster_list); 18704358d963SJosef Bacik INIT_LIST_HEAD(&cache->bg_list); 18714358d963SJosef Bacik INIT_LIST_HEAD(&cache->ro_list); 1872b0643e59SDennis Zhou INIT_LIST_HEAD(&cache->discard_list); 18734358d963SJosef Bacik INIT_LIST_HEAD(&cache->dirty_list); 18744358d963SJosef Bacik INIT_LIST_HEAD(&cache->io_list); 1875cd79909bSJosef Bacik btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 18766b7304afSFilipe Manana atomic_set(&cache->frozen, 0); 18774358d963SJosef Bacik mutex_init(&cache->free_space_lock); 18784358d963SJosef Bacik btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); 18794358d963SJosef Bacik 18804358d963SJosef Bacik return cache; 18814358d963SJosef Bacik } 18824358d963SJosef Bacik 18834358d963SJosef Bacik /* 18844358d963SJosef Bacik * Iterate all chunks and verify that each of them has the corresponding block 18854358d963SJosef Bacik * group 18864358d963SJosef Bacik */ 18874358d963SJosef Bacik static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 18884358d963SJosef Bacik { 18894358d963SJosef Bacik struct extent_map_tree *map_tree = &fs_info->mapping_tree; 18904358d963SJosef Bacik struct extent_map *em; 189132da5386SDavid Sterba struct btrfs_block_group *bg; 18924358d963SJosef Bacik u64 start = 0; 18934358d963SJosef Bacik int ret = 0; 18944358d963SJosef Bacik 18954358d963SJosef Bacik while (1) { 18964358d963SJosef Bacik read_lock(&map_tree->lock); 18974358d963SJosef Bacik /* 18984358d963SJosef Bacik * lookup_extent_mapping will return the first extent map 18994358d963SJosef Bacik * intersecting the range, so setting @len to 1 is enough to 19004358d963SJosef Bacik * get the first chunk. 
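 *
 * The loop below then advances start past each extent map it finds
 * (start = em->start + em->len), so every chunk is visited exactly once
 * in logical address order.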
19014358d963SJosef Bacik */ 19024358d963SJosef Bacik em = lookup_extent_mapping(map_tree, start, 1); 19034358d963SJosef Bacik read_unlock(&map_tree->lock); 19044358d963SJosef Bacik if (!em) 19054358d963SJosef Bacik break; 19064358d963SJosef Bacik 19074358d963SJosef Bacik bg = btrfs_lookup_block_group(fs_info, em->start); 19084358d963SJosef Bacik if (!bg) { 19094358d963SJosef Bacik btrfs_err(fs_info, 19104358d963SJosef Bacik "chunk start=%llu len=%llu doesn't have corresponding block group", 19114358d963SJosef Bacik em->start, em->len); 19124358d963SJosef Bacik ret = -EUCLEAN; 19134358d963SJosef Bacik free_extent_map(em); 19144358d963SJosef Bacik break; 19154358d963SJosef Bacik } 1916b3470b5dSDavid Sterba if (bg->start != em->start || bg->length != em->len || 19174358d963SJosef Bacik (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 19184358d963SJosef Bacik (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 19194358d963SJosef Bacik btrfs_err(fs_info, 19204358d963SJosef Bacik "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 19214358d963SJosef Bacik em->start, em->len, 19224358d963SJosef Bacik em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 1923b3470b5dSDavid Sterba bg->start, bg->length, 19244358d963SJosef Bacik bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 19254358d963SJosef Bacik ret = -EUCLEAN; 19264358d963SJosef Bacik free_extent_map(em); 19274358d963SJosef Bacik btrfs_put_block_group(bg); 19284358d963SJosef Bacik break; 19294358d963SJosef Bacik } 19304358d963SJosef Bacik start = em->start + em->len; 19314358d963SJosef Bacik free_extent_map(em); 19324358d963SJosef Bacik btrfs_put_block_group(bg); 19334358d963SJosef Bacik } 19344358d963SJosef Bacik return ret; 19354358d963SJosef Bacik } 19364358d963SJosef Bacik 1937ffb9e0f0SQu Wenruo static int read_one_block_group(struct btrfs_fs_info *info, 19384afd2fe8SJohannes Thumshirn struct btrfs_block_group_item *bgi, 1939d49a2ddbSQu Wenruo const struct btrfs_key *key, 1940ffb9e0f0SQu Wenruo int need_clear) 1941ffb9e0f0SQu Wenruo { 194232da5386SDavid Sterba struct btrfs_block_group *cache; 1943ffb9e0f0SQu Wenruo struct btrfs_space_info *space_info; 1944ffb9e0f0SQu Wenruo const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 1945ffb9e0f0SQu Wenruo int ret; 1946ffb9e0f0SQu Wenruo 1947d49a2ddbSQu Wenruo ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 1948ffb9e0f0SQu Wenruo 19499afc6649SQu Wenruo cache = btrfs_create_block_group_cache(info, key->objectid); 1950ffb9e0f0SQu Wenruo if (!cache) 1951ffb9e0f0SQu Wenruo return -ENOMEM; 1952ffb9e0f0SQu Wenruo 19534afd2fe8SJohannes Thumshirn cache->length = key->offset; 19544afd2fe8SJohannes Thumshirn cache->used = btrfs_stack_block_group_used(bgi); 19554afd2fe8SJohannes Thumshirn cache->flags = btrfs_stack_block_group_flags(bgi); 19569afc6649SQu Wenruo 1957e3e39c72SMarcos Paulo de Souza set_free_space_tree_thresholds(cache); 1958e3e39c72SMarcos Paulo de Souza 1959ffb9e0f0SQu Wenruo if (need_clear) { 1960ffb9e0f0SQu Wenruo /* 1961ffb9e0f0SQu Wenruo * When we mount with old space cache, we need to 1962ffb9e0f0SQu Wenruo * set BTRFS_DC_CLEAR and set dirty flag. 1963ffb9e0f0SQu Wenruo * 1964ffb9e0f0SQu Wenruo * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 1965ffb9e0f0SQu Wenruo * truncate the old free space cache inode and 1966ffb9e0f0SQu Wenruo * setup a new one. 1967ffb9e0f0SQu Wenruo * b) Setting 'dirty flag' makes sure that we flush 1968ffb9e0f0SQu Wenruo * the new space cache info onto disk. 
1969ffb9e0f0SQu Wenruo */ 1970ffb9e0f0SQu Wenruo if (btrfs_test_opt(info, SPACE_CACHE)) 1971ffb9e0f0SQu Wenruo cache->disk_cache_state = BTRFS_DC_CLEAR; 1972ffb9e0f0SQu Wenruo } 1973ffb9e0f0SQu Wenruo if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 1974ffb9e0f0SQu Wenruo (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 1975ffb9e0f0SQu Wenruo btrfs_err(info, 1976ffb9e0f0SQu Wenruo "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 1977ffb9e0f0SQu Wenruo cache->start); 1978ffb9e0f0SQu Wenruo ret = -EINVAL; 1979ffb9e0f0SQu Wenruo goto error; 1980ffb9e0f0SQu Wenruo } 1981ffb9e0f0SQu Wenruo 1982a94794d5SNaohiro Aota ret = btrfs_load_block_group_zone_info(cache, false); 198308e11a3dSNaohiro Aota if (ret) { 198408e11a3dSNaohiro Aota btrfs_err(info, "zoned: failed to load zone info of bg %llu", 198508e11a3dSNaohiro Aota cache->start); 198608e11a3dSNaohiro Aota goto error; 198708e11a3dSNaohiro Aota } 198808e11a3dSNaohiro Aota 1989ffb9e0f0SQu Wenruo /* 1990ffb9e0f0SQu Wenruo * We need to exclude the super stripes now so that the space info has 1991ffb9e0f0SQu Wenruo * super bytes accounted for, otherwise we'll think we have more space 1992ffb9e0f0SQu Wenruo * than we actually do. 1993ffb9e0f0SQu Wenruo */ 1994ffb9e0f0SQu Wenruo ret = exclude_super_stripes(cache); 1995ffb9e0f0SQu Wenruo if (ret) { 1996ffb9e0f0SQu Wenruo /* We may have excluded something, so call this just in case. */ 1997ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 1998ffb9e0f0SQu Wenruo goto error; 1999ffb9e0f0SQu Wenruo } 2000ffb9e0f0SQu Wenruo 2001ffb9e0f0SQu Wenruo /* 2002169e0da9SNaohiro Aota * For zoned filesystem, space after the allocation offset is the only 2003169e0da9SNaohiro Aota * free space for a block group. So, we don't need any caching work. 2004169e0da9SNaohiro Aota * btrfs_calc_zone_unusable() will set the amount of free space and 2005169e0da9SNaohiro Aota * zone_unusable space. 2006169e0da9SNaohiro Aota * 2007169e0da9SNaohiro Aota * For regular filesystem, check for two cases, either we are full, and 2008169e0da9SNaohiro Aota * therefore don't need to bother with the caching work since we won't 2009169e0da9SNaohiro Aota * find any space, or we are empty, and we can just add all the space 2010169e0da9SNaohiro Aota * in and be done with it. This saves us _a_lot_ of time, particularly 2011169e0da9SNaohiro Aota * in the full case. 
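 *
 * These cases map onto the three branches below: zoned filesystems,
 * fully used groups (cache->length == cache->used) and completely empty
 * ones (cache->used == 0).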
2012ffb9e0f0SQu Wenruo */ 2013169e0da9SNaohiro Aota if (btrfs_is_zoned(info)) { 2014169e0da9SNaohiro Aota btrfs_calc_zone_unusable(cache); 2015169e0da9SNaohiro Aota } else if (cache->length == cache->used) { 2016ffb9e0f0SQu Wenruo cache->last_byte_to_unpin = (u64)-1; 2017ffb9e0f0SQu Wenruo cache->cached = BTRFS_CACHE_FINISHED; 2018ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 2019ffb9e0f0SQu Wenruo } else if (cache->used == 0) { 2020ffb9e0f0SQu Wenruo cache->last_byte_to_unpin = (u64)-1; 2021ffb9e0f0SQu Wenruo cache->cached = BTRFS_CACHE_FINISHED; 20229afc6649SQu Wenruo add_new_free_space(cache, cache->start, 20239afc6649SQu Wenruo cache->start + cache->length); 2024ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 2025ffb9e0f0SQu Wenruo } 2026ffb9e0f0SQu Wenruo 2027ffb9e0f0SQu Wenruo ret = btrfs_add_block_group_cache(info, cache); 2028ffb9e0f0SQu Wenruo if (ret) { 2029ffb9e0f0SQu Wenruo btrfs_remove_free_space_cache(cache); 2030ffb9e0f0SQu Wenruo goto error; 2031ffb9e0f0SQu Wenruo } 2032ffb9e0f0SQu Wenruo trace_btrfs_add_block_group(info, cache, 0); 20339afc6649SQu Wenruo btrfs_update_space_info(info, cache->flags, cache->length, 2034169e0da9SNaohiro Aota cache->used, cache->bytes_super, 2035169e0da9SNaohiro Aota cache->zone_unusable, &space_info); 2036ffb9e0f0SQu Wenruo 2037ffb9e0f0SQu Wenruo cache->space_info = space_info; 2038ffb9e0f0SQu Wenruo 2039ffb9e0f0SQu Wenruo link_block_group(cache); 2040ffb9e0f0SQu Wenruo 2041ffb9e0f0SQu Wenruo set_avail_alloc_bits(info, cache->flags); 2042ffb9e0f0SQu Wenruo if (btrfs_chunk_readonly(info, cache->start)) { 2043ffb9e0f0SQu Wenruo inc_block_group_ro(cache, 1); 2044ffb9e0f0SQu Wenruo } else if (cache->used == 0) { 2045ffb9e0f0SQu Wenruo ASSERT(list_empty(&cache->bg_list)); 20466e80d4f8SDennis Zhou if (btrfs_test_opt(info, DISCARD_ASYNC)) 20476e80d4f8SDennis Zhou btrfs_discard_queue_work(&info->discard_ctl, cache); 20486e80d4f8SDennis Zhou else 2049ffb9e0f0SQu Wenruo btrfs_mark_bg_unused(cache); 2050ffb9e0f0SQu Wenruo } 2051ffb9e0f0SQu Wenruo return 0; 2052ffb9e0f0SQu Wenruo error: 2053ffb9e0f0SQu Wenruo btrfs_put_block_group(cache); 2054ffb9e0f0SQu Wenruo return ret; 2055ffb9e0f0SQu Wenruo } 2056ffb9e0f0SQu Wenruo 205742437a63SJosef Bacik static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 205842437a63SJosef Bacik { 205942437a63SJosef Bacik struct extent_map_tree *em_tree = &fs_info->mapping_tree; 206042437a63SJosef Bacik struct btrfs_space_info *space_info; 206142437a63SJosef Bacik struct rb_node *node; 206242437a63SJosef Bacik int ret = 0; 206342437a63SJosef Bacik 206442437a63SJosef Bacik for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { 206542437a63SJosef Bacik struct extent_map *em; 206642437a63SJosef Bacik struct map_lookup *map; 206742437a63SJosef Bacik struct btrfs_block_group *bg; 206842437a63SJosef Bacik 206942437a63SJosef Bacik em = rb_entry(node, struct extent_map, rb_node); 207042437a63SJosef Bacik map = em->map_lookup; 207142437a63SJosef Bacik bg = btrfs_create_block_group_cache(fs_info, em->start); 207242437a63SJosef Bacik if (!bg) { 207342437a63SJosef Bacik ret = -ENOMEM; 207442437a63SJosef Bacik break; 207542437a63SJosef Bacik } 207642437a63SJosef Bacik 207742437a63SJosef Bacik /* Fill dummy cache as FULL */ 207842437a63SJosef Bacik bg->length = em->len; 207942437a63SJosef Bacik bg->flags = map->type; 208042437a63SJosef Bacik bg->last_byte_to_unpin = (u64)-1; 208142437a63SJosef Bacik bg->cached = BTRFS_CACHE_FINISHED; 208242437a63SJosef Bacik bg->used = em->len; 208342437a63SJosef Bacik 
bg->flags = map->type; 208442437a63SJosef Bacik ret = btrfs_add_block_group_cache(fs_info, bg); 208542437a63SJosef Bacik if (ret) { 208642437a63SJosef Bacik btrfs_remove_free_space_cache(bg); 208742437a63SJosef Bacik btrfs_put_block_group(bg); 208842437a63SJosef Bacik break; 208942437a63SJosef Bacik } 209042437a63SJosef Bacik btrfs_update_space_info(fs_info, bg->flags, em->len, em->len, 2091169e0da9SNaohiro Aota 0, 0, &space_info); 209242437a63SJosef Bacik bg->space_info = space_info; 209342437a63SJosef Bacik link_block_group(bg); 209442437a63SJosef Bacik 209542437a63SJosef Bacik set_avail_alloc_bits(fs_info, bg->flags); 209642437a63SJosef Bacik } 209742437a63SJosef Bacik if (!ret) 209842437a63SJosef Bacik btrfs_init_global_block_rsv(fs_info); 209942437a63SJosef Bacik return ret; 210042437a63SJosef Bacik } 210142437a63SJosef Bacik 21024358d963SJosef Bacik int btrfs_read_block_groups(struct btrfs_fs_info *info) 21034358d963SJosef Bacik { 21044358d963SJosef Bacik struct btrfs_path *path; 21054358d963SJosef Bacik int ret; 210632da5386SDavid Sterba struct btrfs_block_group *cache; 21074358d963SJosef Bacik struct btrfs_space_info *space_info; 21084358d963SJosef Bacik struct btrfs_key key; 21094358d963SJosef Bacik int need_clear = 0; 21104358d963SJosef Bacik u64 cache_gen; 21114358d963SJosef Bacik 211242437a63SJosef Bacik if (!info->extent_root) 211342437a63SJosef Bacik return fill_dummy_bgs(info); 211442437a63SJosef Bacik 21154358d963SJosef Bacik key.objectid = 0; 21164358d963SJosef Bacik key.offset = 0; 21174358d963SJosef Bacik key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 21184358d963SJosef Bacik path = btrfs_alloc_path(); 21194358d963SJosef Bacik if (!path) 21204358d963SJosef Bacik return -ENOMEM; 21214358d963SJosef Bacik 21224358d963SJosef Bacik cache_gen = btrfs_super_cache_generation(info->super_copy); 21234358d963SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 21244358d963SJosef Bacik btrfs_super_generation(info->super_copy) != cache_gen) 21254358d963SJosef Bacik need_clear = 1; 21264358d963SJosef Bacik if (btrfs_test_opt(info, CLEAR_CACHE)) 21274358d963SJosef Bacik need_clear = 1; 21284358d963SJosef Bacik 21294358d963SJosef Bacik while (1) { 21304afd2fe8SJohannes Thumshirn struct btrfs_block_group_item bgi; 21314afd2fe8SJohannes Thumshirn struct extent_buffer *leaf; 21324afd2fe8SJohannes Thumshirn int slot; 21334afd2fe8SJohannes Thumshirn 21344358d963SJosef Bacik ret = find_first_block_group(info, path, &key); 21354358d963SJosef Bacik if (ret > 0) 21364358d963SJosef Bacik break; 21374358d963SJosef Bacik if (ret != 0) 21384358d963SJosef Bacik goto error; 21394358d963SJosef Bacik 21404afd2fe8SJohannes Thumshirn leaf = path->nodes[0]; 21414afd2fe8SJohannes Thumshirn slot = path->slots[0]; 21424afd2fe8SJohannes Thumshirn 21434afd2fe8SJohannes Thumshirn read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 21444afd2fe8SJohannes Thumshirn sizeof(bgi)); 21454afd2fe8SJohannes Thumshirn 21464afd2fe8SJohannes Thumshirn btrfs_item_key_to_cpu(leaf, &key, slot); 21474afd2fe8SJohannes Thumshirn btrfs_release_path(path); 21484afd2fe8SJohannes Thumshirn ret = read_one_block_group(info, &bgi, &key, need_clear); 2149ffb9e0f0SQu Wenruo if (ret < 0) 21504358d963SJosef Bacik goto error; 2151ffb9e0f0SQu Wenruo key.objectid += key.offset; 2152ffb9e0f0SQu Wenruo key.offset = 0; 21534358d963SJosef Bacik } 21547837fa88SJosef Bacik btrfs_release_path(path); 21554358d963SJosef Bacik 215672804905SJosef Bacik list_for_each_entry(space_info, &info->space_info, list) { 215749ea112dSJosef Bacik int i; 
215849ea112dSJosef Bacik 215949ea112dSJosef Bacik for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 216049ea112dSJosef Bacik if (list_empty(&space_info->block_groups[i])) 216149ea112dSJosef Bacik continue; 216249ea112dSJosef Bacik cache = list_first_entry(&space_info->block_groups[i], 216349ea112dSJosef Bacik struct btrfs_block_group, 216449ea112dSJosef Bacik list); 216549ea112dSJosef Bacik btrfs_sysfs_add_block_group_type(cache); 216649ea112dSJosef Bacik } 216749ea112dSJosef Bacik 21684358d963SJosef Bacik if (!(btrfs_get_alloc_profile(info, space_info->flags) & 21694358d963SJosef Bacik (BTRFS_BLOCK_GROUP_RAID10 | 21704358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID1_MASK | 21714358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID56_MASK | 21724358d963SJosef Bacik BTRFS_BLOCK_GROUP_DUP))) 21734358d963SJosef Bacik continue; 21744358d963SJosef Bacik /* 21754358d963SJosef Bacik * Avoid allocating from un-mirrored block group if there are 21764358d963SJosef Bacik * mirrored block groups. 21774358d963SJosef Bacik */ 21784358d963SJosef Bacik list_for_each_entry(cache, 21794358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_RAID0], 21804358d963SJosef Bacik list) 2181e11c0406SJosef Bacik inc_block_group_ro(cache, 1); 21824358d963SJosef Bacik list_for_each_entry(cache, 21834358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_SINGLE], 21844358d963SJosef Bacik list) 2185e11c0406SJosef Bacik inc_block_group_ro(cache, 1); 21864358d963SJosef Bacik } 21874358d963SJosef Bacik 21884358d963SJosef Bacik btrfs_init_global_block_rsv(info); 21894358d963SJosef Bacik ret = check_chunk_block_group_mappings(info); 21904358d963SJosef Bacik error: 21914358d963SJosef Bacik btrfs_free_path(path); 21924358d963SJosef Bacik return ret; 21934358d963SJosef Bacik } 21944358d963SJosef Bacik 219597f4728aSQu Wenruo static int insert_block_group_item(struct btrfs_trans_handle *trans, 219697f4728aSQu Wenruo struct btrfs_block_group *block_group) 219797f4728aSQu Wenruo { 219897f4728aSQu Wenruo struct btrfs_fs_info *fs_info = trans->fs_info; 219997f4728aSQu Wenruo struct btrfs_block_group_item bgi; 220097f4728aSQu Wenruo struct btrfs_root *root; 220197f4728aSQu Wenruo struct btrfs_key key; 220297f4728aSQu Wenruo 220397f4728aSQu Wenruo spin_lock(&block_group->lock); 220497f4728aSQu Wenruo btrfs_set_stack_block_group_used(&bgi, block_group->used); 220597f4728aSQu Wenruo btrfs_set_stack_block_group_chunk_objectid(&bgi, 220697f4728aSQu Wenruo BTRFS_FIRST_CHUNK_TREE_OBJECTID); 220797f4728aSQu Wenruo btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 220897f4728aSQu Wenruo key.objectid = block_group->start; 220997f4728aSQu Wenruo key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 221097f4728aSQu Wenruo key.offset = block_group->length; 221197f4728aSQu Wenruo spin_unlock(&block_group->lock); 221297f4728aSQu Wenruo 221397f4728aSQu Wenruo root = fs_info->extent_root; 221497f4728aSQu Wenruo return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 221597f4728aSQu Wenruo } 221697f4728aSQu Wenruo 22174358d963SJosef Bacik void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 22184358d963SJosef Bacik { 22194358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 222032da5386SDavid Sterba struct btrfs_block_group *block_group; 22214358d963SJosef Bacik int ret = 0; 22224358d963SJosef Bacik 22234358d963SJosef Bacik if (!trans->can_flush_pending_bgs) 22244358d963SJosef Bacik return; 22254358d963SJosef Bacik 22264358d963SJosef Bacik while (!list_empty(&trans->new_bgs)) { 222749ea112dSJosef Bacik int index; 222849ea112dSJosef 
Bacik 22294358d963SJosef Bacik block_group = list_first_entry(&trans->new_bgs, 223032da5386SDavid Sterba struct btrfs_block_group, 22314358d963SJosef Bacik bg_list); 22324358d963SJosef Bacik if (ret) 22334358d963SJosef Bacik goto next; 22344358d963SJosef Bacik 223549ea112dSJosef Bacik index = btrfs_bg_flags_to_raid_index(block_group->flags); 223649ea112dSJosef Bacik 223797f4728aSQu Wenruo ret = insert_block_group_item(trans, block_group); 22384358d963SJosef Bacik if (ret) 22394358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 224097f4728aSQu Wenruo ret = btrfs_finish_chunk_alloc(trans, block_group->start, 224197f4728aSQu Wenruo block_group->length); 22424358d963SJosef Bacik if (ret) 22434358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 22444358d963SJosef Bacik add_block_group_free_space(trans, block_group); 224549ea112dSJosef Bacik 224649ea112dSJosef Bacik /* 224749ea112dSJosef Bacik * If we restriped during balance, we may have added a new raid 224849ea112dSJosef Bacik * type, so now add the sysfs entries when it is safe to do so. 224949ea112dSJosef Bacik * We don't have to worry about locking here as it's handled in 225049ea112dSJosef Bacik * btrfs_sysfs_add_block_group_type. 225149ea112dSJosef Bacik */ 225249ea112dSJosef Bacik if (block_group->space_info->block_group_kobjs[index] == NULL) 225349ea112dSJosef Bacik btrfs_sysfs_add_block_group_type(block_group); 225449ea112dSJosef Bacik 22554358d963SJosef Bacik /* Already aborted the transaction if it failed. */ 22564358d963SJosef Bacik next: 22574358d963SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 22584358d963SJosef Bacik list_del_init(&block_group->bg_list); 22594358d963SJosef Bacik } 22604358d963SJosef Bacik btrfs_trans_release_chunk_metadata(trans); 22614358d963SJosef Bacik } 22624358d963SJosef Bacik 22634358d963SJosef Bacik int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, 22644358d963SJosef Bacik u64 type, u64 chunk_offset, u64 size) 22654358d963SJosef Bacik { 22664358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 226732da5386SDavid Sterba struct btrfs_block_group *cache; 22684358d963SJosef Bacik int ret; 22694358d963SJosef Bacik 22704358d963SJosef Bacik btrfs_set_log_full_commit(trans); 22714358d963SJosef Bacik 22729afc6649SQu Wenruo cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 22734358d963SJosef Bacik if (!cache) 22744358d963SJosef Bacik return -ENOMEM; 22754358d963SJosef Bacik 22769afc6649SQu Wenruo cache->length = size; 2277e3e39c72SMarcos Paulo de Souza set_free_space_tree_thresholds(cache); 2278bf38be65SDavid Sterba cache->used = bytes_used; 22794358d963SJosef Bacik cache->flags = type; 22804358d963SJosef Bacik cache->last_byte_to_unpin = (u64)-1; 22814358d963SJosef Bacik cache->cached = BTRFS_CACHE_FINISHED; 2282997e3e2eSBoris Burkov if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 22834358d963SJosef Bacik cache->needs_free_space = 1; 228408e11a3dSNaohiro Aota 2285a94794d5SNaohiro Aota ret = btrfs_load_block_group_zone_info(cache, true); 228608e11a3dSNaohiro Aota if (ret) { 228708e11a3dSNaohiro Aota btrfs_put_block_group(cache); 228808e11a3dSNaohiro Aota return ret; 228908e11a3dSNaohiro Aota } 229008e11a3dSNaohiro Aota 22914358d963SJosef Bacik ret = exclude_super_stripes(cache); 22924358d963SJosef Bacik if (ret) { 22934358d963SJosef Bacik /* We may have excluded something, so call this just in case */ 22944358d963SJosef Bacik btrfs_free_excluded_extents(cache); 22954358d963SJosef Bacik btrfs_put_block_group(cache); 22964358d963SJosef 
Bacik return ret; 22974358d963SJosef Bacik } 22984358d963SJosef Bacik 22994358d963SJosef Bacik add_new_free_space(cache, chunk_offset, chunk_offset + size); 23004358d963SJosef Bacik 23014358d963SJosef Bacik btrfs_free_excluded_extents(cache); 23024358d963SJosef Bacik 23034358d963SJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 23044358d963SJosef Bacik if (btrfs_should_fragment_free_space(cache)) { 23054358d963SJosef Bacik u64 new_bytes_used = size - bytes_used; 23064358d963SJosef Bacik 23074358d963SJosef Bacik bytes_used += new_bytes_used >> 1; 2308e11c0406SJosef Bacik fragment_free_space(cache); 23094358d963SJosef Bacik } 23104358d963SJosef Bacik #endif 23114358d963SJosef Bacik /* 23124358d963SJosef Bacik * Ensure the corresponding space_info object is created and 23134358d963SJosef Bacik * assigned to our block group. We want our bg to be added to the rbtree 23144358d963SJosef Bacik * with its ->space_info set. 23154358d963SJosef Bacik */ 23164358d963SJosef Bacik cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 23174358d963SJosef Bacik ASSERT(cache->space_info); 23184358d963SJosef Bacik 23194358d963SJosef Bacik ret = btrfs_add_block_group_cache(fs_info, cache); 23204358d963SJosef Bacik if (ret) { 23214358d963SJosef Bacik btrfs_remove_free_space_cache(cache); 23224358d963SJosef Bacik btrfs_put_block_group(cache); 23234358d963SJosef Bacik return ret; 23244358d963SJosef Bacik } 23254358d963SJosef Bacik 23264358d963SJosef Bacik /* 23274358d963SJosef Bacik * Now that our block group has its ->space_info set and is inserted in 23284358d963SJosef Bacik * the rbtree, update the space info's counters. 23294358d963SJosef Bacik */ 23304358d963SJosef Bacik trace_btrfs_add_block_group(fs_info, cache, 1); 23314358d963SJosef Bacik btrfs_update_space_info(fs_info, cache->flags, size, bytes_used, 2332169e0da9SNaohiro Aota cache->bytes_super, 0, &cache->space_info); 23334358d963SJosef Bacik btrfs_update_global_block_rsv(fs_info); 23344358d963SJosef Bacik 23354358d963SJosef Bacik link_block_group(cache); 23364358d963SJosef Bacik 23374358d963SJosef Bacik list_add_tail(&cache->bg_list, &trans->new_bgs); 23384358d963SJosef Bacik trans->delayed_ref_updates++; 23394358d963SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 23404358d963SJosef Bacik 23414358d963SJosef Bacik set_avail_alloc_bits(fs_info, type); 23424358d963SJosef Bacik return 0; 23434358d963SJosef Bacik } 234426ce2095SJosef Bacik 2345b12de528SQu Wenruo /* 2346b12de528SQu Wenruo * Mark one block group RO, can be called several times for the same block 2347b12de528SQu Wenruo * group. 2348b12de528SQu Wenruo * 2349b12de528SQu Wenruo * @cache: the destination block group 2350b12de528SQu Wenruo * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to 2351b12de528SQu Wenruo * ensure we still have some free space after marking this 2352b12de528SQu Wenruo * block group RO. 
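 *
 * Illustrative call pattern only (assuming the usual relocation/scrub style
 * callers, where relocation passes true and scrub is expected to pass false
 * for @do_chunk_alloc), paired with btrfs_dec_block_group_ro():
 *
 *	ret = btrfs_inc_block_group_ro(cache, true);
 *	if (!ret) {
 *		(work on the now read-only block group)
 *		btrfs_dec_block_group_ro(cache);
 *	}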
2353b12de528SQu Wenruo */ 2354b12de528SQu Wenruo int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2355b12de528SQu Wenruo bool do_chunk_alloc) 235626ce2095SJosef Bacik { 235726ce2095SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 235826ce2095SJosef Bacik struct btrfs_trans_handle *trans; 235926ce2095SJosef Bacik u64 alloc_flags; 236026ce2095SJosef Bacik int ret; 2361b6e9f16cSNikolay Borisov bool dirty_bg_running; 236226ce2095SJosef Bacik 2363b6e9f16cSNikolay Borisov do { 236426ce2095SJosef Bacik trans = btrfs_join_transaction(fs_info->extent_root); 236526ce2095SJosef Bacik if (IS_ERR(trans)) 236626ce2095SJosef Bacik return PTR_ERR(trans); 236726ce2095SJosef Bacik 2368b6e9f16cSNikolay Borisov dirty_bg_running = false; 2369b6e9f16cSNikolay Borisov 237026ce2095SJosef Bacik /* 2371b6e9f16cSNikolay Borisov * We're not allowed to set block groups readonly after the dirty 2372b6e9f16cSNikolay Borisov * block group cache has started writing. If it already started, 2373b6e9f16cSNikolay Borisov * back off and let this transaction commit. 237426ce2095SJosef Bacik */ 237526ce2095SJosef Bacik mutex_lock(&fs_info->ro_block_group_mutex); 237626ce2095SJosef Bacik if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 237726ce2095SJosef Bacik u64 transid = trans->transid; 237826ce2095SJosef Bacik 237926ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 238026ce2095SJosef Bacik btrfs_end_transaction(trans); 238126ce2095SJosef Bacik 238226ce2095SJosef Bacik ret = btrfs_wait_for_commit(fs_info, transid); 238326ce2095SJosef Bacik if (ret) 238426ce2095SJosef Bacik return ret; 2385b6e9f16cSNikolay Borisov dirty_bg_running = true; 238626ce2095SJosef Bacik } 2387b6e9f16cSNikolay Borisov } while (dirty_bg_running); 238826ce2095SJosef Bacik 2389b12de528SQu Wenruo if (do_chunk_alloc) { 239026ce2095SJosef Bacik /* 2391b12de528SQu Wenruo * If we are changing raid levels, try to allocate a 2392b12de528SQu Wenruo * corresponding block group with the new raid level. 
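 * For example, while a balance converts data chunks from RAID0 to RAID1,
 * cache->flags still carries RAID0 but btrfs_get_alloc_profile() already
 * returns the RAID1 target, so we force-allocate a RAID1 chunk here first.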
239326ce2095SJosef Bacik */ 2394349e120eSJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 239526ce2095SJosef Bacik if (alloc_flags != cache->flags) { 2396b12de528SQu Wenruo ret = btrfs_chunk_alloc(trans, alloc_flags, 2397b12de528SQu Wenruo CHUNK_ALLOC_FORCE); 239826ce2095SJosef Bacik /* 239926ce2095SJosef Bacik * ENOSPC is allowed here, we may have enough space 2400b12de528SQu Wenruo * already allocated at the new raid level to carry on 240126ce2095SJosef Bacik */ 240226ce2095SJosef Bacik if (ret == -ENOSPC) 240326ce2095SJosef Bacik ret = 0; 240426ce2095SJosef Bacik if (ret < 0) 240526ce2095SJosef Bacik goto out; 240626ce2095SJosef Bacik } 2407b12de528SQu Wenruo } 240826ce2095SJosef Bacik 2409a7a63accSJosef Bacik ret = inc_block_group_ro(cache, 0); 2410195a49eaSFilipe Manana if (!do_chunk_alloc || ret == -ETXTBSY) 2411b12de528SQu Wenruo goto unlock_out; 241226ce2095SJosef Bacik if (!ret) 241326ce2095SJosef Bacik goto out; 241426ce2095SJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 241526ce2095SJosef Bacik ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 241626ce2095SJosef Bacik if (ret < 0) 241726ce2095SJosef Bacik goto out; 2418e11c0406SJosef Bacik ret = inc_block_group_ro(cache, 0); 2419195a49eaSFilipe Manana if (ret == -ETXTBSY) 2420195a49eaSFilipe Manana goto unlock_out; 242126ce2095SJosef Bacik out: 242226ce2095SJosef Bacik if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 2423349e120eSJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 242426ce2095SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 242526ce2095SJosef Bacik check_system_chunk(trans, alloc_flags); 242626ce2095SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 242726ce2095SJosef Bacik } 2428b12de528SQu Wenruo unlock_out: 242926ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 243026ce2095SJosef Bacik 243126ce2095SJosef Bacik btrfs_end_transaction(trans); 243226ce2095SJosef Bacik return ret; 243326ce2095SJosef Bacik } 243426ce2095SJosef Bacik 243532da5386SDavid Sterba void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 243626ce2095SJosef Bacik { 243726ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info; 243826ce2095SJosef Bacik u64 num_bytes; 243926ce2095SJosef Bacik 244026ce2095SJosef Bacik BUG_ON(!cache->ro); 244126ce2095SJosef Bacik 244226ce2095SJosef Bacik spin_lock(&sinfo->lock); 244326ce2095SJosef Bacik spin_lock(&cache->lock); 244426ce2095SJosef Bacik if (!--cache->ro) { 2445169e0da9SNaohiro Aota if (btrfs_is_zoned(cache->fs_info)) { 2446169e0da9SNaohiro Aota /* Migrate zone_unusable bytes back */ 2447169e0da9SNaohiro Aota cache->zone_unusable = cache->alloc_offset - cache->used; 2448169e0da9SNaohiro Aota sinfo->bytes_zone_unusable += cache->zone_unusable; 2449169e0da9SNaohiro Aota sinfo->bytes_readonly -= cache->zone_unusable; 2450169e0da9SNaohiro Aota } 2451*f9f28e5bSNaohiro Aota num_bytes = cache->length - cache->reserved - 2452*f9f28e5bSNaohiro Aota cache->pinned - cache->bytes_super - 2453*f9f28e5bSNaohiro Aota cache->zone_unusable - cache->used; 2454*f9f28e5bSNaohiro Aota sinfo->bytes_readonly -= num_bytes; 245526ce2095SJosef Bacik list_del_init(&cache->ro_list); 245626ce2095SJosef Bacik } 245726ce2095SJosef Bacik spin_unlock(&cache->lock); 245826ce2095SJosef Bacik spin_unlock(&sinfo->lock); 245926ce2095SJosef Bacik } 246077745c05SJosef Bacik 24613be4d8efSQu Wenruo static int update_block_group_item(struct btrfs_trans_handle *trans, 246277745c05SJosef Bacik struct 
btrfs_path *path, 246332da5386SDavid Sterba struct btrfs_block_group *cache) 246477745c05SJosef Bacik { 246577745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 246677745c05SJosef Bacik int ret; 24673be4d8efSQu Wenruo struct btrfs_root *root = fs_info->extent_root; 246877745c05SJosef Bacik unsigned long bi; 246977745c05SJosef Bacik struct extent_buffer *leaf; 2470bf38be65SDavid Sterba struct btrfs_block_group_item bgi; 2471b3470b5dSDavid Sterba struct btrfs_key key; 247277745c05SJosef Bacik 2473b3470b5dSDavid Sterba key.objectid = cache->start; 2474b3470b5dSDavid Sterba key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2475b3470b5dSDavid Sterba key.offset = cache->length; 2476b3470b5dSDavid Sterba 24773be4d8efSQu Wenruo ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 247877745c05SJosef Bacik if (ret) { 247977745c05SJosef Bacik if (ret > 0) 248077745c05SJosef Bacik ret = -ENOENT; 248177745c05SJosef Bacik goto fail; 248277745c05SJosef Bacik } 248377745c05SJosef Bacik 248477745c05SJosef Bacik leaf = path->nodes[0]; 248577745c05SJosef Bacik bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 2486de0dc456SDavid Sterba btrfs_set_stack_block_group_used(&bgi, cache->used); 2487de0dc456SDavid Sterba btrfs_set_stack_block_group_chunk_objectid(&bgi, 24883d976388SDavid Sterba BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2489de0dc456SDavid Sterba btrfs_set_stack_block_group_flags(&bgi, cache->flags); 2490bf38be65SDavid Sterba write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 249177745c05SJosef Bacik btrfs_mark_buffer_dirty(leaf); 249277745c05SJosef Bacik fail: 249377745c05SJosef Bacik btrfs_release_path(path); 249477745c05SJosef Bacik return ret; 249577745c05SJosef Bacik 249677745c05SJosef Bacik } 249777745c05SJosef Bacik 249832da5386SDavid Sterba static int cache_save_setup(struct btrfs_block_group *block_group, 249977745c05SJosef Bacik struct btrfs_trans_handle *trans, 250077745c05SJosef Bacik struct btrfs_path *path) 250177745c05SJosef Bacik { 250277745c05SJosef Bacik struct btrfs_fs_info *fs_info = block_group->fs_info; 250377745c05SJosef Bacik struct btrfs_root *root = fs_info->tree_root; 250477745c05SJosef Bacik struct inode *inode = NULL; 250577745c05SJosef Bacik struct extent_changeset *data_reserved = NULL; 250677745c05SJosef Bacik u64 alloc_hint = 0; 250777745c05SJosef Bacik int dcs = BTRFS_DC_ERROR; 250877745c05SJosef Bacik u64 num_pages = 0; 250977745c05SJosef Bacik int retries = 0; 251077745c05SJosef Bacik int ret = 0; 251177745c05SJosef Bacik 2512af456a2cSBoris Burkov if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 2513af456a2cSBoris Burkov return 0; 2514af456a2cSBoris Burkov 251577745c05SJosef Bacik /* 251677745c05SJosef Bacik * If this block group is smaller than 100 megs don't bother caching the 251777745c05SJosef Bacik * block group. 
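 * Rebuilding the free space for such a small group at mount time is cheap,
 * so writing an on-disk cache for it would gain us very little.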
251877745c05SJosef Bacik */ 2519b3470b5dSDavid Sterba if (block_group->length < (100 * SZ_1M)) { 252077745c05SJosef Bacik spin_lock(&block_group->lock); 252177745c05SJosef Bacik block_group->disk_cache_state = BTRFS_DC_WRITTEN; 252277745c05SJosef Bacik spin_unlock(&block_group->lock); 252377745c05SJosef Bacik return 0; 252477745c05SJosef Bacik } 252577745c05SJosef Bacik 2526bf31f87fSDavid Sterba if (TRANS_ABORTED(trans)) 252777745c05SJosef Bacik return 0; 252877745c05SJosef Bacik again: 252977745c05SJosef Bacik inode = lookup_free_space_inode(block_group, path); 253077745c05SJosef Bacik if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 253177745c05SJosef Bacik ret = PTR_ERR(inode); 253277745c05SJosef Bacik btrfs_release_path(path); 253377745c05SJosef Bacik goto out; 253477745c05SJosef Bacik } 253577745c05SJosef Bacik 253677745c05SJosef Bacik if (IS_ERR(inode)) { 253777745c05SJosef Bacik BUG_ON(retries); 253877745c05SJosef Bacik retries++; 253977745c05SJosef Bacik 254077745c05SJosef Bacik if (block_group->ro) 254177745c05SJosef Bacik goto out_free; 254277745c05SJosef Bacik 254377745c05SJosef Bacik ret = create_free_space_inode(trans, block_group, path); 254477745c05SJosef Bacik if (ret) 254577745c05SJosef Bacik goto out_free; 254677745c05SJosef Bacik goto again; 254777745c05SJosef Bacik } 254877745c05SJosef Bacik 254977745c05SJosef Bacik /* 255077745c05SJosef Bacik * We want to set the generation to 0, that way if anything goes wrong 255177745c05SJosef Bacik * from here on out we know not to trust this cache when we load up next 255277745c05SJosef Bacik * time. 255377745c05SJosef Bacik */ 255477745c05SJosef Bacik BTRFS_I(inode)->generation = 0; 25559a56fcd1SNikolay Borisov ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 255677745c05SJosef Bacik if (ret) { 255777745c05SJosef Bacik /* 255877745c05SJosef Bacik * So theoretically we could recover from this, simply set the 255977745c05SJosef Bacik * super cache generation to 0 so we know to invalidate the 256077745c05SJosef Bacik * cache, but then we'd have to keep track of the block groups 256177745c05SJosef Bacik * that fail this way so we know we _have_ to reset this cache 256277745c05SJosef Bacik * before the next commit or risk reading stale cache. So to 256377745c05SJosef Bacik * limit our exposure to horrible edge cases lets just abort the 256477745c05SJosef Bacik * transaction, this only happens in really bad situations 256577745c05SJosef Bacik * anyway. 
256677745c05SJosef Bacik */ 256777745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 256877745c05SJosef Bacik goto out_put; 256977745c05SJosef Bacik } 257077745c05SJosef Bacik WARN_ON(ret); 257177745c05SJosef Bacik 257277745c05SJosef Bacik /* We've already setup this transaction, go ahead and exit */ 257377745c05SJosef Bacik if (block_group->cache_generation == trans->transid && 257477745c05SJosef Bacik i_size_read(inode)) { 257577745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 257677745c05SJosef Bacik goto out_put; 257777745c05SJosef Bacik } 257877745c05SJosef Bacik 257977745c05SJosef Bacik if (i_size_read(inode) > 0) { 258077745c05SJosef Bacik ret = btrfs_check_trunc_cache_free_space(fs_info, 258177745c05SJosef Bacik &fs_info->global_block_rsv); 258277745c05SJosef Bacik if (ret) 258377745c05SJosef Bacik goto out_put; 258477745c05SJosef Bacik 258577745c05SJosef Bacik ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 258677745c05SJosef Bacik if (ret) 258777745c05SJosef Bacik goto out_put; 258877745c05SJosef Bacik } 258977745c05SJosef Bacik 259077745c05SJosef Bacik spin_lock(&block_group->lock); 259177745c05SJosef Bacik if (block_group->cached != BTRFS_CACHE_FINISHED || 259277745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) { 259377745c05SJosef Bacik /* 259477745c05SJosef Bacik * don't bother trying to write stuff out _if_ 259577745c05SJosef Bacik * a) we're not cached, 259677745c05SJosef Bacik * b) we're with nospace_cache mount option, 259777745c05SJosef Bacik * c) we're with v2 space_cache (FREE_SPACE_TREE). 259877745c05SJosef Bacik */ 259977745c05SJosef Bacik dcs = BTRFS_DC_WRITTEN; 260077745c05SJosef Bacik spin_unlock(&block_group->lock); 260177745c05SJosef Bacik goto out_put; 260277745c05SJosef Bacik } 260377745c05SJosef Bacik spin_unlock(&block_group->lock); 260477745c05SJosef Bacik 260577745c05SJosef Bacik /* 260677745c05SJosef Bacik * We hit an ENOSPC when setting up the cache in this transaction, just 260777745c05SJosef Bacik * skip doing the setup, we've already cleared the cache so we're safe. 260877745c05SJosef Bacik */ 260977745c05SJosef Bacik if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 261077745c05SJosef Bacik ret = -ENOSPC; 261177745c05SJosef Bacik goto out_put; 261277745c05SJosef Bacik } 261377745c05SJosef Bacik 261477745c05SJosef Bacik /* 261577745c05SJosef Bacik * Try to preallocate enough space based on how big the block group is. 261677745c05SJosef Bacik * Keep in mind this has to include any pinned space which could end up 261777745c05SJosef Bacik * taking up quite a bit since it's not folded into the other space 261877745c05SJosef Bacik * cache. 
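 * Rough worked example (assuming 4KiB pages): for a 1GiB block group the
 * math below gives div_u64(1GiB, 256MiB) = 4, then 4 * 16 pages * 4KiB =
 * 256KiB preallocated for the cache file.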
261977745c05SJosef Bacik */ 2620b3470b5dSDavid Sterba num_pages = div_u64(block_group->length, SZ_256M); 262177745c05SJosef Bacik if (!num_pages) 262277745c05SJosef Bacik num_pages = 1; 262377745c05SJosef Bacik 262477745c05SJosef Bacik num_pages *= 16; 262577745c05SJosef Bacik num_pages *= PAGE_SIZE; 262677745c05SJosef Bacik 262736ea6f3eSNikolay Borisov ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 262836ea6f3eSNikolay Borisov num_pages); 262977745c05SJosef Bacik if (ret) 263077745c05SJosef Bacik goto out_put; 263177745c05SJosef Bacik 263277745c05SJosef Bacik ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, 263377745c05SJosef Bacik num_pages, num_pages, 263477745c05SJosef Bacik &alloc_hint); 263577745c05SJosef Bacik /* 263677745c05SJosef Bacik * Our cache requires contiguous chunks so that we don't modify a bunch 263777745c05SJosef Bacik * of metadata or split extents when writing the cache out, which means 263877745c05SJosef Bacik * we can enospc if we are heavily fragmented in addition to just normal 263977745c05SJosef Bacik * out of space conditions. So if we hit this just skip setting up any 264077745c05SJosef Bacik * other block groups for this transaction, maybe we'll unpin enough 264177745c05SJosef Bacik * space the next time around. 264277745c05SJosef Bacik */ 264377745c05SJosef Bacik if (!ret) 264477745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 264577745c05SJosef Bacik else if (ret == -ENOSPC) 264677745c05SJosef Bacik set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 264777745c05SJosef Bacik 264877745c05SJosef Bacik out_put: 264977745c05SJosef Bacik iput(inode); 265077745c05SJosef Bacik out_free: 265177745c05SJosef Bacik btrfs_release_path(path); 265277745c05SJosef Bacik out: 265377745c05SJosef Bacik spin_lock(&block_group->lock); 265477745c05SJosef Bacik if (!ret && dcs == BTRFS_DC_SETUP) 265577745c05SJosef Bacik block_group->cache_generation = trans->transid; 265677745c05SJosef Bacik block_group->disk_cache_state = dcs; 265777745c05SJosef Bacik spin_unlock(&block_group->lock); 265877745c05SJosef Bacik 265977745c05SJosef Bacik extent_changeset_free(data_reserved); 266077745c05SJosef Bacik return ret; 266177745c05SJosef Bacik } 266277745c05SJosef Bacik 266377745c05SJosef Bacik int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 266477745c05SJosef Bacik { 266577745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 266632da5386SDavid Sterba struct btrfs_block_group *cache, *tmp; 266777745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 266877745c05SJosef Bacik struct btrfs_path *path; 266977745c05SJosef Bacik 267077745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs) || 267177745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) 267277745c05SJosef Bacik return 0; 267377745c05SJosef Bacik 267477745c05SJosef Bacik path = btrfs_alloc_path(); 267577745c05SJosef Bacik if (!path) 267677745c05SJosef Bacik return -ENOMEM; 267777745c05SJosef Bacik 267877745c05SJosef Bacik /* Could add new block groups, use _safe just in case */ 267977745c05SJosef Bacik list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 268077745c05SJosef Bacik dirty_list) { 268177745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_CLEAR) 268277745c05SJosef Bacik cache_save_setup(cache, trans, path); 268377745c05SJosef Bacik } 268477745c05SJosef Bacik 268577745c05SJosef Bacik btrfs_free_path(path); 268677745c05SJosef Bacik return 0; 268777745c05SJosef Bacik } 268877745c05SJosef Bacik 268977745c05SJosef 
Bacik /* 269077745c05SJosef Bacik * Transaction commit does final block group cache writeback during a critical 269177745c05SJosef Bacik * section where nothing is allowed to change the FS. This is required in 269277745c05SJosef Bacik * order for the cache to actually match the block group, but can introduce a 269377745c05SJosef Bacik * lot of latency into the commit. 269477745c05SJosef Bacik * 269577745c05SJosef Bacik * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 269677745c05SJosef Bacik * There's a chance we'll have to redo some of it if the block group changes 269777745c05SJosef Bacik * again during the commit, but it greatly reduces the commit latency by 269877745c05SJosef Bacik * getting rid of the easy block groups while we're still allowing others to 269977745c05SJosef Bacik * join the commit. 270077745c05SJosef Bacik */ 270177745c05SJosef Bacik int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 270277745c05SJosef Bacik { 270377745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 270432da5386SDavid Sterba struct btrfs_block_group *cache; 270577745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 270677745c05SJosef Bacik int ret = 0; 270777745c05SJosef Bacik int should_put; 270877745c05SJosef Bacik struct btrfs_path *path = NULL; 270977745c05SJosef Bacik LIST_HEAD(dirty); 271077745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 271177745c05SJosef Bacik int num_started = 0; 271277745c05SJosef Bacik int loops = 0; 271377745c05SJosef Bacik 271477745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 271577745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs)) { 271677745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 271777745c05SJosef Bacik return 0; 271877745c05SJosef Bacik } 271977745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 272077745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 272177745c05SJosef Bacik 272277745c05SJosef Bacik again: 272377745c05SJosef Bacik /* Make sure all the block groups on our dirty list actually exist */ 272477745c05SJosef Bacik btrfs_create_pending_block_groups(trans); 272577745c05SJosef Bacik 272677745c05SJosef Bacik if (!path) { 272777745c05SJosef Bacik path = btrfs_alloc_path(); 2728938fcbfbSJosef Bacik if (!path) { 2729938fcbfbSJosef Bacik ret = -ENOMEM; 2730938fcbfbSJosef Bacik goto out; 2731938fcbfbSJosef Bacik } 273277745c05SJosef Bacik } 273377745c05SJosef Bacik 273477745c05SJosef Bacik /* 273577745c05SJosef Bacik * cache_write_mutex is here only to save us from balance or automatic 273677745c05SJosef Bacik * removal of empty block groups deleting this block group while we are 273777745c05SJosef Bacik * writing out the cache 273877745c05SJosef Bacik */ 273977745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 274077745c05SJosef Bacik while (!list_empty(&dirty)) { 274177745c05SJosef Bacik bool drop_reserve = true; 274277745c05SJosef Bacik 274332da5386SDavid Sterba cache = list_first_entry(&dirty, struct btrfs_block_group, 274477745c05SJosef Bacik dirty_list); 274577745c05SJosef Bacik /* 274677745c05SJosef Bacik * This can happen if something re-dirties a block group that 274777745c05SJosef Bacik * is already under IO. 
Just wait for it to finish and then do 274877745c05SJosef Bacik * it all again 274977745c05SJosef Bacik */ 275077745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 275177745c05SJosef Bacik list_del_init(&cache->io_list); 275277745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 275377745c05SJosef Bacik btrfs_put_block_group(cache); 275477745c05SJosef Bacik } 275577745c05SJosef Bacik 275677745c05SJosef Bacik 275777745c05SJosef Bacik /* 275877745c05SJosef Bacik * btrfs_wait_cache_io uses the cache->dirty_list to decide if 275977745c05SJosef Bacik * it should update the cache_state. Don't delete until after 276077745c05SJosef Bacik * we wait. 276177745c05SJosef Bacik * 276277745c05SJosef Bacik * Since we're not running in the commit critical section 276377745c05SJosef Bacik * we need the dirty_bgs_lock to protect from update_block_group 276477745c05SJosef Bacik */ 276577745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 276677745c05SJosef Bacik list_del_init(&cache->dirty_list); 276777745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 276877745c05SJosef Bacik 276977745c05SJosef Bacik should_put = 1; 277077745c05SJosef Bacik 277177745c05SJosef Bacik cache_save_setup(cache, trans, path); 277277745c05SJosef Bacik 277377745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_SETUP) { 277477745c05SJosef Bacik cache->io_ctl.inode = NULL; 277577745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 277677745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 277777745c05SJosef Bacik num_started++; 277877745c05SJosef Bacik should_put = 0; 277977745c05SJosef Bacik 278077745c05SJosef Bacik /* 278177745c05SJosef Bacik * The cache_write_mutex is protecting the 278277745c05SJosef Bacik * io_list, also refer to the definition of 278377745c05SJosef Bacik * btrfs_transaction::io_bgs for more details 278477745c05SJosef Bacik */ 278577745c05SJosef Bacik list_add_tail(&cache->io_list, io); 278677745c05SJosef Bacik } else { 278777745c05SJosef Bacik /* 278877745c05SJosef Bacik * If we failed to write the cache, the 278977745c05SJosef Bacik * generation will be bad and life goes on 279077745c05SJosef Bacik */ 279177745c05SJosef Bacik ret = 0; 279277745c05SJosef Bacik } 279377745c05SJosef Bacik } 279477745c05SJosef Bacik if (!ret) { 27953be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 279677745c05SJosef Bacik /* 279777745c05SJosef Bacik * Our block group might still be attached to the list 279877745c05SJosef Bacik * of new block groups in the transaction handle of some 279977745c05SJosef Bacik * other task (struct btrfs_trans_handle->new_bgs). This 280077745c05SJosef Bacik * means its block group item isn't yet in the extent 280177745c05SJosef Bacik * tree. If this happens ignore the error, as we will 280277745c05SJosef Bacik * try again later in the critical section of the 280377745c05SJosef Bacik * transaction commit. 
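 * (On -ENOENT the group is simply put back on ->dirty_bgs below, so the
 * item gets written by btrfs_write_dirty_block_groups() in the commit
 * critical section.)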
280477745c05SJosef Bacik */ 280577745c05SJosef Bacik if (ret == -ENOENT) { 280677745c05SJosef Bacik ret = 0; 280777745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 280877745c05SJosef Bacik if (list_empty(&cache->dirty_list)) { 280977745c05SJosef Bacik list_add_tail(&cache->dirty_list, 281077745c05SJosef Bacik &cur_trans->dirty_bgs); 281177745c05SJosef Bacik btrfs_get_block_group(cache); 281277745c05SJosef Bacik drop_reserve = false; 281377745c05SJosef Bacik } 281477745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 281577745c05SJosef Bacik } else if (ret) { 281677745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 281777745c05SJosef Bacik } 281877745c05SJosef Bacik } 281977745c05SJosef Bacik 282077745c05SJosef Bacik /* If it's not on the io list, we need to put the block group */ 282177745c05SJosef Bacik if (should_put) 282277745c05SJosef Bacik btrfs_put_block_group(cache); 282377745c05SJosef Bacik if (drop_reserve) 282477745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 282577745c05SJosef Bacik /* 282677745c05SJosef Bacik * Avoid blocking other tasks for too long. It might even save 282777745c05SJosef Bacik * us from writing caches for block groups that are going to be 282877745c05SJosef Bacik * removed. 282977745c05SJosef Bacik */ 283077745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 2831938fcbfbSJosef Bacik if (ret) 2832938fcbfbSJosef Bacik goto out; 283377745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 283477745c05SJosef Bacik } 283577745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 283677745c05SJosef Bacik 283777745c05SJosef Bacik /* 283877745c05SJosef Bacik * Go through delayed refs for all the stuff we've just kicked off 283977745c05SJosef Bacik * and then loop back (just once) 284077745c05SJosef Bacik */ 284134d1eb0eSJosef Bacik if (!ret) 284277745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 0); 284377745c05SJosef Bacik if (!ret && loops == 0) { 284477745c05SJosef Bacik loops++; 284577745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 284677745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 284777745c05SJosef Bacik /* 284877745c05SJosef Bacik * dirty_bgs_lock protects us from concurrent block group 284977745c05SJosef Bacik * deletes too (not just cache_write_mutex). 
285077745c05SJosef Bacik */ 285177745c05SJosef Bacik if (!list_empty(&dirty)) { 285277745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 285377745c05SJosef Bacik goto again; 285477745c05SJosef Bacik } 285577745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 2856938fcbfbSJosef Bacik } 2857938fcbfbSJosef Bacik out: 2858938fcbfbSJosef Bacik if (ret < 0) { 2859938fcbfbSJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 2860938fcbfbSJosef Bacik list_splice_init(&dirty, &cur_trans->dirty_bgs); 2861938fcbfbSJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 286277745c05SJosef Bacik btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 286377745c05SJosef Bacik } 286477745c05SJosef Bacik 286577745c05SJosef Bacik btrfs_free_path(path); 286677745c05SJosef Bacik return ret; 286777745c05SJosef Bacik } 286877745c05SJosef Bacik 286977745c05SJosef Bacik int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 287077745c05SJosef Bacik { 287177745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 287232da5386SDavid Sterba struct btrfs_block_group *cache; 287377745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 287477745c05SJosef Bacik int ret = 0; 287577745c05SJosef Bacik int should_put; 287677745c05SJosef Bacik struct btrfs_path *path; 287777745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 287877745c05SJosef Bacik int num_started = 0; 287977745c05SJosef Bacik 288077745c05SJosef Bacik path = btrfs_alloc_path(); 288177745c05SJosef Bacik if (!path) 288277745c05SJosef Bacik return -ENOMEM; 288377745c05SJosef Bacik 288477745c05SJosef Bacik /* 288577745c05SJosef Bacik * Even though we are in the critical section of the transaction commit, 288677745c05SJosef Bacik * we can still have concurrent tasks adding elements to this 288777745c05SJosef Bacik * transaction's list of dirty block groups. These tasks correspond to 288877745c05SJosef Bacik * endio free space workers started when writeback finishes for a 288977745c05SJosef Bacik * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 289077745c05SJosef Bacik * allocate new block groups as a result of COWing nodes of the root 289177745c05SJosef Bacik * tree when updating the free space inode. The writeback for the space 289277745c05SJosef Bacik * caches is triggered by an earlier call to 289377745c05SJosef Bacik * btrfs_start_dirty_block_groups() and iterations of the following 289477745c05SJosef Bacik * loop. 289577745c05SJosef Bacik * Also we want to do the cache_save_setup first and then run the 289677745c05SJosef Bacik * delayed refs to make sure we have the best chance at doing this all 289777745c05SJosef Bacik * in one shot. 289877745c05SJosef Bacik */ 289977745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 290077745c05SJosef Bacik while (!list_empty(&cur_trans->dirty_bgs)) { 290177745c05SJosef Bacik cache = list_first_entry(&cur_trans->dirty_bgs, 290232da5386SDavid Sterba struct btrfs_block_group, 290377745c05SJosef Bacik dirty_list); 290477745c05SJosef Bacik 290577745c05SJosef Bacik /* 290677745c05SJosef Bacik * This can happen if cache_save_setup re-dirties a block group 290777745c05SJosef Bacik * that is already under IO. 
Just wait for it to finish and 290877745c05SJosef Bacik * then do it all again 290977745c05SJosef Bacik */ 291077745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 291177745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 291277745c05SJosef Bacik list_del_init(&cache->io_list); 291377745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 291477745c05SJosef Bacik btrfs_put_block_group(cache); 291577745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 291677745c05SJosef Bacik } 291777745c05SJosef Bacik 291877745c05SJosef Bacik /* 291977745c05SJosef Bacik * Don't remove from the dirty list until after we've waited on 292077745c05SJosef Bacik * any pending IO 292177745c05SJosef Bacik */ 292277745c05SJosef Bacik list_del_init(&cache->dirty_list); 292377745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 292477745c05SJosef Bacik should_put = 1; 292577745c05SJosef Bacik 292677745c05SJosef Bacik cache_save_setup(cache, trans, path); 292777745c05SJosef Bacik 292877745c05SJosef Bacik if (!ret) 292977745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 293077745c05SJosef Bacik (unsigned long) -1); 293177745c05SJosef Bacik 293277745c05SJosef Bacik if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 293377745c05SJosef Bacik cache->io_ctl.inode = NULL; 293477745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 293577745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 293677745c05SJosef Bacik num_started++; 293777745c05SJosef Bacik should_put = 0; 293877745c05SJosef Bacik list_add_tail(&cache->io_list, io); 293977745c05SJosef Bacik } else { 294077745c05SJosef Bacik /* 294177745c05SJosef Bacik * If we failed to write the cache, the 294277745c05SJosef Bacik * generation will be bad and life goes on 294377745c05SJosef Bacik */ 294477745c05SJosef Bacik ret = 0; 294577745c05SJosef Bacik } 294677745c05SJosef Bacik } 294777745c05SJosef Bacik if (!ret) { 29483be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 294977745c05SJosef Bacik /* 295077745c05SJosef Bacik * One of the free space endio workers might have 295177745c05SJosef Bacik * created a new block group while updating a free space 295277745c05SJosef Bacik * cache's inode (at inode.c:btrfs_finish_ordered_io()) 295377745c05SJosef Bacik * and hasn't released its transaction handle yet, in 295477745c05SJosef Bacik * which case the new block group is still attached to 295577745c05SJosef Bacik * its transaction handle and its creation has not 295677745c05SJosef Bacik * finished yet (no block group item in the extent tree 295777745c05SJosef Bacik * yet, etc). If this is the case, wait for all free 295877745c05SJosef Bacik * space endio workers to finish and retry. This is a 2959260db43cSRandy Dunlap * very rare case so no need for a more efficient and 296077745c05SJosef Bacik * complex approach. 
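 * Waiting for num_writers == 1 below means every other transaction handle
 * has been released, so any block group creation that was still attached
 * to another handle has finished by the time we retry.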
296177745c05SJosef Bacik */ 296277745c05SJosef Bacik if (ret == -ENOENT) { 296377745c05SJosef Bacik wait_event(cur_trans->writer_wait, 296477745c05SJosef Bacik atomic_read(&cur_trans->num_writers) == 1); 29653be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 296677745c05SJosef Bacik } 296777745c05SJosef Bacik if (ret) 296877745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 296977745c05SJosef Bacik } 297077745c05SJosef Bacik 297177745c05SJosef Bacik /* If its not on the io list, we need to put the block group */ 297277745c05SJosef Bacik if (should_put) 297377745c05SJosef Bacik btrfs_put_block_group(cache); 297477745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 297577745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 297677745c05SJosef Bacik } 297777745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 297877745c05SJosef Bacik 297977745c05SJosef Bacik /* 298077745c05SJosef Bacik * Refer to the definition of io_bgs member for details why it's safe 298177745c05SJosef Bacik * to use it without any locking 298277745c05SJosef Bacik */ 298377745c05SJosef Bacik while (!list_empty(io)) { 298432da5386SDavid Sterba cache = list_first_entry(io, struct btrfs_block_group, 298577745c05SJosef Bacik io_list); 298677745c05SJosef Bacik list_del_init(&cache->io_list); 298777745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 298877745c05SJosef Bacik btrfs_put_block_group(cache); 298977745c05SJosef Bacik } 299077745c05SJosef Bacik 299177745c05SJosef Bacik btrfs_free_path(path); 299277745c05SJosef Bacik return ret; 299377745c05SJosef Bacik } 2994606d1bf1SJosef Bacik 2995606d1bf1SJosef Bacik int btrfs_update_block_group(struct btrfs_trans_handle *trans, 2996606d1bf1SJosef Bacik u64 bytenr, u64 num_bytes, int alloc) 2997606d1bf1SJosef Bacik { 2998606d1bf1SJosef Bacik struct btrfs_fs_info *info = trans->fs_info; 299932da5386SDavid Sterba struct btrfs_block_group *cache = NULL; 3000606d1bf1SJosef Bacik u64 total = num_bytes; 3001606d1bf1SJosef Bacik u64 old_val; 3002606d1bf1SJosef Bacik u64 byte_in_group; 3003606d1bf1SJosef Bacik int factor; 3004606d1bf1SJosef Bacik int ret = 0; 3005606d1bf1SJosef Bacik 3006606d1bf1SJosef Bacik /* Block accounting for super block */ 3007606d1bf1SJosef Bacik spin_lock(&info->delalloc_root_lock); 3008606d1bf1SJosef Bacik old_val = btrfs_super_bytes_used(info->super_copy); 3009606d1bf1SJosef Bacik if (alloc) 3010606d1bf1SJosef Bacik old_val += num_bytes; 3011606d1bf1SJosef Bacik else 3012606d1bf1SJosef Bacik old_val -= num_bytes; 3013606d1bf1SJosef Bacik btrfs_set_super_bytes_used(info->super_copy, old_val); 3014606d1bf1SJosef Bacik spin_unlock(&info->delalloc_root_lock); 3015606d1bf1SJosef Bacik 3016606d1bf1SJosef Bacik while (total) { 3017606d1bf1SJosef Bacik cache = btrfs_lookup_block_group(info, bytenr); 3018606d1bf1SJosef Bacik if (!cache) { 3019606d1bf1SJosef Bacik ret = -ENOENT; 3020606d1bf1SJosef Bacik break; 3021606d1bf1SJosef Bacik } 3022606d1bf1SJosef Bacik factor = btrfs_bg_type_to_factor(cache->flags); 3023606d1bf1SJosef Bacik 3024606d1bf1SJosef Bacik /* 3025606d1bf1SJosef Bacik * If this block group has free space cache written out, we 3026606d1bf1SJosef Bacik * need to make sure to load it if we are removing space. This 3027606d1bf1SJosef Bacik * is because we need the unpinning stage to actually add the 3028606d1bf1SJosef Bacik * space back to the block group, otherwise we will leak space. 
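 * (btrfs_cache_block_group() is called below with load_cache_only set, so
 * this prefers loading the written-out cache and does not wait for a full
 * caching pass to finish.)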
3029606d1bf1SJosef Bacik */ 303032da5386SDavid Sterba if (!alloc && !btrfs_block_group_done(cache)) 3031606d1bf1SJosef Bacik btrfs_cache_block_group(cache, 1); 3032606d1bf1SJosef Bacik 3033b3470b5dSDavid Sterba byte_in_group = bytenr - cache->start; 3034b3470b5dSDavid Sterba WARN_ON(byte_in_group > cache->length); 3035606d1bf1SJosef Bacik 3036606d1bf1SJosef Bacik spin_lock(&cache->space_info->lock); 3037606d1bf1SJosef Bacik spin_lock(&cache->lock); 3038606d1bf1SJosef Bacik 3039606d1bf1SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 3040606d1bf1SJosef Bacik cache->disk_cache_state < BTRFS_DC_CLEAR) 3041606d1bf1SJosef Bacik cache->disk_cache_state = BTRFS_DC_CLEAR; 3042606d1bf1SJosef Bacik 3043bf38be65SDavid Sterba old_val = cache->used; 3044b3470b5dSDavid Sterba num_bytes = min(total, cache->length - byte_in_group); 3045606d1bf1SJosef Bacik if (alloc) { 3046606d1bf1SJosef Bacik old_val += num_bytes; 3047bf38be65SDavid Sterba cache->used = old_val; 3048606d1bf1SJosef Bacik cache->reserved -= num_bytes; 3049606d1bf1SJosef Bacik cache->space_info->bytes_reserved -= num_bytes; 3050606d1bf1SJosef Bacik cache->space_info->bytes_used += num_bytes; 3051606d1bf1SJosef Bacik cache->space_info->disk_used += num_bytes * factor; 3052606d1bf1SJosef Bacik spin_unlock(&cache->lock); 3053606d1bf1SJosef Bacik spin_unlock(&cache->space_info->lock); 3054606d1bf1SJosef Bacik } else { 3055606d1bf1SJosef Bacik old_val -= num_bytes; 3056bf38be65SDavid Sterba cache->used = old_val; 3057606d1bf1SJosef Bacik cache->pinned += num_bytes; 3058606d1bf1SJosef Bacik btrfs_space_info_update_bytes_pinned(info, 3059606d1bf1SJosef Bacik cache->space_info, num_bytes); 3060606d1bf1SJosef Bacik cache->space_info->bytes_used -= num_bytes; 3061606d1bf1SJosef Bacik cache->space_info->disk_used -= num_bytes * factor; 3062606d1bf1SJosef Bacik spin_unlock(&cache->lock); 3063606d1bf1SJosef Bacik spin_unlock(&cache->space_info->lock); 3064606d1bf1SJosef Bacik 30652187374fSJosef Bacik __btrfs_mod_total_bytes_pinned(cache->space_info, 30662187374fSJosef Bacik num_bytes); 3067fe119a6eSNikolay Borisov set_extent_dirty(&trans->transaction->pinned_extents, 3068606d1bf1SJosef Bacik bytenr, bytenr + num_bytes - 1, 3069606d1bf1SJosef Bacik GFP_NOFS | __GFP_NOFAIL); 3070606d1bf1SJosef Bacik } 3071606d1bf1SJosef Bacik 3072606d1bf1SJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 3073606d1bf1SJosef Bacik if (list_empty(&cache->dirty_list)) { 3074606d1bf1SJosef Bacik list_add_tail(&cache->dirty_list, 3075606d1bf1SJosef Bacik &trans->transaction->dirty_bgs); 3076606d1bf1SJosef Bacik trans->delayed_ref_updates++; 3077606d1bf1SJosef Bacik btrfs_get_block_group(cache); 3078606d1bf1SJosef Bacik } 3079606d1bf1SJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 3080606d1bf1SJosef Bacik 3081606d1bf1SJosef Bacik /* 3082606d1bf1SJosef Bacik * No longer have used bytes in this block group, queue it for 3083606d1bf1SJosef Bacik * deletion. We do this after adding the block group to the 3084606d1bf1SJosef Bacik * dirty list to avoid races between cleaner kthread and space 3085606d1bf1SJosef Bacik * cache writeout. 
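 * (When DISCARD_ASYNC is enabled the empty group is not marked unused
 * here; the async discard code is expected to pick it up and mark it
 * unused once it has been trimmed.)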
3086606d1bf1SJosef Bacik */ 30876e80d4f8SDennis Zhou if (!alloc && old_val == 0) { 30886e80d4f8SDennis Zhou if (!btrfs_test_opt(info, DISCARD_ASYNC)) 3089606d1bf1SJosef Bacik btrfs_mark_bg_unused(cache); 30906e80d4f8SDennis Zhou } 3091606d1bf1SJosef Bacik 3092606d1bf1SJosef Bacik btrfs_put_block_group(cache); 3093606d1bf1SJosef Bacik total -= num_bytes; 3094606d1bf1SJosef Bacik bytenr += num_bytes; 3095606d1bf1SJosef Bacik } 3096606d1bf1SJosef Bacik 3097606d1bf1SJosef Bacik /* Modified block groups are accounted for in the delayed_refs_rsv. */ 3098606d1bf1SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 3099606d1bf1SJosef Bacik return ret; 3100606d1bf1SJosef Bacik } 3101606d1bf1SJosef Bacik 3102606d1bf1SJosef Bacik /** 3103606d1bf1SJosef Bacik * btrfs_add_reserved_bytes - update the block_group and space info counters 3104606d1bf1SJosef Bacik * @cache: The cache we are manipulating 3105606d1bf1SJosef Bacik * @ram_bytes: The number of bytes of file content, and will be same to 3106606d1bf1SJosef Bacik * @num_bytes except for the compress path. 3107606d1bf1SJosef Bacik * @num_bytes: The number of bytes in question 3108606d1bf1SJosef Bacik * @delalloc: The blocks are allocated for the delalloc write 3109606d1bf1SJosef Bacik * 3110606d1bf1SJosef Bacik * This is called by the allocator when it reserves space. If this is a 3111606d1bf1SJosef Bacik * reservation and the block group has become read only we cannot make the 3112606d1bf1SJosef Bacik * reservation and return -EAGAIN, otherwise this function always succeeds. 3113606d1bf1SJosef Bacik */ 311432da5386SDavid Sterba int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, 3115606d1bf1SJosef Bacik u64 ram_bytes, u64 num_bytes, int delalloc) 3116606d1bf1SJosef Bacik { 3117606d1bf1SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 3118606d1bf1SJosef Bacik int ret = 0; 3119606d1bf1SJosef Bacik 3120606d1bf1SJosef Bacik spin_lock(&space_info->lock); 3121606d1bf1SJosef Bacik spin_lock(&cache->lock); 3122606d1bf1SJosef Bacik if (cache->ro) { 3123606d1bf1SJosef Bacik ret = -EAGAIN; 3124606d1bf1SJosef Bacik } else { 3125606d1bf1SJosef Bacik cache->reserved += num_bytes; 3126606d1bf1SJosef Bacik space_info->bytes_reserved += num_bytes; 3127a43c3835SJosef Bacik trace_btrfs_space_reservation(cache->fs_info, "space_info", 3128a43c3835SJosef Bacik space_info->flags, num_bytes, 1); 3129606d1bf1SJosef Bacik btrfs_space_info_update_bytes_may_use(cache->fs_info, 3130606d1bf1SJosef Bacik space_info, -ram_bytes); 3131606d1bf1SJosef Bacik if (delalloc) 3132606d1bf1SJosef Bacik cache->delalloc_bytes += num_bytes; 313399ffb43eSJosef Bacik 313499ffb43eSJosef Bacik /* 313599ffb43eSJosef Bacik * Compression can use less space than we reserved, so wake 313699ffb43eSJosef Bacik * tickets if that happens 313799ffb43eSJosef Bacik */ 313899ffb43eSJosef Bacik if (num_bytes < ram_bytes) 313999ffb43eSJosef Bacik btrfs_try_granting_tickets(cache->fs_info, space_info); 3140606d1bf1SJosef Bacik } 3141606d1bf1SJosef Bacik spin_unlock(&cache->lock); 3142606d1bf1SJosef Bacik spin_unlock(&space_info->lock); 3143606d1bf1SJosef Bacik return ret; 3144606d1bf1SJosef Bacik } 3145606d1bf1SJosef Bacik 3146606d1bf1SJosef Bacik /** 3147606d1bf1SJosef Bacik * btrfs_free_reserved_bytes - update the block_group and space info counters 3148606d1bf1SJosef Bacik * @cache: The cache we are manipulating 3149606d1bf1SJosef Bacik * @num_bytes: The number of bytes in question 3150606d1bf1SJosef Bacik * @delalloc: The blocks are allocated for the delalloc write 
3151606d1bf1SJosef Bacik * 3152606d1bf1SJosef Bacik * This is called by somebody who is freeing space that was never actually used 3153606d1bf1SJosef Bacik * on disk. For example if you reserve some space for a new leaf in transaction 3154606d1bf1SJosef Bacik * A and before transaction A commits you free that leaf, you call this with 3155606d1bf1SJosef Bacik * reserve set to 0 in order to clear the reservation. 3156606d1bf1SJosef Bacik */ 315732da5386SDavid Sterba void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, 3158606d1bf1SJosef Bacik u64 num_bytes, int delalloc) 3159606d1bf1SJosef Bacik { 3160606d1bf1SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 3161606d1bf1SJosef Bacik 3162606d1bf1SJosef Bacik spin_lock(&space_info->lock); 3163606d1bf1SJosef Bacik spin_lock(&cache->lock); 3164606d1bf1SJosef Bacik if (cache->ro) 3165606d1bf1SJosef Bacik space_info->bytes_readonly += num_bytes; 3166606d1bf1SJosef Bacik cache->reserved -= num_bytes; 3167606d1bf1SJosef Bacik space_info->bytes_reserved -= num_bytes; 3168606d1bf1SJosef Bacik space_info->max_extent_size = 0; 3169606d1bf1SJosef Bacik 3170606d1bf1SJosef Bacik if (delalloc) 3171606d1bf1SJosef Bacik cache->delalloc_bytes -= num_bytes; 3172606d1bf1SJosef Bacik spin_unlock(&cache->lock); 31733308234aSJosef Bacik 31743308234aSJosef Bacik btrfs_try_granting_tickets(cache->fs_info, space_info); 3175606d1bf1SJosef Bacik spin_unlock(&space_info->lock); 3176606d1bf1SJosef Bacik } 317707730d87SJosef Bacik 317807730d87SJosef Bacik static void force_metadata_allocation(struct btrfs_fs_info *info) 317907730d87SJosef Bacik { 318007730d87SJosef Bacik struct list_head *head = &info->space_info; 318107730d87SJosef Bacik struct btrfs_space_info *found; 318207730d87SJosef Bacik 318372804905SJosef Bacik list_for_each_entry(found, head, list) { 318407730d87SJosef Bacik if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 318507730d87SJosef Bacik found->force_alloc = CHUNK_ALLOC_FORCE; 318607730d87SJosef Bacik } 318707730d87SJosef Bacik } 318807730d87SJosef Bacik 318907730d87SJosef Bacik static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 319007730d87SJosef Bacik struct btrfs_space_info *sinfo, int force) 319107730d87SJosef Bacik { 319207730d87SJosef Bacik u64 bytes_used = btrfs_space_info_used(sinfo, false); 319307730d87SJosef Bacik u64 thresh; 319407730d87SJosef Bacik 319507730d87SJosef Bacik if (force == CHUNK_ALLOC_FORCE) 319607730d87SJosef Bacik return 1; 319707730d87SJosef Bacik 319807730d87SJosef Bacik /* 319907730d87SJosef Bacik * in limited mode, we want to have some free space up to 320007730d87SJosef Bacik * about 1% of the FS size. 
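 * E.g. on a 1TiB filesystem the threshold below is max(64MiB, 1% of 1TiB),
 * roughly 10GiB: if this space_info has less free than that, we allocate.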
320107730d87SJosef Bacik */ 320207730d87SJosef Bacik if (force == CHUNK_ALLOC_LIMITED) { 320307730d87SJosef Bacik thresh = btrfs_super_total_bytes(fs_info->super_copy); 320407730d87SJosef Bacik thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1)); 320507730d87SJosef Bacik 320607730d87SJosef Bacik if (sinfo->total_bytes - bytes_used < thresh) 320707730d87SJosef Bacik return 1; 320807730d87SJosef Bacik } 320907730d87SJosef Bacik 321007730d87SJosef Bacik if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8)) 321107730d87SJosef Bacik return 0; 321207730d87SJosef Bacik return 1; 321307730d87SJosef Bacik } 321407730d87SJosef Bacik 321507730d87SJosef Bacik int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) 321607730d87SJosef Bacik { 321707730d87SJosef Bacik u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); 321807730d87SJosef Bacik 321907730d87SJosef Bacik return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 322007730d87SJosef Bacik } 322107730d87SJosef Bacik 322207730d87SJosef Bacik /* 322307730d87SJosef Bacik * If force is CHUNK_ALLOC_FORCE: 322407730d87SJosef Bacik * - return 1 if it successfully allocates a chunk, 322507730d87SJosef Bacik * - return errors including -ENOSPC otherwise. 322607730d87SJosef Bacik * If force is NOT CHUNK_ALLOC_FORCE: 322707730d87SJosef Bacik * - return 0 if it doesn't need to allocate a new chunk, 322807730d87SJosef Bacik * - return 1 if it successfully allocates a chunk, 322907730d87SJosef Bacik * - return errors including -ENOSPC otherwise. 323007730d87SJosef Bacik */ 323107730d87SJosef Bacik int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, 323207730d87SJosef Bacik enum btrfs_chunk_alloc_enum force) 323307730d87SJosef Bacik { 323407730d87SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 323507730d87SJosef Bacik struct btrfs_space_info *space_info; 323607730d87SJosef Bacik bool wait_for_alloc = false; 323707730d87SJosef Bacik bool should_alloc = false; 323807730d87SJosef Bacik int ret = 0; 323907730d87SJosef Bacik 324007730d87SJosef Bacik /* Don't re-enter if we're already allocating a chunk */ 324107730d87SJosef Bacik if (trans->allocating_chunk) 324207730d87SJosef Bacik return -ENOSPC; 324307730d87SJosef Bacik 324407730d87SJosef Bacik space_info = btrfs_find_space_info(fs_info, flags); 324507730d87SJosef Bacik ASSERT(space_info); 324607730d87SJosef Bacik 324707730d87SJosef Bacik do { 324807730d87SJosef Bacik spin_lock(&space_info->lock); 324907730d87SJosef Bacik if (force < space_info->force_alloc) 325007730d87SJosef Bacik force = space_info->force_alloc; 325107730d87SJosef Bacik should_alloc = should_alloc_chunk(fs_info, space_info, force); 325207730d87SJosef Bacik if (space_info->full) { 325307730d87SJosef Bacik /* No more free physical space */ 325407730d87SJosef Bacik if (should_alloc) 325507730d87SJosef Bacik ret = -ENOSPC; 325607730d87SJosef Bacik else 325707730d87SJosef Bacik ret = 0; 325807730d87SJosef Bacik spin_unlock(&space_info->lock); 325907730d87SJosef Bacik return ret; 326007730d87SJosef Bacik } else if (!should_alloc) { 326107730d87SJosef Bacik spin_unlock(&space_info->lock); 326207730d87SJosef Bacik return 0; 326307730d87SJosef Bacik } else if (space_info->chunk_alloc) { 326407730d87SJosef Bacik /* 326507730d87SJosef Bacik * Someone is already allocating, so we need to block 326607730d87SJosef Bacik * until this someone is finished and then loop to 326707730d87SJosef Bacik * recheck if we should continue with our allocation 326807730d87SJosef Bacik * attempt. 
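 * The mutex_lock()/mutex_unlock() pair on chunk_mutex below acts purely as
 * a wait barrier: the allocating task holds chunk_mutex for the whole
 * allocation, so once we acquire (and immediately drop) it, that allocation
 * has completed and we can re-evaluate should_alloc_chunk().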
326907730d87SJosef Bacik */ 327007730d87SJosef Bacik wait_for_alloc = true; 327107730d87SJosef Bacik spin_unlock(&space_info->lock); 327207730d87SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 327307730d87SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 327407730d87SJosef Bacik } else { 327507730d87SJosef Bacik /* Proceed with allocation */ 327607730d87SJosef Bacik space_info->chunk_alloc = 1; 327707730d87SJosef Bacik wait_for_alloc = false; 327807730d87SJosef Bacik spin_unlock(&space_info->lock); 327907730d87SJosef Bacik } 328007730d87SJosef Bacik 328107730d87SJosef Bacik cond_resched(); 328207730d87SJosef Bacik } while (wait_for_alloc); 328307730d87SJosef Bacik 328407730d87SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 328507730d87SJosef Bacik trans->allocating_chunk = true; 328607730d87SJosef Bacik 328707730d87SJosef Bacik /* 328807730d87SJosef Bacik * If we have mixed data/metadata chunks we want to make sure we keep 328907730d87SJosef Bacik * allocating mixed chunks instead of individual chunks. 329007730d87SJosef Bacik */ 329107730d87SJosef Bacik if (btrfs_mixed_space_info(space_info)) 329207730d87SJosef Bacik flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 329307730d87SJosef Bacik 329407730d87SJosef Bacik /* 329507730d87SJosef Bacik * if we're doing a data chunk, go ahead and make sure that 329607730d87SJosef Bacik * we keep a reasonable number of metadata chunks allocated in the 329707730d87SJosef Bacik * FS as well. 329807730d87SJosef Bacik */ 329907730d87SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 330007730d87SJosef Bacik fs_info->data_chunk_allocations++; 330107730d87SJosef Bacik if (!(fs_info->data_chunk_allocations % 330207730d87SJosef Bacik fs_info->metadata_ratio)) 330307730d87SJosef Bacik force_metadata_allocation(fs_info); 330407730d87SJosef Bacik } 330507730d87SJosef Bacik 330607730d87SJosef Bacik /* 330707730d87SJosef Bacik * Check if we have enough space in SYSTEM chunk because we may need 330807730d87SJosef Bacik * to update devices. 330907730d87SJosef Bacik */ 331007730d87SJosef Bacik check_system_chunk(trans, flags); 331107730d87SJosef Bacik 331207730d87SJosef Bacik ret = btrfs_alloc_chunk(trans, flags); 331307730d87SJosef Bacik trans->allocating_chunk = false; 331407730d87SJosef Bacik 331507730d87SJosef Bacik spin_lock(&space_info->lock); 331607730d87SJosef Bacik if (ret < 0) { 331707730d87SJosef Bacik if (ret == -ENOSPC) 331807730d87SJosef Bacik space_info->full = 1; 331907730d87SJosef Bacik else 332007730d87SJosef Bacik goto out; 332107730d87SJosef Bacik } else { 332207730d87SJosef Bacik ret = 1; 332307730d87SJosef Bacik space_info->max_extent_size = 0; 332407730d87SJosef Bacik } 332507730d87SJosef Bacik 332607730d87SJosef Bacik space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 332707730d87SJosef Bacik out: 332807730d87SJosef Bacik space_info->chunk_alloc = 0; 332907730d87SJosef Bacik spin_unlock(&space_info->lock); 333007730d87SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 333107730d87SJosef Bacik /* 333207730d87SJosef Bacik * When we allocate a new chunk we reserve space in the chunk block 333307730d87SJosef Bacik * reserve to make sure we can COW nodes/leafs in the chunk tree or 333407730d87SJosef Bacik * add new nodes/leafs to it if we end up needing to do it when 333507730d87SJosef Bacik * inserting the chunk item and updating device items as part of the 333607730d87SJosef Bacik * second phase of chunk allocation, performed by 333707730d87SJosef Bacik * btrfs_finish_chunk_alloc(). 
So make sure we don't accumulate a 333807730d87SJosef Bacik * large number of new block groups to create in our transaction 333907730d87SJosef Bacik * handle's new_bgs list to avoid exhausting the chunk block reserve 334007730d87SJosef Bacik * in extreme cases - like having a single transaction create many new 334107730d87SJosef Bacik * block groups when starting to write out the free space caches of all 334207730d87SJosef Bacik * the block groups that were made dirty during the lifetime of the 334307730d87SJosef Bacik * transaction. 334407730d87SJosef Bacik */ 334507730d87SJosef Bacik if (trans->chunk_bytes_reserved >= (u64)SZ_2M) 334607730d87SJosef Bacik btrfs_create_pending_block_groups(trans); 334707730d87SJosef Bacik 334807730d87SJosef Bacik return ret; 334907730d87SJosef Bacik } 335007730d87SJosef Bacik 335107730d87SJosef Bacik static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) 335207730d87SJosef Bacik { 335307730d87SJosef Bacik u64 num_dev; 335407730d87SJosef Bacik 335507730d87SJosef Bacik num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; 335607730d87SJosef Bacik if (!num_dev) 335707730d87SJosef Bacik num_dev = fs_info->fs_devices->rw_devices; 335807730d87SJosef Bacik 335907730d87SJosef Bacik return num_dev; 336007730d87SJosef Bacik } 336107730d87SJosef Bacik 336207730d87SJosef Bacik /* 3363a9143bd3SMarcos Paulo de Souza * Reserve space in the system space for allocating or removing a chunk 336407730d87SJosef Bacik */ 336507730d87SJosef Bacik void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) 336607730d87SJosef Bacik { 3367eafa4fd0SFilipe Manana struct btrfs_transaction *cur_trans = trans->transaction; 336807730d87SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 336907730d87SJosef Bacik struct btrfs_space_info *info; 337007730d87SJosef Bacik u64 left; 337107730d87SJosef Bacik u64 thresh; 337207730d87SJosef Bacik int ret = 0; 337307730d87SJosef Bacik u64 num_devs; 337407730d87SJosef Bacik 337507730d87SJosef Bacik /* 337607730d87SJosef Bacik * Needed because we can end up allocating a system chunk and for an 337707730d87SJosef Bacik * atomic and race free space reservation in the chunk block reserve. 
337807730d87SJosef Bacik */ 337907730d87SJosef Bacik lockdep_assert_held(&fs_info->chunk_mutex); 338007730d87SJosef Bacik 338107730d87SJosef Bacik info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 3382eafa4fd0SFilipe Manana again: 338307730d87SJosef Bacik spin_lock(&info->lock); 338407730d87SJosef Bacik left = info->total_bytes - btrfs_space_info_used(info, true); 338507730d87SJosef Bacik spin_unlock(&info->lock); 338607730d87SJosef Bacik 338707730d87SJosef Bacik num_devs = get_profile_num_devs(fs_info, type); 338807730d87SJosef Bacik 338907730d87SJosef Bacik /* num_devs device items to update and 1 chunk item to add or remove */ 33902bd36e7bSJosef Bacik thresh = btrfs_calc_metadata_size(fs_info, num_devs) + 33912bd36e7bSJosef Bacik btrfs_calc_insert_metadata_size(fs_info, 1); 339207730d87SJosef Bacik 339307730d87SJosef Bacik if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 339407730d87SJosef Bacik btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", 339507730d87SJosef Bacik left, thresh, type); 339607730d87SJosef Bacik btrfs_dump_space_info(fs_info, info, 0, 0); 339707730d87SJosef Bacik } 339807730d87SJosef Bacik 339907730d87SJosef Bacik if (left < thresh) { 340007730d87SJosef Bacik u64 flags = btrfs_system_alloc_profile(fs_info); 3401eafa4fd0SFilipe Manana u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved); 3402eafa4fd0SFilipe Manana 3403eafa4fd0SFilipe Manana /* 3404eafa4fd0SFilipe Manana * If there's no available space for the chunk tree (system 3405eafa4fd0SFilipe Manana * space) and there are other tasks that reserved space for 3406eafa4fd0SFilipe Manana * creating a new system block group, wait for them to complete 3407eafa4fd0SFilipe Manana * the creation of their system block group and release excess 3408eafa4fd0SFilipe Manana * reserved space. We do this because: 3409eafa4fd0SFilipe Manana * 3410eafa4fd0SFilipe Manana * *) We can end up allocating more system chunks than necessary 3411eafa4fd0SFilipe Manana * when there are multiple tasks that are concurrently 3412eafa4fd0SFilipe Manana * allocating block groups, which can lead to exhaustion of 3413eafa4fd0SFilipe Manana * the system array in the superblock; 3414eafa4fd0SFilipe Manana * 3415eafa4fd0SFilipe Manana * *) If we allocate extra and unnecessary system block groups, 3416eafa4fd0SFilipe Manana * despite being empty for a long time, and possibly forever, 3417eafa4fd0SFilipe Manana * they end up not being added to the list of unused block groups 3418eafa4fd0SFilipe Manana * because that typically happens only when deallocating the 3419eafa4fd0SFilipe Manana * last extent from a block group - which never happens since 3420eafa4fd0SFilipe Manana * we never allocate from them in the first place. The few 3421eafa4fd0SFilipe Manana * exceptions are when mounting a filesystem or running scrub, 3422eafa4fd0SFilipe Manana * which add unused block groups to the list of unused block 3423eafa4fd0SFilipe Manana * groups, to be deleted by the cleaner kthread.
3424eafa4fd0SFilipe Manana * And even when they are added to the list of unused block 3425eafa4fd0SFilipe Manana * groups, it can take a long time until they get deleted, 3426eafa4fd0SFilipe Manana * since the cleaner kthread might be sleeping or busy with 3427eafa4fd0SFilipe Manana * other work (deleting subvolumes, running delayed iputs, 3428eafa4fd0SFilipe Manana * defrag scheduling, etc); 3429eafa4fd0SFilipe Manana * 3430eafa4fd0SFilipe Manana * This is rare in practice, but can happen when too many tasks 3431eafa4fd0SFilipe Manana * are allocating block groups in parallel (via fallocate()) 3432eafa4fd0SFilipe Manana * and before the one that reserved space for a new system block 3433eafa4fd0SFilipe Manana * group finishes the block group creation and releases the space 3434eafa4fd0SFilipe Manana * reserved in excess (at btrfs_create_pending_block_groups()), 3435eafa4fd0SFilipe Manana * other tasks end up here and find the free system space 3436eafa4fd0SFilipe Manana * temporarily insufficient for updating the chunk tree. 3437eafa4fd0SFilipe Manana * 3438eafa4fd0SFilipe Manana * We unlock the chunk mutex before waiting for such tasks and 3439eafa4fd0SFilipe Manana * lock it again after the wait, otherwise we would deadlock. 3440eafa4fd0SFilipe Manana * It is safe to do so because allocating a system chunk is the 3441eafa4fd0SFilipe Manana * first thing done while allocating a new block group. 3442eafa4fd0SFilipe Manana */ 3443eafa4fd0SFilipe Manana if (reserved > trans->chunk_bytes_reserved) { 3444eafa4fd0SFilipe Manana const u64 min_needed = reserved - thresh; 3445eafa4fd0SFilipe Manana 3446eafa4fd0SFilipe Manana mutex_unlock(&fs_info->chunk_mutex); 3447eafa4fd0SFilipe Manana wait_event(cur_trans->chunk_reserve_wait, 3448eafa4fd0SFilipe Manana atomic64_read(&cur_trans->chunk_bytes_reserved) <= 3449eafa4fd0SFilipe Manana min_needed); 3450eafa4fd0SFilipe Manana mutex_lock(&fs_info->chunk_mutex); 3451eafa4fd0SFilipe Manana goto again; 3452eafa4fd0SFilipe Manana } 345307730d87SJosef Bacik 345407730d87SJosef Bacik /* 345507730d87SJosef Bacik * Ignore failure to create system chunk. We might end up not 345607730d87SJosef Bacik * needing it, as we might not need to COW all nodes/leafs from 345707730d87SJosef Bacik * the paths we visit in the chunk tree (they were already COWed 345807730d87SJosef Bacik * or created in the current transaction for example).
345907730d87SJosef Bacik */ 346007730d87SJosef Bacik ret = btrfs_alloc_chunk(trans, flags); 346107730d87SJosef Bacik } 346207730d87SJosef Bacik 346307730d87SJosef Bacik if (!ret) { 346407730d87SJosef Bacik ret = btrfs_block_rsv_add(fs_info->chunk_root, 346507730d87SJosef Bacik &fs_info->chunk_block_rsv, 346607730d87SJosef Bacik thresh, BTRFS_RESERVE_NO_FLUSH); 3467eafa4fd0SFilipe Manana if (!ret) { 3468eafa4fd0SFilipe Manana atomic64_add(thresh, &cur_trans->chunk_bytes_reserved); 346907730d87SJosef Bacik trans->chunk_bytes_reserved += thresh; 347007730d87SJosef Bacik } 347107730d87SJosef Bacik } 3472eafa4fd0SFilipe Manana } 347307730d87SJosef Bacik 34743e43c279SJosef Bacik void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 34753e43c279SJosef Bacik { 347632da5386SDavid Sterba struct btrfs_block_group *block_group; 34773e43c279SJosef Bacik u64 last = 0; 34783e43c279SJosef Bacik 34793e43c279SJosef Bacik while (1) { 34803e43c279SJosef Bacik struct inode *inode; 34813e43c279SJosef Bacik 34823e43c279SJosef Bacik block_group = btrfs_lookup_first_block_group(info, last); 34833e43c279SJosef Bacik while (block_group) { 34843e43c279SJosef Bacik btrfs_wait_block_group_cache_done(block_group); 34853e43c279SJosef Bacik spin_lock(&block_group->lock); 34863e43c279SJosef Bacik if (block_group->iref) 34873e43c279SJosef Bacik break; 34883e43c279SJosef Bacik spin_unlock(&block_group->lock); 34893e43c279SJosef Bacik block_group = btrfs_next_block_group(block_group); 34903e43c279SJosef Bacik } 34913e43c279SJosef Bacik if (!block_group) { 34923e43c279SJosef Bacik if (last == 0) 34933e43c279SJosef Bacik break; 34943e43c279SJosef Bacik last = 0; 34953e43c279SJosef Bacik continue; 34963e43c279SJosef Bacik } 34973e43c279SJosef Bacik 34983e43c279SJosef Bacik inode = block_group->inode; 34993e43c279SJosef Bacik block_group->iref = 0; 35003e43c279SJosef Bacik block_group->inode = NULL; 35013e43c279SJosef Bacik spin_unlock(&block_group->lock); 35023e43c279SJosef Bacik ASSERT(block_group->io_ctl.inode == NULL); 35033e43c279SJosef Bacik iput(inode); 3504b3470b5dSDavid Sterba last = block_group->start + block_group->length; 35053e43c279SJosef Bacik btrfs_put_block_group(block_group); 35063e43c279SJosef Bacik } 35073e43c279SJosef Bacik } 35083e43c279SJosef Bacik 35093e43c279SJosef Bacik /* 35103e43c279SJosef Bacik * Must be called only after stopping all workers, since we could have block 35113e43c279SJosef Bacik * group caching kthreads running, and therefore they could race with us if we 35123e43c279SJosef Bacik * freed the block groups before stopping them. 
35133e43c279SJosef Bacik */ 35143e43c279SJosef Bacik int btrfs_free_block_groups(struct btrfs_fs_info *info) 35153e43c279SJosef Bacik { 351632da5386SDavid Sterba struct btrfs_block_group *block_group; 35173e43c279SJosef Bacik struct btrfs_space_info *space_info; 35183e43c279SJosef Bacik struct btrfs_caching_control *caching_ctl; 35193e43c279SJosef Bacik struct rb_node *n; 35203e43c279SJosef Bacik 3521bbb86a37SJosef Bacik spin_lock(&info->block_group_cache_lock); 35223e43c279SJosef Bacik while (!list_empty(&info->caching_block_groups)) { 35233e43c279SJosef Bacik caching_ctl = list_entry(info->caching_block_groups.next, 35243e43c279SJosef Bacik struct btrfs_caching_control, list); 35253e43c279SJosef Bacik list_del(&caching_ctl->list); 35263e43c279SJosef Bacik btrfs_put_caching_control(caching_ctl); 35273e43c279SJosef Bacik } 3528bbb86a37SJosef Bacik spin_unlock(&info->block_group_cache_lock); 35293e43c279SJosef Bacik 35303e43c279SJosef Bacik spin_lock(&info->unused_bgs_lock); 35313e43c279SJosef Bacik while (!list_empty(&info->unused_bgs)) { 35323e43c279SJosef Bacik block_group = list_first_entry(&info->unused_bgs, 353332da5386SDavid Sterba struct btrfs_block_group, 35343e43c279SJosef Bacik bg_list); 35353e43c279SJosef Bacik list_del_init(&block_group->bg_list); 35363e43c279SJosef Bacik btrfs_put_block_group(block_group); 35373e43c279SJosef Bacik } 35383e43c279SJosef Bacik spin_unlock(&info->unused_bgs_lock); 35393e43c279SJosef Bacik 354018bb8bbfSJohannes Thumshirn spin_lock(&info->unused_bgs_lock); 354118bb8bbfSJohannes Thumshirn while (!list_empty(&info->reclaim_bgs)) { 354218bb8bbfSJohannes Thumshirn block_group = list_first_entry(&info->reclaim_bgs, 354318bb8bbfSJohannes Thumshirn struct btrfs_block_group, 354418bb8bbfSJohannes Thumshirn bg_list); 354518bb8bbfSJohannes Thumshirn list_del_init(&block_group->bg_list); 354618bb8bbfSJohannes Thumshirn btrfs_put_block_group(block_group); 354718bb8bbfSJohannes Thumshirn } 354818bb8bbfSJohannes Thumshirn spin_unlock(&info->unused_bgs_lock); 354918bb8bbfSJohannes Thumshirn 35503e43c279SJosef Bacik spin_lock(&info->block_group_cache_lock); 35513e43c279SJosef Bacik while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { 355232da5386SDavid Sterba block_group = rb_entry(n, struct btrfs_block_group, 35533e43c279SJosef Bacik cache_node); 35543e43c279SJosef Bacik rb_erase(&block_group->cache_node, 35553e43c279SJosef Bacik &info->block_group_cache_tree); 35563e43c279SJosef Bacik RB_CLEAR_NODE(&block_group->cache_node); 35573e43c279SJosef Bacik spin_unlock(&info->block_group_cache_lock); 35583e43c279SJosef Bacik 35593e43c279SJosef Bacik down_write(&block_group->space_info->groups_sem); 35603e43c279SJosef Bacik list_del(&block_group->list); 35613e43c279SJosef Bacik up_write(&block_group->space_info->groups_sem); 35623e43c279SJosef Bacik 35633e43c279SJosef Bacik /* 35643e43c279SJosef Bacik * We haven't cached this block group, which means we could 35653e43c279SJosef Bacik * possibly have excluded extents on this block group. 
35663e43c279SJosef Bacik */ 35673e43c279SJosef Bacik if (block_group->cached == BTRFS_CACHE_NO || 35683e43c279SJosef Bacik block_group->cached == BTRFS_CACHE_ERROR) 35693e43c279SJosef Bacik btrfs_free_excluded_extents(block_group); 35703e43c279SJosef Bacik 35713e43c279SJosef Bacik btrfs_remove_free_space_cache(block_group); 35723e43c279SJosef Bacik ASSERT(block_group->cached != BTRFS_CACHE_STARTED); 35733e43c279SJosef Bacik ASSERT(list_empty(&block_group->dirty_list)); 35743e43c279SJosef Bacik ASSERT(list_empty(&block_group->io_list)); 35753e43c279SJosef Bacik ASSERT(list_empty(&block_group->bg_list)); 357648aaeebeSJosef Bacik ASSERT(refcount_read(&block_group->refs) == 1); 3577195a49eaSFilipe Manana ASSERT(block_group->swap_extents == 0); 35783e43c279SJosef Bacik btrfs_put_block_group(block_group); 35793e43c279SJosef Bacik 35803e43c279SJosef Bacik spin_lock(&info->block_group_cache_lock); 35813e43c279SJosef Bacik } 35823e43c279SJosef Bacik spin_unlock(&info->block_group_cache_lock); 35833e43c279SJosef Bacik 35843e43c279SJosef Bacik btrfs_release_global_block_rsv(info); 35853e43c279SJosef Bacik 35863e43c279SJosef Bacik while (!list_empty(&info->space_info)) { 35873e43c279SJosef Bacik space_info = list_entry(info->space_info.next, 35883e43c279SJosef Bacik struct btrfs_space_info, 35893e43c279SJosef Bacik list); 35903e43c279SJosef Bacik 35913e43c279SJosef Bacik /* 35923e43c279SJosef Bacik * Do not hide this behind enospc_debug, this is actually 35933e43c279SJosef Bacik * important and indicates a real bug if this happens. 35943e43c279SJosef Bacik */ 35953e43c279SJosef Bacik if (WARN_ON(space_info->bytes_pinned > 0 || 35963e43c279SJosef Bacik space_info->bytes_reserved > 0 || 35973e43c279SJosef Bacik space_info->bytes_may_use > 0)) 35983e43c279SJosef Bacik btrfs_dump_space_info(info, space_info, 0, 0); 3599d611add4SFilipe Manana WARN_ON(space_info->reclaim_size > 0); 36003e43c279SJosef Bacik list_del(&space_info->list); 36013e43c279SJosef Bacik btrfs_sysfs_remove_space_info(space_info); 36023e43c279SJosef Bacik } 36033e43c279SJosef Bacik return 0; 36043e43c279SJosef Bacik } 3605684b752bSFilipe Manana 3606684b752bSFilipe Manana void btrfs_freeze_block_group(struct btrfs_block_group *cache) 3607684b752bSFilipe Manana { 3608684b752bSFilipe Manana atomic_inc(&cache->frozen); 3609684b752bSFilipe Manana } 3610684b752bSFilipe Manana 3611684b752bSFilipe Manana void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) 3612684b752bSFilipe Manana { 3613684b752bSFilipe Manana struct btrfs_fs_info *fs_info = block_group->fs_info; 3614684b752bSFilipe Manana struct extent_map_tree *em_tree; 3615684b752bSFilipe Manana struct extent_map *em; 3616684b752bSFilipe Manana bool cleanup; 3617684b752bSFilipe Manana 3618684b752bSFilipe Manana spin_lock(&block_group->lock); 3619684b752bSFilipe Manana cleanup = (atomic_dec_and_test(&block_group->frozen) && 3620684b752bSFilipe Manana block_group->removed); 3621684b752bSFilipe Manana spin_unlock(&block_group->lock); 3622684b752bSFilipe Manana 3623684b752bSFilipe Manana if (cleanup) { 3624684b752bSFilipe Manana em_tree = &fs_info->mapping_tree; 3625684b752bSFilipe Manana write_lock(&em_tree->lock); 3626684b752bSFilipe Manana em = lookup_extent_mapping(em_tree, block_group->start, 3627684b752bSFilipe Manana 1); 3628684b752bSFilipe Manana BUG_ON(!em); /* logic error, can't happen */ 3629684b752bSFilipe Manana remove_extent_mapping(em_tree, em); 3630684b752bSFilipe Manana write_unlock(&em_tree->lock); 3631684b752bSFilipe Manana 3632684b752bSFilipe Manana /* once 
for us and once for the tree */ 3633684b752bSFilipe Manana free_extent_map(em); 3634684b752bSFilipe Manana free_extent_map(em); 3635684b752bSFilipe Manana 3636684b752bSFilipe Manana /* 3637684b752bSFilipe Manana * We may have left one free space entry, and other tasks that 3638684b752bSFilipe Manana * were trimming this block group may have left one entry each. 3639684b752bSFilipe Manana * Free them if any. 3640684b752bSFilipe Manana */ 3641684b752bSFilipe Manana __btrfs_remove_free_space_cache(block_group->free_space_ctl); 3642684b752bSFilipe Manana } 3643684b752bSFilipe Manana } 3644195a49eaSFilipe Manana 3645195a49eaSFilipe Manana bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg) 3646195a49eaSFilipe Manana { 3647195a49eaSFilipe Manana bool ret = true; 3648195a49eaSFilipe Manana 3649195a49eaSFilipe Manana spin_lock(&bg->lock); 3650195a49eaSFilipe Manana if (bg->ro) 3651195a49eaSFilipe Manana ret = false; 3652195a49eaSFilipe Manana else 3653195a49eaSFilipe Manana bg->swap_extents++; 3654195a49eaSFilipe Manana spin_unlock(&bg->lock); 3655195a49eaSFilipe Manana 3656195a49eaSFilipe Manana return ret; 3657195a49eaSFilipe Manana } 3658195a49eaSFilipe Manana 3659195a49eaSFilipe Manana void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount) 3660195a49eaSFilipe Manana { 3661195a49eaSFilipe Manana spin_lock(&bg->lock); 3662195a49eaSFilipe Manana ASSERT(!bg->ro); 3663195a49eaSFilipe Manana ASSERT(bg->swap_extents >= amount); 3664195a49eaSFilipe Manana bg->swap_extents -= amount; 3665195a49eaSFilipe Manana spin_unlock(&bg->lock); 3666195a49eaSFilipe Manana } 3667
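/*
 * A minimal, userspace-only sketch of the freeze/unfreeze contract used by
 * btrfs_freeze_block_group() and btrfs_unfreeze_block_group() above: whichever
 * task drops the last "frozen" reference on a block group that has already
 * been removed performs the final cleanup. This is not kernel code and all
 * names below are hypothetical; the kernel pairs an atomic counter with the
 * block group spin lock, while here a single pthread mutex keeps the sketch
 * short.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_group {
	pthread_mutex_t lock;	/* stands in for block_group->lock */
	int frozen;		/* modelled after block_group->frozen */
	bool removed;		/* modelled after block_group->removed */
};

static void demo_freeze(struct demo_group *g)
{
	pthread_mutex_lock(&g->lock);
	g->frozen++;
	pthread_mutex_unlock(&g->lock);
}

static void demo_unfreeze(struct demo_group *g)
{
	bool cleanup;

	/* Decide under the lock whether we are the last unfreezer of a removed group. */
	pthread_mutex_lock(&g->lock);
	cleanup = (--g->frozen == 0) && g->removed;
	pthread_mutex_unlock(&g->lock);

	if (cleanup)
		printf("last unfreeze after removal: run the final cleanup here\n");
}

static void demo_remove(struct demo_group *g)
{
	pthread_mutex_lock(&g->lock);
	g->removed = true;
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct demo_group g = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.frozen = 0,
		.removed = false,
	};

	demo_freeze(&g);	/* e.g. a discard/trim task pins the group */
	demo_remove(&g);	/* the group is deleted while still frozen */
	demo_unfreeze(&g);	/* last unfreeze: the cleanup runs here */
	return 0;
}
/*
 * In the kernel function that cleanup step removes the block group's extent
 * map and any leftover free space entries; the sketch only marks where it
 * would happen.
 */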