// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format.  If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
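 *
 * For example, if @flags allows both RAID1 and RAID0 for data and there are
 * enough rw devices for both, the higher-priority RAID1 is kept and the
 * result is DATA|RAID1 in chunk format.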
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
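	/* Drop one ref; the block group is freed when the last ref goes away. */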
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	ASSERT(block_group->length != 0);

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
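 *
 * The returned block group, if any, has had its reference count incremented;
 * the caller must drop it with btrfs_put_block_group().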
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once.  So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes.  Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group, since we could have freed
 * extents we need to check the pinned_extents for any extents that can't be
 * used yet since their free space will be released as soon as the transaction
 * commits.
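 *
 * Returns the total amount of free space added for the [start, end) range,
 * i.e. the part of that range not marked in the excluded_extents io tree.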
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
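	 * Leaving wakeup false means we neither advance caching_ctl->progress
	 * nor wake waiters while scanning the extent tree.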
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
				block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
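	 *
	 * The loop below therefore waits, dropping cache->lock around
	 * schedule(), until the state moves away from BTRFS_CACHE_FAST.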
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		btrfs_put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->length - cache->used;
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			btrfs_put_caching_control(caching_ctl);
			btrfs_free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
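		 * The caching work itself is queued at the end of this
		 * function; when load_cache_only is set we stay at
		 * BTRFS_CACHE_NO and return without queuing it.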
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		btrfs_put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *   in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = fs_info->extent_root;
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	if (fs_info->first_logical_byte == block_group->start)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
					    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -= block_group->length;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed.  This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group.  This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard.  The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
1138e3e0520bSJosef Bacik */ 11396b7304afSFilipe Manana remove_em = (atomic_read(&block_group->frozen) == 0); 1140e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1141e3e0520bSJosef Bacik 1142e3e0520bSJosef Bacik if (remove_em) { 1143e3e0520bSJosef Bacik struct extent_map_tree *em_tree; 1144e3e0520bSJosef Bacik 1145e3e0520bSJosef Bacik em_tree = &fs_info->mapping_tree; 1146e3e0520bSJosef Bacik write_lock(&em_tree->lock); 1147e3e0520bSJosef Bacik remove_extent_mapping(em_tree, em); 1148e3e0520bSJosef Bacik write_unlock(&em_tree->lock); 1149e3e0520bSJosef Bacik /* once for the tree */ 1150e3e0520bSJosef Bacik free_extent_map(em); 1151e3e0520bSJosef Bacik } 1152f6033c5eSXiyu Yang 11539fecd132SFilipe Manana out: 1154f6033c5eSXiyu Yang /* Once for the lookup reference */ 1155f6033c5eSXiyu Yang btrfs_put_block_group(block_group); 1156e3e0520bSJosef Bacik if (remove_rsv) 1157e3e0520bSJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 1158e3e0520bSJosef Bacik btrfs_free_path(path); 1159e3e0520bSJosef Bacik return ret; 1160e3e0520bSJosef Bacik } 1161e3e0520bSJosef Bacik 1162e3e0520bSJosef Bacik struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 1163e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info, const u64 chunk_offset) 1164e3e0520bSJosef Bacik { 1165e3e0520bSJosef Bacik struct extent_map_tree *em_tree = &fs_info->mapping_tree; 1166e3e0520bSJosef Bacik struct extent_map *em; 1167e3e0520bSJosef Bacik struct map_lookup *map; 1168e3e0520bSJosef Bacik unsigned int num_items; 1169e3e0520bSJosef Bacik 1170e3e0520bSJosef Bacik read_lock(&em_tree->lock); 1171e3e0520bSJosef Bacik em = lookup_extent_mapping(em_tree, chunk_offset, 1); 1172e3e0520bSJosef Bacik read_unlock(&em_tree->lock); 1173e3e0520bSJosef Bacik ASSERT(em && em->start == chunk_offset); 1174e3e0520bSJosef Bacik 1175e3e0520bSJosef Bacik /* 1176e3e0520bSJosef Bacik * We need to reserve 3 + N units from the metadata space info in order 1177e3e0520bSJosef Bacik * to remove a block group (done at btrfs_remove_chunk() and at 1178e3e0520bSJosef Bacik * btrfs_remove_block_group()), which are used for: 1179e3e0520bSJosef Bacik * 1180e3e0520bSJosef Bacik * 1 unit for adding the free space inode's orphan (located in the tree 1181e3e0520bSJosef Bacik * of tree roots). 1182e3e0520bSJosef Bacik * 1 unit for deleting the block group item (located in the extent 1183e3e0520bSJosef Bacik * tree). 1184e3e0520bSJosef Bacik * 1 unit for deleting the free space item (located in tree of tree 1185e3e0520bSJosef Bacik * roots). 1186e3e0520bSJosef Bacik * N units for deleting N device extent items corresponding to each 1187e3e0520bSJosef Bacik * stripe (located in the device tree). 1188e3e0520bSJosef Bacik * 1189e3e0520bSJosef Bacik * In order to remove a block group we also need to reserve units in the 1190e3e0520bSJosef Bacik * system space info in order to update the chunk tree (update one or 1191e3e0520bSJosef Bacik * more device items and remove one chunk item), but this is done at 1192e3e0520bSJosef Bacik * btrfs_remove_chunk() through a call to check_system_chunk(). 
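 *
 * For example (illustrative numbers, not taken from any particular
 * filesystem): removing a chunk whose map has 4 stripes reserves
 * 3 + 4 = 7 metadata units here.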
1193e3e0520bSJosef Bacik */
1194e3e0520bSJosef Bacik map = em->map_lookup;
1195e3e0520bSJosef Bacik num_items = 3 + map->num_stripes;
1196e3e0520bSJosef Bacik free_extent_map(em);
1197e3e0520bSJosef Bacik 
1198e3e0520bSJosef Bacik return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
11997f9fe614SJosef Bacik num_items);
1200e3e0520bSJosef Bacik }
1201e3e0520bSJosef Bacik 
1202e3e0520bSJosef Bacik /*
120326ce2095SJosef Bacik * Mark block group @cache read-only, so later writes won't happen to block
120426ce2095SJosef Bacik * group @cache.
120526ce2095SJosef Bacik *
120626ce2095SJosef Bacik * If @force is not set, this function will only mark the block group readonly
120726ce2095SJosef Bacik * if we have enough free space in other metadata/system block groups.
120826ce2095SJosef Bacik * If @force is set, this function will mark the block group readonly
120926ce2095SJosef Bacik * without checking free space.
121026ce2095SJosef Bacik *
121126ce2095SJosef Bacik * NOTE: This function doesn't care if other block groups can contain all the
121226ce2095SJosef Bacik * data in this block group. That check should be done by the relocation
121326ce2095SJosef Bacik * routine, not this function.
121426ce2095SJosef Bacik */
121532da5386SDavid Sterba static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
121626ce2095SJosef Bacik {
121726ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info;
121826ce2095SJosef Bacik u64 num_bytes;
121926ce2095SJosef Bacik int ret = -ENOSPC;
122026ce2095SJosef Bacik 
122126ce2095SJosef Bacik spin_lock(&sinfo->lock);
122226ce2095SJosef Bacik spin_lock(&cache->lock);
122326ce2095SJosef Bacik 
122426ce2095SJosef Bacik if (cache->ro) {
122526ce2095SJosef Bacik cache->ro++;
122626ce2095SJosef Bacik ret = 0;
122726ce2095SJosef Bacik goto out;
122826ce2095SJosef Bacik }
122926ce2095SJosef Bacik 
1230b3470b5dSDavid Sterba num_bytes = cache->length - cache->reserved - cache->pinned -
1231bf38be65SDavid Sterba cache->bytes_super - cache->used;
123226ce2095SJosef Bacik 
123326ce2095SJosef Bacik /*
1234a30a3d20SJosef Bacik * Data never overcommits, even in mixed mode, so do just the straight
1235a30a3d20SJosef Bacik * check of left over space in how much we have allocated.
1236a30a3d20SJosef Bacik */
1237a30a3d20SJosef Bacik if (force) {
1238a30a3d20SJosef Bacik ret = 0;
1239a30a3d20SJosef Bacik } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
1240a30a3d20SJosef Bacik u64 sinfo_used = btrfs_space_info_used(sinfo, true);
1241a30a3d20SJosef Bacik 
1242a30a3d20SJosef Bacik /*
124326ce2095SJosef Bacik * Here we make sure that if we mark this bg RO, we still have
1244f8935566SJosef Bacik * enough free space as a buffer.
124526ce2095SJosef Bacik */
1246a30a3d20SJosef Bacik if (sinfo_used + num_bytes <= sinfo->total_bytes)
1247a30a3d20SJosef Bacik ret = 0;
1248a30a3d20SJosef Bacik } else {
1249a30a3d20SJosef Bacik /*
1250a30a3d20SJosef Bacik * We overcommit metadata, so we need to do the
1251a30a3d20SJosef Bacik * btrfs_can_overcommit check here, and we need to pass in
1252a30a3d20SJosef Bacik * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
1253a30a3d20SJosef Bacik * leeway to allow us to mark this block group as read only.
1254a30a3d20SJosef Bacik */ 1255a30a3d20SJosef Bacik if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, 1256a30a3d20SJosef Bacik BTRFS_RESERVE_NO_FLUSH)) 1257a30a3d20SJosef Bacik ret = 0; 1258a30a3d20SJosef Bacik } 1259a30a3d20SJosef Bacik 1260a30a3d20SJosef Bacik if (!ret) { 126126ce2095SJosef Bacik sinfo->bytes_readonly += num_bytes; 126226ce2095SJosef Bacik cache->ro++; 126326ce2095SJosef Bacik list_add_tail(&cache->ro_list, &sinfo->ro_bgs); 126426ce2095SJosef Bacik } 126526ce2095SJosef Bacik out: 126626ce2095SJosef Bacik spin_unlock(&cache->lock); 126726ce2095SJosef Bacik spin_unlock(&sinfo->lock); 126826ce2095SJosef Bacik if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { 126926ce2095SJosef Bacik btrfs_info(cache->fs_info, 1270b3470b5dSDavid Sterba "unable to make block group %llu ro", cache->start); 127126ce2095SJosef Bacik btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); 127226ce2095SJosef Bacik } 127326ce2095SJosef Bacik return ret; 127426ce2095SJosef Bacik } 127526ce2095SJosef Bacik 1276fe119a6eSNikolay Borisov static bool clean_pinned_extents(struct btrfs_trans_handle *trans, 1277fe119a6eSNikolay Borisov struct btrfs_block_group *bg) 127845bb5d6aSNikolay Borisov { 127945bb5d6aSNikolay Borisov struct btrfs_fs_info *fs_info = bg->fs_info; 1280fe119a6eSNikolay Borisov struct btrfs_transaction *prev_trans = NULL; 128145bb5d6aSNikolay Borisov const u64 start = bg->start; 128245bb5d6aSNikolay Borisov const u64 end = start + bg->length - 1; 128345bb5d6aSNikolay Borisov int ret; 128445bb5d6aSNikolay Borisov 1285fe119a6eSNikolay Borisov spin_lock(&fs_info->trans_lock); 1286fe119a6eSNikolay Borisov if (trans->transaction->list.prev != &fs_info->trans_list) { 1287fe119a6eSNikolay Borisov prev_trans = list_last_entry(&trans->transaction->list, 1288fe119a6eSNikolay Borisov struct btrfs_transaction, list); 1289fe119a6eSNikolay Borisov refcount_inc(&prev_trans->use_count); 1290fe119a6eSNikolay Borisov } 1291fe119a6eSNikolay Borisov spin_unlock(&fs_info->trans_lock); 1292fe119a6eSNikolay Borisov 129345bb5d6aSNikolay Borisov /* 129445bb5d6aSNikolay Borisov * Hold the unused_bg_unpin_mutex lock to avoid racing with 129545bb5d6aSNikolay Borisov * btrfs_finish_extent_commit(). If we are at transaction N, another 129645bb5d6aSNikolay Borisov * task might be running finish_extent_commit() for the previous 129745bb5d6aSNikolay Borisov * transaction N - 1, and have seen a range belonging to the block 1298fe119a6eSNikolay Borisov * group in pinned_extents before we were able to clear the whole block 1299fe119a6eSNikolay Borisov * group range from pinned_extents. This means that task can lookup for 1300fe119a6eSNikolay Borisov * the block group after we unpinned it from pinned_extents and removed 1301fe119a6eSNikolay Borisov * it, leading to a BUG_ON() at unpin_extent_range(). 
130245bb5d6aSNikolay Borisov */ 130345bb5d6aSNikolay Borisov mutex_lock(&fs_info->unused_bg_unpin_mutex); 1304fe119a6eSNikolay Borisov if (prev_trans) { 1305fe119a6eSNikolay Borisov ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, 130645bb5d6aSNikolay Borisov EXTENT_DIRTY); 130745bb5d6aSNikolay Borisov if (ret) 1308534cf531SFilipe Manana goto out; 1309fe119a6eSNikolay Borisov } 131045bb5d6aSNikolay Borisov 1311fe119a6eSNikolay Borisov ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, 131245bb5d6aSNikolay Borisov EXTENT_DIRTY); 1313534cf531SFilipe Manana out: 131445bb5d6aSNikolay Borisov mutex_unlock(&fs_info->unused_bg_unpin_mutex); 13155150bf19SFilipe Manana if (prev_trans) 13165150bf19SFilipe Manana btrfs_put_transaction(prev_trans); 131745bb5d6aSNikolay Borisov 1318534cf531SFilipe Manana return ret == 0; 131945bb5d6aSNikolay Borisov } 132045bb5d6aSNikolay Borisov 132126ce2095SJosef Bacik /* 1322e3e0520bSJosef Bacik * Process the unused_bgs list and remove any that don't have any allocated 1323e3e0520bSJosef Bacik * space inside of them. 1324e3e0520bSJosef Bacik */ 1325e3e0520bSJosef Bacik void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1326e3e0520bSJosef Bacik { 132732da5386SDavid Sterba struct btrfs_block_group *block_group; 1328e3e0520bSJosef Bacik struct btrfs_space_info *space_info; 1329e3e0520bSJosef Bacik struct btrfs_trans_handle *trans; 13306e80d4f8SDennis Zhou const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); 1331e3e0520bSJosef Bacik int ret = 0; 1332e3e0520bSJosef Bacik 1333e3e0520bSJosef Bacik if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1334e3e0520bSJosef Bacik return; 1335e3e0520bSJosef Bacik 1336e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1337e3e0520bSJosef Bacik while (!list_empty(&fs_info->unused_bgs)) { 1338e3e0520bSJosef Bacik int trimming; 1339e3e0520bSJosef Bacik 1340e3e0520bSJosef Bacik block_group = list_first_entry(&fs_info->unused_bgs, 134132da5386SDavid Sterba struct btrfs_block_group, 1342e3e0520bSJosef Bacik bg_list); 1343e3e0520bSJosef Bacik list_del_init(&block_group->bg_list); 1344e3e0520bSJosef Bacik 1345e3e0520bSJosef Bacik space_info = block_group->space_info; 1346e3e0520bSJosef Bacik 1347e3e0520bSJosef Bacik if (ret || btrfs_mixed_space_info(space_info)) { 1348e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1349e3e0520bSJosef Bacik continue; 1350e3e0520bSJosef Bacik } 1351e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1352e3e0520bSJosef Bacik 1353b0643e59SDennis Zhou btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 1354b0643e59SDennis Zhou 1355e3e0520bSJosef Bacik mutex_lock(&fs_info->delete_unused_bgs_mutex); 1356e3e0520bSJosef Bacik 1357e3e0520bSJosef Bacik /* Don't want to race with allocators so take the groups_sem */ 1358e3e0520bSJosef Bacik down_write(&space_info->groups_sem); 13596e80d4f8SDennis Zhou 13606e80d4f8SDennis Zhou /* 13616e80d4f8SDennis Zhou * Async discard moves the final block group discard to be prior 13626e80d4f8SDennis Zhou * to the unused_bgs code path. Therefore, if it's not fully 13636e80d4f8SDennis Zhou * trimmed, punt it back to the async discard lists. 
13646e80d4f8SDennis Zhou */ 13656e80d4f8SDennis Zhou if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && 13666e80d4f8SDennis Zhou !btrfs_is_free_space_trimmed(block_group)) { 13676e80d4f8SDennis Zhou trace_btrfs_skip_unused_block_group(block_group); 13686e80d4f8SDennis Zhou up_write(&space_info->groups_sem); 13696e80d4f8SDennis Zhou /* Requeue if we failed because of async discard */ 13706e80d4f8SDennis Zhou btrfs_discard_queue_work(&fs_info->discard_ctl, 13716e80d4f8SDennis Zhou block_group); 13726e80d4f8SDennis Zhou goto next; 13736e80d4f8SDennis Zhou } 13746e80d4f8SDennis Zhou 1375e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1376e3e0520bSJosef Bacik if (block_group->reserved || block_group->pinned || 1377bf38be65SDavid Sterba block_group->used || block_group->ro || 1378e3e0520bSJosef Bacik list_is_singular(&block_group->list)) { 1379e3e0520bSJosef Bacik /* 1380e3e0520bSJosef Bacik * We want to bail if we made new allocations or have 1381e3e0520bSJosef Bacik * outstanding allocations in this block group. We do 1382e3e0520bSJosef Bacik * the ro check in case balance is currently acting on 1383e3e0520bSJosef Bacik * this block group. 1384e3e0520bSJosef Bacik */ 1385e3e0520bSJosef Bacik trace_btrfs_skip_unused_block_group(block_group); 1386e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1387e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1388e3e0520bSJosef Bacik goto next; 1389e3e0520bSJosef Bacik } 1390e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1391e3e0520bSJosef Bacik 1392e3e0520bSJosef Bacik /* We don't want to force the issue, only flip if it's ok. */ 1393e11c0406SJosef Bacik ret = inc_block_group_ro(block_group, 0); 1394e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1395e3e0520bSJosef Bacik if (ret < 0) { 1396e3e0520bSJosef Bacik ret = 0; 1397e3e0520bSJosef Bacik goto next; 1398e3e0520bSJosef Bacik } 1399e3e0520bSJosef Bacik 1400e3e0520bSJosef Bacik /* 1401e3e0520bSJosef Bacik * Want to do this before we do anything else so we can recover 1402e3e0520bSJosef Bacik * properly if we fail to join the transaction. 1403e3e0520bSJosef Bacik */ 1404e3e0520bSJosef Bacik trans = btrfs_start_trans_remove_block_group(fs_info, 1405b3470b5dSDavid Sterba block_group->start); 1406e3e0520bSJosef Bacik if (IS_ERR(trans)) { 1407e3e0520bSJosef Bacik btrfs_dec_block_group_ro(block_group); 1408e3e0520bSJosef Bacik ret = PTR_ERR(trans); 1409e3e0520bSJosef Bacik goto next; 1410e3e0520bSJosef Bacik } 1411e3e0520bSJosef Bacik 1412e3e0520bSJosef Bacik /* 1413e3e0520bSJosef Bacik * We could have pending pinned extents for this block group, 1414e3e0520bSJosef Bacik * just delete them, we don't care about them anymore. 1415e3e0520bSJosef Bacik */ 1416534cf531SFilipe Manana if (!clean_pinned_extents(trans, block_group)) { 1417534cf531SFilipe Manana btrfs_dec_block_group_ro(block_group); 1418e3e0520bSJosef Bacik goto end_trans; 1419534cf531SFilipe Manana } 1420e3e0520bSJosef Bacik 1421b0643e59SDennis Zhou /* 1422b0643e59SDennis Zhou * At this point, the block_group is read only and should fail 1423b0643e59SDennis Zhou * new allocations. However, btrfs_finish_extent_commit() can 1424b0643e59SDennis Zhou * cause this block_group to be placed back on the discard 1425b0643e59SDennis Zhou * lists because now the block_group isn't fully discarded. 1426b0643e59SDennis Zhou * Bail here and try again later after discarding everything. 
1427b0643e59SDennis Zhou */ 1428b0643e59SDennis Zhou spin_lock(&fs_info->discard_ctl.lock); 1429b0643e59SDennis Zhou if (!list_empty(&block_group->discard_list)) { 1430b0643e59SDennis Zhou spin_unlock(&fs_info->discard_ctl.lock); 1431b0643e59SDennis Zhou btrfs_dec_block_group_ro(block_group); 1432b0643e59SDennis Zhou btrfs_discard_queue_work(&fs_info->discard_ctl, 1433b0643e59SDennis Zhou block_group); 1434b0643e59SDennis Zhou goto end_trans; 1435b0643e59SDennis Zhou } 1436b0643e59SDennis Zhou spin_unlock(&fs_info->discard_ctl.lock); 1437b0643e59SDennis Zhou 1438e3e0520bSJosef Bacik /* Reset pinned so btrfs_put_block_group doesn't complain */ 1439e3e0520bSJosef Bacik spin_lock(&space_info->lock); 1440e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1441e3e0520bSJosef Bacik 1442e3e0520bSJosef Bacik btrfs_space_info_update_bytes_pinned(fs_info, space_info, 1443e3e0520bSJosef Bacik -block_group->pinned); 1444e3e0520bSJosef Bacik space_info->bytes_readonly += block_group->pinned; 1445e3e0520bSJosef Bacik percpu_counter_add_batch(&space_info->total_bytes_pinned, 1446e3e0520bSJosef Bacik -block_group->pinned, 1447e3e0520bSJosef Bacik BTRFS_TOTAL_BYTES_PINNED_BATCH); 1448e3e0520bSJosef Bacik block_group->pinned = 0; 1449e3e0520bSJosef Bacik 1450e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1451e3e0520bSJosef Bacik spin_unlock(&space_info->lock); 1452e3e0520bSJosef Bacik 14536e80d4f8SDennis Zhou /* 14546e80d4f8SDennis Zhou * The normal path here is an unused block group is passed here, 14556e80d4f8SDennis Zhou * then trimming is handled in the transaction commit path. 14566e80d4f8SDennis Zhou * Async discard interposes before this to do the trimming 14576e80d4f8SDennis Zhou * before coming down the unused block group path as trimming 14586e80d4f8SDennis Zhou * will no longer be done later in the transaction commit path. 14596e80d4f8SDennis Zhou */ 14606e80d4f8SDennis Zhou if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) 14616e80d4f8SDennis Zhou goto flip_async; 14626e80d4f8SDennis Zhou 1463e3e0520bSJosef Bacik /* DISCARD can flip during remount */ 146446b27f50SDennis Zhou trimming = btrfs_test_opt(fs_info, DISCARD_SYNC); 1465e3e0520bSJosef Bacik 1466e3e0520bSJosef Bacik /* Implicit trim during transaction commit. */ 1467e3e0520bSJosef Bacik if (trimming) 14686b7304afSFilipe Manana btrfs_freeze_block_group(block_group); 1469e3e0520bSJosef Bacik 1470e3e0520bSJosef Bacik /* 1471e3e0520bSJosef Bacik * Btrfs_remove_chunk will abort the transaction if things go 1472e3e0520bSJosef Bacik * horribly wrong. 1473e3e0520bSJosef Bacik */ 1474b3470b5dSDavid Sterba ret = btrfs_remove_chunk(trans, block_group->start); 1475e3e0520bSJosef Bacik 1476e3e0520bSJosef Bacik if (ret) { 1477e3e0520bSJosef Bacik if (trimming) 14786b7304afSFilipe Manana btrfs_unfreeze_block_group(block_group); 1479e3e0520bSJosef Bacik goto end_trans; 1480e3e0520bSJosef Bacik } 1481e3e0520bSJosef Bacik 1482e3e0520bSJosef Bacik /* 1483e3e0520bSJosef Bacik * If we're not mounted with -odiscard, we can just forget 1484e3e0520bSJosef Bacik * about this block group. Otherwise we'll need to wait 1485e3e0520bSJosef Bacik * until transaction commit to do the actual discard. 
1486e3e0520bSJosef Bacik */ 1487e3e0520bSJosef Bacik if (trimming) { 1488e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1489e3e0520bSJosef Bacik /* 1490e3e0520bSJosef Bacik * A concurrent scrub might have added us to the list 1491e3e0520bSJosef Bacik * fs_info->unused_bgs, so use a list_move operation 1492e3e0520bSJosef Bacik * to add the block group to the deleted_bgs list. 1493e3e0520bSJosef Bacik */ 1494e3e0520bSJosef Bacik list_move(&block_group->bg_list, 1495e3e0520bSJosef Bacik &trans->transaction->deleted_bgs); 1496e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1497e3e0520bSJosef Bacik btrfs_get_block_group(block_group); 1498e3e0520bSJosef Bacik } 1499e3e0520bSJosef Bacik end_trans: 1500e3e0520bSJosef Bacik btrfs_end_transaction(trans); 1501e3e0520bSJosef Bacik next: 1502e3e0520bSJosef Bacik mutex_unlock(&fs_info->delete_unused_bgs_mutex); 1503e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1504e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1505e3e0520bSJosef Bacik } 1506e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 15076e80d4f8SDennis Zhou return; 15086e80d4f8SDennis Zhou 15096e80d4f8SDennis Zhou flip_async: 15106e80d4f8SDennis Zhou btrfs_end_transaction(trans); 15116e80d4f8SDennis Zhou mutex_unlock(&fs_info->delete_unused_bgs_mutex); 15126e80d4f8SDennis Zhou btrfs_put_block_group(block_group); 15136e80d4f8SDennis Zhou btrfs_discard_punt_unused_bgs_list(fs_info); 1514e3e0520bSJosef Bacik } 1515e3e0520bSJosef Bacik 151632da5386SDavid Sterba void btrfs_mark_bg_unused(struct btrfs_block_group *bg) 1517e3e0520bSJosef Bacik { 1518e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info = bg->fs_info; 1519e3e0520bSJosef Bacik 1520e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1521e3e0520bSJosef Bacik if (list_empty(&bg->bg_list)) { 1522e3e0520bSJosef Bacik btrfs_get_block_group(bg); 1523e3e0520bSJosef Bacik trace_btrfs_add_unused_block_group(bg); 1524e3e0520bSJosef Bacik list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 1525e3e0520bSJosef Bacik } 1526e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1527e3e0520bSJosef Bacik } 15284358d963SJosef Bacik 1529e3ba67a1SJohannes Thumshirn static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 1530e3ba67a1SJohannes Thumshirn struct btrfs_path *path) 1531e3ba67a1SJohannes Thumshirn { 1532e3ba67a1SJohannes Thumshirn struct extent_map_tree *em_tree; 1533e3ba67a1SJohannes Thumshirn struct extent_map *em; 1534e3ba67a1SJohannes Thumshirn struct btrfs_block_group_item bg; 1535e3ba67a1SJohannes Thumshirn struct extent_buffer *leaf; 1536e3ba67a1SJohannes Thumshirn int slot; 1537e3ba67a1SJohannes Thumshirn u64 flags; 1538e3ba67a1SJohannes Thumshirn int ret = 0; 1539e3ba67a1SJohannes Thumshirn 1540e3ba67a1SJohannes Thumshirn slot = path->slots[0]; 1541e3ba67a1SJohannes Thumshirn leaf = path->nodes[0]; 1542e3ba67a1SJohannes Thumshirn 1543e3ba67a1SJohannes Thumshirn em_tree = &fs_info->mapping_tree; 1544e3ba67a1SJohannes Thumshirn read_lock(&em_tree->lock); 1545e3ba67a1SJohannes Thumshirn em = lookup_extent_mapping(em_tree, key->objectid, key->offset); 1546e3ba67a1SJohannes Thumshirn read_unlock(&em_tree->lock); 1547e3ba67a1SJohannes Thumshirn if (!em) { 1548e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1549e3ba67a1SJohannes Thumshirn "logical %llu len %llu found bg but no related chunk", 1550e3ba67a1SJohannes Thumshirn key->objectid, key->offset); 1551e3ba67a1SJohannes Thumshirn return -ENOENT; 1552e3ba67a1SJohannes Thumshirn } 1553e3ba67a1SJohannes 
Thumshirn 1554e3ba67a1SJohannes Thumshirn if (em->start != key->objectid || em->len != key->offset) { 1555e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1556e3ba67a1SJohannes Thumshirn "block group %llu len %llu mismatch with chunk %llu len %llu", 1557e3ba67a1SJohannes Thumshirn key->objectid, key->offset, em->start, em->len); 1558e3ba67a1SJohannes Thumshirn ret = -EUCLEAN; 1559e3ba67a1SJohannes Thumshirn goto out_free_em; 1560e3ba67a1SJohannes Thumshirn } 1561e3ba67a1SJohannes Thumshirn 1562e3ba67a1SJohannes Thumshirn read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 1563e3ba67a1SJohannes Thumshirn sizeof(bg)); 1564e3ba67a1SJohannes Thumshirn flags = btrfs_stack_block_group_flags(&bg) & 1565e3ba67a1SJohannes Thumshirn BTRFS_BLOCK_GROUP_TYPE_MASK; 1566e3ba67a1SJohannes Thumshirn 1567e3ba67a1SJohannes Thumshirn if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 1568e3ba67a1SJohannes Thumshirn btrfs_err(fs_info, 1569e3ba67a1SJohannes Thumshirn "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 1570e3ba67a1SJohannes Thumshirn key->objectid, key->offset, flags, 1571e3ba67a1SJohannes Thumshirn (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); 1572e3ba67a1SJohannes Thumshirn ret = -EUCLEAN; 1573e3ba67a1SJohannes Thumshirn } 1574e3ba67a1SJohannes Thumshirn 1575e3ba67a1SJohannes Thumshirn out_free_em: 1576e3ba67a1SJohannes Thumshirn free_extent_map(em); 1577e3ba67a1SJohannes Thumshirn return ret; 1578e3ba67a1SJohannes Thumshirn } 1579e3ba67a1SJohannes Thumshirn 15804358d963SJosef Bacik static int find_first_block_group(struct btrfs_fs_info *fs_info, 15814358d963SJosef Bacik struct btrfs_path *path, 15824358d963SJosef Bacik struct btrfs_key *key) 15834358d963SJosef Bacik { 15844358d963SJosef Bacik struct btrfs_root *root = fs_info->extent_root; 1585e3ba67a1SJohannes Thumshirn int ret; 15864358d963SJosef Bacik struct btrfs_key found_key; 15874358d963SJosef Bacik struct extent_buffer *leaf; 15884358d963SJosef Bacik int slot; 15894358d963SJosef Bacik 15904358d963SJosef Bacik ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 15914358d963SJosef Bacik if (ret < 0) 1592e3ba67a1SJohannes Thumshirn return ret; 15934358d963SJosef Bacik 15944358d963SJosef Bacik while (1) { 15954358d963SJosef Bacik slot = path->slots[0]; 15964358d963SJosef Bacik leaf = path->nodes[0]; 15974358d963SJosef Bacik if (slot >= btrfs_header_nritems(leaf)) { 15984358d963SJosef Bacik ret = btrfs_next_leaf(root, path); 15994358d963SJosef Bacik if (ret == 0) 16004358d963SJosef Bacik continue; 16014358d963SJosef Bacik if (ret < 0) 16024358d963SJosef Bacik goto out; 16034358d963SJosef Bacik break; 16044358d963SJosef Bacik } 16054358d963SJosef Bacik btrfs_item_key_to_cpu(leaf, &found_key, slot); 16064358d963SJosef Bacik 16074358d963SJosef Bacik if (found_key.objectid >= key->objectid && 16084358d963SJosef Bacik found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 1609e3ba67a1SJohannes Thumshirn ret = read_bg_from_eb(fs_info, &found_key, path); 1610e3ba67a1SJohannes Thumshirn break; 1611e3ba67a1SJohannes Thumshirn } 16124358d963SJosef Bacik 16134358d963SJosef Bacik path->slots[0]++; 16144358d963SJosef Bacik } 16154358d963SJosef Bacik out: 16164358d963SJosef Bacik return ret; 16174358d963SJosef Bacik } 16184358d963SJosef Bacik 16194358d963SJosef Bacik static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 16204358d963SJosef Bacik { 16214358d963SJosef Bacik u64 extra_flags = chunk_to_extended(flags) & 16224358d963SJosef Bacik BTRFS_EXTENDED_PROFILE_MASK; 
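/*
 * Note: chunk_to_extended() maps the implicit "single" profile (no profile
 * bits set in chunk format) to BTRFS_AVAIL_ALLOC_BIT_SINGLE, so the
 * avail_*_alloc_bits fields updated below can record it explicitly.
 */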
16234358d963SJosef Bacik 16244358d963SJosef Bacik write_seqlock(&fs_info->profiles_lock); 16254358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA) 16264358d963SJosef Bacik fs_info->avail_data_alloc_bits |= extra_flags; 16274358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_METADATA) 16284358d963SJosef Bacik fs_info->avail_metadata_alloc_bits |= extra_flags; 16294358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 16304358d963SJosef Bacik fs_info->avail_system_alloc_bits |= extra_flags; 16314358d963SJosef Bacik write_sequnlock(&fs_info->profiles_lock); 16324358d963SJosef Bacik } 16334358d963SJosef Bacik 163496a14336SNikolay Borisov /** 163596a14336SNikolay Borisov * btrfs_rmap_block - Map a physical disk address to a list of logical addresses 163696a14336SNikolay Borisov * @chunk_start: logical address of block group 163796a14336SNikolay Borisov * @physical: physical address to map to logical addresses 163896a14336SNikolay Borisov * @logical: return array of logical addresses which map to @physical 163996a14336SNikolay Borisov * @naddrs: length of @logical 164096a14336SNikolay Borisov * @stripe_len: size of IO stripe for the given block group 164196a14336SNikolay Borisov * 164296a14336SNikolay Borisov * Maps a particular @physical disk address to a list of @logical addresses. 164396a14336SNikolay Borisov * Used primarily to exclude those portions of a block group that contain super 164496a14336SNikolay Borisov * block copies. 164596a14336SNikolay Borisov */ 164696a14336SNikolay Borisov EXPORT_FOR_TESTS 164796a14336SNikolay Borisov int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 164896a14336SNikolay Borisov u64 physical, u64 **logical, int *naddrs, int *stripe_len) 164996a14336SNikolay Borisov { 165096a14336SNikolay Borisov struct extent_map *em; 165196a14336SNikolay Borisov struct map_lookup *map; 165296a14336SNikolay Borisov u64 *buf; 165396a14336SNikolay Borisov u64 bytenr; 16541776ad17SNikolay Borisov u64 data_stripe_length; 16551776ad17SNikolay Borisov u64 io_stripe_size; 16561776ad17SNikolay Borisov int i, nr = 0; 16571776ad17SNikolay Borisov int ret = 0; 165896a14336SNikolay Borisov 165996a14336SNikolay Borisov em = btrfs_get_chunk_map(fs_info, chunk_start, 1); 166096a14336SNikolay Borisov if (IS_ERR(em)) 166196a14336SNikolay Borisov return -EIO; 166296a14336SNikolay Borisov 166396a14336SNikolay Borisov map = em->map_lookup; 16649e22b925SNikolay Borisov data_stripe_length = em->orig_block_len; 16651776ad17SNikolay Borisov io_stripe_size = map->stripe_len; 166696a14336SNikolay Borisov 16679e22b925SNikolay Borisov /* For RAID5/6 adjust to a full IO stripe length */ 16689e22b925SNikolay Borisov if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 16691776ad17SNikolay Borisov io_stripe_size = map->stripe_len * nr_data_stripes(map); 167096a14336SNikolay Borisov 167196a14336SNikolay Borisov buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 16721776ad17SNikolay Borisov if (!buf) { 16731776ad17SNikolay Borisov ret = -ENOMEM; 16741776ad17SNikolay Borisov goto out; 16751776ad17SNikolay Borisov } 167696a14336SNikolay Borisov 167796a14336SNikolay Borisov for (i = 0; i < map->num_stripes; i++) { 16781776ad17SNikolay Borisov bool already_inserted = false; 16791776ad17SNikolay Borisov u64 stripe_nr; 16801776ad17SNikolay Borisov int j; 16811776ad17SNikolay Borisov 16821776ad17SNikolay Borisov if (!in_range(physical, map->stripes[i].physical, 16831776ad17SNikolay Borisov data_stripe_length)) 168496a14336SNikolay Borisov continue; 168596a14336SNikolay Borisov 
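/*
 * Worked example (illustrative values only): for a RAID0 chunk with
 * num_stripes = 2 and stripe_len = 64K, a @physical that is 128K into
 * device stripe i = 1 gives stripe_nr = 128K / 64K = 2, then
 * 2 * num_stripes + 1 = 5, so bytenr = chunk_start + 5 * 64K.
 */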
168696a14336SNikolay Borisov stripe_nr = physical - map->stripes[i].physical; 168796a14336SNikolay Borisov stripe_nr = div64_u64(stripe_nr, map->stripe_len); 168896a14336SNikolay Borisov 168996a14336SNikolay Borisov if (map->type & BTRFS_BLOCK_GROUP_RAID10) { 169096a14336SNikolay Borisov stripe_nr = stripe_nr * map->num_stripes + i; 169196a14336SNikolay Borisov stripe_nr = div_u64(stripe_nr, map->sub_stripes); 169296a14336SNikolay Borisov } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { 169396a14336SNikolay Borisov stripe_nr = stripe_nr * map->num_stripes + i; 169496a14336SNikolay Borisov } 169596a14336SNikolay Borisov /* 169696a14336SNikolay Borisov * The remaining case would be for RAID56, multiply by 169796a14336SNikolay Borisov * nr_data_stripes(). Alternatively, just use rmap_len below 169896a14336SNikolay Borisov * instead of map->stripe_len 169996a14336SNikolay Borisov */ 170096a14336SNikolay Borisov 17011776ad17SNikolay Borisov bytenr = chunk_start + stripe_nr * io_stripe_size; 17021776ad17SNikolay Borisov 17031776ad17SNikolay Borisov /* Ensure we don't add duplicate addresses */ 170496a14336SNikolay Borisov for (j = 0; j < nr; j++) { 17051776ad17SNikolay Borisov if (buf[j] == bytenr) { 17061776ad17SNikolay Borisov already_inserted = true; 170796a14336SNikolay Borisov break; 170896a14336SNikolay Borisov } 170996a14336SNikolay Borisov } 17101776ad17SNikolay Borisov 17111776ad17SNikolay Borisov if (!already_inserted) 17121776ad17SNikolay Borisov buf[nr++] = bytenr; 171396a14336SNikolay Borisov } 171496a14336SNikolay Borisov 171596a14336SNikolay Borisov *logical = buf; 171696a14336SNikolay Borisov *naddrs = nr; 17171776ad17SNikolay Borisov *stripe_len = io_stripe_size; 17181776ad17SNikolay Borisov out: 171996a14336SNikolay Borisov free_extent_map(em); 17201776ad17SNikolay Borisov return ret; 172196a14336SNikolay Borisov } 172296a14336SNikolay Borisov 172332da5386SDavid Sterba static int exclude_super_stripes(struct btrfs_block_group *cache) 17244358d963SJosef Bacik { 17254358d963SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 17264358d963SJosef Bacik u64 bytenr; 17274358d963SJosef Bacik u64 *logical; 17284358d963SJosef Bacik int stripe_len; 17294358d963SJosef Bacik int i, nr, ret; 17304358d963SJosef Bacik 1731b3470b5dSDavid Sterba if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 1732b3470b5dSDavid Sterba stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 17334358d963SJosef Bacik cache->bytes_super += stripe_len; 1734b3470b5dSDavid Sterba ret = btrfs_add_excluded_extent(fs_info, cache->start, 17354358d963SJosef Bacik stripe_len); 17364358d963SJosef Bacik if (ret) 17374358d963SJosef Bacik return ret; 17384358d963SJosef Bacik } 17394358d963SJosef Bacik 17404358d963SJosef Bacik for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 17414358d963SJosef Bacik bytenr = btrfs_sb_offset(i); 1742b3470b5dSDavid Sterba ret = btrfs_rmap_block(fs_info, cache->start, 17434358d963SJosef Bacik bytenr, &logical, &nr, &stripe_len); 17444358d963SJosef Bacik if (ret) 17454358d963SJosef Bacik return ret; 17464358d963SJosef Bacik 17474358d963SJosef Bacik while (nr--) { 174896f9b0f2SNikolay Borisov u64 len = min_t(u64, stripe_len, 174996f9b0f2SNikolay Borisov cache->start + cache->length - logical[nr]); 17504358d963SJosef Bacik 17514358d963SJosef Bacik cache->bytes_super += len; 175296f9b0f2SNikolay Borisov ret = btrfs_add_excluded_extent(fs_info, logical[nr], 175396f9b0f2SNikolay Borisov len); 17544358d963SJosef Bacik if (ret) { 17554358d963SJosef Bacik kfree(logical); 17564358d963SJosef Bacik return 
ret; 17574358d963SJosef Bacik } 17584358d963SJosef Bacik } 17594358d963SJosef Bacik 17604358d963SJosef Bacik kfree(logical); 17614358d963SJosef Bacik } 17624358d963SJosef Bacik return 0; 17634358d963SJosef Bacik } 17644358d963SJosef Bacik 176532da5386SDavid Sterba static void link_block_group(struct btrfs_block_group *cache) 17664358d963SJosef Bacik { 17674358d963SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 17684358d963SJosef Bacik int index = btrfs_bg_flags_to_raid_index(cache->flags); 17694358d963SJosef Bacik 17704358d963SJosef Bacik down_write(&space_info->groups_sem); 17714358d963SJosef Bacik list_add_tail(&cache->list, &space_info->block_groups[index]); 17724358d963SJosef Bacik up_write(&space_info->groups_sem); 17734358d963SJosef Bacik } 17744358d963SJosef Bacik 177532da5386SDavid Sterba static struct btrfs_block_group *btrfs_create_block_group_cache( 17769afc6649SQu Wenruo struct btrfs_fs_info *fs_info, u64 start) 17774358d963SJosef Bacik { 177832da5386SDavid Sterba struct btrfs_block_group *cache; 17794358d963SJosef Bacik 17804358d963SJosef Bacik cache = kzalloc(sizeof(*cache), GFP_NOFS); 17814358d963SJosef Bacik if (!cache) 17824358d963SJosef Bacik return NULL; 17834358d963SJosef Bacik 17844358d963SJosef Bacik cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 17854358d963SJosef Bacik GFP_NOFS); 17864358d963SJosef Bacik if (!cache->free_space_ctl) { 17874358d963SJosef Bacik kfree(cache); 17884358d963SJosef Bacik return NULL; 17894358d963SJosef Bacik } 17904358d963SJosef Bacik 1791b3470b5dSDavid Sterba cache->start = start; 17924358d963SJosef Bacik 17934358d963SJosef Bacik cache->fs_info = fs_info; 17944358d963SJosef Bacik cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 17954358d963SJosef Bacik 17966e80d4f8SDennis Zhou cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 17976e80d4f8SDennis Zhou 179848aaeebeSJosef Bacik refcount_set(&cache->refs, 1); 17994358d963SJosef Bacik spin_lock_init(&cache->lock); 18004358d963SJosef Bacik init_rwsem(&cache->data_rwsem); 18014358d963SJosef Bacik INIT_LIST_HEAD(&cache->list); 18024358d963SJosef Bacik INIT_LIST_HEAD(&cache->cluster_list); 18034358d963SJosef Bacik INIT_LIST_HEAD(&cache->bg_list); 18044358d963SJosef Bacik INIT_LIST_HEAD(&cache->ro_list); 1805b0643e59SDennis Zhou INIT_LIST_HEAD(&cache->discard_list); 18064358d963SJosef Bacik INIT_LIST_HEAD(&cache->dirty_list); 18074358d963SJosef Bacik INIT_LIST_HEAD(&cache->io_list); 18084358d963SJosef Bacik btrfs_init_free_space_ctl(cache); 18096b7304afSFilipe Manana atomic_set(&cache->frozen, 0); 18104358d963SJosef Bacik mutex_init(&cache->free_space_lock); 18114358d963SJosef Bacik btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); 18124358d963SJosef Bacik 18134358d963SJosef Bacik return cache; 18144358d963SJosef Bacik } 18154358d963SJosef Bacik 18164358d963SJosef Bacik /* 18174358d963SJosef Bacik * Iterate all chunks and verify that each of them has the corresponding block 18184358d963SJosef Bacik * group 18194358d963SJosef Bacik */ 18204358d963SJosef Bacik static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 18214358d963SJosef Bacik { 18224358d963SJosef Bacik struct extent_map_tree *map_tree = &fs_info->mapping_tree; 18234358d963SJosef Bacik struct extent_map *em; 182432da5386SDavid Sterba struct btrfs_block_group *bg; 18254358d963SJosef Bacik u64 start = 0; 18264358d963SJosef Bacik int ret = 0; 18274358d963SJosef Bacik 18284358d963SJosef Bacik while (1) { 18294358d963SJosef Bacik 
read_lock(&map_tree->lock); 18304358d963SJosef Bacik /* 18314358d963SJosef Bacik * lookup_extent_mapping will return the first extent map 18324358d963SJosef Bacik * intersecting the range, so setting @len to 1 is enough to 18334358d963SJosef Bacik * get the first chunk. 18344358d963SJosef Bacik */ 18354358d963SJosef Bacik em = lookup_extent_mapping(map_tree, start, 1); 18364358d963SJosef Bacik read_unlock(&map_tree->lock); 18374358d963SJosef Bacik if (!em) 18384358d963SJosef Bacik break; 18394358d963SJosef Bacik 18404358d963SJosef Bacik bg = btrfs_lookup_block_group(fs_info, em->start); 18414358d963SJosef Bacik if (!bg) { 18424358d963SJosef Bacik btrfs_err(fs_info, 18434358d963SJosef Bacik "chunk start=%llu len=%llu doesn't have corresponding block group", 18444358d963SJosef Bacik em->start, em->len); 18454358d963SJosef Bacik ret = -EUCLEAN; 18464358d963SJosef Bacik free_extent_map(em); 18474358d963SJosef Bacik break; 18484358d963SJosef Bacik } 1849b3470b5dSDavid Sterba if (bg->start != em->start || bg->length != em->len || 18504358d963SJosef Bacik (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 18514358d963SJosef Bacik (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 18524358d963SJosef Bacik btrfs_err(fs_info, 18534358d963SJosef Bacik "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 18544358d963SJosef Bacik em->start, em->len, 18554358d963SJosef Bacik em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 1856b3470b5dSDavid Sterba bg->start, bg->length, 18574358d963SJosef Bacik bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 18584358d963SJosef Bacik ret = -EUCLEAN; 18594358d963SJosef Bacik free_extent_map(em); 18604358d963SJosef Bacik btrfs_put_block_group(bg); 18614358d963SJosef Bacik break; 18624358d963SJosef Bacik } 18634358d963SJosef Bacik start = em->start + em->len; 18644358d963SJosef Bacik free_extent_map(em); 18654358d963SJosef Bacik btrfs_put_block_group(bg); 18664358d963SJosef Bacik } 18674358d963SJosef Bacik return ret; 18684358d963SJosef Bacik } 18694358d963SJosef Bacik 18704c448ce8SMarcos Paulo de Souza static void read_block_group_item(struct btrfs_block_group *cache, 18719afc6649SQu Wenruo struct btrfs_path *path, 18729afc6649SQu Wenruo const struct btrfs_key *key) 18739afc6649SQu Wenruo { 18749afc6649SQu Wenruo struct extent_buffer *leaf = path->nodes[0]; 18759afc6649SQu Wenruo struct btrfs_block_group_item bgi; 18769afc6649SQu Wenruo int slot = path->slots[0]; 18779afc6649SQu Wenruo 18789afc6649SQu Wenruo cache->length = key->offset; 18799afc6649SQu Wenruo 18809afc6649SQu Wenruo read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 18819afc6649SQu Wenruo sizeof(bgi)); 18829afc6649SQu Wenruo cache->used = btrfs_stack_block_group_used(&bgi); 18839afc6649SQu Wenruo cache->flags = btrfs_stack_block_group_flags(&bgi); 18849afc6649SQu Wenruo } 18859afc6649SQu Wenruo 1886ffb9e0f0SQu Wenruo static int read_one_block_group(struct btrfs_fs_info *info, 1887ffb9e0f0SQu Wenruo struct btrfs_path *path, 1888d49a2ddbSQu Wenruo const struct btrfs_key *key, 1889ffb9e0f0SQu Wenruo int need_clear) 1890ffb9e0f0SQu Wenruo { 189132da5386SDavid Sterba struct btrfs_block_group *cache; 1892ffb9e0f0SQu Wenruo struct btrfs_space_info *space_info; 1893ffb9e0f0SQu Wenruo const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 1894ffb9e0f0SQu Wenruo int ret; 1895ffb9e0f0SQu Wenruo 1896d49a2ddbSQu Wenruo ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 1897ffb9e0f0SQu Wenruo 18989afc6649SQu Wenruo cache = btrfs_create_block_group_cache(info, 
key->objectid); 1899ffb9e0f0SQu Wenruo if (!cache) 1900ffb9e0f0SQu Wenruo return -ENOMEM; 1901ffb9e0f0SQu Wenruo 19024c448ce8SMarcos Paulo de Souza read_block_group_item(cache, path, key); 19039afc6649SQu Wenruo 1904e3e39c72SMarcos Paulo de Souza set_free_space_tree_thresholds(cache); 1905e3e39c72SMarcos Paulo de Souza 1906ffb9e0f0SQu Wenruo if (need_clear) { 1907ffb9e0f0SQu Wenruo /* 1908ffb9e0f0SQu Wenruo * When we mount with old space cache, we need to 1909ffb9e0f0SQu Wenruo * set BTRFS_DC_CLEAR and set dirty flag. 1910ffb9e0f0SQu Wenruo * 1911ffb9e0f0SQu Wenruo * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 1912ffb9e0f0SQu Wenruo * truncate the old free space cache inode and 1913ffb9e0f0SQu Wenruo * setup a new one. 1914ffb9e0f0SQu Wenruo * b) Setting 'dirty flag' makes sure that we flush 1915ffb9e0f0SQu Wenruo * the new space cache info onto disk. 1916ffb9e0f0SQu Wenruo */ 1917ffb9e0f0SQu Wenruo if (btrfs_test_opt(info, SPACE_CACHE)) 1918ffb9e0f0SQu Wenruo cache->disk_cache_state = BTRFS_DC_CLEAR; 1919ffb9e0f0SQu Wenruo } 1920ffb9e0f0SQu Wenruo if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 1921ffb9e0f0SQu Wenruo (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 1922ffb9e0f0SQu Wenruo btrfs_err(info, 1923ffb9e0f0SQu Wenruo "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 1924ffb9e0f0SQu Wenruo cache->start); 1925ffb9e0f0SQu Wenruo ret = -EINVAL; 1926ffb9e0f0SQu Wenruo goto error; 1927ffb9e0f0SQu Wenruo } 1928ffb9e0f0SQu Wenruo 1929ffb9e0f0SQu Wenruo /* 1930ffb9e0f0SQu Wenruo * We need to exclude the super stripes now so that the space info has 1931ffb9e0f0SQu Wenruo * super bytes accounted for, otherwise we'll think we have more space 1932ffb9e0f0SQu Wenruo * than we actually do. 1933ffb9e0f0SQu Wenruo */ 1934ffb9e0f0SQu Wenruo ret = exclude_super_stripes(cache); 1935ffb9e0f0SQu Wenruo if (ret) { 1936ffb9e0f0SQu Wenruo /* We may have excluded something, so call this just in case. */ 1937ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 1938ffb9e0f0SQu Wenruo goto error; 1939ffb9e0f0SQu Wenruo } 1940ffb9e0f0SQu Wenruo 1941ffb9e0f0SQu Wenruo /* 1942ffb9e0f0SQu Wenruo * Check for two cases, either we are full, and therefore don't need 1943ffb9e0f0SQu Wenruo * to bother with the caching work since we won't find any space, or we 1944ffb9e0f0SQu Wenruo * are empty, and we can just add all the space in and be done with it. 1945ffb9e0f0SQu Wenruo * This saves us _a_lot_ of time, particularly in the full case. 
1946ffb9e0f0SQu Wenruo */ 19479afc6649SQu Wenruo if (cache->length == cache->used) { 1948ffb9e0f0SQu Wenruo cache->last_byte_to_unpin = (u64)-1; 1949ffb9e0f0SQu Wenruo cache->cached = BTRFS_CACHE_FINISHED; 1950ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 1951ffb9e0f0SQu Wenruo } else if (cache->used == 0) { 1952ffb9e0f0SQu Wenruo cache->last_byte_to_unpin = (u64)-1; 1953ffb9e0f0SQu Wenruo cache->cached = BTRFS_CACHE_FINISHED; 19549afc6649SQu Wenruo add_new_free_space(cache, cache->start, 19559afc6649SQu Wenruo cache->start + cache->length); 1956ffb9e0f0SQu Wenruo btrfs_free_excluded_extents(cache); 1957ffb9e0f0SQu Wenruo } 1958ffb9e0f0SQu Wenruo 1959ffb9e0f0SQu Wenruo ret = btrfs_add_block_group_cache(info, cache); 1960ffb9e0f0SQu Wenruo if (ret) { 1961ffb9e0f0SQu Wenruo btrfs_remove_free_space_cache(cache); 1962ffb9e0f0SQu Wenruo goto error; 1963ffb9e0f0SQu Wenruo } 1964ffb9e0f0SQu Wenruo trace_btrfs_add_block_group(info, cache, 0); 19659afc6649SQu Wenruo btrfs_update_space_info(info, cache->flags, cache->length, 1966ffb9e0f0SQu Wenruo cache->used, cache->bytes_super, &space_info); 1967ffb9e0f0SQu Wenruo 1968ffb9e0f0SQu Wenruo cache->space_info = space_info; 1969ffb9e0f0SQu Wenruo 1970ffb9e0f0SQu Wenruo link_block_group(cache); 1971ffb9e0f0SQu Wenruo 1972ffb9e0f0SQu Wenruo set_avail_alloc_bits(info, cache->flags); 1973ffb9e0f0SQu Wenruo if (btrfs_chunk_readonly(info, cache->start)) { 1974ffb9e0f0SQu Wenruo inc_block_group_ro(cache, 1); 1975ffb9e0f0SQu Wenruo } else if (cache->used == 0) { 1976ffb9e0f0SQu Wenruo ASSERT(list_empty(&cache->bg_list)); 19776e80d4f8SDennis Zhou if (btrfs_test_opt(info, DISCARD_ASYNC)) 19786e80d4f8SDennis Zhou btrfs_discard_queue_work(&info->discard_ctl, cache); 19796e80d4f8SDennis Zhou else 1980ffb9e0f0SQu Wenruo btrfs_mark_bg_unused(cache); 1981ffb9e0f0SQu Wenruo } 1982ffb9e0f0SQu Wenruo return 0; 1983ffb9e0f0SQu Wenruo error: 1984ffb9e0f0SQu Wenruo btrfs_put_block_group(cache); 1985ffb9e0f0SQu Wenruo return ret; 1986ffb9e0f0SQu Wenruo } 1987ffb9e0f0SQu Wenruo 19884358d963SJosef Bacik int btrfs_read_block_groups(struct btrfs_fs_info *info) 19894358d963SJosef Bacik { 19904358d963SJosef Bacik struct btrfs_path *path; 19914358d963SJosef Bacik int ret; 199232da5386SDavid Sterba struct btrfs_block_group *cache; 19934358d963SJosef Bacik struct btrfs_space_info *space_info; 19944358d963SJosef Bacik struct btrfs_key key; 19954358d963SJosef Bacik int need_clear = 0; 19964358d963SJosef Bacik u64 cache_gen; 19974358d963SJosef Bacik 19984358d963SJosef Bacik key.objectid = 0; 19994358d963SJosef Bacik key.offset = 0; 20004358d963SJosef Bacik key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 20014358d963SJosef Bacik path = btrfs_alloc_path(); 20024358d963SJosef Bacik if (!path) 20034358d963SJosef Bacik return -ENOMEM; 20044358d963SJosef Bacik 20054358d963SJosef Bacik cache_gen = btrfs_super_cache_generation(info->super_copy); 20064358d963SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 20074358d963SJosef Bacik btrfs_super_generation(info->super_copy) != cache_gen) 20084358d963SJosef Bacik need_clear = 1; 20094358d963SJosef Bacik if (btrfs_test_opt(info, CLEAR_CACHE)) 20104358d963SJosef Bacik need_clear = 1; 20114358d963SJosef Bacik 20124358d963SJosef Bacik while (1) { 20134358d963SJosef Bacik ret = find_first_block_group(info, path, &key); 20144358d963SJosef Bacik if (ret > 0) 20154358d963SJosef Bacik break; 20164358d963SJosef Bacik if (ret != 0) 20174358d963SJosef Bacik goto error; 20184358d963SJosef Bacik 2019ffb9e0f0SQu Wenruo btrfs_item_key_to_cpu(path->nodes[0], 
&key, path->slots[0]); 2020d49a2ddbSQu Wenruo ret = read_one_block_group(info, path, &key, need_clear); 2021ffb9e0f0SQu Wenruo if (ret < 0) 20224358d963SJosef Bacik goto error; 2023ffb9e0f0SQu Wenruo key.objectid += key.offset; 2024ffb9e0f0SQu Wenruo key.offset = 0; 20254358d963SJosef Bacik btrfs_release_path(path); 20264358d963SJosef Bacik } 20274358d963SJosef Bacik 202872804905SJosef Bacik list_for_each_entry(space_info, &info->space_info, list) { 2029*49ea112dSJosef Bacik int i; 2030*49ea112dSJosef Bacik 2031*49ea112dSJosef Bacik for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2032*49ea112dSJosef Bacik if (list_empty(&space_info->block_groups[i])) 2033*49ea112dSJosef Bacik continue; 2034*49ea112dSJosef Bacik cache = list_first_entry(&space_info->block_groups[i], 2035*49ea112dSJosef Bacik struct btrfs_block_group, 2036*49ea112dSJosef Bacik list); 2037*49ea112dSJosef Bacik btrfs_sysfs_add_block_group_type(cache); 2038*49ea112dSJosef Bacik } 2039*49ea112dSJosef Bacik 20404358d963SJosef Bacik if (!(btrfs_get_alloc_profile(info, space_info->flags) & 20414358d963SJosef Bacik (BTRFS_BLOCK_GROUP_RAID10 | 20424358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID1_MASK | 20434358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID56_MASK | 20444358d963SJosef Bacik BTRFS_BLOCK_GROUP_DUP))) 20454358d963SJosef Bacik continue; 20464358d963SJosef Bacik /* 20474358d963SJosef Bacik * Avoid allocating from un-mirrored block group if there are 20484358d963SJosef Bacik * mirrored block groups. 20494358d963SJosef Bacik */ 20504358d963SJosef Bacik list_for_each_entry(cache, 20514358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_RAID0], 20524358d963SJosef Bacik list) 2053e11c0406SJosef Bacik inc_block_group_ro(cache, 1); 20544358d963SJosef Bacik list_for_each_entry(cache, 20554358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_SINGLE], 20564358d963SJosef Bacik list) 2057e11c0406SJosef Bacik inc_block_group_ro(cache, 1); 20584358d963SJosef Bacik } 20594358d963SJosef Bacik 20604358d963SJosef Bacik btrfs_init_global_block_rsv(info); 20614358d963SJosef Bacik ret = check_chunk_block_group_mappings(info); 20624358d963SJosef Bacik error: 20634358d963SJosef Bacik btrfs_free_path(path); 20644358d963SJosef Bacik return ret; 20654358d963SJosef Bacik } 20664358d963SJosef Bacik 206797f4728aSQu Wenruo static int insert_block_group_item(struct btrfs_trans_handle *trans, 206897f4728aSQu Wenruo struct btrfs_block_group *block_group) 206997f4728aSQu Wenruo { 207097f4728aSQu Wenruo struct btrfs_fs_info *fs_info = trans->fs_info; 207197f4728aSQu Wenruo struct btrfs_block_group_item bgi; 207297f4728aSQu Wenruo struct btrfs_root *root; 207397f4728aSQu Wenruo struct btrfs_key key; 207497f4728aSQu Wenruo 207597f4728aSQu Wenruo spin_lock(&block_group->lock); 207697f4728aSQu Wenruo btrfs_set_stack_block_group_used(&bgi, block_group->used); 207797f4728aSQu Wenruo btrfs_set_stack_block_group_chunk_objectid(&bgi, 207897f4728aSQu Wenruo BTRFS_FIRST_CHUNK_TREE_OBJECTID); 207997f4728aSQu Wenruo btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 208097f4728aSQu Wenruo key.objectid = block_group->start; 208197f4728aSQu Wenruo key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 208297f4728aSQu Wenruo key.offset = block_group->length; 208397f4728aSQu Wenruo spin_unlock(&block_group->lock); 208497f4728aSQu Wenruo 208597f4728aSQu Wenruo root = fs_info->extent_root; 208697f4728aSQu Wenruo return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 208797f4728aSQu Wenruo } 208897f4728aSQu Wenruo 20894358d963SJosef Bacik void 
btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 20904358d963SJosef Bacik { 20914358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 209232da5386SDavid Sterba struct btrfs_block_group *block_group; 20934358d963SJosef Bacik int ret = 0; 20944358d963SJosef Bacik 20954358d963SJosef Bacik if (!trans->can_flush_pending_bgs) 20964358d963SJosef Bacik return; 20974358d963SJosef Bacik 20984358d963SJosef Bacik while (!list_empty(&trans->new_bgs)) { 2099*49ea112dSJosef Bacik int index; 2100*49ea112dSJosef Bacik 21014358d963SJosef Bacik block_group = list_first_entry(&trans->new_bgs, 210232da5386SDavid Sterba struct btrfs_block_group, 21034358d963SJosef Bacik bg_list); 21044358d963SJosef Bacik if (ret) 21054358d963SJosef Bacik goto next; 21064358d963SJosef Bacik 2107*49ea112dSJosef Bacik index = btrfs_bg_flags_to_raid_index(block_group->flags); 2108*49ea112dSJosef Bacik 210997f4728aSQu Wenruo ret = insert_block_group_item(trans, block_group); 21104358d963SJosef Bacik if (ret) 21114358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 211297f4728aSQu Wenruo ret = btrfs_finish_chunk_alloc(trans, block_group->start, 211397f4728aSQu Wenruo block_group->length); 21144358d963SJosef Bacik if (ret) 21154358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 21164358d963SJosef Bacik add_block_group_free_space(trans, block_group); 2117*49ea112dSJosef Bacik 2118*49ea112dSJosef Bacik /* 2119*49ea112dSJosef Bacik * If we restriped during balance, we may have added a new raid 2120*49ea112dSJosef Bacik * type, so now add the sysfs entries when it is safe to do so. 2121*49ea112dSJosef Bacik * We don't have to worry about locking here as it's handled in 2122*49ea112dSJosef Bacik * btrfs_sysfs_add_block_group_type. 2123*49ea112dSJosef Bacik */ 2124*49ea112dSJosef Bacik if (block_group->space_info->block_group_kobjs[index] == NULL) 2125*49ea112dSJosef Bacik btrfs_sysfs_add_block_group_type(block_group); 2126*49ea112dSJosef Bacik 21274358d963SJosef Bacik /* Already aborted the transaction if it failed. 
*/ 21284358d963SJosef Bacik next: 21294358d963SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 21304358d963SJosef Bacik list_del_init(&block_group->bg_list); 21314358d963SJosef Bacik } 21324358d963SJosef Bacik btrfs_trans_release_chunk_metadata(trans); 21334358d963SJosef Bacik } 21344358d963SJosef Bacik 21354358d963SJosef Bacik int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, 21364358d963SJosef Bacik u64 type, u64 chunk_offset, u64 size) 21374358d963SJosef Bacik { 21384358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 213932da5386SDavid Sterba struct btrfs_block_group *cache; 21404358d963SJosef Bacik int ret; 21414358d963SJosef Bacik 21424358d963SJosef Bacik btrfs_set_log_full_commit(trans); 21434358d963SJosef Bacik 21449afc6649SQu Wenruo cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 21454358d963SJosef Bacik if (!cache) 21464358d963SJosef Bacik return -ENOMEM; 21474358d963SJosef Bacik 21489afc6649SQu Wenruo cache->length = size; 2149e3e39c72SMarcos Paulo de Souza set_free_space_tree_thresholds(cache); 2150bf38be65SDavid Sterba cache->used = bytes_used; 21514358d963SJosef Bacik cache->flags = type; 21524358d963SJosef Bacik cache->last_byte_to_unpin = (u64)-1; 21534358d963SJosef Bacik cache->cached = BTRFS_CACHE_FINISHED; 21544358d963SJosef Bacik cache->needs_free_space = 1; 21554358d963SJosef Bacik ret = exclude_super_stripes(cache); 21564358d963SJosef Bacik if (ret) { 21574358d963SJosef Bacik /* We may have excluded something, so call this just in case */ 21584358d963SJosef Bacik btrfs_free_excluded_extents(cache); 21594358d963SJosef Bacik btrfs_put_block_group(cache); 21604358d963SJosef Bacik return ret; 21614358d963SJosef Bacik } 21624358d963SJosef Bacik 21634358d963SJosef Bacik add_new_free_space(cache, chunk_offset, chunk_offset + size); 21644358d963SJosef Bacik 21654358d963SJosef Bacik btrfs_free_excluded_extents(cache); 21664358d963SJosef Bacik 21674358d963SJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 21684358d963SJosef Bacik if (btrfs_should_fragment_free_space(cache)) { 21694358d963SJosef Bacik u64 new_bytes_used = size - bytes_used; 21704358d963SJosef Bacik 21714358d963SJosef Bacik bytes_used += new_bytes_used >> 1; 2172e11c0406SJosef Bacik fragment_free_space(cache); 21734358d963SJosef Bacik } 21744358d963SJosef Bacik #endif 21754358d963SJosef Bacik /* 21764358d963SJosef Bacik * Ensure the corresponding space_info object is created and 21774358d963SJosef Bacik * assigned to our block group. We want our bg to be added to the rbtree 21784358d963SJosef Bacik * with its ->space_info set. 21794358d963SJosef Bacik */ 21804358d963SJosef Bacik cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 21814358d963SJosef Bacik ASSERT(cache->space_info); 21824358d963SJosef Bacik 21834358d963SJosef Bacik ret = btrfs_add_block_group_cache(fs_info, cache); 21844358d963SJosef Bacik if (ret) { 21854358d963SJosef Bacik btrfs_remove_free_space_cache(cache); 21864358d963SJosef Bacik btrfs_put_block_group(cache); 21874358d963SJosef Bacik return ret; 21884358d963SJosef Bacik } 21894358d963SJosef Bacik 21904358d963SJosef Bacik /* 21914358d963SJosef Bacik * Now that our block group has its ->space_info set and is inserted in 21924358d963SJosef Bacik * the rbtree, update the space info's counters. 
21934358d963SJosef Bacik */ 21944358d963SJosef Bacik trace_btrfs_add_block_group(fs_info, cache, 1); 21954358d963SJosef Bacik btrfs_update_space_info(fs_info, cache->flags, size, bytes_used, 21964358d963SJosef Bacik cache->bytes_super, &cache->space_info); 21974358d963SJosef Bacik btrfs_update_global_block_rsv(fs_info); 21984358d963SJosef Bacik 21994358d963SJosef Bacik link_block_group(cache); 22004358d963SJosef Bacik 22014358d963SJosef Bacik list_add_tail(&cache->bg_list, &trans->new_bgs); 22024358d963SJosef Bacik trans->delayed_ref_updates++; 22034358d963SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 22044358d963SJosef Bacik 22054358d963SJosef Bacik set_avail_alloc_bits(fs_info, type); 22064358d963SJosef Bacik return 0; 22074358d963SJosef Bacik } 220826ce2095SJosef Bacik 2209b12de528SQu Wenruo /* 2210b12de528SQu Wenruo * Mark one block group RO, can be called several times for the same block 2211b12de528SQu Wenruo * group. 2212b12de528SQu Wenruo * 2213b12de528SQu Wenruo * @cache: the destination block group 2214b12de528SQu Wenruo * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to 2215b12de528SQu Wenruo * ensure we still have some free space after marking this 2216b12de528SQu Wenruo * block group RO. 2217b12de528SQu Wenruo */ 2218b12de528SQu Wenruo int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2219b12de528SQu Wenruo bool do_chunk_alloc) 222026ce2095SJosef Bacik { 222126ce2095SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 222226ce2095SJosef Bacik struct btrfs_trans_handle *trans; 222326ce2095SJosef Bacik u64 alloc_flags; 222426ce2095SJosef Bacik int ret; 222526ce2095SJosef Bacik 222626ce2095SJosef Bacik again: 222726ce2095SJosef Bacik trans = btrfs_join_transaction(fs_info->extent_root); 222826ce2095SJosef Bacik if (IS_ERR(trans)) 222926ce2095SJosef Bacik return PTR_ERR(trans); 223026ce2095SJosef Bacik 223126ce2095SJosef Bacik /* 223226ce2095SJosef Bacik * we're not allowed to set block groups readonly after the dirty 223326ce2095SJosef Bacik * block groups cache has started writing. If it already started, 223426ce2095SJosef Bacik * back off and let this transaction commit 223526ce2095SJosef Bacik */ 223626ce2095SJosef Bacik mutex_lock(&fs_info->ro_block_group_mutex); 223726ce2095SJosef Bacik if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 223826ce2095SJosef Bacik u64 transid = trans->transid; 223926ce2095SJosef Bacik 224026ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 224126ce2095SJosef Bacik btrfs_end_transaction(trans); 224226ce2095SJosef Bacik 224326ce2095SJosef Bacik ret = btrfs_wait_for_commit(fs_info, transid); 224426ce2095SJosef Bacik if (ret) 224526ce2095SJosef Bacik return ret; 224626ce2095SJosef Bacik goto again; 224726ce2095SJosef Bacik } 224826ce2095SJosef Bacik 2249b12de528SQu Wenruo if (do_chunk_alloc) { 225026ce2095SJosef Bacik /* 2251b12de528SQu Wenruo * If we are changing raid levels, try to allocate a 2252b12de528SQu Wenruo * corresponding block group with the new raid level. 
225326ce2095SJosef Bacik */ 2254349e120eSJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 225526ce2095SJosef Bacik if (alloc_flags != cache->flags) { 2256b12de528SQu Wenruo ret = btrfs_chunk_alloc(trans, alloc_flags, 2257b12de528SQu Wenruo CHUNK_ALLOC_FORCE); 225826ce2095SJosef Bacik /* 225926ce2095SJosef Bacik * ENOSPC is allowed here, we may have enough space 2260b12de528SQu Wenruo * already allocated at the new raid level to carry on 226126ce2095SJosef Bacik */ 226226ce2095SJosef Bacik if (ret == -ENOSPC) 226326ce2095SJosef Bacik ret = 0; 226426ce2095SJosef Bacik if (ret < 0) 226526ce2095SJosef Bacik goto out; 226626ce2095SJosef Bacik } 2267b12de528SQu Wenruo } 226826ce2095SJosef Bacik 2269a7a63accSJosef Bacik ret = inc_block_group_ro(cache, 0); 2270b12de528SQu Wenruo if (!do_chunk_alloc) 2271b12de528SQu Wenruo goto unlock_out; 227226ce2095SJosef Bacik if (!ret) 227326ce2095SJosef Bacik goto out; 227426ce2095SJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 227526ce2095SJosef Bacik ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 227626ce2095SJosef Bacik if (ret < 0) 227726ce2095SJosef Bacik goto out; 2278e11c0406SJosef Bacik ret = inc_block_group_ro(cache, 0); 227926ce2095SJosef Bacik out: 228026ce2095SJosef Bacik if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 2281349e120eSJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 228226ce2095SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 228326ce2095SJosef Bacik check_system_chunk(trans, alloc_flags); 228426ce2095SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 228526ce2095SJosef Bacik } 2286b12de528SQu Wenruo unlock_out: 228726ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 228826ce2095SJosef Bacik 228926ce2095SJosef Bacik btrfs_end_transaction(trans); 229026ce2095SJosef Bacik return ret; 229126ce2095SJosef Bacik } 229226ce2095SJosef Bacik 229332da5386SDavid Sterba void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 229426ce2095SJosef Bacik { 229526ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info; 229626ce2095SJosef Bacik u64 num_bytes; 229726ce2095SJosef Bacik 229826ce2095SJosef Bacik BUG_ON(!cache->ro); 229926ce2095SJosef Bacik 230026ce2095SJosef Bacik spin_lock(&sinfo->lock); 230126ce2095SJosef Bacik spin_lock(&cache->lock); 230226ce2095SJosef Bacik if (!--cache->ro) { 2303b3470b5dSDavid Sterba num_bytes = cache->length - cache->reserved - 2304bf38be65SDavid Sterba cache->pinned - cache->bytes_super - cache->used; 230526ce2095SJosef Bacik sinfo->bytes_readonly -= num_bytes; 230626ce2095SJosef Bacik list_del_init(&cache->ro_list); 230726ce2095SJosef Bacik } 230826ce2095SJosef Bacik spin_unlock(&cache->lock); 230926ce2095SJosef Bacik spin_unlock(&sinfo->lock); 231026ce2095SJosef Bacik } 231177745c05SJosef Bacik 23123be4d8efSQu Wenruo static int update_block_group_item(struct btrfs_trans_handle *trans, 231377745c05SJosef Bacik struct btrfs_path *path, 231432da5386SDavid Sterba struct btrfs_block_group *cache) 231577745c05SJosef Bacik { 231677745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 231777745c05SJosef Bacik int ret; 23183be4d8efSQu Wenruo struct btrfs_root *root = fs_info->extent_root; 231977745c05SJosef Bacik unsigned long bi; 232077745c05SJosef Bacik struct extent_buffer *leaf; 2321bf38be65SDavid Sterba struct btrfs_block_group_item bgi; 2322b3470b5dSDavid Sterba struct btrfs_key key; 232377745c05SJosef Bacik 2324b3470b5dSDavid Sterba key.objectid = cache->start; 
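/* Block group items are keyed by (block group start, BTRFS_BLOCK_GROUP_ITEM_KEY, block group length). */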
2325b3470b5dSDavid Sterba key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2326b3470b5dSDavid Sterba key.offset = cache->length; 2327b3470b5dSDavid Sterba 23283be4d8efSQu Wenruo ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 232977745c05SJosef Bacik if (ret) { 233077745c05SJosef Bacik if (ret > 0) 233177745c05SJosef Bacik ret = -ENOENT; 233277745c05SJosef Bacik goto fail; 233377745c05SJosef Bacik } 233477745c05SJosef Bacik 233577745c05SJosef Bacik leaf = path->nodes[0]; 233677745c05SJosef Bacik bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 2337de0dc456SDavid Sterba btrfs_set_stack_block_group_used(&bgi, cache->used); 2338de0dc456SDavid Sterba btrfs_set_stack_block_group_chunk_objectid(&bgi, 23393d976388SDavid Sterba BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2340de0dc456SDavid Sterba btrfs_set_stack_block_group_flags(&bgi, cache->flags); 2341bf38be65SDavid Sterba write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 234277745c05SJosef Bacik btrfs_mark_buffer_dirty(leaf); 234377745c05SJosef Bacik fail: 234477745c05SJosef Bacik btrfs_release_path(path); 234577745c05SJosef Bacik return ret; 234677745c05SJosef Bacik 234777745c05SJosef Bacik } 234877745c05SJosef Bacik 234932da5386SDavid Sterba static int cache_save_setup(struct btrfs_block_group *block_group, 235077745c05SJosef Bacik struct btrfs_trans_handle *trans, 235177745c05SJosef Bacik struct btrfs_path *path) 235277745c05SJosef Bacik { 235377745c05SJosef Bacik struct btrfs_fs_info *fs_info = block_group->fs_info; 235477745c05SJosef Bacik struct btrfs_root *root = fs_info->tree_root; 235577745c05SJosef Bacik struct inode *inode = NULL; 235677745c05SJosef Bacik struct extent_changeset *data_reserved = NULL; 235777745c05SJosef Bacik u64 alloc_hint = 0; 235877745c05SJosef Bacik int dcs = BTRFS_DC_ERROR; 235977745c05SJosef Bacik u64 num_pages = 0; 236077745c05SJosef Bacik int retries = 0; 236177745c05SJosef Bacik int ret = 0; 236277745c05SJosef Bacik 236377745c05SJosef Bacik /* 236477745c05SJosef Bacik * If this block group is smaller than 100 megs don't bother caching the 236577745c05SJosef Bacik * block group. 
236677745c05SJosef Bacik */ 2367b3470b5dSDavid Sterba if (block_group->length < (100 * SZ_1M)) { 236877745c05SJosef Bacik spin_lock(&block_group->lock); 236977745c05SJosef Bacik block_group->disk_cache_state = BTRFS_DC_WRITTEN; 237077745c05SJosef Bacik spin_unlock(&block_group->lock); 237177745c05SJosef Bacik return 0; 237277745c05SJosef Bacik } 237377745c05SJosef Bacik 2374bf31f87fSDavid Sterba if (TRANS_ABORTED(trans)) 237577745c05SJosef Bacik return 0; 237677745c05SJosef Bacik again: 237777745c05SJosef Bacik inode = lookup_free_space_inode(block_group, path); 237877745c05SJosef Bacik if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 237977745c05SJosef Bacik ret = PTR_ERR(inode); 238077745c05SJosef Bacik btrfs_release_path(path); 238177745c05SJosef Bacik goto out; 238277745c05SJosef Bacik } 238377745c05SJosef Bacik 238477745c05SJosef Bacik if (IS_ERR(inode)) { 238577745c05SJosef Bacik BUG_ON(retries); 238677745c05SJosef Bacik retries++; 238777745c05SJosef Bacik 238877745c05SJosef Bacik if (block_group->ro) 238977745c05SJosef Bacik goto out_free; 239077745c05SJosef Bacik 239177745c05SJosef Bacik ret = create_free_space_inode(trans, block_group, path); 239277745c05SJosef Bacik if (ret) 239377745c05SJosef Bacik goto out_free; 239477745c05SJosef Bacik goto again; 239577745c05SJosef Bacik } 239677745c05SJosef Bacik 239777745c05SJosef Bacik /* 239877745c05SJosef Bacik * We want to set the generation to 0, that way if anything goes wrong 239977745c05SJosef Bacik * from here on out we know not to trust this cache when we load up next 240077745c05SJosef Bacik * time. 240177745c05SJosef Bacik */ 240277745c05SJosef Bacik BTRFS_I(inode)->generation = 0; 240377745c05SJosef Bacik ret = btrfs_update_inode(trans, root, inode); 240477745c05SJosef Bacik if (ret) { 240577745c05SJosef Bacik /* 240677745c05SJosef Bacik * So theoretically we could recover from this, simply set the 240777745c05SJosef Bacik * super cache generation to 0 so we know to invalidate the 240877745c05SJosef Bacik * cache, but then we'd have to keep track of the block groups 240977745c05SJosef Bacik * that fail this way so we know we _have_ to reset this cache 241077745c05SJosef Bacik * before the next commit or risk reading stale cache. So to 241177745c05SJosef Bacik * limit our exposure to horrible edge cases let's just abort the 241277745c05SJosef Bacik * transaction, this only happens in really bad situations 241377745c05SJosef Bacik * anyway.
241477745c05SJosef Bacik */ 241577745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 241677745c05SJosef Bacik goto out_put; 241777745c05SJosef Bacik } 241877745c05SJosef Bacik WARN_ON(ret); 241977745c05SJosef Bacik 242077745c05SJosef Bacik /* We've already set up this transaction, go ahead and exit */ 242177745c05SJosef Bacik if (block_group->cache_generation == trans->transid && 242277745c05SJosef Bacik i_size_read(inode)) { 242377745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 242477745c05SJosef Bacik goto out_put; 242577745c05SJosef Bacik } 242677745c05SJosef Bacik 242777745c05SJosef Bacik if (i_size_read(inode) > 0) { 242877745c05SJosef Bacik ret = btrfs_check_trunc_cache_free_space(fs_info, 242977745c05SJosef Bacik &fs_info->global_block_rsv); 243077745c05SJosef Bacik if (ret) 243177745c05SJosef Bacik goto out_put; 243277745c05SJosef Bacik 243377745c05SJosef Bacik ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 243477745c05SJosef Bacik if (ret) 243577745c05SJosef Bacik goto out_put; 243677745c05SJosef Bacik } 243777745c05SJosef Bacik 243877745c05SJosef Bacik spin_lock(&block_group->lock); 243977745c05SJosef Bacik if (block_group->cached != BTRFS_CACHE_FINISHED || 244077745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) { 244177745c05SJosef Bacik /* 244277745c05SJosef Bacik * Don't bother trying to write stuff out _if_ 244377745c05SJosef Bacik * a) we're not cached, 244477745c05SJosef Bacik * b) we're mounted with the nospace_cache option, 244577745c05SJosef Bacik * c) we're using v2 space_cache (FREE_SPACE_TREE). 244677745c05SJosef Bacik */ 244777745c05SJosef Bacik dcs = BTRFS_DC_WRITTEN; 244877745c05SJosef Bacik spin_unlock(&block_group->lock); 244977745c05SJosef Bacik goto out_put; 245077745c05SJosef Bacik } 245177745c05SJosef Bacik spin_unlock(&block_group->lock); 245277745c05SJosef Bacik 245377745c05SJosef Bacik /* 245477745c05SJosef Bacik * We hit an ENOSPC when setting up the cache in this transaction, just 245577745c05SJosef Bacik * skip doing the setup; we've already cleared the cache so we're safe. 245677745c05SJosef Bacik */ 245777745c05SJosef Bacik if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 245877745c05SJosef Bacik ret = -ENOSPC; 245977745c05SJosef Bacik goto out_put; 246077745c05SJosef Bacik } 246177745c05SJosef Bacik 246277745c05SJosef Bacik /* 246377745c05SJosef Bacik * Try to preallocate enough space based on how big the block group is. 246477745c05SJosef Bacik * Keep in mind this has to include any pinned space which could end up 246577745c05SJosef Bacik * taking up quite a bit since it's not folded into the other space 246677745c05SJosef Bacik * cache.
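 * (With the calculation below, that means 16 pages, i.e. 16 * PAGE_SIZE bytes,
 * of cache space reserved for every 256MiB of block group length, with a
 * minimum of one 256MiB unit: 64KiB per 256MiB on systems with 4KiB pages.)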
246777745c05SJosef Bacik */ 2468b3470b5dSDavid Sterba num_pages = div_u64(block_group->length, SZ_256M); 246977745c05SJosef Bacik if (!num_pages) 247077745c05SJosef Bacik num_pages = 1; 247177745c05SJosef Bacik 247277745c05SJosef Bacik num_pages *= 16; 247377745c05SJosef Bacik num_pages *= PAGE_SIZE; 247477745c05SJosef Bacik 247536ea6f3eSNikolay Borisov ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 247636ea6f3eSNikolay Borisov num_pages); 247777745c05SJosef Bacik if (ret) 247877745c05SJosef Bacik goto out_put; 247977745c05SJosef Bacik 248077745c05SJosef Bacik ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, 248177745c05SJosef Bacik num_pages, num_pages, 248277745c05SJosef Bacik &alloc_hint); 248377745c05SJosef Bacik /* 248477745c05SJosef Bacik * Our cache requires contiguous chunks so that we don't modify a bunch 248577745c05SJosef Bacik * of metadata or split extents when writing the cache out, which means 248677745c05SJosef Bacik * we can enospc if we are heavily fragmented in addition to just normal 248777745c05SJosef Bacik * out of space conditions. So if we hit this just skip setting up any 248877745c05SJosef Bacik * other block groups for this transaction, maybe we'll unpin enough 248977745c05SJosef Bacik * space the next time around. 249077745c05SJosef Bacik */ 249177745c05SJosef Bacik if (!ret) 249277745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 249377745c05SJosef Bacik else if (ret == -ENOSPC) 249477745c05SJosef Bacik set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 249577745c05SJosef Bacik 249677745c05SJosef Bacik out_put: 249777745c05SJosef Bacik iput(inode); 249877745c05SJosef Bacik out_free: 249977745c05SJosef Bacik btrfs_release_path(path); 250077745c05SJosef Bacik out: 250177745c05SJosef Bacik spin_lock(&block_group->lock); 250277745c05SJosef Bacik if (!ret && dcs == BTRFS_DC_SETUP) 250377745c05SJosef Bacik block_group->cache_generation = trans->transid; 250477745c05SJosef Bacik block_group->disk_cache_state = dcs; 250577745c05SJosef Bacik spin_unlock(&block_group->lock); 250677745c05SJosef Bacik 250777745c05SJosef Bacik extent_changeset_free(data_reserved); 250877745c05SJosef Bacik return ret; 250977745c05SJosef Bacik } 251077745c05SJosef Bacik 251177745c05SJosef Bacik int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 251277745c05SJosef Bacik { 251377745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 251432da5386SDavid Sterba struct btrfs_block_group *cache, *tmp; 251577745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 251677745c05SJosef Bacik struct btrfs_path *path; 251777745c05SJosef Bacik 251877745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs) || 251977745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) 252077745c05SJosef Bacik return 0; 252177745c05SJosef Bacik 252277745c05SJosef Bacik path = btrfs_alloc_path(); 252377745c05SJosef Bacik if (!path) 252477745c05SJosef Bacik return -ENOMEM; 252577745c05SJosef Bacik 252677745c05SJosef Bacik /* Could add new block groups, use _safe just in case */ 252777745c05SJosef Bacik list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 252877745c05SJosef Bacik dirty_list) { 252977745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_CLEAR) 253077745c05SJosef Bacik cache_save_setup(cache, trans, path); 253177745c05SJosef Bacik } 253277745c05SJosef Bacik 253377745c05SJosef Bacik btrfs_free_path(path); 253477745c05SJosef Bacik return 0; 253577745c05SJosef Bacik } 253677745c05SJosef Bacik 253777745c05SJosef 
Bacik /* 253877745c05SJosef Bacik * Transaction commit does final block group cache writeback during a critical 253977745c05SJosef Bacik * section where nothing is allowed to change the FS. This is required in 254077745c05SJosef Bacik * order for the cache to actually match the block group, but can introduce a 254177745c05SJosef Bacik * lot of latency into the commit. 254277745c05SJosef Bacik * 254377745c05SJosef Bacik * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 254477745c05SJosef Bacik * There's a chance we'll have to redo some of it if the block group changes 254577745c05SJosef Bacik * again during the commit, but it greatly reduces the commit latency by 254677745c05SJosef Bacik * getting rid of the easy block groups while we're still allowing others to 254777745c05SJosef Bacik * join the commit. 254877745c05SJosef Bacik */ 254977745c05SJosef Bacik int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 255077745c05SJosef Bacik { 255177745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 255232da5386SDavid Sterba struct btrfs_block_group *cache; 255377745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 255477745c05SJosef Bacik int ret = 0; 255577745c05SJosef Bacik int should_put; 255677745c05SJosef Bacik struct btrfs_path *path = NULL; 255777745c05SJosef Bacik LIST_HEAD(dirty); 255877745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 255977745c05SJosef Bacik int num_started = 0; 256077745c05SJosef Bacik int loops = 0; 256177745c05SJosef Bacik 256277745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 256377745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs)) { 256477745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 256577745c05SJosef Bacik return 0; 256677745c05SJosef Bacik } 256777745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 256877745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 256977745c05SJosef Bacik 257077745c05SJosef Bacik again: 257177745c05SJosef Bacik /* Make sure all the block groups on our dirty list actually exist */ 257277745c05SJosef Bacik btrfs_create_pending_block_groups(trans); 257377745c05SJosef Bacik 257477745c05SJosef Bacik if (!path) { 257577745c05SJosef Bacik path = btrfs_alloc_path(); 257677745c05SJosef Bacik if (!path) 257777745c05SJosef Bacik return -ENOMEM; 257877745c05SJosef Bacik } 257977745c05SJosef Bacik 258077745c05SJosef Bacik /* 258177745c05SJosef Bacik * cache_write_mutex is here only to save us from balance or automatic 258277745c05SJosef Bacik * removal of empty block groups deleting this block group while we are 258377745c05SJosef Bacik * writing out the cache 258477745c05SJosef Bacik */ 258577745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 258677745c05SJosef Bacik while (!list_empty(&dirty)) { 258777745c05SJosef Bacik bool drop_reserve = true; 258877745c05SJosef Bacik 258932da5386SDavid Sterba cache = list_first_entry(&dirty, struct btrfs_block_group, 259077745c05SJosef Bacik dirty_list); 259177745c05SJosef Bacik /* 259277745c05SJosef Bacik * This can happen if something re-dirties a block group that 259377745c05SJosef Bacik * is already under IO. 
Just wait for it to finish and then do 259477745c05SJosef Bacik * it all again 259577745c05SJosef Bacik */ 259677745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 259777745c05SJosef Bacik list_del_init(&cache->io_list); 259877745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 259977745c05SJosef Bacik btrfs_put_block_group(cache); 260077745c05SJosef Bacik } 260177745c05SJosef Bacik 260277745c05SJosef Bacik 260377745c05SJosef Bacik /* 260477745c05SJosef Bacik * btrfs_wait_cache_io uses the cache->dirty_list to decide if 260577745c05SJosef Bacik * it should update the cache_state. Don't delete until after 260677745c05SJosef Bacik * we wait. 260777745c05SJosef Bacik * 260877745c05SJosef Bacik * Since we're not running in the commit critical section 260977745c05SJosef Bacik * we need the dirty_bgs_lock to protect from update_block_group 261077745c05SJosef Bacik */ 261177745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 261277745c05SJosef Bacik list_del_init(&cache->dirty_list); 261377745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 261477745c05SJosef Bacik 261577745c05SJosef Bacik should_put = 1; 261677745c05SJosef Bacik 261777745c05SJosef Bacik cache_save_setup(cache, trans, path); 261877745c05SJosef Bacik 261977745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_SETUP) { 262077745c05SJosef Bacik cache->io_ctl.inode = NULL; 262177745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 262277745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 262377745c05SJosef Bacik num_started++; 262477745c05SJosef Bacik should_put = 0; 262577745c05SJosef Bacik 262677745c05SJosef Bacik /* 262777745c05SJosef Bacik * The cache_write_mutex is protecting the 262877745c05SJosef Bacik * io_list, also refer to the definition of 262977745c05SJosef Bacik * btrfs_transaction::io_bgs for more details 263077745c05SJosef Bacik */ 263177745c05SJosef Bacik list_add_tail(&cache->io_list, io); 263277745c05SJosef Bacik } else { 263377745c05SJosef Bacik /* 263477745c05SJosef Bacik * If we failed to write the cache, the 263577745c05SJosef Bacik * generation will be bad and life goes on 263677745c05SJosef Bacik */ 263777745c05SJosef Bacik ret = 0; 263877745c05SJosef Bacik } 263977745c05SJosef Bacik } 264077745c05SJosef Bacik if (!ret) { 26413be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 264277745c05SJosef Bacik /* 264377745c05SJosef Bacik * Our block group might still be attached to the list 264477745c05SJosef Bacik * of new block groups in the transaction handle of some 264577745c05SJosef Bacik * other task (struct btrfs_trans_handle->new_bgs). This 264677745c05SJosef Bacik * means its block group item isn't yet in the extent 264777745c05SJosef Bacik * tree. If this happens ignore the error, as we will 264877745c05SJosef Bacik * try again later in the critical section of the 264977745c05SJosef Bacik * transaction commit. 
265077745c05SJosef Bacik */ 265177745c05SJosef Bacik if (ret == -ENOENT) { 265277745c05SJosef Bacik ret = 0; 265377745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 265477745c05SJosef Bacik if (list_empty(&cache->dirty_list)) { 265577745c05SJosef Bacik list_add_tail(&cache->dirty_list, 265677745c05SJosef Bacik &cur_trans->dirty_bgs); 265777745c05SJosef Bacik btrfs_get_block_group(cache); 265877745c05SJosef Bacik drop_reserve = false; 265977745c05SJosef Bacik } 266077745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 266177745c05SJosef Bacik } else if (ret) { 266277745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 266377745c05SJosef Bacik } 266477745c05SJosef Bacik } 266577745c05SJosef Bacik 266677745c05SJosef Bacik /* If it's not on the io list, we need to put the block group */ 266777745c05SJosef Bacik if (should_put) 266877745c05SJosef Bacik btrfs_put_block_group(cache); 266977745c05SJosef Bacik if (drop_reserve) 267077745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 267177745c05SJosef Bacik 267277745c05SJosef Bacik if (ret) 267377745c05SJosef Bacik break; 267477745c05SJosef Bacik 267577745c05SJosef Bacik /* 267677745c05SJosef Bacik * Avoid blocking other tasks for too long. It might even save 267777745c05SJosef Bacik * us from writing caches for block groups that are going to be 267877745c05SJosef Bacik * removed. 267977745c05SJosef Bacik */ 268077745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 268177745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 268277745c05SJosef Bacik } 268377745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 268477745c05SJosef Bacik 268577745c05SJosef Bacik /* 268677745c05SJosef Bacik * Go through delayed refs for all the stuff we've just kicked off 268777745c05SJosef Bacik * and then loop back (just once) 268877745c05SJosef Bacik */ 268977745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 0); 269077745c05SJosef Bacik if (!ret && loops == 0) { 269177745c05SJosef Bacik loops++; 269277745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 269377745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 269477745c05SJosef Bacik /* 269577745c05SJosef Bacik * dirty_bgs_lock protects us from concurrent block group 269677745c05SJosef Bacik * deletes too (not just cache_write_mutex). 
269777745c05SJosef Bacik */ 269877745c05SJosef Bacik if (!list_empty(&dirty)) { 269977745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 270077745c05SJosef Bacik goto again; 270177745c05SJosef Bacik } 270277745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 270377745c05SJosef Bacik } else if (ret < 0) { 270477745c05SJosef Bacik btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 270577745c05SJosef Bacik } 270677745c05SJosef Bacik 270777745c05SJosef Bacik btrfs_free_path(path); 270877745c05SJosef Bacik return ret; 270977745c05SJosef Bacik } 271077745c05SJosef Bacik 271177745c05SJosef Bacik int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 271277745c05SJosef Bacik { 271377745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 271432da5386SDavid Sterba struct btrfs_block_group *cache; 271577745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 271677745c05SJosef Bacik int ret = 0; 271777745c05SJosef Bacik int should_put; 271877745c05SJosef Bacik struct btrfs_path *path; 271977745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 272077745c05SJosef Bacik int num_started = 0; 272177745c05SJosef Bacik 272277745c05SJosef Bacik path = btrfs_alloc_path(); 272377745c05SJosef Bacik if (!path) 272477745c05SJosef Bacik return -ENOMEM; 272577745c05SJosef Bacik 272677745c05SJosef Bacik /* 272777745c05SJosef Bacik * Even though we are in the critical section of the transaction commit, 272877745c05SJosef Bacik * we can still have concurrent tasks adding elements to this 272977745c05SJosef Bacik * transaction's list of dirty block groups. These tasks correspond to 273077745c05SJosef Bacik * endio free space workers started when writeback finishes for a 273177745c05SJosef Bacik * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 273277745c05SJosef Bacik * allocate new block groups as a result of COWing nodes of the root 273377745c05SJosef Bacik * tree when updating the free space inode. The writeback for the space 273477745c05SJosef Bacik * caches is triggered by an earlier call to 273577745c05SJosef Bacik * btrfs_start_dirty_block_groups() and iterations of the following 273677745c05SJosef Bacik * loop. 273777745c05SJosef Bacik * Also we want to do the cache_save_setup first and then run the 273877745c05SJosef Bacik * delayed refs to make sure we have the best chance at doing this all 273977745c05SJosef Bacik * in one shot. 274077745c05SJosef Bacik */ 274177745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 274277745c05SJosef Bacik while (!list_empty(&cur_trans->dirty_bgs)) { 274377745c05SJosef Bacik cache = list_first_entry(&cur_trans->dirty_bgs, 274432da5386SDavid Sterba struct btrfs_block_group, 274577745c05SJosef Bacik dirty_list); 274677745c05SJosef Bacik 274777745c05SJosef Bacik /* 274877745c05SJosef Bacik * This can happen if cache_save_setup re-dirties a block group 274977745c05SJosef Bacik * that is already under IO. 
Just wait for it to finish and 275077745c05SJosef Bacik * then do it all again 275177745c05SJosef Bacik */ 275277745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 275377745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 275477745c05SJosef Bacik list_del_init(&cache->io_list); 275577745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 275677745c05SJosef Bacik btrfs_put_block_group(cache); 275777745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 275877745c05SJosef Bacik } 275977745c05SJosef Bacik 276077745c05SJosef Bacik /* 276177745c05SJosef Bacik * Don't remove from the dirty list until after we've waited on 276277745c05SJosef Bacik * any pending IO 276377745c05SJosef Bacik */ 276477745c05SJosef Bacik list_del_init(&cache->dirty_list); 276577745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 276677745c05SJosef Bacik should_put = 1; 276777745c05SJosef Bacik 276877745c05SJosef Bacik cache_save_setup(cache, trans, path); 276977745c05SJosef Bacik 277077745c05SJosef Bacik if (!ret) 277177745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 277277745c05SJosef Bacik (unsigned long) -1); 277377745c05SJosef Bacik 277477745c05SJosef Bacik if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 277577745c05SJosef Bacik cache->io_ctl.inode = NULL; 277677745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 277777745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 277877745c05SJosef Bacik num_started++; 277977745c05SJosef Bacik should_put = 0; 278077745c05SJosef Bacik list_add_tail(&cache->io_list, io); 278177745c05SJosef Bacik } else { 278277745c05SJosef Bacik /* 278377745c05SJosef Bacik * If we failed to write the cache, the 278477745c05SJosef Bacik * generation will be bad and life goes on 278577745c05SJosef Bacik */ 278677745c05SJosef Bacik ret = 0; 278777745c05SJosef Bacik } 278877745c05SJosef Bacik } 278977745c05SJosef Bacik if (!ret) { 27903be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 279177745c05SJosef Bacik /* 279277745c05SJosef Bacik * One of the free space endio workers might have 279377745c05SJosef Bacik * created a new block group while updating a free space 279477745c05SJosef Bacik * cache's inode (at inode.c:btrfs_finish_ordered_io()) 279577745c05SJosef Bacik * and hasn't released its transaction handle yet, in 279677745c05SJosef Bacik * which case the new block group is still attached to 279777745c05SJosef Bacik * its transaction handle and its creation has not 279877745c05SJosef Bacik * finished yet (no block group item in the extent tree 279977745c05SJosef Bacik * yet, etc). If this is the case, wait for all free 280077745c05SJosef Bacik * space endio workers to finish and retry. This is a 2801260db43cSRandy Dunlap * very rare case so no need for a more efficient and 280277745c05SJosef Bacik * complex approach. 
280377745c05SJosef Bacik */ 280477745c05SJosef Bacik if (ret == -ENOENT) { 280577745c05SJosef Bacik wait_event(cur_trans->writer_wait, 280677745c05SJosef Bacik atomic_read(&cur_trans->num_writers) == 1); 28073be4d8efSQu Wenruo ret = update_block_group_item(trans, path, cache); 280877745c05SJosef Bacik } 280977745c05SJosef Bacik if (ret) 281077745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 281177745c05SJosef Bacik } 281277745c05SJosef Bacik 281377745c05SJosef Bacik /* If it's not on the io list, we need to put the block group */ 281477745c05SJosef Bacik if (should_put) 281577745c05SJosef Bacik btrfs_put_block_group(cache); 281677745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 281777745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 281877745c05SJosef Bacik } 281977745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 282077745c05SJosef Bacik 282177745c05SJosef Bacik /* 282277745c05SJosef Bacik * Refer to the definition of io_bgs member for details why it's safe 282377745c05SJosef Bacik * to use it without any locking 282477745c05SJosef Bacik */ 282577745c05SJosef Bacik while (!list_empty(io)) { 282632da5386SDavid Sterba cache = list_first_entry(io, struct btrfs_block_group, 282777745c05SJosef Bacik io_list); 282877745c05SJosef Bacik list_del_init(&cache->io_list); 282977745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 283077745c05SJosef Bacik btrfs_put_block_group(cache); 283177745c05SJosef Bacik } 283277745c05SJosef Bacik 283377745c05SJosef Bacik btrfs_free_path(path); 283477745c05SJosef Bacik return ret; 283577745c05SJosef Bacik } 2836606d1bf1SJosef Bacik 2837606d1bf1SJosef Bacik int btrfs_update_block_group(struct btrfs_trans_handle *trans, 2838606d1bf1SJosef Bacik u64 bytenr, u64 num_bytes, int alloc) 2839606d1bf1SJosef Bacik { 2840606d1bf1SJosef Bacik struct btrfs_fs_info *info = trans->fs_info; 284132da5386SDavid Sterba struct btrfs_block_group *cache = NULL; 2842606d1bf1SJosef Bacik u64 total = num_bytes; 2843606d1bf1SJosef Bacik u64 old_val; 2844606d1bf1SJosef Bacik u64 byte_in_group; 2845606d1bf1SJosef Bacik int factor; 2846606d1bf1SJosef Bacik int ret = 0; 2847606d1bf1SJosef Bacik 2848606d1bf1SJosef Bacik /* Block accounting for super block */ 2849606d1bf1SJosef Bacik spin_lock(&info->delalloc_root_lock); 2850606d1bf1SJosef Bacik old_val = btrfs_super_bytes_used(info->super_copy); 2851606d1bf1SJosef Bacik if (alloc) 2852606d1bf1SJosef Bacik old_val += num_bytes; 2853606d1bf1SJosef Bacik else 2854606d1bf1SJosef Bacik old_val -= num_bytes; 2855606d1bf1SJosef Bacik btrfs_set_super_bytes_used(info->super_copy, old_val); 2856606d1bf1SJosef Bacik spin_unlock(&info->delalloc_root_lock); 2857606d1bf1SJosef Bacik 2858606d1bf1SJosef Bacik while (total) { 2859606d1bf1SJosef Bacik cache = btrfs_lookup_block_group(info, bytenr); 2860606d1bf1SJosef Bacik if (!cache) { 2861606d1bf1SJosef Bacik ret = -ENOENT; 2862606d1bf1SJosef Bacik break; 2863606d1bf1SJosef Bacik } 2864606d1bf1SJosef Bacik factor = btrfs_bg_type_to_factor(cache->flags); 2865606d1bf1SJosef Bacik 2866606d1bf1SJosef Bacik /* 2867606d1bf1SJosef Bacik * If this block group has free space cache written out, we 2868606d1bf1SJosef Bacik * need to make sure to load it if we are removing space. This 2869606d1bf1SJosef Bacik * is because we need the unpinning stage to actually add the 2870606d1bf1SJosef Bacik * space back to the block group, otherwise we will leak space.
2871606d1bf1SJosef Bacik */ 287232da5386SDavid Sterba if (!alloc && !btrfs_block_group_done(cache)) 2873606d1bf1SJosef Bacik btrfs_cache_block_group(cache, 1); 2874606d1bf1SJosef Bacik 2875b3470b5dSDavid Sterba byte_in_group = bytenr - cache->start; 2876b3470b5dSDavid Sterba WARN_ON(byte_in_group > cache->length); 2877606d1bf1SJosef Bacik 2878606d1bf1SJosef Bacik spin_lock(&cache->space_info->lock); 2879606d1bf1SJosef Bacik spin_lock(&cache->lock); 2880606d1bf1SJosef Bacik 2881606d1bf1SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 2882606d1bf1SJosef Bacik cache->disk_cache_state < BTRFS_DC_CLEAR) 2883606d1bf1SJosef Bacik cache->disk_cache_state = BTRFS_DC_CLEAR; 2884606d1bf1SJosef Bacik 2885bf38be65SDavid Sterba old_val = cache->used; 2886b3470b5dSDavid Sterba num_bytes = min(total, cache->length - byte_in_group); 2887606d1bf1SJosef Bacik if (alloc) { 2888606d1bf1SJosef Bacik old_val += num_bytes; 2889bf38be65SDavid Sterba cache->used = old_val; 2890606d1bf1SJosef Bacik cache->reserved -= num_bytes; 2891606d1bf1SJosef Bacik cache->space_info->bytes_reserved -= num_bytes; 2892606d1bf1SJosef Bacik cache->space_info->bytes_used += num_bytes; 2893606d1bf1SJosef Bacik cache->space_info->disk_used += num_bytes * factor; 2894606d1bf1SJosef Bacik spin_unlock(&cache->lock); 2895606d1bf1SJosef Bacik spin_unlock(&cache->space_info->lock); 2896606d1bf1SJosef Bacik } else { 2897606d1bf1SJosef Bacik old_val -= num_bytes; 2898bf38be65SDavid Sterba cache->used = old_val; 2899606d1bf1SJosef Bacik cache->pinned += num_bytes; 2900606d1bf1SJosef Bacik btrfs_space_info_update_bytes_pinned(info, 2901606d1bf1SJosef Bacik cache->space_info, num_bytes); 2902606d1bf1SJosef Bacik cache->space_info->bytes_used -= num_bytes; 2903606d1bf1SJosef Bacik cache->space_info->disk_used -= num_bytes * factor; 2904606d1bf1SJosef Bacik spin_unlock(&cache->lock); 2905606d1bf1SJosef Bacik spin_unlock(&cache->space_info->lock); 2906606d1bf1SJosef Bacik 2907606d1bf1SJosef Bacik percpu_counter_add_batch( 2908606d1bf1SJosef Bacik &cache->space_info->total_bytes_pinned, 2909606d1bf1SJosef Bacik num_bytes, 2910606d1bf1SJosef Bacik BTRFS_TOTAL_BYTES_PINNED_BATCH); 2911fe119a6eSNikolay Borisov set_extent_dirty(&trans->transaction->pinned_extents, 2912606d1bf1SJosef Bacik bytenr, bytenr + num_bytes - 1, 2913606d1bf1SJosef Bacik GFP_NOFS | __GFP_NOFAIL); 2914606d1bf1SJosef Bacik } 2915606d1bf1SJosef Bacik 2916606d1bf1SJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 2917606d1bf1SJosef Bacik if (list_empty(&cache->dirty_list)) { 2918606d1bf1SJosef Bacik list_add_tail(&cache->dirty_list, 2919606d1bf1SJosef Bacik &trans->transaction->dirty_bgs); 2920606d1bf1SJosef Bacik trans->delayed_ref_updates++; 2921606d1bf1SJosef Bacik btrfs_get_block_group(cache); 2922606d1bf1SJosef Bacik } 2923606d1bf1SJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 2924606d1bf1SJosef Bacik 2925606d1bf1SJosef Bacik /* 2926606d1bf1SJosef Bacik * No longer have used bytes in this block group, queue it for 2927606d1bf1SJosef Bacik * deletion. We do this after adding the block group to the 2928606d1bf1SJosef Bacik * dirty list to avoid races between cleaner kthread and space 2929606d1bf1SJosef Bacik * cache writeout. 
2930606d1bf1SJosef Bacik */ 29316e80d4f8SDennis Zhou if (!alloc && old_val == 0) { 29326e80d4f8SDennis Zhou if (!btrfs_test_opt(info, DISCARD_ASYNC)) 2933606d1bf1SJosef Bacik btrfs_mark_bg_unused(cache); 29346e80d4f8SDennis Zhou } 2935606d1bf1SJosef Bacik 2936606d1bf1SJosef Bacik btrfs_put_block_group(cache); 2937606d1bf1SJosef Bacik total -= num_bytes; 2938606d1bf1SJosef Bacik bytenr += num_bytes; 2939606d1bf1SJosef Bacik } 2940606d1bf1SJosef Bacik 2941606d1bf1SJosef Bacik /* Modified block groups are accounted for in the delayed_refs_rsv. */ 2942606d1bf1SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 2943606d1bf1SJosef Bacik return ret; 2944606d1bf1SJosef Bacik } 2945606d1bf1SJosef Bacik 2946606d1bf1SJosef Bacik /** 2947606d1bf1SJosef Bacik * btrfs_add_reserved_bytes - update the block_group and space info counters 2948606d1bf1SJosef Bacik * @cache: The cache we are manipulating 2949606d1bf1SJosef Bacik * @ram_bytes: The number of bytes of file content, and will be the same as 2950606d1bf1SJosef Bacik * @num_bytes except for the compression path. 2951606d1bf1SJosef Bacik * @num_bytes: The number of bytes in question 2952606d1bf1SJosef Bacik * @delalloc: The blocks are allocated for the delalloc write 2953606d1bf1SJosef Bacik * 2954606d1bf1SJosef Bacik * This is called by the allocator when it reserves space. If this is a 2955606d1bf1SJosef Bacik * reservation and the block group has become read only we cannot make the 2956606d1bf1SJosef Bacik * reservation and return -EAGAIN, otherwise this function always succeeds. 2957606d1bf1SJosef Bacik */ 295832da5386SDavid Sterba int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, 2959606d1bf1SJosef Bacik u64 ram_bytes, u64 num_bytes, int delalloc) 2960606d1bf1SJosef Bacik { 2961606d1bf1SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 2962606d1bf1SJosef Bacik int ret = 0; 2963606d1bf1SJosef Bacik 2964606d1bf1SJosef Bacik spin_lock(&space_info->lock); 2965606d1bf1SJosef Bacik spin_lock(&cache->lock); 2966606d1bf1SJosef Bacik if (cache->ro) { 2967606d1bf1SJosef Bacik ret = -EAGAIN; 2968606d1bf1SJosef Bacik } else { 2969606d1bf1SJosef Bacik cache->reserved += num_bytes; 2970606d1bf1SJosef Bacik space_info->bytes_reserved += num_bytes; 2971a43c3835SJosef Bacik trace_btrfs_space_reservation(cache->fs_info, "space_info", 2972a43c3835SJosef Bacik space_info->flags, num_bytes, 1); 2973606d1bf1SJosef Bacik btrfs_space_info_update_bytes_may_use(cache->fs_info, 2974606d1bf1SJosef Bacik space_info, -ram_bytes); 2975606d1bf1SJosef Bacik if (delalloc) 2976606d1bf1SJosef Bacik cache->delalloc_bytes += num_bytes; 297799ffb43eSJosef Bacik 297899ffb43eSJosef Bacik /* 297999ffb43eSJosef Bacik * Compression can use less space than we reserved, so wake 298099ffb43eSJosef Bacik * tickets if that happens 298199ffb43eSJosef Bacik */ 298299ffb43eSJosef Bacik if (num_bytes < ram_bytes) 298399ffb43eSJosef Bacik btrfs_try_granting_tickets(cache->fs_info, space_info); 2984606d1bf1SJosef Bacik } 2985606d1bf1SJosef Bacik spin_unlock(&cache->lock); 2986606d1bf1SJosef Bacik spin_unlock(&space_info->lock); 2987606d1bf1SJosef Bacik return ret; 2988606d1bf1SJosef Bacik } 2989606d1bf1SJosef Bacik 2990606d1bf1SJosef Bacik /** 2991606d1bf1SJosef Bacik * btrfs_free_reserved_bytes - update the block_group and space info counters 2992606d1bf1SJosef Bacik * @cache: The cache we are manipulating 2993606d1bf1SJosef Bacik * @num_bytes: The number of bytes in question 2994606d1bf1SJosef Bacik * @delalloc: The blocks are allocated for the delalloc write
2995606d1bf1SJosef Bacik * 2996606d1bf1SJosef Bacik * This is called by somebody who is freeing space that was never actually used 2997606d1bf1SJosef Bacik * on disk. For example if you reserve some space for a new leaf in transaction 2998606d1bf1SJosef Bacik * A and before transaction A commits you free that leaf, you call this with 2999606d1bf1SJosef Bacik * reserve set to 0 in order to clear the reservation. 3000606d1bf1SJosef Bacik */ 300132da5386SDavid Sterba void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, 3002606d1bf1SJosef Bacik u64 num_bytes, int delalloc) 3003606d1bf1SJosef Bacik { 3004606d1bf1SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 3005606d1bf1SJosef Bacik 3006606d1bf1SJosef Bacik spin_lock(&space_info->lock); 3007606d1bf1SJosef Bacik spin_lock(&cache->lock); 3008606d1bf1SJosef Bacik if (cache->ro) 3009606d1bf1SJosef Bacik space_info->bytes_readonly += num_bytes; 3010606d1bf1SJosef Bacik cache->reserved -= num_bytes; 3011606d1bf1SJosef Bacik space_info->bytes_reserved -= num_bytes; 3012606d1bf1SJosef Bacik space_info->max_extent_size = 0; 3013606d1bf1SJosef Bacik 3014606d1bf1SJosef Bacik if (delalloc) 3015606d1bf1SJosef Bacik cache->delalloc_bytes -= num_bytes; 3016606d1bf1SJosef Bacik spin_unlock(&cache->lock); 30173308234aSJosef Bacik 30183308234aSJosef Bacik btrfs_try_granting_tickets(cache->fs_info, space_info); 3019606d1bf1SJosef Bacik spin_unlock(&space_info->lock); 3020606d1bf1SJosef Bacik } 302107730d87SJosef Bacik 302207730d87SJosef Bacik static void force_metadata_allocation(struct btrfs_fs_info *info) 302307730d87SJosef Bacik { 302407730d87SJosef Bacik struct list_head *head = &info->space_info; 302507730d87SJosef Bacik struct btrfs_space_info *found; 302607730d87SJosef Bacik 302772804905SJosef Bacik list_for_each_entry(found, head, list) { 302807730d87SJosef Bacik if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 302907730d87SJosef Bacik found->force_alloc = CHUNK_ALLOC_FORCE; 303007730d87SJosef Bacik } 303107730d87SJosef Bacik } 303207730d87SJosef Bacik 303307730d87SJosef Bacik static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 303407730d87SJosef Bacik struct btrfs_space_info *sinfo, int force) 303507730d87SJosef Bacik { 303607730d87SJosef Bacik u64 bytes_used = btrfs_space_info_used(sinfo, false); 303707730d87SJosef Bacik u64 thresh; 303807730d87SJosef Bacik 303907730d87SJosef Bacik if (force == CHUNK_ALLOC_FORCE) 304007730d87SJosef Bacik return 1; 304107730d87SJosef Bacik 304207730d87SJosef Bacik /* 304307730d87SJosef Bacik * in limited mode, we want to have some free space up to 304407730d87SJosef Bacik * about 1% of the FS size. 
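 * Concretely, the limited-mode threshold below works out to
 * max(64MiB, 1% of the total filesystem bytes), and we allocate once this
 * space_info's unused room (total_bytes - bytes_used) drops under it;
 * otherwise (neither forced nor limited) a new chunk is only allocated once
 * the space_info is roughly 80% used.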
304507730d87SJosef Bacik */ 304607730d87SJosef Bacik if (force == CHUNK_ALLOC_LIMITED) { 304707730d87SJosef Bacik thresh = btrfs_super_total_bytes(fs_info->super_copy); 304807730d87SJosef Bacik thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1)); 304907730d87SJosef Bacik 305007730d87SJosef Bacik if (sinfo->total_bytes - bytes_used < thresh) 305107730d87SJosef Bacik return 1; 305207730d87SJosef Bacik } 305307730d87SJosef Bacik 305407730d87SJosef Bacik if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8)) 305507730d87SJosef Bacik return 0; 305607730d87SJosef Bacik return 1; 305707730d87SJosef Bacik } 305807730d87SJosef Bacik 305907730d87SJosef Bacik int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) 306007730d87SJosef Bacik { 306107730d87SJosef Bacik u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); 306207730d87SJosef Bacik 306307730d87SJosef Bacik return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 306407730d87SJosef Bacik } 306507730d87SJosef Bacik 306607730d87SJosef Bacik /* 306707730d87SJosef Bacik * If force is CHUNK_ALLOC_FORCE: 306807730d87SJosef Bacik * - return 1 if it successfully allocates a chunk, 306907730d87SJosef Bacik * - return errors including -ENOSPC otherwise. 307007730d87SJosef Bacik * If force is NOT CHUNK_ALLOC_FORCE: 307107730d87SJosef Bacik * - return 0 if it doesn't need to allocate a new chunk, 307207730d87SJosef Bacik * - return 1 if it successfully allocates a chunk, 307307730d87SJosef Bacik * - return errors including -ENOSPC otherwise. 307407730d87SJosef Bacik */ 307507730d87SJosef Bacik int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, 307607730d87SJosef Bacik enum btrfs_chunk_alloc_enum force) 307707730d87SJosef Bacik { 307807730d87SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 307907730d87SJosef Bacik struct btrfs_space_info *space_info; 308007730d87SJosef Bacik bool wait_for_alloc = false; 308107730d87SJosef Bacik bool should_alloc = false; 308207730d87SJosef Bacik int ret = 0; 308307730d87SJosef Bacik 308407730d87SJosef Bacik /* Don't re-enter if we're already allocating a chunk */ 308507730d87SJosef Bacik if (trans->allocating_chunk) 308607730d87SJosef Bacik return -ENOSPC; 308707730d87SJosef Bacik 308807730d87SJosef Bacik space_info = btrfs_find_space_info(fs_info, flags); 308907730d87SJosef Bacik ASSERT(space_info); 309007730d87SJosef Bacik 309107730d87SJosef Bacik do { 309207730d87SJosef Bacik spin_lock(&space_info->lock); 309307730d87SJosef Bacik if (force < space_info->force_alloc) 309407730d87SJosef Bacik force = space_info->force_alloc; 309507730d87SJosef Bacik should_alloc = should_alloc_chunk(fs_info, space_info, force); 309607730d87SJosef Bacik if (space_info->full) { 309707730d87SJosef Bacik /* No more free physical space */ 309807730d87SJosef Bacik if (should_alloc) 309907730d87SJosef Bacik ret = -ENOSPC; 310007730d87SJosef Bacik else 310107730d87SJosef Bacik ret = 0; 310207730d87SJosef Bacik spin_unlock(&space_info->lock); 310307730d87SJosef Bacik return ret; 310407730d87SJosef Bacik } else if (!should_alloc) { 310507730d87SJosef Bacik spin_unlock(&space_info->lock); 310607730d87SJosef Bacik return 0; 310707730d87SJosef Bacik } else if (space_info->chunk_alloc) { 310807730d87SJosef Bacik /* 310907730d87SJosef Bacik * Someone is already allocating, so we need to block 311007730d87SJosef Bacik * until this someone is finished and then loop to 311107730d87SJosef Bacik * recheck if we should continue with our allocation 311207730d87SJosef Bacik * attempt. 
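 * (The mutex_lock()/mutex_unlock() pair on chunk_mutex below does no work of
 * its own; it is only there so we block behind a task that is currently
 * allocating with chunk_mutex held, before looping to re-check.)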
311307730d87SJosef Bacik */ 311407730d87SJosef Bacik wait_for_alloc = true; 311507730d87SJosef Bacik spin_unlock(&space_info->lock); 311607730d87SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 311707730d87SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 311807730d87SJosef Bacik } else { 311907730d87SJosef Bacik /* Proceed with allocation */ 312007730d87SJosef Bacik space_info->chunk_alloc = 1; 312107730d87SJosef Bacik wait_for_alloc = false; 312207730d87SJosef Bacik spin_unlock(&space_info->lock); 312307730d87SJosef Bacik } 312407730d87SJosef Bacik 312507730d87SJosef Bacik cond_resched(); 312607730d87SJosef Bacik } while (wait_for_alloc); 312707730d87SJosef Bacik 312807730d87SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 312907730d87SJosef Bacik trans->allocating_chunk = true; 313007730d87SJosef Bacik 313107730d87SJosef Bacik /* 313207730d87SJosef Bacik * If we have mixed data/metadata chunks we want to make sure we keep 313307730d87SJosef Bacik * allocating mixed chunks instead of individual chunks. 313407730d87SJosef Bacik */ 313507730d87SJosef Bacik if (btrfs_mixed_space_info(space_info)) 313607730d87SJosef Bacik flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 313707730d87SJosef Bacik 313807730d87SJosef Bacik /* 313907730d87SJosef Bacik * if we're doing a data chunk, go ahead and make sure that 314007730d87SJosef Bacik * we keep a reasonable number of metadata chunks allocated in the 314107730d87SJosef Bacik * FS as well. 314207730d87SJosef Bacik */ 314307730d87SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 314407730d87SJosef Bacik fs_info->data_chunk_allocations++; 314507730d87SJosef Bacik if (!(fs_info->data_chunk_allocations % 314607730d87SJosef Bacik fs_info->metadata_ratio)) 314707730d87SJosef Bacik force_metadata_allocation(fs_info); 314807730d87SJosef Bacik } 314907730d87SJosef Bacik 315007730d87SJosef Bacik /* 315107730d87SJosef Bacik * Check if we have enough space in SYSTEM chunk because we may need 315207730d87SJosef Bacik * to update devices. 315307730d87SJosef Bacik */ 315407730d87SJosef Bacik check_system_chunk(trans, flags); 315507730d87SJosef Bacik 315607730d87SJosef Bacik ret = btrfs_alloc_chunk(trans, flags); 315707730d87SJosef Bacik trans->allocating_chunk = false; 315807730d87SJosef Bacik 315907730d87SJosef Bacik spin_lock(&space_info->lock); 316007730d87SJosef Bacik if (ret < 0) { 316107730d87SJosef Bacik if (ret == -ENOSPC) 316207730d87SJosef Bacik space_info->full = 1; 316307730d87SJosef Bacik else 316407730d87SJosef Bacik goto out; 316507730d87SJosef Bacik } else { 316607730d87SJosef Bacik ret = 1; 316707730d87SJosef Bacik space_info->max_extent_size = 0; 316807730d87SJosef Bacik } 316907730d87SJosef Bacik 317007730d87SJosef Bacik space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 317107730d87SJosef Bacik out: 317207730d87SJosef Bacik space_info->chunk_alloc = 0; 317307730d87SJosef Bacik spin_unlock(&space_info->lock); 317407730d87SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 317507730d87SJosef Bacik /* 317607730d87SJosef Bacik * When we allocate a new chunk we reserve space in the chunk block 317707730d87SJosef Bacik * reserve to make sure we can COW nodes/leafs in the chunk tree or 317807730d87SJosef Bacik * add new nodes/leafs to it if we end up needing to do it when 317907730d87SJosef Bacik * inserting the chunk item and updating device items as part of the 318007730d87SJosef Bacik * second phase of chunk allocation, performed by 318107730d87SJosef Bacik * btrfs_finish_chunk_alloc(). 
So make sure we don't accumulate a 318207730d87SJosef Bacik * large number of new block groups to create in our transaction 318307730d87SJosef Bacik * handle's new_bgs list to avoid exhausting the chunk block reserve 318407730d87SJosef Bacik * in extreme cases - like having a single transaction create many new 318507730d87SJosef Bacik * block groups when starting to write out the free space caches of all 318607730d87SJosef Bacik * the block groups that were made dirty during the lifetime of the 318707730d87SJosef Bacik * transaction. 318807730d87SJosef Bacik */ 318907730d87SJosef Bacik if (trans->chunk_bytes_reserved >= (u64)SZ_2M) 319007730d87SJosef Bacik btrfs_create_pending_block_groups(trans); 319107730d87SJosef Bacik 319207730d87SJosef Bacik return ret; 319307730d87SJosef Bacik } 319407730d87SJosef Bacik 319507730d87SJosef Bacik static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) 319607730d87SJosef Bacik { 319707730d87SJosef Bacik u64 num_dev; 319807730d87SJosef Bacik 319907730d87SJosef Bacik num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; 320007730d87SJosef Bacik if (!num_dev) 320107730d87SJosef Bacik num_dev = fs_info->fs_devices->rw_devices; 320207730d87SJosef Bacik 320307730d87SJosef Bacik return num_dev; 320407730d87SJosef Bacik } 320507730d87SJosef Bacik 320607730d87SJosef Bacik /* 3207a9143bd3SMarcos Paulo de Souza * Reserve space in the system space for allocating or removing a chunk 320807730d87SJosef Bacik */ 320907730d87SJosef Bacik void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) 321007730d87SJosef Bacik { 321107730d87SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 321207730d87SJosef Bacik struct btrfs_space_info *info; 321307730d87SJosef Bacik u64 left; 321407730d87SJosef Bacik u64 thresh; 321507730d87SJosef Bacik int ret = 0; 321607730d87SJosef Bacik u64 num_devs; 321707730d87SJosef Bacik 321807730d87SJosef Bacik /* 321907730d87SJosef Bacik * Needed because we can end up allocating a system chunk and for an 322007730d87SJosef Bacik * atomic and race free space reservation in the chunk block reserve. 322107730d87SJosef Bacik */ 322207730d87SJosef Bacik lockdep_assert_held(&fs_info->chunk_mutex); 322307730d87SJosef Bacik 322407730d87SJosef Bacik info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 322507730d87SJosef Bacik spin_lock(&info->lock); 322607730d87SJosef Bacik left = info->total_bytes - btrfs_space_info_used(info, true); 322707730d87SJosef Bacik spin_unlock(&info->lock); 322807730d87SJosef Bacik 322907730d87SJosef Bacik num_devs = get_profile_num_devs(fs_info, type); 323007730d87SJosef Bacik 323107730d87SJosef Bacik /* num_devs device items to update and 1 chunk item to add or remove */ 32322bd36e7bSJosef Bacik thresh = btrfs_calc_metadata_size(fs_info, num_devs) + 32332bd36e7bSJosef Bacik btrfs_calc_insert_metadata_size(fs_info, 1); 323407730d87SJosef Bacik 323507730d87SJosef Bacik if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 323607730d87SJosef Bacik btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", 323707730d87SJosef Bacik left, thresh, type); 323807730d87SJosef Bacik btrfs_dump_space_info(fs_info, info, 0, 0); 323907730d87SJosef Bacik } 324007730d87SJosef Bacik 324107730d87SJosef Bacik if (left < thresh) { 324207730d87SJosef Bacik u64 flags = btrfs_system_alloc_profile(fs_info); 324307730d87SJosef Bacik 324407730d87SJosef Bacik /* 324507730d87SJosef Bacik * Ignore failure to create system chunk. 
We might end up not 324607730d87SJosef Bacik * needing it, as we might not need to COW all nodes/leafs from 324707730d87SJosef Bacik * the paths we visit in the chunk tree (they were already COWed 324807730d87SJosef Bacik * or created in the current transaction for example). 324907730d87SJosef Bacik */ 325007730d87SJosef Bacik ret = btrfs_alloc_chunk(trans, flags); 325107730d87SJosef Bacik } 325207730d87SJosef Bacik 325307730d87SJosef Bacik if (!ret) { 325407730d87SJosef Bacik ret = btrfs_block_rsv_add(fs_info->chunk_root, 325507730d87SJosef Bacik &fs_info->chunk_block_rsv, 325607730d87SJosef Bacik thresh, BTRFS_RESERVE_NO_FLUSH); 325707730d87SJosef Bacik if (!ret) 325807730d87SJosef Bacik trans->chunk_bytes_reserved += thresh; 325907730d87SJosef Bacik } 326007730d87SJosef Bacik } 326107730d87SJosef Bacik 32623e43c279SJosef Bacik void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 32633e43c279SJosef Bacik { 326432da5386SDavid Sterba struct btrfs_block_group *block_group; 32653e43c279SJosef Bacik u64 last = 0; 32663e43c279SJosef Bacik 32673e43c279SJosef Bacik while (1) { 32683e43c279SJosef Bacik struct inode *inode; 32693e43c279SJosef Bacik 32703e43c279SJosef Bacik block_group = btrfs_lookup_first_block_group(info, last); 32713e43c279SJosef Bacik while (block_group) { 32723e43c279SJosef Bacik btrfs_wait_block_group_cache_done(block_group); 32733e43c279SJosef Bacik spin_lock(&block_group->lock); 32743e43c279SJosef Bacik if (block_group->iref) 32753e43c279SJosef Bacik break; 32763e43c279SJosef Bacik spin_unlock(&block_group->lock); 32773e43c279SJosef Bacik block_group = btrfs_next_block_group(block_group); 32783e43c279SJosef Bacik } 32793e43c279SJosef Bacik if (!block_group) { 32803e43c279SJosef Bacik if (last == 0) 32813e43c279SJosef Bacik break; 32823e43c279SJosef Bacik last = 0; 32833e43c279SJosef Bacik continue; 32843e43c279SJosef Bacik } 32853e43c279SJosef Bacik 32863e43c279SJosef Bacik inode = block_group->inode; 32873e43c279SJosef Bacik block_group->iref = 0; 32883e43c279SJosef Bacik block_group->inode = NULL; 32893e43c279SJosef Bacik spin_unlock(&block_group->lock); 32903e43c279SJosef Bacik ASSERT(block_group->io_ctl.inode == NULL); 32913e43c279SJosef Bacik iput(inode); 3292b3470b5dSDavid Sterba last = block_group->start + block_group->length; 32933e43c279SJosef Bacik btrfs_put_block_group(block_group); 32943e43c279SJosef Bacik } 32953e43c279SJosef Bacik } 32963e43c279SJosef Bacik 32973e43c279SJosef Bacik /* 32983e43c279SJosef Bacik * Must be called only after stopping all workers, since we could have block 32993e43c279SJosef Bacik * group caching kthreads running, and therefore they could race with us if we 33003e43c279SJosef Bacik * freed the block groups before stopping them. 
33013e43c279SJosef Bacik */ 33023e43c279SJosef Bacik int btrfs_free_block_groups(struct btrfs_fs_info *info) 33033e43c279SJosef Bacik { 330432da5386SDavid Sterba struct btrfs_block_group *block_group; 33053e43c279SJosef Bacik struct btrfs_space_info *space_info; 33063e43c279SJosef Bacik struct btrfs_caching_control *caching_ctl; 33073e43c279SJosef Bacik struct rb_node *n; 33083e43c279SJosef Bacik 33093e43c279SJosef Bacik down_write(&info->commit_root_sem); 33103e43c279SJosef Bacik while (!list_empty(&info->caching_block_groups)) { 33113e43c279SJosef Bacik caching_ctl = list_entry(info->caching_block_groups.next, 33123e43c279SJosef Bacik struct btrfs_caching_control, list); 33133e43c279SJosef Bacik list_del(&caching_ctl->list); 33143e43c279SJosef Bacik btrfs_put_caching_control(caching_ctl); 33153e43c279SJosef Bacik } 33163e43c279SJosef Bacik up_write(&info->commit_root_sem); 33173e43c279SJosef Bacik 33183e43c279SJosef Bacik spin_lock(&info->unused_bgs_lock); 33193e43c279SJosef Bacik while (!list_empty(&info->unused_bgs)) { 33203e43c279SJosef Bacik block_group = list_first_entry(&info->unused_bgs, 332132da5386SDavid Sterba struct btrfs_block_group, 33223e43c279SJosef Bacik bg_list); 33233e43c279SJosef Bacik list_del_init(&block_group->bg_list); 33243e43c279SJosef Bacik btrfs_put_block_group(block_group); 33253e43c279SJosef Bacik } 33263e43c279SJosef Bacik spin_unlock(&info->unused_bgs_lock); 33273e43c279SJosef Bacik 33283e43c279SJosef Bacik spin_lock(&info->block_group_cache_lock); 33293e43c279SJosef Bacik while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { 333032da5386SDavid Sterba block_group = rb_entry(n, struct btrfs_block_group, 33313e43c279SJosef Bacik cache_node); 33323e43c279SJosef Bacik rb_erase(&block_group->cache_node, 33333e43c279SJosef Bacik &info->block_group_cache_tree); 33343e43c279SJosef Bacik RB_CLEAR_NODE(&block_group->cache_node); 33353e43c279SJosef Bacik spin_unlock(&info->block_group_cache_lock); 33363e43c279SJosef Bacik 33373e43c279SJosef Bacik down_write(&block_group->space_info->groups_sem); 33383e43c279SJosef Bacik list_del(&block_group->list); 33393e43c279SJosef Bacik up_write(&block_group->space_info->groups_sem); 33403e43c279SJosef Bacik 33413e43c279SJosef Bacik /* 33423e43c279SJosef Bacik * We haven't cached this block group, which means we could 33433e43c279SJosef Bacik * possibly have excluded extents on this block group. 
33283e43c279SJosef Bacik spin_lock(&info->block_group_cache_lock);
33293e43c279SJosef Bacik while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
333032da5386SDavid Sterba block_group = rb_entry(n, struct btrfs_block_group,
33313e43c279SJosef Bacik cache_node);
33323e43c279SJosef Bacik rb_erase(&block_group->cache_node,
33333e43c279SJosef Bacik &info->block_group_cache_tree);
33343e43c279SJosef Bacik RB_CLEAR_NODE(&block_group->cache_node);
33353e43c279SJosef Bacik spin_unlock(&info->block_group_cache_lock);
33363e43c279SJosef Bacik
33373e43c279SJosef Bacik down_write(&block_group->space_info->groups_sem);
33383e43c279SJosef Bacik list_del(&block_group->list);
33393e43c279SJosef Bacik up_write(&block_group->space_info->groups_sem);
33403e43c279SJosef Bacik
33413e43c279SJosef Bacik /*
33423e43c279SJosef Bacik * We haven't cached this block group, which means we could
33433e43c279SJosef Bacik * possibly have excluded extents on this block group.
33443e43c279SJosef Bacik */
33453e43c279SJosef Bacik if (block_group->cached == BTRFS_CACHE_NO ||
33463e43c279SJosef Bacik block_group->cached == BTRFS_CACHE_ERROR)
33473e43c279SJosef Bacik btrfs_free_excluded_extents(block_group);
33483e43c279SJosef Bacik
33493e43c279SJosef Bacik btrfs_remove_free_space_cache(block_group);
33503e43c279SJosef Bacik ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
33513e43c279SJosef Bacik ASSERT(list_empty(&block_group->dirty_list));
33523e43c279SJosef Bacik ASSERT(list_empty(&block_group->io_list));
33533e43c279SJosef Bacik ASSERT(list_empty(&block_group->bg_list));
335448aaeebeSJosef Bacik ASSERT(refcount_read(&block_group->refs) == 1);
33553e43c279SJosef Bacik btrfs_put_block_group(block_group);
33563e43c279SJosef Bacik
33573e43c279SJosef Bacik spin_lock(&info->block_group_cache_lock);
33583e43c279SJosef Bacik }
33593e43c279SJosef Bacik spin_unlock(&info->block_group_cache_lock);
33603e43c279SJosef Bacik
33613e43c279SJosef Bacik btrfs_release_global_block_rsv(info);
33623e43c279SJosef Bacik
33633e43c279SJosef Bacik while (!list_empty(&info->space_info)) {
33643e43c279SJosef Bacik space_info = list_entry(info->space_info.next,
33653e43c279SJosef Bacik struct btrfs_space_info,
33663e43c279SJosef Bacik list);
33673e43c279SJosef Bacik
33683e43c279SJosef Bacik /*
33693e43c279SJosef Bacik * Do not hide this behind enospc_debug; this is actually
33703e43c279SJosef Bacik * important and indicates a real bug if it happens.
33713e43c279SJosef Bacik */
33723e43c279SJosef Bacik if (WARN_ON(space_info->bytes_pinned > 0 ||
33733e43c279SJosef Bacik space_info->bytes_reserved > 0 ||
33743e43c279SJosef Bacik space_info->bytes_may_use > 0))
33753e43c279SJosef Bacik btrfs_dump_space_info(info, space_info, 0, 0);
3376d611add4SFilipe Manana WARN_ON(space_info->reclaim_size > 0);
33773e43c279SJosef Bacik list_del(&space_info->list);
33783e43c279SJosef Bacik btrfs_sysfs_remove_space_info(space_info);
33793e43c279SJosef Bacik }
33803e43c279SJosef Bacik return 0;
33813e43c279SJosef Bacik }
3382684b752bSFilipe Manana
3383684b752bSFilipe Manana void btrfs_freeze_block_group(struct btrfs_block_group *cache)
3384684b752bSFilipe Manana {
3385684b752bSFilipe Manana atomic_inc(&cache->frozen);
3386684b752bSFilipe Manana }
3387684b752bSFilipe Manana
3388684b752bSFilipe Manana void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
3389684b752bSFilipe Manana {
3390684b752bSFilipe Manana struct btrfs_fs_info *fs_info = block_group->fs_info;
3391684b752bSFilipe Manana struct extent_map_tree *em_tree;
3392684b752bSFilipe Manana struct extent_map *em;
3393684b752bSFilipe Manana bool cleanup;
3394684b752bSFilipe Manana
3395684b752bSFilipe Manana spin_lock(&block_group->lock);
3396684b752bSFilipe Manana cleanup = (atomic_dec_and_test(&block_group->frozen) &&
3397684b752bSFilipe Manana block_group->removed);
3398684b752bSFilipe Manana spin_unlock(&block_group->lock);
3399684b752bSFilipe Manana
3400684b752bSFilipe Manana if (cleanup) {
3401684b752bSFilipe Manana em_tree = &fs_info->mapping_tree;
3402684b752bSFilipe Manana write_lock(&em_tree->lock);
3403684b752bSFilipe Manana em = lookup_extent_mapping(em_tree, block_group->start,
3404684b752bSFilipe Manana 1);
3405684b752bSFilipe Manana BUG_ON(!em); /* logic error, can't happen */
3406684b752bSFilipe Manana remove_extent_mapping(em_tree, em);
3407684b752bSFilipe Manana write_unlock(&em_tree->lock);
3408684b752bSFilipe Manana
3409684b752bSFilipe Manana /* once for us and once for the tree */
3410684b752bSFilipe Manana free_extent_map(em);
3411684b752bSFilipe Manana free_extent_map(em);
3412684b752bSFilipe Manana
3413684b752bSFilipe Manana /*
3414684b752bSFilipe Manana * We may have left one free space entry, and other tasks trimming
3415684b752bSFilipe Manana * this block group may have left one entry each.
3416684b752bSFilipe Manana * Free them if any.
3417684b752bSFilipe Manana */
3418684b752bSFilipe Manana __btrfs_remove_free_space_cache(block_group->free_space_ctl);
3419684b752bSFilipe Manana }
3420684b752bSFilipe Manana }
3421
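/*
 * Illustrative pairing for the freeze/unfreeze helpers above (the caller
 * below is a hypothetical sketch, not code from this file): a task that
 * needs the block group's chunk mapping to stay alive, for example while
 * trimming it, freezes the group first.  If the block group is removed in
 * the meantime, the final unfreeze performs the deferred cleanup shown
 * above (dropping the extent map and any leftover free space entries).
 *
 *	btrfs_freeze_block_group(block_group);
 *	ret = work_on_block_group(block_group);   - hypothetical helper
 *	btrfs_unfreeze_block_group(block_group);
 */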