12e405ad8SJosef Bacik // SPDX-License-Identifier: GPL-2.0 22e405ad8SJosef Bacik 32e405ad8SJosef Bacik #include "ctree.h" 42e405ad8SJosef Bacik #include "block-group.h" 53eeb3226SJosef Bacik #include "space-info.h" 69f21246dSJosef Bacik #include "disk-io.h" 79f21246dSJosef Bacik #include "free-space-cache.h" 89f21246dSJosef Bacik #include "free-space-tree.h" 9e3e0520bSJosef Bacik #include "disk-io.h" 10e3e0520bSJosef Bacik #include "volumes.h" 11e3e0520bSJosef Bacik #include "transaction.h" 12e3e0520bSJosef Bacik #include "ref-verify.h" 134358d963SJosef Bacik #include "sysfs.h" 144358d963SJosef Bacik #include "tree-log.h" 1577745c05SJosef Bacik #include "delalloc-space.h" 1607730d87SJosef Bacik #include "math.h" 172e405ad8SJosef Bacik 18878d7b67SJosef Bacik /* 19878d7b67SJosef Bacik * Return target flags in extended format or 0 if restripe for this chunk_type 20878d7b67SJosef Bacik * is not in progress 21878d7b67SJosef Bacik * 22878d7b67SJosef Bacik * Should be called with balance_lock held 23878d7b67SJosef Bacik */ 24878d7b67SJosef Bacik u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) 25878d7b67SJosef Bacik { 26878d7b67SJosef Bacik struct btrfs_balance_control *bctl = fs_info->balance_ctl; 27878d7b67SJosef Bacik u64 target = 0; 28878d7b67SJosef Bacik 29878d7b67SJosef Bacik if (!bctl) 30878d7b67SJosef Bacik return 0; 31878d7b67SJosef Bacik 32878d7b67SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA && 33878d7b67SJosef Bacik bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { 34878d7b67SJosef Bacik target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; 35878d7b67SJosef Bacik } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM && 36878d7b67SJosef Bacik bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 37878d7b67SJosef Bacik target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; 38878d7b67SJosef Bacik } else if (flags & BTRFS_BLOCK_GROUP_METADATA && 39878d7b67SJosef Bacik bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { 40878d7b67SJosef Bacik target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; 41878d7b67SJosef Bacik } 42878d7b67SJosef Bacik 43878d7b67SJosef Bacik return target; 44878d7b67SJosef Bacik } 45878d7b67SJosef Bacik 46878d7b67SJosef Bacik /* 47878d7b67SJosef Bacik * @flags: available profiles in extended format (see ctree.h) 48878d7b67SJosef Bacik * 49878d7b67SJosef Bacik * Return reduced profile in chunk format. If profile changing is in progress 50878d7b67SJosef Bacik * (either running or paused) picks the target profile (if it's already 51878d7b67SJosef Bacik * available), otherwise falls back to plain reducing. 52878d7b67SJosef Bacik */ 53878d7b67SJosef Bacik static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) 54878d7b67SJosef Bacik { 55878d7b67SJosef Bacik u64 num_devices = fs_info->fs_devices->rw_devices; 56878d7b67SJosef Bacik u64 target; 57878d7b67SJosef Bacik u64 raid_type; 58878d7b67SJosef Bacik u64 allowed = 0; 59878d7b67SJosef Bacik 60878d7b67SJosef Bacik /* 61878d7b67SJosef Bacik * See if restripe for this chunk_type is in progress, if so try to 62878d7b67SJosef Bacik * reduce to the target profile 63878d7b67SJosef Bacik */ 64878d7b67SJosef Bacik spin_lock(&fs_info->balance_lock); 65878d7b67SJosef Bacik target = btrfs_get_restripe_target(fs_info, flags); 66878d7b67SJosef Bacik if (target) { 67878d7b67SJosef Bacik /* Pick target profile only if it's already available */ 68878d7b67SJosef Bacik if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) { 69878d7b67SJosef Bacik spin_unlock(&fs_info->balance_lock); 70878d7b67SJosef Bacik return extended_to_chunk(target); 71878d7b67SJosef Bacik } 72878d7b67SJosef Bacik } 73878d7b67SJosef Bacik spin_unlock(&fs_info->balance_lock); 74878d7b67SJosef Bacik 75878d7b67SJosef Bacik /* First, mask out the RAID levels which aren't possible */ 76878d7b67SJosef Bacik for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { 77878d7b67SJosef Bacik if (num_devices >= btrfs_raid_array[raid_type].devs_min) 78878d7b67SJosef Bacik allowed |= btrfs_raid_array[raid_type].bg_flag; 79878d7b67SJosef Bacik } 80878d7b67SJosef Bacik allowed &= flags; 81878d7b67SJosef Bacik 82878d7b67SJosef Bacik if (allowed & BTRFS_BLOCK_GROUP_RAID6) 83878d7b67SJosef Bacik allowed = BTRFS_BLOCK_GROUP_RAID6; 84878d7b67SJosef Bacik else if (allowed & BTRFS_BLOCK_GROUP_RAID5) 85878d7b67SJosef Bacik allowed = BTRFS_BLOCK_GROUP_RAID5; 86878d7b67SJosef Bacik else if (allowed & BTRFS_BLOCK_GROUP_RAID10) 87878d7b67SJosef Bacik allowed = BTRFS_BLOCK_GROUP_RAID10; 88878d7b67SJosef Bacik else if (allowed & BTRFS_BLOCK_GROUP_RAID1) 89878d7b67SJosef Bacik allowed = BTRFS_BLOCK_GROUP_RAID1; 90878d7b67SJosef Bacik else if (allowed & BTRFS_BLOCK_GROUP_RAID0) 91878d7b67SJosef Bacik allowed = BTRFS_BLOCK_GROUP_RAID0; 92878d7b67SJosef Bacik 93878d7b67SJosef Bacik flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK; 94878d7b67SJosef Bacik 95878d7b67SJosef Bacik return extended_to_chunk(flags | allowed); 96878d7b67SJosef Bacik } 97878d7b67SJosef Bacik 98878d7b67SJosef Bacik static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) 99878d7b67SJosef Bacik { 100878d7b67SJosef Bacik unsigned seq; 101878d7b67SJosef Bacik u64 flags; 102878d7b67SJosef Bacik 103878d7b67SJosef Bacik do { 104878d7b67SJosef Bacik flags = orig_flags; 105878d7b67SJosef Bacik seq = read_seqbegin(&fs_info->profiles_lock); 106878d7b67SJosef Bacik 107878d7b67SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA) 108878d7b67SJosef Bacik flags |= fs_info->avail_data_alloc_bits; 109878d7b67SJosef Bacik else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 110878d7b67SJosef Bacik flags |= fs_info->avail_system_alloc_bits; 111878d7b67SJosef Bacik else if (flags & BTRFS_BLOCK_GROUP_METADATA) 112878d7b67SJosef Bacik flags |= fs_info->avail_metadata_alloc_bits; 113878d7b67SJosef Bacik } while (read_seqretry(&fs_info->profiles_lock, seq)); 114878d7b67SJosef Bacik 115878d7b67SJosef Bacik return btrfs_reduce_alloc_profile(fs_info, flags); 116878d7b67SJosef Bacik } 117878d7b67SJosef Bacik 118878d7b67SJosef Bacik u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) 119878d7b67SJosef Bacik { 120878d7b67SJosef Bacik return get_alloc_profile(fs_info, orig_flags); 121878d7b67SJosef Bacik } 122878d7b67SJosef Bacik 1233cad1284SJosef Bacik void btrfs_get_block_group(struct btrfs_block_group_cache *cache) 1243cad1284SJosef Bacik { 1253cad1284SJosef Bacik atomic_inc(&cache->count); 1263cad1284SJosef Bacik } 1273cad1284SJosef Bacik 1283cad1284SJosef Bacik void btrfs_put_block_group(struct btrfs_block_group_cache *cache) 1293cad1284SJosef Bacik { 1303cad1284SJosef Bacik if (atomic_dec_and_test(&cache->count)) { 1313cad1284SJosef Bacik WARN_ON(cache->pinned > 0); 1323cad1284SJosef Bacik WARN_ON(cache->reserved > 0); 1333cad1284SJosef Bacik 1343cad1284SJosef Bacik /* 1353cad1284SJosef Bacik * If not empty, someone is still holding mutex of 1363cad1284SJosef Bacik * full_stripe_lock, which can only be released by caller. 1373cad1284SJosef Bacik * And it will definitely cause use-after-free when caller 1383cad1284SJosef Bacik * tries to release full stripe lock. 1393cad1284SJosef Bacik * 1403cad1284SJosef Bacik * No better way to resolve, but only to warn. 1413cad1284SJosef Bacik */ 1423cad1284SJosef Bacik WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root)); 1433cad1284SJosef Bacik kfree(cache->free_space_ctl); 1443cad1284SJosef Bacik kfree(cache); 1453cad1284SJosef Bacik } 1463cad1284SJosef Bacik } 1473cad1284SJosef Bacik 1482e405ad8SJosef Bacik /* 1494358d963SJosef Bacik * This adds the block group to the fs_info rb tree for the block group cache 1504358d963SJosef Bacik */ 1514358d963SJosef Bacik static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, 1524358d963SJosef Bacik struct btrfs_block_group_cache *block_group) 1534358d963SJosef Bacik { 1544358d963SJosef Bacik struct rb_node **p; 1554358d963SJosef Bacik struct rb_node *parent = NULL; 1564358d963SJosef Bacik struct btrfs_block_group_cache *cache; 1574358d963SJosef Bacik 1584358d963SJosef Bacik spin_lock(&info->block_group_cache_lock); 1594358d963SJosef Bacik p = &info->block_group_cache_tree.rb_node; 1604358d963SJosef Bacik 1614358d963SJosef Bacik while (*p) { 1624358d963SJosef Bacik parent = *p; 1634358d963SJosef Bacik cache = rb_entry(parent, struct btrfs_block_group_cache, 1644358d963SJosef Bacik cache_node); 1654358d963SJosef Bacik if (block_group->key.objectid < cache->key.objectid) { 1664358d963SJosef Bacik p = &(*p)->rb_left; 1674358d963SJosef Bacik } else if (block_group->key.objectid > cache->key.objectid) { 1684358d963SJosef Bacik p = &(*p)->rb_right; 1694358d963SJosef Bacik } else { 1704358d963SJosef Bacik spin_unlock(&info->block_group_cache_lock); 1714358d963SJosef Bacik return -EEXIST; 1724358d963SJosef Bacik } 1734358d963SJosef Bacik } 1744358d963SJosef Bacik 1754358d963SJosef Bacik rb_link_node(&block_group->cache_node, parent, p); 1764358d963SJosef Bacik rb_insert_color(&block_group->cache_node, 1774358d963SJosef Bacik &info->block_group_cache_tree); 1784358d963SJosef Bacik 1794358d963SJosef Bacik if (info->first_logical_byte > block_group->key.objectid) 1804358d963SJosef Bacik info->first_logical_byte = block_group->key.objectid; 1814358d963SJosef Bacik 1824358d963SJosef Bacik spin_unlock(&info->block_group_cache_lock); 1834358d963SJosef Bacik 1844358d963SJosef Bacik return 0; 1854358d963SJosef Bacik } 1864358d963SJosef Bacik 1874358d963SJosef Bacik /* 1882e405ad8SJosef Bacik * This will return the block group at or after bytenr if contains is 0, else 1892e405ad8SJosef Bacik * it will return the block group that contains the bytenr 1902e405ad8SJosef Bacik */ 1912e405ad8SJosef Bacik static struct btrfs_block_group_cache *block_group_cache_tree_search( 1922e405ad8SJosef Bacik struct btrfs_fs_info *info, u64 bytenr, int contains) 1932e405ad8SJosef Bacik { 1942e405ad8SJosef Bacik struct btrfs_block_group_cache *cache, *ret = NULL; 1952e405ad8SJosef Bacik struct rb_node *n; 1962e405ad8SJosef Bacik u64 end, start; 1972e405ad8SJosef Bacik 1982e405ad8SJosef Bacik spin_lock(&info->block_group_cache_lock); 1992e405ad8SJosef Bacik n = info->block_group_cache_tree.rb_node; 2002e405ad8SJosef Bacik 2012e405ad8SJosef Bacik while (n) { 2022e405ad8SJosef Bacik cache = rb_entry(n, struct btrfs_block_group_cache, 2032e405ad8SJosef Bacik cache_node); 2042e405ad8SJosef Bacik end = cache->key.objectid + cache->key.offset - 1; 2052e405ad8SJosef Bacik start = cache->key.objectid; 2062e405ad8SJosef Bacik 2072e405ad8SJosef Bacik if (bytenr < start) { 2082e405ad8SJosef Bacik if (!contains && (!ret || start < ret->key.objectid)) 2092e405ad8SJosef Bacik ret = cache; 2102e405ad8SJosef Bacik n = n->rb_left; 2112e405ad8SJosef Bacik } else if (bytenr > start) { 2122e405ad8SJosef Bacik if (contains && bytenr <= end) { 2132e405ad8SJosef Bacik ret = cache; 2142e405ad8SJosef Bacik break; 2152e405ad8SJosef Bacik } 2162e405ad8SJosef Bacik n = n->rb_right; 2172e405ad8SJosef Bacik } else { 2182e405ad8SJosef Bacik ret = cache; 2192e405ad8SJosef Bacik break; 2202e405ad8SJosef Bacik } 2212e405ad8SJosef Bacik } 2222e405ad8SJosef Bacik if (ret) { 2232e405ad8SJosef Bacik btrfs_get_block_group(ret); 2242e405ad8SJosef Bacik if (bytenr == 0 && info->first_logical_byte > ret->key.objectid) 2252e405ad8SJosef Bacik info->first_logical_byte = ret->key.objectid; 2262e405ad8SJosef Bacik } 2272e405ad8SJosef Bacik spin_unlock(&info->block_group_cache_lock); 2282e405ad8SJosef Bacik 2292e405ad8SJosef Bacik return ret; 2302e405ad8SJosef Bacik } 2312e405ad8SJosef Bacik 2322e405ad8SJosef Bacik /* 2332e405ad8SJosef Bacik * Return the block group that starts at or after bytenr 2342e405ad8SJosef Bacik */ 2352e405ad8SJosef Bacik struct btrfs_block_group_cache *btrfs_lookup_first_block_group( 2362e405ad8SJosef Bacik struct btrfs_fs_info *info, u64 bytenr) 2372e405ad8SJosef Bacik { 2382e405ad8SJosef Bacik return block_group_cache_tree_search(info, bytenr, 0); 2392e405ad8SJosef Bacik } 2402e405ad8SJosef Bacik 2412e405ad8SJosef Bacik /* 2422e405ad8SJosef Bacik * Return the block group that contains the given bytenr 2432e405ad8SJosef Bacik */ 2442e405ad8SJosef Bacik struct btrfs_block_group_cache *btrfs_lookup_block_group( 2452e405ad8SJosef Bacik struct btrfs_fs_info *info, u64 bytenr) 2462e405ad8SJosef Bacik { 2472e405ad8SJosef Bacik return block_group_cache_tree_search(info, bytenr, 1); 2482e405ad8SJosef Bacik } 2492e405ad8SJosef Bacik 2502e405ad8SJosef Bacik struct btrfs_block_group_cache *btrfs_next_block_group( 2512e405ad8SJosef Bacik struct btrfs_block_group_cache *cache) 2522e405ad8SJosef Bacik { 2532e405ad8SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 2542e405ad8SJosef Bacik struct rb_node *node; 2552e405ad8SJosef Bacik 2562e405ad8SJosef Bacik spin_lock(&fs_info->block_group_cache_lock); 2572e405ad8SJosef Bacik 2582e405ad8SJosef Bacik /* If our block group was removed, we need a full search. */ 2592e405ad8SJosef Bacik if (RB_EMPTY_NODE(&cache->cache_node)) { 2602e405ad8SJosef Bacik const u64 next_bytenr = cache->key.objectid + cache->key.offset; 2612e405ad8SJosef Bacik 2622e405ad8SJosef Bacik spin_unlock(&fs_info->block_group_cache_lock); 2632e405ad8SJosef Bacik btrfs_put_block_group(cache); 2642e405ad8SJosef Bacik cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache; 2652e405ad8SJosef Bacik } 2662e405ad8SJosef Bacik node = rb_next(&cache->cache_node); 2672e405ad8SJosef Bacik btrfs_put_block_group(cache); 2682e405ad8SJosef Bacik if (node) { 2692e405ad8SJosef Bacik cache = rb_entry(node, struct btrfs_block_group_cache, 2702e405ad8SJosef Bacik cache_node); 2712e405ad8SJosef Bacik btrfs_get_block_group(cache); 2722e405ad8SJosef Bacik } else 2732e405ad8SJosef Bacik cache = NULL; 2742e405ad8SJosef Bacik spin_unlock(&fs_info->block_group_cache_lock); 2752e405ad8SJosef Bacik return cache; 2762e405ad8SJosef Bacik } 2773eeb3226SJosef Bacik 2783eeb3226SJosef Bacik bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) 2793eeb3226SJosef Bacik { 2803eeb3226SJosef Bacik struct btrfs_block_group_cache *bg; 2813eeb3226SJosef Bacik bool ret = true; 2823eeb3226SJosef Bacik 2833eeb3226SJosef Bacik bg = btrfs_lookup_block_group(fs_info, bytenr); 2843eeb3226SJosef Bacik if (!bg) 2853eeb3226SJosef Bacik return false; 2863eeb3226SJosef Bacik 2873eeb3226SJosef Bacik spin_lock(&bg->lock); 2883eeb3226SJosef Bacik if (bg->ro) 2893eeb3226SJosef Bacik ret = false; 2903eeb3226SJosef Bacik else 2913eeb3226SJosef Bacik atomic_inc(&bg->nocow_writers); 2923eeb3226SJosef Bacik spin_unlock(&bg->lock); 2933eeb3226SJosef Bacik 2943eeb3226SJosef Bacik /* No put on block group, done by btrfs_dec_nocow_writers */ 2953eeb3226SJosef Bacik if (!ret) 2963eeb3226SJosef Bacik btrfs_put_block_group(bg); 2973eeb3226SJosef Bacik 2983eeb3226SJosef Bacik return ret; 2993eeb3226SJosef Bacik } 3003eeb3226SJosef Bacik 3013eeb3226SJosef Bacik void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) 3023eeb3226SJosef Bacik { 3033eeb3226SJosef Bacik struct btrfs_block_group_cache *bg; 3043eeb3226SJosef Bacik 3053eeb3226SJosef Bacik bg = btrfs_lookup_block_group(fs_info, bytenr); 3063eeb3226SJosef Bacik ASSERT(bg); 3073eeb3226SJosef Bacik if (atomic_dec_and_test(&bg->nocow_writers)) 3083eeb3226SJosef Bacik wake_up_var(&bg->nocow_writers); 3093eeb3226SJosef Bacik /* 3103eeb3226SJosef Bacik * Once for our lookup and once for the lookup done by a previous call 3113eeb3226SJosef Bacik * to btrfs_inc_nocow_writers() 3123eeb3226SJosef Bacik */ 3133eeb3226SJosef Bacik btrfs_put_block_group(bg); 3143eeb3226SJosef Bacik btrfs_put_block_group(bg); 3153eeb3226SJosef Bacik } 3163eeb3226SJosef Bacik 3173eeb3226SJosef Bacik void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) 3183eeb3226SJosef Bacik { 3193eeb3226SJosef Bacik wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); 3203eeb3226SJosef Bacik } 3213eeb3226SJosef Bacik 3223eeb3226SJosef Bacik void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, 3233eeb3226SJosef Bacik const u64 start) 3243eeb3226SJosef Bacik { 3253eeb3226SJosef Bacik struct btrfs_block_group_cache *bg; 3263eeb3226SJosef Bacik 3273eeb3226SJosef Bacik bg = btrfs_lookup_block_group(fs_info, start); 3283eeb3226SJosef Bacik ASSERT(bg); 3293eeb3226SJosef Bacik if (atomic_dec_and_test(&bg->reservations)) 3303eeb3226SJosef Bacik wake_up_var(&bg->reservations); 3313eeb3226SJosef Bacik btrfs_put_block_group(bg); 3323eeb3226SJosef Bacik } 3333eeb3226SJosef Bacik 3343eeb3226SJosef Bacik void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) 3353eeb3226SJosef Bacik { 3363eeb3226SJosef Bacik struct btrfs_space_info *space_info = bg->space_info; 3373eeb3226SJosef Bacik 3383eeb3226SJosef Bacik ASSERT(bg->ro); 3393eeb3226SJosef Bacik 3403eeb3226SJosef Bacik if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) 3413eeb3226SJosef Bacik return; 3423eeb3226SJosef Bacik 3433eeb3226SJosef Bacik /* 3443eeb3226SJosef Bacik * Our block group is read only but before we set it to read only, 3453eeb3226SJosef Bacik * some task might have had allocated an extent from it already, but it 3463eeb3226SJosef Bacik * has not yet created a respective ordered extent (and added it to a 3473eeb3226SJosef Bacik * root's list of ordered extents). 3483eeb3226SJosef Bacik * Therefore wait for any task currently allocating extents, since the 3493eeb3226SJosef Bacik * block group's reservations counter is incremented while a read lock 3503eeb3226SJosef Bacik * on the groups' semaphore is held and decremented after releasing 3513eeb3226SJosef Bacik * the read access on that semaphore and creating the ordered extent. 3523eeb3226SJosef Bacik */ 3533eeb3226SJosef Bacik down_write(&space_info->groups_sem); 3543eeb3226SJosef Bacik up_write(&space_info->groups_sem); 3553eeb3226SJosef Bacik 3563eeb3226SJosef Bacik wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); 3573eeb3226SJosef Bacik } 3589f21246dSJosef Bacik 3599f21246dSJosef Bacik struct btrfs_caching_control *btrfs_get_caching_control( 3609f21246dSJosef Bacik struct btrfs_block_group_cache *cache) 3619f21246dSJosef Bacik { 3629f21246dSJosef Bacik struct btrfs_caching_control *ctl; 3639f21246dSJosef Bacik 3649f21246dSJosef Bacik spin_lock(&cache->lock); 3659f21246dSJosef Bacik if (!cache->caching_ctl) { 3669f21246dSJosef Bacik spin_unlock(&cache->lock); 3679f21246dSJosef Bacik return NULL; 3689f21246dSJosef Bacik } 3699f21246dSJosef Bacik 3709f21246dSJosef Bacik ctl = cache->caching_ctl; 3719f21246dSJosef Bacik refcount_inc(&ctl->count); 3729f21246dSJosef Bacik spin_unlock(&cache->lock); 3739f21246dSJosef Bacik return ctl; 3749f21246dSJosef Bacik } 3759f21246dSJosef Bacik 3769f21246dSJosef Bacik void btrfs_put_caching_control(struct btrfs_caching_control *ctl) 3779f21246dSJosef Bacik { 3789f21246dSJosef Bacik if (refcount_dec_and_test(&ctl->count)) 3799f21246dSJosef Bacik kfree(ctl); 3809f21246dSJosef Bacik } 3819f21246dSJosef Bacik 3829f21246dSJosef Bacik /* 3839f21246dSJosef Bacik * When we wait for progress in the block group caching, its because our 3849f21246dSJosef Bacik * allocation attempt failed at least once. So, we must sleep and let some 3859f21246dSJosef Bacik * progress happen before we try again. 3869f21246dSJosef Bacik * 3879f21246dSJosef Bacik * This function will sleep at least once waiting for new free space to show 3889f21246dSJosef Bacik * up, and then it will check the block group free space numbers for our min 3899f21246dSJosef Bacik * num_bytes. Another option is to have it go ahead and look in the rbtree for 3909f21246dSJosef Bacik * a free extent of a given size, but this is a good start. 3919f21246dSJosef Bacik * 3929f21246dSJosef Bacik * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using 3939f21246dSJosef Bacik * any of the information in this block group. 3949f21246dSJosef Bacik */ 3959f21246dSJosef Bacik void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, 3969f21246dSJosef Bacik u64 num_bytes) 3979f21246dSJosef Bacik { 3989f21246dSJosef Bacik struct btrfs_caching_control *caching_ctl; 3999f21246dSJosef Bacik 4009f21246dSJosef Bacik caching_ctl = btrfs_get_caching_control(cache); 4019f21246dSJosef Bacik if (!caching_ctl) 4029f21246dSJosef Bacik return; 4039f21246dSJosef Bacik 4049f21246dSJosef Bacik wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) || 4059f21246dSJosef Bacik (cache->free_space_ctl->free_space >= num_bytes)); 4069f21246dSJosef Bacik 4079f21246dSJosef Bacik btrfs_put_caching_control(caching_ctl); 4089f21246dSJosef Bacik } 4099f21246dSJosef Bacik 4109f21246dSJosef Bacik int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) 4119f21246dSJosef Bacik { 4129f21246dSJosef Bacik struct btrfs_caching_control *caching_ctl; 4139f21246dSJosef Bacik int ret = 0; 4149f21246dSJosef Bacik 4159f21246dSJosef Bacik caching_ctl = btrfs_get_caching_control(cache); 4169f21246dSJosef Bacik if (!caching_ctl) 4179f21246dSJosef Bacik return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; 4189f21246dSJosef Bacik 4199f21246dSJosef Bacik wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache)); 4209f21246dSJosef Bacik if (cache->cached == BTRFS_CACHE_ERROR) 4219f21246dSJosef Bacik ret = -EIO; 4229f21246dSJosef Bacik btrfs_put_caching_control(caching_ctl); 4239f21246dSJosef Bacik return ret; 4249f21246dSJosef Bacik } 4259f21246dSJosef Bacik 4269f21246dSJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 4279f21246dSJosef Bacik void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group) 4289f21246dSJosef Bacik { 4299f21246dSJosef Bacik struct btrfs_fs_info *fs_info = block_group->fs_info; 4309f21246dSJosef Bacik u64 start = block_group->key.objectid; 4319f21246dSJosef Bacik u64 len = block_group->key.offset; 4329f21246dSJosef Bacik u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? 4339f21246dSJosef Bacik fs_info->nodesize : fs_info->sectorsize; 4349f21246dSJosef Bacik u64 step = chunk << 1; 4359f21246dSJosef Bacik 4369f21246dSJosef Bacik while (len > chunk) { 4379f21246dSJosef Bacik btrfs_remove_free_space(block_group, start, chunk); 4389f21246dSJosef Bacik start += step; 4399f21246dSJosef Bacik if (len < step) 4409f21246dSJosef Bacik len = 0; 4419f21246dSJosef Bacik else 4429f21246dSJosef Bacik len -= step; 4439f21246dSJosef Bacik } 4449f21246dSJosef Bacik } 4459f21246dSJosef Bacik #endif 4469f21246dSJosef Bacik 4479f21246dSJosef Bacik /* 4489f21246dSJosef Bacik * This is only called by btrfs_cache_block_group, since we could have freed 4499f21246dSJosef Bacik * extents we need to check the pinned_extents for any extents that can't be 4509f21246dSJosef Bacik * used yet since their free space will be released as soon as the transaction 4519f21246dSJosef Bacik * commits. 4529f21246dSJosef Bacik */ 4539f21246dSJosef Bacik u64 add_new_free_space(struct btrfs_block_group_cache *block_group, 4549f21246dSJosef Bacik u64 start, u64 end) 4559f21246dSJosef Bacik { 4569f21246dSJosef Bacik struct btrfs_fs_info *info = block_group->fs_info; 4579f21246dSJosef Bacik u64 extent_start, extent_end, size, total_added = 0; 4589f21246dSJosef Bacik int ret; 4599f21246dSJosef Bacik 4609f21246dSJosef Bacik while (start < end) { 4619f21246dSJosef Bacik ret = find_first_extent_bit(info->pinned_extents, start, 4629f21246dSJosef Bacik &extent_start, &extent_end, 4639f21246dSJosef Bacik EXTENT_DIRTY | EXTENT_UPTODATE, 4649f21246dSJosef Bacik NULL); 4659f21246dSJosef Bacik if (ret) 4669f21246dSJosef Bacik break; 4679f21246dSJosef Bacik 4689f21246dSJosef Bacik if (extent_start <= start) { 4699f21246dSJosef Bacik start = extent_end + 1; 4709f21246dSJosef Bacik } else if (extent_start > start && extent_start < end) { 4719f21246dSJosef Bacik size = extent_start - start; 4729f21246dSJosef Bacik total_added += size; 4739f21246dSJosef Bacik ret = btrfs_add_free_space(block_group, start, 4749f21246dSJosef Bacik size); 4759f21246dSJosef Bacik BUG_ON(ret); /* -ENOMEM or logic error */ 4769f21246dSJosef Bacik start = extent_end + 1; 4779f21246dSJosef Bacik } else { 4789f21246dSJosef Bacik break; 4799f21246dSJosef Bacik } 4809f21246dSJosef Bacik } 4819f21246dSJosef Bacik 4829f21246dSJosef Bacik if (start < end) { 4839f21246dSJosef Bacik size = end - start; 4849f21246dSJosef Bacik total_added += size; 4859f21246dSJosef Bacik ret = btrfs_add_free_space(block_group, start, size); 4869f21246dSJosef Bacik BUG_ON(ret); /* -ENOMEM or logic error */ 4879f21246dSJosef Bacik } 4889f21246dSJosef Bacik 4899f21246dSJosef Bacik return total_added; 4909f21246dSJosef Bacik } 4919f21246dSJosef Bacik 4929f21246dSJosef Bacik static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) 4939f21246dSJosef Bacik { 4949f21246dSJosef Bacik struct btrfs_block_group_cache *block_group = caching_ctl->block_group; 4959f21246dSJosef Bacik struct btrfs_fs_info *fs_info = block_group->fs_info; 4969f21246dSJosef Bacik struct btrfs_root *extent_root = fs_info->extent_root; 4979f21246dSJosef Bacik struct btrfs_path *path; 4989f21246dSJosef Bacik struct extent_buffer *leaf; 4999f21246dSJosef Bacik struct btrfs_key key; 5009f21246dSJosef Bacik u64 total_found = 0; 5019f21246dSJosef Bacik u64 last = 0; 5029f21246dSJosef Bacik u32 nritems; 5039f21246dSJosef Bacik int ret; 5049f21246dSJosef Bacik bool wakeup = true; 5059f21246dSJosef Bacik 5069f21246dSJosef Bacik path = btrfs_alloc_path(); 5079f21246dSJosef Bacik if (!path) 5089f21246dSJosef Bacik return -ENOMEM; 5099f21246dSJosef Bacik 5109f21246dSJosef Bacik last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); 5119f21246dSJosef Bacik 5129f21246dSJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 5139f21246dSJosef Bacik /* 5149f21246dSJosef Bacik * If we're fragmenting we don't want to make anybody think we can 5159f21246dSJosef Bacik * allocate from this block group until we've had a chance to fragment 5169f21246dSJosef Bacik * the free space. 5179f21246dSJosef Bacik */ 5189f21246dSJosef Bacik if (btrfs_should_fragment_free_space(block_group)) 5199f21246dSJosef Bacik wakeup = false; 5209f21246dSJosef Bacik #endif 5219f21246dSJosef Bacik /* 5229f21246dSJosef Bacik * We don't want to deadlock with somebody trying to allocate a new 5239f21246dSJosef Bacik * extent for the extent root while also trying to search the extent 5249f21246dSJosef Bacik * root to add free space. So we skip locking and search the commit 5259f21246dSJosef Bacik * root, since its read-only 5269f21246dSJosef Bacik */ 5279f21246dSJosef Bacik path->skip_locking = 1; 5289f21246dSJosef Bacik path->search_commit_root = 1; 5299f21246dSJosef Bacik path->reada = READA_FORWARD; 5309f21246dSJosef Bacik 5319f21246dSJosef Bacik key.objectid = last; 5329f21246dSJosef Bacik key.offset = 0; 5339f21246dSJosef Bacik key.type = BTRFS_EXTENT_ITEM_KEY; 5349f21246dSJosef Bacik 5359f21246dSJosef Bacik next: 5369f21246dSJosef Bacik ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 5379f21246dSJosef Bacik if (ret < 0) 5389f21246dSJosef Bacik goto out; 5399f21246dSJosef Bacik 5409f21246dSJosef Bacik leaf = path->nodes[0]; 5419f21246dSJosef Bacik nritems = btrfs_header_nritems(leaf); 5429f21246dSJosef Bacik 5439f21246dSJosef Bacik while (1) { 5449f21246dSJosef Bacik if (btrfs_fs_closing(fs_info) > 1) { 5459f21246dSJosef Bacik last = (u64)-1; 5469f21246dSJosef Bacik break; 5479f21246dSJosef Bacik } 5489f21246dSJosef Bacik 5499f21246dSJosef Bacik if (path->slots[0] < nritems) { 5509f21246dSJosef Bacik btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 5519f21246dSJosef Bacik } else { 5529f21246dSJosef Bacik ret = btrfs_find_next_key(extent_root, path, &key, 0, 0); 5539f21246dSJosef Bacik if (ret) 5549f21246dSJosef Bacik break; 5559f21246dSJosef Bacik 5569f21246dSJosef Bacik if (need_resched() || 5579f21246dSJosef Bacik rwsem_is_contended(&fs_info->commit_root_sem)) { 5589f21246dSJosef Bacik if (wakeup) 5599f21246dSJosef Bacik caching_ctl->progress = last; 5609f21246dSJosef Bacik btrfs_release_path(path); 5619f21246dSJosef Bacik up_read(&fs_info->commit_root_sem); 5629f21246dSJosef Bacik mutex_unlock(&caching_ctl->mutex); 5639f21246dSJosef Bacik cond_resched(); 5649f21246dSJosef Bacik mutex_lock(&caching_ctl->mutex); 5659f21246dSJosef Bacik down_read(&fs_info->commit_root_sem); 5669f21246dSJosef Bacik goto next; 5679f21246dSJosef Bacik } 5689f21246dSJosef Bacik 5699f21246dSJosef Bacik ret = btrfs_next_leaf(extent_root, path); 5709f21246dSJosef Bacik if (ret < 0) 5719f21246dSJosef Bacik goto out; 5729f21246dSJosef Bacik if (ret) 5739f21246dSJosef Bacik break; 5749f21246dSJosef Bacik leaf = path->nodes[0]; 5759f21246dSJosef Bacik nritems = btrfs_header_nritems(leaf); 5769f21246dSJosef Bacik continue; 5779f21246dSJosef Bacik } 5789f21246dSJosef Bacik 5799f21246dSJosef Bacik if (key.objectid < last) { 5809f21246dSJosef Bacik key.objectid = last; 5819f21246dSJosef Bacik key.offset = 0; 5829f21246dSJosef Bacik key.type = BTRFS_EXTENT_ITEM_KEY; 5839f21246dSJosef Bacik 5849f21246dSJosef Bacik if (wakeup) 5859f21246dSJosef Bacik caching_ctl->progress = last; 5869f21246dSJosef Bacik btrfs_release_path(path); 5879f21246dSJosef Bacik goto next; 5889f21246dSJosef Bacik } 5899f21246dSJosef Bacik 5909f21246dSJosef Bacik if (key.objectid < block_group->key.objectid) { 5919f21246dSJosef Bacik path->slots[0]++; 5929f21246dSJosef Bacik continue; 5939f21246dSJosef Bacik } 5949f21246dSJosef Bacik 5959f21246dSJosef Bacik if (key.objectid >= block_group->key.objectid + 5969f21246dSJosef Bacik block_group->key.offset) 5979f21246dSJosef Bacik break; 5989f21246dSJosef Bacik 5999f21246dSJosef Bacik if (key.type == BTRFS_EXTENT_ITEM_KEY || 6009f21246dSJosef Bacik key.type == BTRFS_METADATA_ITEM_KEY) { 6019f21246dSJosef Bacik total_found += add_new_free_space(block_group, last, 6029f21246dSJosef Bacik key.objectid); 6039f21246dSJosef Bacik if (key.type == BTRFS_METADATA_ITEM_KEY) 6049f21246dSJosef Bacik last = key.objectid + 6059f21246dSJosef Bacik fs_info->nodesize; 6069f21246dSJosef Bacik else 6079f21246dSJosef Bacik last = key.objectid + key.offset; 6089f21246dSJosef Bacik 6099f21246dSJosef Bacik if (total_found > CACHING_CTL_WAKE_UP) { 6109f21246dSJosef Bacik total_found = 0; 6119f21246dSJosef Bacik if (wakeup) 6129f21246dSJosef Bacik wake_up(&caching_ctl->wait); 6139f21246dSJosef Bacik } 6149f21246dSJosef Bacik } 6159f21246dSJosef Bacik path->slots[0]++; 6169f21246dSJosef Bacik } 6179f21246dSJosef Bacik ret = 0; 6189f21246dSJosef Bacik 6199f21246dSJosef Bacik total_found += add_new_free_space(block_group, last, 6209f21246dSJosef Bacik block_group->key.objectid + 6219f21246dSJosef Bacik block_group->key.offset); 6229f21246dSJosef Bacik caching_ctl->progress = (u64)-1; 6239f21246dSJosef Bacik 6249f21246dSJosef Bacik out: 6259f21246dSJosef Bacik btrfs_free_path(path); 6269f21246dSJosef Bacik return ret; 6279f21246dSJosef Bacik } 6289f21246dSJosef Bacik 6299f21246dSJosef Bacik static noinline void caching_thread(struct btrfs_work *work) 6309f21246dSJosef Bacik { 6319f21246dSJosef Bacik struct btrfs_block_group_cache *block_group; 6329f21246dSJosef Bacik struct btrfs_fs_info *fs_info; 6339f21246dSJosef Bacik struct btrfs_caching_control *caching_ctl; 6349f21246dSJosef Bacik int ret; 6359f21246dSJosef Bacik 6369f21246dSJosef Bacik caching_ctl = container_of(work, struct btrfs_caching_control, work); 6379f21246dSJosef Bacik block_group = caching_ctl->block_group; 6389f21246dSJosef Bacik fs_info = block_group->fs_info; 6399f21246dSJosef Bacik 6409f21246dSJosef Bacik mutex_lock(&caching_ctl->mutex); 6419f21246dSJosef Bacik down_read(&fs_info->commit_root_sem); 6429f21246dSJosef Bacik 6439f21246dSJosef Bacik if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 6449f21246dSJosef Bacik ret = load_free_space_tree(caching_ctl); 6459f21246dSJosef Bacik else 6469f21246dSJosef Bacik ret = load_extent_tree_free(caching_ctl); 6479f21246dSJosef Bacik 6489f21246dSJosef Bacik spin_lock(&block_group->lock); 6499f21246dSJosef Bacik block_group->caching_ctl = NULL; 6509f21246dSJosef Bacik block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; 6519f21246dSJosef Bacik spin_unlock(&block_group->lock); 6529f21246dSJosef Bacik 6539f21246dSJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 6549f21246dSJosef Bacik if (btrfs_should_fragment_free_space(block_group)) { 6559f21246dSJosef Bacik u64 bytes_used; 6569f21246dSJosef Bacik 6579f21246dSJosef Bacik spin_lock(&block_group->space_info->lock); 6589f21246dSJosef Bacik spin_lock(&block_group->lock); 6599f21246dSJosef Bacik bytes_used = block_group->key.offset - 6609f21246dSJosef Bacik btrfs_block_group_used(&block_group->item); 6619f21246dSJosef Bacik block_group->space_info->bytes_used += bytes_used >> 1; 6629f21246dSJosef Bacik spin_unlock(&block_group->lock); 6639f21246dSJosef Bacik spin_unlock(&block_group->space_info->lock); 6649f21246dSJosef Bacik btrfs_fragment_free_space(block_group); 6659f21246dSJosef Bacik } 6669f21246dSJosef Bacik #endif 6679f21246dSJosef Bacik 6689f21246dSJosef Bacik caching_ctl->progress = (u64)-1; 6699f21246dSJosef Bacik 6709f21246dSJosef Bacik up_read(&fs_info->commit_root_sem); 6719f21246dSJosef Bacik btrfs_free_excluded_extents(block_group); 6729f21246dSJosef Bacik mutex_unlock(&caching_ctl->mutex); 6739f21246dSJosef Bacik 6749f21246dSJosef Bacik wake_up(&caching_ctl->wait); 6759f21246dSJosef Bacik 6769f21246dSJosef Bacik btrfs_put_caching_control(caching_ctl); 6779f21246dSJosef Bacik btrfs_put_block_group(block_group); 6789f21246dSJosef Bacik } 6799f21246dSJosef Bacik 6809f21246dSJosef Bacik int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, 6819f21246dSJosef Bacik int load_cache_only) 6829f21246dSJosef Bacik { 6839f21246dSJosef Bacik DEFINE_WAIT(wait); 6849f21246dSJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 6859f21246dSJosef Bacik struct btrfs_caching_control *caching_ctl; 6869f21246dSJosef Bacik int ret = 0; 6879f21246dSJosef Bacik 6889f21246dSJosef Bacik caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); 6899f21246dSJosef Bacik if (!caching_ctl) 6909f21246dSJosef Bacik return -ENOMEM; 6919f21246dSJosef Bacik 6929f21246dSJosef Bacik INIT_LIST_HEAD(&caching_ctl->list); 6939f21246dSJosef Bacik mutex_init(&caching_ctl->mutex); 6949f21246dSJosef Bacik init_waitqueue_head(&caching_ctl->wait); 6959f21246dSJosef Bacik caching_ctl->block_group = cache; 6969f21246dSJosef Bacik caching_ctl->progress = cache->key.objectid; 6979f21246dSJosef Bacik refcount_set(&caching_ctl->count, 1); 6989f21246dSJosef Bacik btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, 6999f21246dSJosef Bacik caching_thread, NULL, NULL); 7009f21246dSJosef Bacik 7019f21246dSJosef Bacik spin_lock(&cache->lock); 7029f21246dSJosef Bacik /* 7039f21246dSJosef Bacik * This should be a rare occasion, but this could happen I think in the 7049f21246dSJosef Bacik * case where one thread starts to load the space cache info, and then 7059f21246dSJosef Bacik * some other thread starts a transaction commit which tries to do an 7069f21246dSJosef Bacik * allocation while the other thread is still loading the space cache 7079f21246dSJosef Bacik * info. The previous loop should have kept us from choosing this block 7089f21246dSJosef Bacik * group, but if we've moved to the state where we will wait on caching 7099f21246dSJosef Bacik * block groups we need to first check if we're doing a fast load here, 7109f21246dSJosef Bacik * so we can wait for it to finish, otherwise we could end up allocating 7119f21246dSJosef Bacik * from a block group who's cache gets evicted for one reason or 7129f21246dSJosef Bacik * another. 7139f21246dSJosef Bacik */ 7149f21246dSJosef Bacik while (cache->cached == BTRFS_CACHE_FAST) { 7159f21246dSJosef Bacik struct btrfs_caching_control *ctl; 7169f21246dSJosef Bacik 7179f21246dSJosef Bacik ctl = cache->caching_ctl; 7189f21246dSJosef Bacik refcount_inc(&ctl->count); 7199f21246dSJosef Bacik prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE); 7209f21246dSJosef Bacik spin_unlock(&cache->lock); 7219f21246dSJosef Bacik 7229f21246dSJosef Bacik schedule(); 7239f21246dSJosef Bacik 7249f21246dSJosef Bacik finish_wait(&ctl->wait, &wait); 7259f21246dSJosef Bacik btrfs_put_caching_control(ctl); 7269f21246dSJosef Bacik spin_lock(&cache->lock); 7279f21246dSJosef Bacik } 7289f21246dSJosef Bacik 7299f21246dSJosef Bacik if (cache->cached != BTRFS_CACHE_NO) { 7309f21246dSJosef Bacik spin_unlock(&cache->lock); 7319f21246dSJosef Bacik kfree(caching_ctl); 7329f21246dSJosef Bacik return 0; 7339f21246dSJosef Bacik } 7349f21246dSJosef Bacik WARN_ON(cache->caching_ctl); 7359f21246dSJosef Bacik cache->caching_ctl = caching_ctl; 7369f21246dSJosef Bacik cache->cached = BTRFS_CACHE_FAST; 7379f21246dSJosef Bacik spin_unlock(&cache->lock); 7389f21246dSJosef Bacik 7399f21246dSJosef Bacik if (btrfs_test_opt(fs_info, SPACE_CACHE)) { 7409f21246dSJosef Bacik mutex_lock(&caching_ctl->mutex); 7419f21246dSJosef Bacik ret = load_free_space_cache(cache); 7429f21246dSJosef Bacik 7439f21246dSJosef Bacik spin_lock(&cache->lock); 7449f21246dSJosef Bacik if (ret == 1) { 7459f21246dSJosef Bacik cache->caching_ctl = NULL; 7469f21246dSJosef Bacik cache->cached = BTRFS_CACHE_FINISHED; 7479f21246dSJosef Bacik cache->last_byte_to_unpin = (u64)-1; 7489f21246dSJosef Bacik caching_ctl->progress = (u64)-1; 7499f21246dSJosef Bacik } else { 7509f21246dSJosef Bacik if (load_cache_only) { 7519f21246dSJosef Bacik cache->caching_ctl = NULL; 7529f21246dSJosef Bacik cache->cached = BTRFS_CACHE_NO; 7539f21246dSJosef Bacik } else { 7549f21246dSJosef Bacik cache->cached = BTRFS_CACHE_STARTED; 7559f21246dSJosef Bacik cache->has_caching_ctl = 1; 7569f21246dSJosef Bacik } 7579f21246dSJosef Bacik } 7589f21246dSJosef Bacik spin_unlock(&cache->lock); 7599f21246dSJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 7609f21246dSJosef Bacik if (ret == 1 && 7619f21246dSJosef Bacik btrfs_should_fragment_free_space(cache)) { 7629f21246dSJosef Bacik u64 bytes_used; 7639f21246dSJosef Bacik 7649f21246dSJosef Bacik spin_lock(&cache->space_info->lock); 7659f21246dSJosef Bacik spin_lock(&cache->lock); 7669f21246dSJosef Bacik bytes_used = cache->key.offset - 7679f21246dSJosef Bacik btrfs_block_group_used(&cache->item); 7689f21246dSJosef Bacik cache->space_info->bytes_used += bytes_used >> 1; 7699f21246dSJosef Bacik spin_unlock(&cache->lock); 7709f21246dSJosef Bacik spin_unlock(&cache->space_info->lock); 7719f21246dSJosef Bacik btrfs_fragment_free_space(cache); 7729f21246dSJosef Bacik } 7739f21246dSJosef Bacik #endif 7749f21246dSJosef Bacik mutex_unlock(&caching_ctl->mutex); 7759f21246dSJosef Bacik 7769f21246dSJosef Bacik wake_up(&caching_ctl->wait); 7779f21246dSJosef Bacik if (ret == 1) { 7789f21246dSJosef Bacik btrfs_put_caching_control(caching_ctl); 7799f21246dSJosef Bacik btrfs_free_excluded_extents(cache); 7809f21246dSJosef Bacik return 0; 7819f21246dSJosef Bacik } 7829f21246dSJosef Bacik } else { 7839f21246dSJosef Bacik /* 7849f21246dSJosef Bacik * We're either using the free space tree or no caching at all. 7859f21246dSJosef Bacik * Set cached to the appropriate value and wakeup any waiters. 7869f21246dSJosef Bacik */ 7879f21246dSJosef Bacik spin_lock(&cache->lock); 7889f21246dSJosef Bacik if (load_cache_only) { 7899f21246dSJosef Bacik cache->caching_ctl = NULL; 7909f21246dSJosef Bacik cache->cached = BTRFS_CACHE_NO; 7919f21246dSJosef Bacik } else { 7929f21246dSJosef Bacik cache->cached = BTRFS_CACHE_STARTED; 7939f21246dSJosef Bacik cache->has_caching_ctl = 1; 7949f21246dSJosef Bacik } 7959f21246dSJosef Bacik spin_unlock(&cache->lock); 7969f21246dSJosef Bacik wake_up(&caching_ctl->wait); 7979f21246dSJosef Bacik } 7989f21246dSJosef Bacik 7999f21246dSJosef Bacik if (load_cache_only) { 8009f21246dSJosef Bacik btrfs_put_caching_control(caching_ctl); 8019f21246dSJosef Bacik return 0; 8029f21246dSJosef Bacik } 8039f21246dSJosef Bacik 8049f21246dSJosef Bacik down_write(&fs_info->commit_root_sem); 8059f21246dSJosef Bacik refcount_inc(&caching_ctl->count); 8069f21246dSJosef Bacik list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); 8079f21246dSJosef Bacik up_write(&fs_info->commit_root_sem); 8089f21246dSJosef Bacik 8099f21246dSJosef Bacik btrfs_get_block_group(cache); 8109f21246dSJosef Bacik 8119f21246dSJosef Bacik btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); 8129f21246dSJosef Bacik 8139f21246dSJosef Bacik return ret; 8149f21246dSJosef Bacik } 815e3e0520bSJosef Bacik 816e3e0520bSJosef Bacik static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 817e3e0520bSJosef Bacik { 818e3e0520bSJosef Bacik u64 extra_flags = chunk_to_extended(flags) & 819e3e0520bSJosef Bacik BTRFS_EXTENDED_PROFILE_MASK; 820e3e0520bSJosef Bacik 821e3e0520bSJosef Bacik write_seqlock(&fs_info->profiles_lock); 822e3e0520bSJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA) 823e3e0520bSJosef Bacik fs_info->avail_data_alloc_bits &= ~extra_flags; 824e3e0520bSJosef Bacik if (flags & BTRFS_BLOCK_GROUP_METADATA) 825e3e0520bSJosef Bacik fs_info->avail_metadata_alloc_bits &= ~extra_flags; 826e3e0520bSJosef Bacik if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 827e3e0520bSJosef Bacik fs_info->avail_system_alloc_bits &= ~extra_flags; 828e3e0520bSJosef Bacik write_sequnlock(&fs_info->profiles_lock); 829e3e0520bSJosef Bacik } 830e3e0520bSJosef Bacik 831e3e0520bSJosef Bacik /* 832e3e0520bSJosef Bacik * Clear incompat bits for the following feature(s): 833e3e0520bSJosef Bacik * 834e3e0520bSJosef Bacik * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group 835e3e0520bSJosef Bacik * in the whole filesystem 836e3e0520bSJosef Bacik */ 837e3e0520bSJosef Bacik static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) 838e3e0520bSJosef Bacik { 839e3e0520bSJosef Bacik if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) { 840e3e0520bSJosef Bacik struct list_head *head = &fs_info->space_info; 841e3e0520bSJosef Bacik struct btrfs_space_info *sinfo; 842e3e0520bSJosef Bacik 843e3e0520bSJosef Bacik list_for_each_entry_rcu(sinfo, head, list) { 844e3e0520bSJosef Bacik bool found = false; 845e3e0520bSJosef Bacik 846e3e0520bSJosef Bacik down_read(&sinfo->groups_sem); 847e3e0520bSJosef Bacik if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) 848e3e0520bSJosef Bacik found = true; 849e3e0520bSJosef Bacik if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) 850e3e0520bSJosef Bacik found = true; 851e3e0520bSJosef Bacik up_read(&sinfo->groups_sem); 852e3e0520bSJosef Bacik 853e3e0520bSJosef Bacik if (found) 854e3e0520bSJosef Bacik return; 855e3e0520bSJosef Bacik } 856e3e0520bSJosef Bacik btrfs_clear_fs_incompat(fs_info, RAID56); 857e3e0520bSJosef Bacik } 858e3e0520bSJosef Bacik } 859e3e0520bSJosef Bacik 860e3e0520bSJosef Bacik int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 861e3e0520bSJosef Bacik u64 group_start, struct extent_map *em) 862e3e0520bSJosef Bacik { 863e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 864e3e0520bSJosef Bacik struct btrfs_root *root = fs_info->extent_root; 865e3e0520bSJosef Bacik struct btrfs_path *path; 866e3e0520bSJosef Bacik struct btrfs_block_group_cache *block_group; 867e3e0520bSJosef Bacik struct btrfs_free_cluster *cluster; 868e3e0520bSJosef Bacik struct btrfs_root *tree_root = fs_info->tree_root; 869e3e0520bSJosef Bacik struct btrfs_key key; 870e3e0520bSJosef Bacik struct inode *inode; 871e3e0520bSJosef Bacik struct kobject *kobj = NULL; 872e3e0520bSJosef Bacik int ret; 873e3e0520bSJosef Bacik int index; 874e3e0520bSJosef Bacik int factor; 875e3e0520bSJosef Bacik struct btrfs_caching_control *caching_ctl = NULL; 876e3e0520bSJosef Bacik bool remove_em; 877e3e0520bSJosef Bacik bool remove_rsv = false; 878e3e0520bSJosef Bacik 879e3e0520bSJosef Bacik block_group = btrfs_lookup_block_group(fs_info, group_start); 880e3e0520bSJosef Bacik BUG_ON(!block_group); 881e3e0520bSJosef Bacik BUG_ON(!block_group->ro); 882e3e0520bSJosef Bacik 883e3e0520bSJosef Bacik trace_btrfs_remove_block_group(block_group); 884e3e0520bSJosef Bacik /* 885e3e0520bSJosef Bacik * Free the reserved super bytes from this block group before 886e3e0520bSJosef Bacik * remove it. 887e3e0520bSJosef Bacik */ 888e3e0520bSJosef Bacik btrfs_free_excluded_extents(block_group); 889e3e0520bSJosef Bacik btrfs_free_ref_tree_range(fs_info, block_group->key.objectid, 890e3e0520bSJosef Bacik block_group->key.offset); 891e3e0520bSJosef Bacik 892e3e0520bSJosef Bacik memcpy(&key, &block_group->key, sizeof(key)); 893e3e0520bSJosef Bacik index = btrfs_bg_flags_to_raid_index(block_group->flags); 894e3e0520bSJosef Bacik factor = btrfs_bg_type_to_factor(block_group->flags); 895e3e0520bSJosef Bacik 896e3e0520bSJosef Bacik /* make sure this block group isn't part of an allocation cluster */ 897e3e0520bSJosef Bacik cluster = &fs_info->data_alloc_cluster; 898e3e0520bSJosef Bacik spin_lock(&cluster->refill_lock); 899e3e0520bSJosef Bacik btrfs_return_cluster_to_free_space(block_group, cluster); 900e3e0520bSJosef Bacik spin_unlock(&cluster->refill_lock); 901e3e0520bSJosef Bacik 902e3e0520bSJosef Bacik /* 903e3e0520bSJosef Bacik * make sure this block group isn't part of a metadata 904e3e0520bSJosef Bacik * allocation cluster 905e3e0520bSJosef Bacik */ 906e3e0520bSJosef Bacik cluster = &fs_info->meta_alloc_cluster; 907e3e0520bSJosef Bacik spin_lock(&cluster->refill_lock); 908e3e0520bSJosef Bacik btrfs_return_cluster_to_free_space(block_group, cluster); 909e3e0520bSJosef Bacik spin_unlock(&cluster->refill_lock); 910e3e0520bSJosef Bacik 911e3e0520bSJosef Bacik path = btrfs_alloc_path(); 912e3e0520bSJosef Bacik if (!path) { 913e3e0520bSJosef Bacik ret = -ENOMEM; 914e3e0520bSJosef Bacik goto out; 915e3e0520bSJosef Bacik } 916e3e0520bSJosef Bacik 917e3e0520bSJosef Bacik /* 918e3e0520bSJosef Bacik * get the inode first so any iput calls done for the io_list 919e3e0520bSJosef Bacik * aren't the final iput (no unlinks allowed now) 920e3e0520bSJosef Bacik */ 921e3e0520bSJosef Bacik inode = lookup_free_space_inode(block_group, path); 922e3e0520bSJosef Bacik 923e3e0520bSJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 924e3e0520bSJosef Bacik /* 925e3e0520bSJosef Bacik * Make sure our free space cache IO is done before removing the 926e3e0520bSJosef Bacik * free space inode 927e3e0520bSJosef Bacik */ 928e3e0520bSJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 929e3e0520bSJosef Bacik if (!list_empty(&block_group->io_list)) { 930e3e0520bSJosef Bacik list_del_init(&block_group->io_list); 931e3e0520bSJosef Bacik 932e3e0520bSJosef Bacik WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); 933e3e0520bSJosef Bacik 934e3e0520bSJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 935e3e0520bSJosef Bacik btrfs_wait_cache_io(trans, block_group, path); 936e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 937e3e0520bSJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 938e3e0520bSJosef Bacik } 939e3e0520bSJosef Bacik 940e3e0520bSJosef Bacik if (!list_empty(&block_group->dirty_list)) { 941e3e0520bSJosef Bacik list_del_init(&block_group->dirty_list); 942e3e0520bSJosef Bacik remove_rsv = true; 943e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 944e3e0520bSJosef Bacik } 945e3e0520bSJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 946e3e0520bSJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 947e3e0520bSJosef Bacik 948e3e0520bSJosef Bacik if (!IS_ERR(inode)) { 949e3e0520bSJosef Bacik ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 950e3e0520bSJosef Bacik if (ret) { 951e3e0520bSJosef Bacik btrfs_add_delayed_iput(inode); 952e3e0520bSJosef Bacik goto out; 953e3e0520bSJosef Bacik } 954e3e0520bSJosef Bacik clear_nlink(inode); 955e3e0520bSJosef Bacik /* One for the block groups ref */ 956e3e0520bSJosef Bacik spin_lock(&block_group->lock); 957e3e0520bSJosef Bacik if (block_group->iref) { 958e3e0520bSJosef Bacik block_group->iref = 0; 959e3e0520bSJosef Bacik block_group->inode = NULL; 960e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 961e3e0520bSJosef Bacik iput(inode); 962e3e0520bSJosef Bacik } else { 963e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 964e3e0520bSJosef Bacik } 965e3e0520bSJosef Bacik /* One for our lookup ref */ 966e3e0520bSJosef Bacik btrfs_add_delayed_iput(inode); 967e3e0520bSJosef Bacik } 968e3e0520bSJosef Bacik 969e3e0520bSJosef Bacik key.objectid = BTRFS_FREE_SPACE_OBJECTID; 970e3e0520bSJosef Bacik key.offset = block_group->key.objectid; 971e3e0520bSJosef Bacik key.type = 0; 972e3e0520bSJosef Bacik 973e3e0520bSJosef Bacik ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); 974e3e0520bSJosef Bacik if (ret < 0) 975e3e0520bSJosef Bacik goto out; 976e3e0520bSJosef Bacik if (ret > 0) 977e3e0520bSJosef Bacik btrfs_release_path(path); 978e3e0520bSJosef Bacik if (ret == 0) { 979e3e0520bSJosef Bacik ret = btrfs_del_item(trans, tree_root, path); 980e3e0520bSJosef Bacik if (ret) 981e3e0520bSJosef Bacik goto out; 982e3e0520bSJosef Bacik btrfs_release_path(path); 983e3e0520bSJosef Bacik } 984e3e0520bSJosef Bacik 985e3e0520bSJosef Bacik spin_lock(&fs_info->block_group_cache_lock); 986e3e0520bSJosef Bacik rb_erase(&block_group->cache_node, 987e3e0520bSJosef Bacik &fs_info->block_group_cache_tree); 988e3e0520bSJosef Bacik RB_CLEAR_NODE(&block_group->cache_node); 989e3e0520bSJosef Bacik 990e3e0520bSJosef Bacik if (fs_info->first_logical_byte == block_group->key.objectid) 991e3e0520bSJosef Bacik fs_info->first_logical_byte = (u64)-1; 992e3e0520bSJosef Bacik spin_unlock(&fs_info->block_group_cache_lock); 993e3e0520bSJosef Bacik 994e3e0520bSJosef Bacik down_write(&block_group->space_info->groups_sem); 995e3e0520bSJosef Bacik /* 996e3e0520bSJosef Bacik * we must use list_del_init so people can check to see if they 997e3e0520bSJosef Bacik * are still on the list after taking the semaphore 998e3e0520bSJosef Bacik */ 999e3e0520bSJosef Bacik list_del_init(&block_group->list); 1000e3e0520bSJosef Bacik if (list_empty(&block_group->space_info->block_groups[index])) { 1001e3e0520bSJosef Bacik kobj = block_group->space_info->block_group_kobjs[index]; 1002e3e0520bSJosef Bacik block_group->space_info->block_group_kobjs[index] = NULL; 1003e3e0520bSJosef Bacik clear_avail_alloc_bits(fs_info, block_group->flags); 1004e3e0520bSJosef Bacik } 1005e3e0520bSJosef Bacik up_write(&block_group->space_info->groups_sem); 1006e3e0520bSJosef Bacik clear_incompat_bg_bits(fs_info, block_group->flags); 1007e3e0520bSJosef Bacik if (kobj) { 1008e3e0520bSJosef Bacik kobject_del(kobj); 1009e3e0520bSJosef Bacik kobject_put(kobj); 1010e3e0520bSJosef Bacik } 1011e3e0520bSJosef Bacik 1012e3e0520bSJosef Bacik if (block_group->has_caching_ctl) 1013e3e0520bSJosef Bacik caching_ctl = btrfs_get_caching_control(block_group); 1014e3e0520bSJosef Bacik if (block_group->cached == BTRFS_CACHE_STARTED) 1015e3e0520bSJosef Bacik btrfs_wait_block_group_cache_done(block_group); 1016e3e0520bSJosef Bacik if (block_group->has_caching_ctl) { 1017e3e0520bSJosef Bacik down_write(&fs_info->commit_root_sem); 1018e3e0520bSJosef Bacik if (!caching_ctl) { 1019e3e0520bSJosef Bacik struct btrfs_caching_control *ctl; 1020e3e0520bSJosef Bacik 1021e3e0520bSJosef Bacik list_for_each_entry(ctl, 1022e3e0520bSJosef Bacik &fs_info->caching_block_groups, list) 1023e3e0520bSJosef Bacik if (ctl->block_group == block_group) { 1024e3e0520bSJosef Bacik caching_ctl = ctl; 1025e3e0520bSJosef Bacik refcount_inc(&caching_ctl->count); 1026e3e0520bSJosef Bacik break; 1027e3e0520bSJosef Bacik } 1028e3e0520bSJosef Bacik } 1029e3e0520bSJosef Bacik if (caching_ctl) 1030e3e0520bSJosef Bacik list_del_init(&caching_ctl->list); 1031e3e0520bSJosef Bacik up_write(&fs_info->commit_root_sem); 1032e3e0520bSJosef Bacik if (caching_ctl) { 1033e3e0520bSJosef Bacik /* Once for the caching bgs list and once for us. */ 1034e3e0520bSJosef Bacik btrfs_put_caching_control(caching_ctl); 1035e3e0520bSJosef Bacik btrfs_put_caching_control(caching_ctl); 1036e3e0520bSJosef Bacik } 1037e3e0520bSJosef Bacik } 1038e3e0520bSJosef Bacik 1039e3e0520bSJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 1040e3e0520bSJosef Bacik WARN_ON(!list_empty(&block_group->dirty_list)); 1041e3e0520bSJosef Bacik WARN_ON(!list_empty(&block_group->io_list)); 1042e3e0520bSJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 1043e3e0520bSJosef Bacik 1044e3e0520bSJosef Bacik btrfs_remove_free_space_cache(block_group); 1045e3e0520bSJosef Bacik 1046e3e0520bSJosef Bacik spin_lock(&block_group->space_info->lock); 1047e3e0520bSJosef Bacik list_del_init(&block_group->ro_list); 1048e3e0520bSJosef Bacik 1049e3e0520bSJosef Bacik if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 1050e3e0520bSJosef Bacik WARN_ON(block_group->space_info->total_bytes 1051e3e0520bSJosef Bacik < block_group->key.offset); 1052e3e0520bSJosef Bacik WARN_ON(block_group->space_info->bytes_readonly 1053e3e0520bSJosef Bacik < block_group->key.offset); 1054e3e0520bSJosef Bacik WARN_ON(block_group->space_info->disk_total 1055e3e0520bSJosef Bacik < block_group->key.offset * factor); 1056e3e0520bSJosef Bacik } 1057e3e0520bSJosef Bacik block_group->space_info->total_bytes -= block_group->key.offset; 1058e3e0520bSJosef Bacik block_group->space_info->bytes_readonly -= block_group->key.offset; 1059e3e0520bSJosef Bacik block_group->space_info->disk_total -= block_group->key.offset * factor; 1060e3e0520bSJosef Bacik 1061e3e0520bSJosef Bacik spin_unlock(&block_group->space_info->lock); 1062e3e0520bSJosef Bacik 1063e3e0520bSJosef Bacik memcpy(&key, &block_group->key, sizeof(key)); 1064e3e0520bSJosef Bacik 1065e3e0520bSJosef Bacik mutex_lock(&fs_info->chunk_mutex); 1066e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1067e3e0520bSJosef Bacik block_group->removed = 1; 1068e3e0520bSJosef Bacik /* 1069e3e0520bSJosef Bacik * At this point trimming can't start on this block group, because we 1070e3e0520bSJosef Bacik * removed the block group from the tree fs_info->block_group_cache_tree 1071e3e0520bSJosef Bacik * so no one can't find it anymore and even if someone already got this 1072e3e0520bSJosef Bacik * block group before we removed it from the rbtree, they have already 1073e3e0520bSJosef Bacik * incremented block_group->trimming - if they didn't, they won't find 1074e3e0520bSJosef Bacik * any free space entries because we already removed them all when we 1075e3e0520bSJosef Bacik * called btrfs_remove_free_space_cache(). 1076e3e0520bSJosef Bacik * 1077e3e0520bSJosef Bacik * And we must not remove the extent map from the fs_info->mapping_tree 1078e3e0520bSJosef Bacik * to prevent the same logical address range and physical device space 1079e3e0520bSJosef Bacik * ranges from being reused for a new block group. This is because our 1080e3e0520bSJosef Bacik * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is 1081e3e0520bSJosef Bacik * completely transactionless, so while it is trimming a range the 1082e3e0520bSJosef Bacik * currently running transaction might finish and a new one start, 1083e3e0520bSJosef Bacik * allowing for new block groups to be created that can reuse the same 1084e3e0520bSJosef Bacik * physical device locations unless we take this special care. 1085e3e0520bSJosef Bacik * 1086e3e0520bSJosef Bacik * There may also be an implicit trim operation if the file system 1087e3e0520bSJosef Bacik * is mounted with -odiscard. The same protections must remain 1088e3e0520bSJosef Bacik * in place until the extents have been discarded completely when 1089e3e0520bSJosef Bacik * the transaction commit has completed. 1090e3e0520bSJosef Bacik */ 1091e3e0520bSJosef Bacik remove_em = (atomic_read(&block_group->trimming) == 0); 1092e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1093e3e0520bSJosef Bacik 1094e3e0520bSJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 1095e3e0520bSJosef Bacik 1096e3e0520bSJosef Bacik ret = remove_block_group_free_space(trans, block_group); 1097e3e0520bSJosef Bacik if (ret) 1098e3e0520bSJosef Bacik goto out; 1099e3e0520bSJosef Bacik 1100e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1101e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1102e3e0520bSJosef Bacik 1103e3e0520bSJosef Bacik ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1104e3e0520bSJosef Bacik if (ret > 0) 1105e3e0520bSJosef Bacik ret = -EIO; 1106e3e0520bSJosef Bacik if (ret < 0) 1107e3e0520bSJosef Bacik goto out; 1108e3e0520bSJosef Bacik 1109e3e0520bSJosef Bacik ret = btrfs_del_item(trans, root, path); 1110e3e0520bSJosef Bacik if (ret) 1111e3e0520bSJosef Bacik goto out; 1112e3e0520bSJosef Bacik 1113e3e0520bSJosef Bacik if (remove_em) { 1114e3e0520bSJosef Bacik struct extent_map_tree *em_tree; 1115e3e0520bSJosef Bacik 1116e3e0520bSJosef Bacik em_tree = &fs_info->mapping_tree; 1117e3e0520bSJosef Bacik write_lock(&em_tree->lock); 1118e3e0520bSJosef Bacik remove_extent_mapping(em_tree, em); 1119e3e0520bSJosef Bacik write_unlock(&em_tree->lock); 1120e3e0520bSJosef Bacik /* once for the tree */ 1121e3e0520bSJosef Bacik free_extent_map(em); 1122e3e0520bSJosef Bacik } 1123e3e0520bSJosef Bacik out: 1124e3e0520bSJosef Bacik if (remove_rsv) 1125e3e0520bSJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 1126e3e0520bSJosef Bacik btrfs_free_path(path); 1127e3e0520bSJosef Bacik return ret; 1128e3e0520bSJosef Bacik } 1129e3e0520bSJosef Bacik 1130e3e0520bSJosef Bacik struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 1131e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info, const u64 chunk_offset) 1132e3e0520bSJosef Bacik { 1133e3e0520bSJosef Bacik struct extent_map_tree *em_tree = &fs_info->mapping_tree; 1134e3e0520bSJosef Bacik struct extent_map *em; 1135e3e0520bSJosef Bacik struct map_lookup *map; 1136e3e0520bSJosef Bacik unsigned int num_items; 1137e3e0520bSJosef Bacik 1138e3e0520bSJosef Bacik read_lock(&em_tree->lock); 1139e3e0520bSJosef Bacik em = lookup_extent_mapping(em_tree, chunk_offset, 1); 1140e3e0520bSJosef Bacik read_unlock(&em_tree->lock); 1141e3e0520bSJosef Bacik ASSERT(em && em->start == chunk_offset); 1142e3e0520bSJosef Bacik 1143e3e0520bSJosef Bacik /* 1144e3e0520bSJosef Bacik * We need to reserve 3 + N units from the metadata space info in order 1145e3e0520bSJosef Bacik * to remove a block group (done at btrfs_remove_chunk() and at 1146e3e0520bSJosef Bacik * btrfs_remove_block_group()), which are used for: 1147e3e0520bSJosef Bacik * 1148e3e0520bSJosef Bacik * 1 unit for adding the free space inode's orphan (located in the tree 1149e3e0520bSJosef Bacik * of tree roots). 1150e3e0520bSJosef Bacik * 1 unit for deleting the block group item (located in the extent 1151e3e0520bSJosef Bacik * tree). 1152e3e0520bSJosef Bacik * 1 unit for deleting the free space item (located in tree of tree 1153e3e0520bSJosef Bacik * roots). 1154e3e0520bSJosef Bacik * N units for deleting N device extent items corresponding to each 1155e3e0520bSJosef Bacik * stripe (located in the device tree). 1156e3e0520bSJosef Bacik * 1157e3e0520bSJosef Bacik * In order to remove a block group we also need to reserve units in the 1158e3e0520bSJosef Bacik * system space info in order to update the chunk tree (update one or 1159e3e0520bSJosef Bacik * more device items and remove one chunk item), but this is done at 1160e3e0520bSJosef Bacik * btrfs_remove_chunk() through a call to check_system_chunk(). 1161e3e0520bSJosef Bacik */ 1162e3e0520bSJosef Bacik map = em->map_lookup; 1163e3e0520bSJosef Bacik num_items = 3 + map->num_stripes; 1164e3e0520bSJosef Bacik free_extent_map(em); 1165e3e0520bSJosef Bacik 1166e3e0520bSJosef Bacik return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, 1167e3e0520bSJosef Bacik num_items, 1); 1168e3e0520bSJosef Bacik } 1169e3e0520bSJosef Bacik 1170e3e0520bSJosef Bacik /* 117126ce2095SJosef Bacik * Mark block group @cache read-only, so later write won't happen to block 117226ce2095SJosef Bacik * group @cache. 117326ce2095SJosef Bacik * 117426ce2095SJosef Bacik * If @force is not set, this function will only mark the block group readonly 117526ce2095SJosef Bacik * if we have enough free space (1M) in other metadata/system block groups. 117626ce2095SJosef Bacik * If @force is not set, this function will mark the block group readonly 117726ce2095SJosef Bacik * without checking free space. 117826ce2095SJosef Bacik * 117926ce2095SJosef Bacik * NOTE: This function doesn't care if other block groups can contain all the 118026ce2095SJosef Bacik * data in this block group. That check should be done by relocation routine, 118126ce2095SJosef Bacik * not this function. 118226ce2095SJosef Bacik */ 118326ce2095SJosef Bacik int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) 118426ce2095SJosef Bacik { 118526ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info; 118626ce2095SJosef Bacik u64 num_bytes; 118726ce2095SJosef Bacik u64 sinfo_used; 118826ce2095SJosef Bacik u64 min_allocable_bytes; 118926ce2095SJosef Bacik int ret = -ENOSPC; 119026ce2095SJosef Bacik 119126ce2095SJosef Bacik /* 119226ce2095SJosef Bacik * We need some metadata space and system metadata space for 119326ce2095SJosef Bacik * allocating chunks in some corner cases until we force to set 119426ce2095SJosef Bacik * it to be readonly. 119526ce2095SJosef Bacik */ 119626ce2095SJosef Bacik if ((sinfo->flags & 119726ce2095SJosef Bacik (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) && 119826ce2095SJosef Bacik !force) 119926ce2095SJosef Bacik min_allocable_bytes = SZ_1M; 120026ce2095SJosef Bacik else 120126ce2095SJosef Bacik min_allocable_bytes = 0; 120226ce2095SJosef Bacik 120326ce2095SJosef Bacik spin_lock(&sinfo->lock); 120426ce2095SJosef Bacik spin_lock(&cache->lock); 120526ce2095SJosef Bacik 120626ce2095SJosef Bacik if (cache->ro) { 120726ce2095SJosef Bacik cache->ro++; 120826ce2095SJosef Bacik ret = 0; 120926ce2095SJosef Bacik goto out; 121026ce2095SJosef Bacik } 121126ce2095SJosef Bacik 121226ce2095SJosef Bacik num_bytes = cache->key.offset - cache->reserved - cache->pinned - 121326ce2095SJosef Bacik cache->bytes_super - btrfs_block_group_used(&cache->item); 121426ce2095SJosef Bacik sinfo_used = btrfs_space_info_used(sinfo, true); 121526ce2095SJosef Bacik 121626ce2095SJosef Bacik /* 121726ce2095SJosef Bacik * sinfo_used + num_bytes should always <= sinfo->total_bytes. 121826ce2095SJosef Bacik * 121926ce2095SJosef Bacik * Here we make sure if we mark this bg RO, we still have enough 122026ce2095SJosef Bacik * free space as buffer (if min_allocable_bytes is not 0). 122126ce2095SJosef Bacik */ 122226ce2095SJosef Bacik if (sinfo_used + num_bytes + min_allocable_bytes <= 122326ce2095SJosef Bacik sinfo->total_bytes) { 122426ce2095SJosef Bacik sinfo->bytes_readonly += num_bytes; 122526ce2095SJosef Bacik cache->ro++; 122626ce2095SJosef Bacik list_add_tail(&cache->ro_list, &sinfo->ro_bgs); 122726ce2095SJosef Bacik ret = 0; 122826ce2095SJosef Bacik } 122926ce2095SJosef Bacik out: 123026ce2095SJosef Bacik spin_unlock(&cache->lock); 123126ce2095SJosef Bacik spin_unlock(&sinfo->lock); 123226ce2095SJosef Bacik if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { 123326ce2095SJosef Bacik btrfs_info(cache->fs_info, 123426ce2095SJosef Bacik "unable to make block group %llu ro", 123526ce2095SJosef Bacik cache->key.objectid); 123626ce2095SJosef Bacik btrfs_info(cache->fs_info, 123726ce2095SJosef Bacik "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu", 123826ce2095SJosef Bacik sinfo_used, num_bytes, min_allocable_bytes); 123926ce2095SJosef Bacik btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); 124026ce2095SJosef Bacik } 124126ce2095SJosef Bacik return ret; 124226ce2095SJosef Bacik } 124326ce2095SJosef Bacik 124426ce2095SJosef Bacik /* 1245e3e0520bSJosef Bacik * Process the unused_bgs list and remove any that don't have any allocated 1246e3e0520bSJosef Bacik * space inside of them. 1247e3e0520bSJosef Bacik */ 1248e3e0520bSJosef Bacik void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1249e3e0520bSJosef Bacik { 1250e3e0520bSJosef Bacik struct btrfs_block_group_cache *block_group; 1251e3e0520bSJosef Bacik struct btrfs_space_info *space_info; 1252e3e0520bSJosef Bacik struct btrfs_trans_handle *trans; 1253e3e0520bSJosef Bacik int ret = 0; 1254e3e0520bSJosef Bacik 1255e3e0520bSJosef Bacik if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1256e3e0520bSJosef Bacik return; 1257e3e0520bSJosef Bacik 1258e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1259e3e0520bSJosef Bacik while (!list_empty(&fs_info->unused_bgs)) { 1260e3e0520bSJosef Bacik u64 start, end; 1261e3e0520bSJosef Bacik int trimming; 1262e3e0520bSJosef Bacik 1263e3e0520bSJosef Bacik block_group = list_first_entry(&fs_info->unused_bgs, 1264e3e0520bSJosef Bacik struct btrfs_block_group_cache, 1265e3e0520bSJosef Bacik bg_list); 1266e3e0520bSJosef Bacik list_del_init(&block_group->bg_list); 1267e3e0520bSJosef Bacik 1268e3e0520bSJosef Bacik space_info = block_group->space_info; 1269e3e0520bSJosef Bacik 1270e3e0520bSJosef Bacik if (ret || btrfs_mixed_space_info(space_info)) { 1271e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1272e3e0520bSJosef Bacik continue; 1273e3e0520bSJosef Bacik } 1274e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1275e3e0520bSJosef Bacik 1276e3e0520bSJosef Bacik mutex_lock(&fs_info->delete_unused_bgs_mutex); 1277e3e0520bSJosef Bacik 1278e3e0520bSJosef Bacik /* Don't want to race with allocators so take the groups_sem */ 1279e3e0520bSJosef Bacik down_write(&space_info->groups_sem); 1280e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1281e3e0520bSJosef Bacik if (block_group->reserved || block_group->pinned || 1282e3e0520bSJosef Bacik btrfs_block_group_used(&block_group->item) || 1283e3e0520bSJosef Bacik block_group->ro || 1284e3e0520bSJosef Bacik list_is_singular(&block_group->list)) { 1285e3e0520bSJosef Bacik /* 1286e3e0520bSJosef Bacik * We want to bail if we made new allocations or have 1287e3e0520bSJosef Bacik * outstanding allocations in this block group. We do 1288e3e0520bSJosef Bacik * the ro check in case balance is currently acting on 1289e3e0520bSJosef Bacik * this block group. 1290e3e0520bSJosef Bacik */ 1291e3e0520bSJosef Bacik trace_btrfs_skip_unused_block_group(block_group); 1292e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1293e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1294e3e0520bSJosef Bacik goto next; 1295e3e0520bSJosef Bacik } 1296e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1297e3e0520bSJosef Bacik 1298e3e0520bSJosef Bacik /* We don't want to force the issue, only flip if it's ok. */ 1299e3e0520bSJosef Bacik ret = __btrfs_inc_block_group_ro(block_group, 0); 1300e3e0520bSJosef Bacik up_write(&space_info->groups_sem); 1301e3e0520bSJosef Bacik if (ret < 0) { 1302e3e0520bSJosef Bacik ret = 0; 1303e3e0520bSJosef Bacik goto next; 1304e3e0520bSJosef Bacik } 1305e3e0520bSJosef Bacik 1306e3e0520bSJosef Bacik /* 1307e3e0520bSJosef Bacik * Want to do this before we do anything else so we can recover 1308e3e0520bSJosef Bacik * properly if we fail to join the transaction. 1309e3e0520bSJosef Bacik */ 1310e3e0520bSJosef Bacik trans = btrfs_start_trans_remove_block_group(fs_info, 1311e3e0520bSJosef Bacik block_group->key.objectid); 1312e3e0520bSJosef Bacik if (IS_ERR(trans)) { 1313e3e0520bSJosef Bacik btrfs_dec_block_group_ro(block_group); 1314e3e0520bSJosef Bacik ret = PTR_ERR(trans); 1315e3e0520bSJosef Bacik goto next; 1316e3e0520bSJosef Bacik } 1317e3e0520bSJosef Bacik 1318e3e0520bSJosef Bacik /* 1319e3e0520bSJosef Bacik * We could have pending pinned extents for this block group, 1320e3e0520bSJosef Bacik * just delete them, we don't care about them anymore. 1321e3e0520bSJosef Bacik */ 1322e3e0520bSJosef Bacik start = block_group->key.objectid; 1323e3e0520bSJosef Bacik end = start + block_group->key.offset - 1; 1324e3e0520bSJosef Bacik /* 1325e3e0520bSJosef Bacik * Hold the unused_bg_unpin_mutex lock to avoid racing with 1326e3e0520bSJosef Bacik * btrfs_finish_extent_commit(). If we are at transaction N, 1327e3e0520bSJosef Bacik * another task might be running finish_extent_commit() for the 1328e3e0520bSJosef Bacik * previous transaction N - 1, and have seen a range belonging 1329e3e0520bSJosef Bacik * to the block group in freed_extents[] before we were able to 1330e3e0520bSJosef Bacik * clear the whole block group range from freed_extents[]. This 1331e3e0520bSJosef Bacik * means that task can lookup for the block group after we 1332e3e0520bSJosef Bacik * unpinned it from freed_extents[] and removed it, leading to 1333e3e0520bSJosef Bacik * a BUG_ON() at btrfs_unpin_extent_range(). 1334e3e0520bSJosef Bacik */ 1335e3e0520bSJosef Bacik mutex_lock(&fs_info->unused_bg_unpin_mutex); 1336e3e0520bSJosef Bacik ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, 1337e3e0520bSJosef Bacik EXTENT_DIRTY); 1338e3e0520bSJosef Bacik if (ret) { 1339e3e0520bSJosef Bacik mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1340e3e0520bSJosef Bacik btrfs_dec_block_group_ro(block_group); 1341e3e0520bSJosef Bacik goto end_trans; 1342e3e0520bSJosef Bacik } 1343e3e0520bSJosef Bacik ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, 1344e3e0520bSJosef Bacik EXTENT_DIRTY); 1345e3e0520bSJosef Bacik if (ret) { 1346e3e0520bSJosef Bacik mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1347e3e0520bSJosef Bacik btrfs_dec_block_group_ro(block_group); 1348e3e0520bSJosef Bacik goto end_trans; 1349e3e0520bSJosef Bacik } 1350e3e0520bSJosef Bacik mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1351e3e0520bSJosef Bacik 1352e3e0520bSJosef Bacik /* Reset pinned so btrfs_put_block_group doesn't complain */ 1353e3e0520bSJosef Bacik spin_lock(&space_info->lock); 1354e3e0520bSJosef Bacik spin_lock(&block_group->lock); 1355e3e0520bSJosef Bacik 1356e3e0520bSJosef Bacik btrfs_space_info_update_bytes_pinned(fs_info, space_info, 1357e3e0520bSJosef Bacik -block_group->pinned); 1358e3e0520bSJosef Bacik space_info->bytes_readonly += block_group->pinned; 1359e3e0520bSJosef Bacik percpu_counter_add_batch(&space_info->total_bytes_pinned, 1360e3e0520bSJosef Bacik -block_group->pinned, 1361e3e0520bSJosef Bacik BTRFS_TOTAL_BYTES_PINNED_BATCH); 1362e3e0520bSJosef Bacik block_group->pinned = 0; 1363e3e0520bSJosef Bacik 1364e3e0520bSJosef Bacik spin_unlock(&block_group->lock); 1365e3e0520bSJosef Bacik spin_unlock(&space_info->lock); 1366e3e0520bSJosef Bacik 1367e3e0520bSJosef Bacik /* DISCARD can flip during remount */ 1368e3e0520bSJosef Bacik trimming = btrfs_test_opt(fs_info, DISCARD); 1369e3e0520bSJosef Bacik 1370e3e0520bSJosef Bacik /* Implicit trim during transaction commit. */ 1371e3e0520bSJosef Bacik if (trimming) 1372e3e0520bSJosef Bacik btrfs_get_block_group_trimming(block_group); 1373e3e0520bSJosef Bacik 1374e3e0520bSJosef Bacik /* 1375e3e0520bSJosef Bacik * Btrfs_remove_chunk will abort the transaction if things go 1376e3e0520bSJosef Bacik * horribly wrong. 1377e3e0520bSJosef Bacik */ 1378e3e0520bSJosef Bacik ret = btrfs_remove_chunk(trans, block_group->key.objectid); 1379e3e0520bSJosef Bacik 1380e3e0520bSJosef Bacik if (ret) { 1381e3e0520bSJosef Bacik if (trimming) 1382e3e0520bSJosef Bacik btrfs_put_block_group_trimming(block_group); 1383e3e0520bSJosef Bacik goto end_trans; 1384e3e0520bSJosef Bacik } 1385e3e0520bSJosef Bacik 1386e3e0520bSJosef Bacik /* 1387e3e0520bSJosef Bacik * If we're not mounted with -odiscard, we can just forget 1388e3e0520bSJosef Bacik * about this block group. Otherwise we'll need to wait 1389e3e0520bSJosef Bacik * until transaction commit to do the actual discard. 1390e3e0520bSJosef Bacik */ 1391e3e0520bSJosef Bacik if (trimming) { 1392e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1393e3e0520bSJosef Bacik /* 1394e3e0520bSJosef Bacik * A concurrent scrub might have added us to the list 1395e3e0520bSJosef Bacik * fs_info->unused_bgs, so use a list_move operation 1396e3e0520bSJosef Bacik * to add the block group to the deleted_bgs list. 1397e3e0520bSJosef Bacik */ 1398e3e0520bSJosef Bacik list_move(&block_group->bg_list, 1399e3e0520bSJosef Bacik &trans->transaction->deleted_bgs); 1400e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1401e3e0520bSJosef Bacik btrfs_get_block_group(block_group); 1402e3e0520bSJosef Bacik } 1403e3e0520bSJosef Bacik end_trans: 1404e3e0520bSJosef Bacik btrfs_end_transaction(trans); 1405e3e0520bSJosef Bacik next: 1406e3e0520bSJosef Bacik mutex_unlock(&fs_info->delete_unused_bgs_mutex); 1407e3e0520bSJosef Bacik btrfs_put_block_group(block_group); 1408e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1409e3e0520bSJosef Bacik } 1410e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1411e3e0520bSJosef Bacik } 1412e3e0520bSJosef Bacik 1413e3e0520bSJosef Bacik void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg) 1414e3e0520bSJosef Bacik { 1415e3e0520bSJosef Bacik struct btrfs_fs_info *fs_info = bg->fs_info; 1416e3e0520bSJosef Bacik 1417e3e0520bSJosef Bacik spin_lock(&fs_info->unused_bgs_lock); 1418e3e0520bSJosef Bacik if (list_empty(&bg->bg_list)) { 1419e3e0520bSJosef Bacik btrfs_get_block_group(bg); 1420e3e0520bSJosef Bacik trace_btrfs_add_unused_block_group(bg); 1421e3e0520bSJosef Bacik list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 1422e3e0520bSJosef Bacik } 1423e3e0520bSJosef Bacik spin_unlock(&fs_info->unused_bgs_lock); 1424e3e0520bSJosef Bacik } 14254358d963SJosef Bacik 14264358d963SJosef Bacik static int find_first_block_group(struct btrfs_fs_info *fs_info, 14274358d963SJosef Bacik struct btrfs_path *path, 14284358d963SJosef Bacik struct btrfs_key *key) 14294358d963SJosef Bacik { 14304358d963SJosef Bacik struct btrfs_root *root = fs_info->extent_root; 14314358d963SJosef Bacik int ret = 0; 14324358d963SJosef Bacik struct btrfs_key found_key; 14334358d963SJosef Bacik struct extent_buffer *leaf; 14344358d963SJosef Bacik struct btrfs_block_group_item bg; 14354358d963SJosef Bacik u64 flags; 14364358d963SJosef Bacik int slot; 14374358d963SJosef Bacik 14384358d963SJosef Bacik ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 14394358d963SJosef Bacik if (ret < 0) 14404358d963SJosef Bacik goto out; 14414358d963SJosef Bacik 14424358d963SJosef Bacik while (1) { 14434358d963SJosef Bacik slot = path->slots[0]; 14444358d963SJosef Bacik leaf = path->nodes[0]; 14454358d963SJosef Bacik if (slot >= btrfs_header_nritems(leaf)) { 14464358d963SJosef Bacik ret = btrfs_next_leaf(root, path); 14474358d963SJosef Bacik if (ret == 0) 14484358d963SJosef Bacik continue; 14494358d963SJosef Bacik if (ret < 0) 14504358d963SJosef Bacik goto out; 14514358d963SJosef Bacik break; 14524358d963SJosef Bacik } 14534358d963SJosef Bacik btrfs_item_key_to_cpu(leaf, &found_key, slot); 14544358d963SJosef Bacik 14554358d963SJosef Bacik if (found_key.objectid >= key->objectid && 14564358d963SJosef Bacik found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 14574358d963SJosef Bacik struct extent_map_tree *em_tree; 14584358d963SJosef Bacik struct extent_map *em; 14594358d963SJosef Bacik 14604358d963SJosef Bacik em_tree = &root->fs_info->mapping_tree; 14614358d963SJosef Bacik read_lock(&em_tree->lock); 14624358d963SJosef Bacik em = lookup_extent_mapping(em_tree, found_key.objectid, 14634358d963SJosef Bacik found_key.offset); 14644358d963SJosef Bacik read_unlock(&em_tree->lock); 14654358d963SJosef Bacik if (!em) { 14664358d963SJosef Bacik btrfs_err(fs_info, 14674358d963SJosef Bacik "logical %llu len %llu found bg but no related chunk", 14684358d963SJosef Bacik found_key.objectid, found_key.offset); 14694358d963SJosef Bacik ret = -ENOENT; 14704358d963SJosef Bacik } else if (em->start != found_key.objectid || 14714358d963SJosef Bacik em->len != found_key.offset) { 14724358d963SJosef Bacik btrfs_err(fs_info, 14734358d963SJosef Bacik "block group %llu len %llu mismatch with chunk %llu len %llu", 14744358d963SJosef Bacik found_key.objectid, found_key.offset, 14754358d963SJosef Bacik em->start, em->len); 14764358d963SJosef Bacik ret = -EUCLEAN; 14774358d963SJosef Bacik } else { 14784358d963SJosef Bacik read_extent_buffer(leaf, &bg, 14794358d963SJosef Bacik btrfs_item_ptr_offset(leaf, slot), 14804358d963SJosef Bacik sizeof(bg)); 14814358d963SJosef Bacik flags = btrfs_block_group_flags(&bg) & 14824358d963SJosef Bacik BTRFS_BLOCK_GROUP_TYPE_MASK; 14834358d963SJosef Bacik 14844358d963SJosef Bacik if (flags != (em->map_lookup->type & 14854358d963SJosef Bacik BTRFS_BLOCK_GROUP_TYPE_MASK)) { 14864358d963SJosef Bacik btrfs_err(fs_info, 14874358d963SJosef Bacik "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 14884358d963SJosef Bacik found_key.objectid, 14894358d963SJosef Bacik found_key.offset, flags, 14904358d963SJosef Bacik (BTRFS_BLOCK_GROUP_TYPE_MASK & 14914358d963SJosef Bacik em->map_lookup->type)); 14924358d963SJosef Bacik ret = -EUCLEAN; 14934358d963SJosef Bacik } else { 14944358d963SJosef Bacik ret = 0; 14954358d963SJosef Bacik } 14964358d963SJosef Bacik } 14974358d963SJosef Bacik free_extent_map(em); 14984358d963SJosef Bacik goto out; 14994358d963SJosef Bacik } 15004358d963SJosef Bacik path->slots[0]++; 15014358d963SJosef Bacik } 15024358d963SJosef Bacik out: 15034358d963SJosef Bacik return ret; 15044358d963SJosef Bacik } 15054358d963SJosef Bacik 15064358d963SJosef Bacik static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 15074358d963SJosef Bacik { 15084358d963SJosef Bacik u64 extra_flags = chunk_to_extended(flags) & 15094358d963SJosef Bacik BTRFS_EXTENDED_PROFILE_MASK; 15104358d963SJosef Bacik 15114358d963SJosef Bacik write_seqlock(&fs_info->profiles_lock); 15124358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA) 15134358d963SJosef Bacik fs_info->avail_data_alloc_bits |= extra_flags; 15144358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_METADATA) 15154358d963SJosef Bacik fs_info->avail_metadata_alloc_bits |= extra_flags; 15164358d963SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 15174358d963SJosef Bacik fs_info->avail_system_alloc_bits |= extra_flags; 15184358d963SJosef Bacik write_sequnlock(&fs_info->profiles_lock); 15194358d963SJosef Bacik } 15204358d963SJosef Bacik 15214358d963SJosef Bacik static int exclude_super_stripes(struct btrfs_block_group_cache *cache) 15224358d963SJosef Bacik { 15234358d963SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 15244358d963SJosef Bacik u64 bytenr; 15254358d963SJosef Bacik u64 *logical; 15264358d963SJosef Bacik int stripe_len; 15274358d963SJosef Bacik int i, nr, ret; 15284358d963SJosef Bacik 15294358d963SJosef Bacik if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { 15304358d963SJosef Bacik stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; 15314358d963SJosef Bacik cache->bytes_super += stripe_len; 15324358d963SJosef Bacik ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid, 15334358d963SJosef Bacik stripe_len); 15344358d963SJosef Bacik if (ret) 15354358d963SJosef Bacik return ret; 15364358d963SJosef Bacik } 15374358d963SJosef Bacik 15384358d963SJosef Bacik for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 15394358d963SJosef Bacik bytenr = btrfs_sb_offset(i); 15404358d963SJosef Bacik ret = btrfs_rmap_block(fs_info, cache->key.objectid, 15414358d963SJosef Bacik bytenr, &logical, &nr, &stripe_len); 15424358d963SJosef Bacik if (ret) 15434358d963SJosef Bacik return ret; 15444358d963SJosef Bacik 15454358d963SJosef Bacik while (nr--) { 15464358d963SJosef Bacik u64 start, len; 15474358d963SJosef Bacik 15484358d963SJosef Bacik if (logical[nr] > cache->key.objectid + 15494358d963SJosef Bacik cache->key.offset) 15504358d963SJosef Bacik continue; 15514358d963SJosef Bacik 15524358d963SJosef Bacik if (logical[nr] + stripe_len <= cache->key.objectid) 15534358d963SJosef Bacik continue; 15544358d963SJosef Bacik 15554358d963SJosef Bacik start = logical[nr]; 15564358d963SJosef Bacik if (start < cache->key.objectid) { 15574358d963SJosef Bacik start = cache->key.objectid; 15584358d963SJosef Bacik len = (logical[nr] + stripe_len) - start; 15594358d963SJosef Bacik } else { 15604358d963SJosef Bacik len = min_t(u64, stripe_len, 15614358d963SJosef Bacik cache->key.objectid + 15624358d963SJosef Bacik cache->key.offset - start); 15634358d963SJosef Bacik } 15644358d963SJosef Bacik 15654358d963SJosef Bacik cache->bytes_super += len; 15664358d963SJosef Bacik ret = btrfs_add_excluded_extent(fs_info, start, len); 15674358d963SJosef Bacik if (ret) { 15684358d963SJosef Bacik kfree(logical); 15694358d963SJosef Bacik return ret; 15704358d963SJosef Bacik } 15714358d963SJosef Bacik } 15724358d963SJosef Bacik 15734358d963SJosef Bacik kfree(logical); 15744358d963SJosef Bacik } 15754358d963SJosef Bacik return 0; 15764358d963SJosef Bacik } 15774358d963SJosef Bacik 15784358d963SJosef Bacik static void link_block_group(struct btrfs_block_group_cache *cache) 15794358d963SJosef Bacik { 15804358d963SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 15814358d963SJosef Bacik int index = btrfs_bg_flags_to_raid_index(cache->flags); 15824358d963SJosef Bacik bool first = false; 15834358d963SJosef Bacik 15844358d963SJosef Bacik down_write(&space_info->groups_sem); 15854358d963SJosef Bacik if (list_empty(&space_info->block_groups[index])) 15864358d963SJosef Bacik first = true; 15874358d963SJosef Bacik list_add_tail(&cache->list, &space_info->block_groups[index]); 15884358d963SJosef Bacik up_write(&space_info->groups_sem); 15894358d963SJosef Bacik 15904358d963SJosef Bacik if (first) 15914358d963SJosef Bacik btrfs_sysfs_add_block_group_type(cache); 15924358d963SJosef Bacik } 15934358d963SJosef Bacik 15944358d963SJosef Bacik static struct btrfs_block_group_cache *btrfs_create_block_group_cache( 15954358d963SJosef Bacik struct btrfs_fs_info *fs_info, u64 start, u64 size) 15964358d963SJosef Bacik { 15974358d963SJosef Bacik struct btrfs_block_group_cache *cache; 15984358d963SJosef Bacik 15994358d963SJosef Bacik cache = kzalloc(sizeof(*cache), GFP_NOFS); 16004358d963SJosef Bacik if (!cache) 16014358d963SJosef Bacik return NULL; 16024358d963SJosef Bacik 16034358d963SJosef Bacik cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 16044358d963SJosef Bacik GFP_NOFS); 16054358d963SJosef Bacik if (!cache->free_space_ctl) { 16064358d963SJosef Bacik kfree(cache); 16074358d963SJosef Bacik return NULL; 16084358d963SJosef Bacik } 16094358d963SJosef Bacik 16104358d963SJosef Bacik cache->key.objectid = start; 16114358d963SJosef Bacik cache->key.offset = size; 16124358d963SJosef Bacik cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 16134358d963SJosef Bacik 16144358d963SJosef Bacik cache->fs_info = fs_info; 16154358d963SJosef Bacik cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 16164358d963SJosef Bacik set_free_space_tree_thresholds(cache); 16174358d963SJosef Bacik 16184358d963SJosef Bacik atomic_set(&cache->count, 1); 16194358d963SJosef Bacik spin_lock_init(&cache->lock); 16204358d963SJosef Bacik init_rwsem(&cache->data_rwsem); 16214358d963SJosef Bacik INIT_LIST_HEAD(&cache->list); 16224358d963SJosef Bacik INIT_LIST_HEAD(&cache->cluster_list); 16234358d963SJosef Bacik INIT_LIST_HEAD(&cache->bg_list); 16244358d963SJosef Bacik INIT_LIST_HEAD(&cache->ro_list); 16254358d963SJosef Bacik INIT_LIST_HEAD(&cache->dirty_list); 16264358d963SJosef Bacik INIT_LIST_HEAD(&cache->io_list); 16274358d963SJosef Bacik btrfs_init_free_space_ctl(cache); 16284358d963SJosef Bacik atomic_set(&cache->trimming, 0); 16294358d963SJosef Bacik mutex_init(&cache->free_space_lock); 16304358d963SJosef Bacik btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); 16314358d963SJosef Bacik 16324358d963SJosef Bacik return cache; 16334358d963SJosef Bacik } 16344358d963SJosef Bacik 16354358d963SJosef Bacik /* 16364358d963SJosef Bacik * Iterate all chunks and verify that each of them has the corresponding block 16374358d963SJosef Bacik * group 16384358d963SJosef Bacik */ 16394358d963SJosef Bacik static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 16404358d963SJosef Bacik { 16414358d963SJosef Bacik struct extent_map_tree *map_tree = &fs_info->mapping_tree; 16424358d963SJosef Bacik struct extent_map *em; 16434358d963SJosef Bacik struct btrfs_block_group_cache *bg; 16444358d963SJosef Bacik u64 start = 0; 16454358d963SJosef Bacik int ret = 0; 16464358d963SJosef Bacik 16474358d963SJosef Bacik while (1) { 16484358d963SJosef Bacik read_lock(&map_tree->lock); 16494358d963SJosef Bacik /* 16504358d963SJosef Bacik * lookup_extent_mapping will return the first extent map 16514358d963SJosef Bacik * intersecting the range, so setting @len to 1 is enough to 16524358d963SJosef Bacik * get the first chunk. 16534358d963SJosef Bacik */ 16544358d963SJosef Bacik em = lookup_extent_mapping(map_tree, start, 1); 16554358d963SJosef Bacik read_unlock(&map_tree->lock); 16564358d963SJosef Bacik if (!em) 16574358d963SJosef Bacik break; 16584358d963SJosef Bacik 16594358d963SJosef Bacik bg = btrfs_lookup_block_group(fs_info, em->start); 16604358d963SJosef Bacik if (!bg) { 16614358d963SJosef Bacik btrfs_err(fs_info, 16624358d963SJosef Bacik "chunk start=%llu len=%llu doesn't have corresponding block group", 16634358d963SJosef Bacik em->start, em->len); 16644358d963SJosef Bacik ret = -EUCLEAN; 16654358d963SJosef Bacik free_extent_map(em); 16664358d963SJosef Bacik break; 16674358d963SJosef Bacik } 16684358d963SJosef Bacik if (bg->key.objectid != em->start || 16694358d963SJosef Bacik bg->key.offset != em->len || 16704358d963SJosef Bacik (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 16714358d963SJosef Bacik (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 16724358d963SJosef Bacik btrfs_err(fs_info, 16734358d963SJosef Bacik "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 16744358d963SJosef Bacik em->start, em->len, 16754358d963SJosef Bacik em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 16764358d963SJosef Bacik bg->key.objectid, bg->key.offset, 16774358d963SJosef Bacik bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 16784358d963SJosef Bacik ret = -EUCLEAN; 16794358d963SJosef Bacik free_extent_map(em); 16804358d963SJosef Bacik btrfs_put_block_group(bg); 16814358d963SJosef Bacik break; 16824358d963SJosef Bacik } 16834358d963SJosef Bacik start = em->start + em->len; 16844358d963SJosef Bacik free_extent_map(em); 16854358d963SJosef Bacik btrfs_put_block_group(bg); 16864358d963SJosef Bacik } 16874358d963SJosef Bacik return ret; 16884358d963SJosef Bacik } 16894358d963SJosef Bacik 16904358d963SJosef Bacik int btrfs_read_block_groups(struct btrfs_fs_info *info) 16914358d963SJosef Bacik { 16924358d963SJosef Bacik struct btrfs_path *path; 16934358d963SJosef Bacik int ret; 16944358d963SJosef Bacik struct btrfs_block_group_cache *cache; 16954358d963SJosef Bacik struct btrfs_space_info *space_info; 16964358d963SJosef Bacik struct btrfs_key key; 16974358d963SJosef Bacik struct btrfs_key found_key; 16984358d963SJosef Bacik struct extent_buffer *leaf; 16994358d963SJosef Bacik int need_clear = 0; 17004358d963SJosef Bacik u64 cache_gen; 17014358d963SJosef Bacik u64 feature; 17024358d963SJosef Bacik int mixed; 17034358d963SJosef Bacik 17044358d963SJosef Bacik feature = btrfs_super_incompat_flags(info->super_copy); 17054358d963SJosef Bacik mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS); 17064358d963SJosef Bacik 17074358d963SJosef Bacik key.objectid = 0; 17084358d963SJosef Bacik key.offset = 0; 17094358d963SJosef Bacik key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 17104358d963SJosef Bacik path = btrfs_alloc_path(); 17114358d963SJosef Bacik if (!path) 17124358d963SJosef Bacik return -ENOMEM; 17134358d963SJosef Bacik path->reada = READA_FORWARD; 17144358d963SJosef Bacik 17154358d963SJosef Bacik cache_gen = btrfs_super_cache_generation(info->super_copy); 17164358d963SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 17174358d963SJosef Bacik btrfs_super_generation(info->super_copy) != cache_gen) 17184358d963SJosef Bacik need_clear = 1; 17194358d963SJosef Bacik if (btrfs_test_opt(info, CLEAR_CACHE)) 17204358d963SJosef Bacik need_clear = 1; 17214358d963SJosef Bacik 17224358d963SJosef Bacik while (1) { 17234358d963SJosef Bacik ret = find_first_block_group(info, path, &key); 17244358d963SJosef Bacik if (ret > 0) 17254358d963SJosef Bacik break; 17264358d963SJosef Bacik if (ret != 0) 17274358d963SJosef Bacik goto error; 17284358d963SJosef Bacik 17294358d963SJosef Bacik leaf = path->nodes[0]; 17304358d963SJosef Bacik btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 17314358d963SJosef Bacik 17324358d963SJosef Bacik cache = btrfs_create_block_group_cache(info, found_key.objectid, 17334358d963SJosef Bacik found_key.offset); 17344358d963SJosef Bacik if (!cache) { 17354358d963SJosef Bacik ret = -ENOMEM; 17364358d963SJosef Bacik goto error; 17374358d963SJosef Bacik } 17384358d963SJosef Bacik 17394358d963SJosef Bacik if (need_clear) { 17404358d963SJosef Bacik /* 17414358d963SJosef Bacik * When we mount with old space cache, we need to 17424358d963SJosef Bacik * set BTRFS_DC_CLEAR and set dirty flag. 17434358d963SJosef Bacik * 17444358d963SJosef Bacik * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 17454358d963SJosef Bacik * truncate the old free space cache inode and 17464358d963SJosef Bacik * setup a new one. 17474358d963SJosef Bacik * b) Setting 'dirty flag' makes sure that we flush 17484358d963SJosef Bacik * the new space cache info onto disk. 17494358d963SJosef Bacik */ 17504358d963SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE)) 17514358d963SJosef Bacik cache->disk_cache_state = BTRFS_DC_CLEAR; 17524358d963SJosef Bacik } 17534358d963SJosef Bacik 17544358d963SJosef Bacik read_extent_buffer(leaf, &cache->item, 17554358d963SJosef Bacik btrfs_item_ptr_offset(leaf, path->slots[0]), 17564358d963SJosef Bacik sizeof(cache->item)); 17574358d963SJosef Bacik cache->flags = btrfs_block_group_flags(&cache->item); 17584358d963SJosef Bacik if (!mixed && 17594358d963SJosef Bacik ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 17604358d963SJosef Bacik (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 17614358d963SJosef Bacik btrfs_err(info, 17624358d963SJosef Bacik "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 17634358d963SJosef Bacik cache->key.objectid); 17644358d963SJosef Bacik ret = -EINVAL; 17654358d963SJosef Bacik goto error; 17664358d963SJosef Bacik } 17674358d963SJosef Bacik 17684358d963SJosef Bacik key.objectid = found_key.objectid + found_key.offset; 17694358d963SJosef Bacik btrfs_release_path(path); 17704358d963SJosef Bacik 17714358d963SJosef Bacik /* 17724358d963SJosef Bacik * We need to exclude the super stripes now so that the space 17734358d963SJosef Bacik * info has super bytes accounted for, otherwise we'll think 17744358d963SJosef Bacik * we have more space than we actually do. 17754358d963SJosef Bacik */ 17764358d963SJosef Bacik ret = exclude_super_stripes(cache); 17774358d963SJosef Bacik if (ret) { 17784358d963SJosef Bacik /* 17794358d963SJosef Bacik * We may have excluded something, so call this just in 17804358d963SJosef Bacik * case. 17814358d963SJosef Bacik */ 17824358d963SJosef Bacik btrfs_free_excluded_extents(cache); 17834358d963SJosef Bacik btrfs_put_block_group(cache); 17844358d963SJosef Bacik goto error; 17854358d963SJosef Bacik } 17864358d963SJosef Bacik 17874358d963SJosef Bacik /* 17884358d963SJosef Bacik * Check for two cases, either we are full, and therefore 17894358d963SJosef Bacik * don't need to bother with the caching work since we won't 17904358d963SJosef Bacik * find any space, or we are empty, and we can just add all 17914358d963SJosef Bacik * the space in and be done with it. This saves us _a_lot_ of 17924358d963SJosef Bacik * time, particularly in the full case. 17934358d963SJosef Bacik */ 17944358d963SJosef Bacik if (found_key.offset == btrfs_block_group_used(&cache->item)) { 17954358d963SJosef Bacik cache->last_byte_to_unpin = (u64)-1; 17964358d963SJosef Bacik cache->cached = BTRFS_CACHE_FINISHED; 17974358d963SJosef Bacik btrfs_free_excluded_extents(cache); 17984358d963SJosef Bacik } else if (btrfs_block_group_used(&cache->item) == 0) { 17994358d963SJosef Bacik cache->last_byte_to_unpin = (u64)-1; 18004358d963SJosef Bacik cache->cached = BTRFS_CACHE_FINISHED; 18014358d963SJosef Bacik add_new_free_space(cache, found_key.objectid, 18024358d963SJosef Bacik found_key.objectid + 18034358d963SJosef Bacik found_key.offset); 18044358d963SJosef Bacik btrfs_free_excluded_extents(cache); 18054358d963SJosef Bacik } 18064358d963SJosef Bacik 18074358d963SJosef Bacik ret = btrfs_add_block_group_cache(info, cache); 18084358d963SJosef Bacik if (ret) { 18094358d963SJosef Bacik btrfs_remove_free_space_cache(cache); 18104358d963SJosef Bacik btrfs_put_block_group(cache); 18114358d963SJosef Bacik goto error; 18124358d963SJosef Bacik } 18134358d963SJosef Bacik 18144358d963SJosef Bacik trace_btrfs_add_block_group(info, cache, 0); 18154358d963SJosef Bacik btrfs_update_space_info(info, cache->flags, found_key.offset, 18164358d963SJosef Bacik btrfs_block_group_used(&cache->item), 18174358d963SJosef Bacik cache->bytes_super, &space_info); 18184358d963SJosef Bacik 18194358d963SJosef Bacik cache->space_info = space_info; 18204358d963SJosef Bacik 18214358d963SJosef Bacik link_block_group(cache); 18224358d963SJosef Bacik 18234358d963SJosef Bacik set_avail_alloc_bits(info, cache->flags); 18244358d963SJosef Bacik if (btrfs_chunk_readonly(info, cache->key.objectid)) { 18254358d963SJosef Bacik __btrfs_inc_block_group_ro(cache, 1); 18264358d963SJosef Bacik } else if (btrfs_block_group_used(&cache->item) == 0) { 18274358d963SJosef Bacik ASSERT(list_empty(&cache->bg_list)); 18284358d963SJosef Bacik btrfs_mark_bg_unused(cache); 18294358d963SJosef Bacik } 18304358d963SJosef Bacik } 18314358d963SJosef Bacik 18324358d963SJosef Bacik list_for_each_entry_rcu(space_info, &info->space_info, list) { 18334358d963SJosef Bacik if (!(btrfs_get_alloc_profile(info, space_info->flags) & 18344358d963SJosef Bacik (BTRFS_BLOCK_GROUP_RAID10 | 18354358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID1_MASK | 18364358d963SJosef Bacik BTRFS_BLOCK_GROUP_RAID56_MASK | 18374358d963SJosef Bacik BTRFS_BLOCK_GROUP_DUP))) 18384358d963SJosef Bacik continue; 18394358d963SJosef Bacik /* 18404358d963SJosef Bacik * Avoid allocating from un-mirrored block group if there are 18414358d963SJosef Bacik * mirrored block groups. 18424358d963SJosef Bacik */ 18434358d963SJosef Bacik list_for_each_entry(cache, 18444358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_RAID0], 18454358d963SJosef Bacik list) 18464358d963SJosef Bacik __btrfs_inc_block_group_ro(cache, 1); 18474358d963SJosef Bacik list_for_each_entry(cache, 18484358d963SJosef Bacik &space_info->block_groups[BTRFS_RAID_SINGLE], 18494358d963SJosef Bacik list) 18504358d963SJosef Bacik __btrfs_inc_block_group_ro(cache, 1); 18514358d963SJosef Bacik } 18524358d963SJosef Bacik 18534358d963SJosef Bacik btrfs_init_global_block_rsv(info); 18544358d963SJosef Bacik ret = check_chunk_block_group_mappings(info); 18554358d963SJosef Bacik error: 18564358d963SJosef Bacik btrfs_free_path(path); 18574358d963SJosef Bacik return ret; 18584358d963SJosef Bacik } 18594358d963SJosef Bacik 18604358d963SJosef Bacik void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 18614358d963SJosef Bacik { 18624358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 18634358d963SJosef Bacik struct btrfs_block_group_cache *block_group; 18644358d963SJosef Bacik struct btrfs_root *extent_root = fs_info->extent_root; 18654358d963SJosef Bacik struct btrfs_block_group_item item; 18664358d963SJosef Bacik struct btrfs_key key; 18674358d963SJosef Bacik int ret = 0; 18684358d963SJosef Bacik 18694358d963SJosef Bacik if (!trans->can_flush_pending_bgs) 18704358d963SJosef Bacik return; 18714358d963SJosef Bacik 18724358d963SJosef Bacik while (!list_empty(&trans->new_bgs)) { 18734358d963SJosef Bacik block_group = list_first_entry(&trans->new_bgs, 18744358d963SJosef Bacik struct btrfs_block_group_cache, 18754358d963SJosef Bacik bg_list); 18764358d963SJosef Bacik if (ret) 18774358d963SJosef Bacik goto next; 18784358d963SJosef Bacik 18794358d963SJosef Bacik spin_lock(&block_group->lock); 18804358d963SJosef Bacik memcpy(&item, &block_group->item, sizeof(item)); 18814358d963SJosef Bacik memcpy(&key, &block_group->key, sizeof(key)); 18824358d963SJosef Bacik spin_unlock(&block_group->lock); 18834358d963SJosef Bacik 18844358d963SJosef Bacik ret = btrfs_insert_item(trans, extent_root, &key, &item, 18854358d963SJosef Bacik sizeof(item)); 18864358d963SJosef Bacik if (ret) 18874358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 18884358d963SJosef Bacik ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset); 18894358d963SJosef Bacik if (ret) 18904358d963SJosef Bacik btrfs_abort_transaction(trans, ret); 18914358d963SJosef Bacik add_block_group_free_space(trans, block_group); 18924358d963SJosef Bacik /* Already aborted the transaction if it failed. */ 18934358d963SJosef Bacik next: 18944358d963SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 18954358d963SJosef Bacik list_del_init(&block_group->bg_list); 18964358d963SJosef Bacik } 18974358d963SJosef Bacik btrfs_trans_release_chunk_metadata(trans); 18984358d963SJosef Bacik } 18994358d963SJosef Bacik 19004358d963SJosef Bacik int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, 19014358d963SJosef Bacik u64 type, u64 chunk_offset, u64 size) 19024358d963SJosef Bacik { 19034358d963SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 19044358d963SJosef Bacik struct btrfs_block_group_cache *cache; 19054358d963SJosef Bacik int ret; 19064358d963SJosef Bacik 19074358d963SJosef Bacik btrfs_set_log_full_commit(trans); 19084358d963SJosef Bacik 19094358d963SJosef Bacik cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size); 19104358d963SJosef Bacik if (!cache) 19114358d963SJosef Bacik return -ENOMEM; 19124358d963SJosef Bacik 19134358d963SJosef Bacik btrfs_set_block_group_used(&cache->item, bytes_used); 19144358d963SJosef Bacik btrfs_set_block_group_chunk_objectid(&cache->item, 19154358d963SJosef Bacik BTRFS_FIRST_CHUNK_TREE_OBJECTID); 19164358d963SJosef Bacik btrfs_set_block_group_flags(&cache->item, type); 19174358d963SJosef Bacik 19184358d963SJosef Bacik cache->flags = type; 19194358d963SJosef Bacik cache->last_byte_to_unpin = (u64)-1; 19204358d963SJosef Bacik cache->cached = BTRFS_CACHE_FINISHED; 19214358d963SJosef Bacik cache->needs_free_space = 1; 19224358d963SJosef Bacik ret = exclude_super_stripes(cache); 19234358d963SJosef Bacik if (ret) { 19244358d963SJosef Bacik /* We may have excluded something, so call this just in case */ 19254358d963SJosef Bacik btrfs_free_excluded_extents(cache); 19264358d963SJosef Bacik btrfs_put_block_group(cache); 19274358d963SJosef Bacik return ret; 19284358d963SJosef Bacik } 19294358d963SJosef Bacik 19304358d963SJosef Bacik add_new_free_space(cache, chunk_offset, chunk_offset + size); 19314358d963SJosef Bacik 19324358d963SJosef Bacik btrfs_free_excluded_extents(cache); 19334358d963SJosef Bacik 19344358d963SJosef Bacik #ifdef CONFIG_BTRFS_DEBUG 19354358d963SJosef Bacik if (btrfs_should_fragment_free_space(cache)) { 19364358d963SJosef Bacik u64 new_bytes_used = size - bytes_used; 19374358d963SJosef Bacik 19384358d963SJosef Bacik bytes_used += new_bytes_used >> 1; 19394358d963SJosef Bacik btrfs_fragment_free_space(cache); 19404358d963SJosef Bacik } 19414358d963SJosef Bacik #endif 19424358d963SJosef Bacik /* 19434358d963SJosef Bacik * Ensure the corresponding space_info object is created and 19444358d963SJosef Bacik * assigned to our block group. We want our bg to be added to the rbtree 19454358d963SJosef Bacik * with its ->space_info set. 19464358d963SJosef Bacik */ 19474358d963SJosef Bacik cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 19484358d963SJosef Bacik ASSERT(cache->space_info); 19494358d963SJosef Bacik 19504358d963SJosef Bacik ret = btrfs_add_block_group_cache(fs_info, cache); 19514358d963SJosef Bacik if (ret) { 19524358d963SJosef Bacik btrfs_remove_free_space_cache(cache); 19534358d963SJosef Bacik btrfs_put_block_group(cache); 19544358d963SJosef Bacik return ret; 19554358d963SJosef Bacik } 19564358d963SJosef Bacik 19574358d963SJosef Bacik /* 19584358d963SJosef Bacik * Now that our block group has its ->space_info set and is inserted in 19594358d963SJosef Bacik * the rbtree, update the space info's counters. 19604358d963SJosef Bacik */ 19614358d963SJosef Bacik trace_btrfs_add_block_group(fs_info, cache, 1); 19624358d963SJosef Bacik btrfs_update_space_info(fs_info, cache->flags, size, bytes_used, 19634358d963SJosef Bacik cache->bytes_super, &cache->space_info); 19644358d963SJosef Bacik btrfs_update_global_block_rsv(fs_info); 19654358d963SJosef Bacik 19664358d963SJosef Bacik link_block_group(cache); 19674358d963SJosef Bacik 19684358d963SJosef Bacik list_add_tail(&cache->bg_list, &trans->new_bgs); 19694358d963SJosef Bacik trans->delayed_ref_updates++; 19704358d963SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 19714358d963SJosef Bacik 19724358d963SJosef Bacik set_avail_alloc_bits(fs_info, type); 19734358d963SJosef Bacik return 0; 19744358d963SJosef Bacik } 197526ce2095SJosef Bacik 197626ce2095SJosef Bacik static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) 197726ce2095SJosef Bacik { 197826ce2095SJosef Bacik u64 num_devices; 197926ce2095SJosef Bacik u64 stripped; 198026ce2095SJosef Bacik 198126ce2095SJosef Bacik /* 198226ce2095SJosef Bacik * if restripe for this chunk_type is on pick target profile and 198326ce2095SJosef Bacik * return, otherwise do the usual balance 198426ce2095SJosef Bacik */ 198526ce2095SJosef Bacik stripped = btrfs_get_restripe_target(fs_info, flags); 198626ce2095SJosef Bacik if (stripped) 198726ce2095SJosef Bacik return extended_to_chunk(stripped); 198826ce2095SJosef Bacik 198926ce2095SJosef Bacik num_devices = fs_info->fs_devices->rw_devices; 199026ce2095SJosef Bacik 199126ce2095SJosef Bacik stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK | 199226ce2095SJosef Bacik BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10; 199326ce2095SJosef Bacik 199426ce2095SJosef Bacik if (num_devices == 1) { 199526ce2095SJosef Bacik stripped |= BTRFS_BLOCK_GROUP_DUP; 199626ce2095SJosef Bacik stripped = flags & ~stripped; 199726ce2095SJosef Bacik 199826ce2095SJosef Bacik /* turn raid0 into single device chunks */ 199926ce2095SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_RAID0) 200026ce2095SJosef Bacik return stripped; 200126ce2095SJosef Bacik 200226ce2095SJosef Bacik /* turn mirroring into duplication */ 200326ce2095SJosef Bacik if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK | 200426ce2095SJosef Bacik BTRFS_BLOCK_GROUP_RAID10)) 200526ce2095SJosef Bacik return stripped | BTRFS_BLOCK_GROUP_DUP; 200626ce2095SJosef Bacik } else { 200726ce2095SJosef Bacik /* they already had raid on here, just return */ 200826ce2095SJosef Bacik if (flags & stripped) 200926ce2095SJosef Bacik return flags; 201026ce2095SJosef Bacik 201126ce2095SJosef Bacik stripped |= BTRFS_BLOCK_GROUP_DUP; 201226ce2095SJosef Bacik stripped = flags & ~stripped; 201326ce2095SJosef Bacik 201426ce2095SJosef Bacik /* switch duplicated blocks with raid1 */ 201526ce2095SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DUP) 201626ce2095SJosef Bacik return stripped | BTRFS_BLOCK_GROUP_RAID1; 201726ce2095SJosef Bacik 201826ce2095SJosef Bacik /* this is drive concat, leave it alone */ 201926ce2095SJosef Bacik } 202026ce2095SJosef Bacik 202126ce2095SJosef Bacik return flags; 202226ce2095SJosef Bacik } 202326ce2095SJosef Bacik 202426ce2095SJosef Bacik int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) 202526ce2095SJosef Bacik 202626ce2095SJosef Bacik { 202726ce2095SJosef Bacik struct btrfs_fs_info *fs_info = cache->fs_info; 202826ce2095SJosef Bacik struct btrfs_trans_handle *trans; 202926ce2095SJosef Bacik u64 alloc_flags; 203026ce2095SJosef Bacik int ret; 203126ce2095SJosef Bacik 203226ce2095SJosef Bacik again: 203326ce2095SJosef Bacik trans = btrfs_join_transaction(fs_info->extent_root); 203426ce2095SJosef Bacik if (IS_ERR(trans)) 203526ce2095SJosef Bacik return PTR_ERR(trans); 203626ce2095SJosef Bacik 203726ce2095SJosef Bacik /* 203826ce2095SJosef Bacik * we're not allowed to set block groups readonly after the dirty 203926ce2095SJosef Bacik * block groups cache has started writing. If it already started, 204026ce2095SJosef Bacik * back off and let this transaction commit 204126ce2095SJosef Bacik */ 204226ce2095SJosef Bacik mutex_lock(&fs_info->ro_block_group_mutex); 204326ce2095SJosef Bacik if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 204426ce2095SJosef Bacik u64 transid = trans->transid; 204526ce2095SJosef Bacik 204626ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 204726ce2095SJosef Bacik btrfs_end_transaction(trans); 204826ce2095SJosef Bacik 204926ce2095SJosef Bacik ret = btrfs_wait_for_commit(fs_info, transid); 205026ce2095SJosef Bacik if (ret) 205126ce2095SJosef Bacik return ret; 205226ce2095SJosef Bacik goto again; 205326ce2095SJosef Bacik } 205426ce2095SJosef Bacik 205526ce2095SJosef Bacik /* 205626ce2095SJosef Bacik * if we are changing raid levels, try to allocate a corresponding 205726ce2095SJosef Bacik * block group with the new raid level. 205826ce2095SJosef Bacik */ 205926ce2095SJosef Bacik alloc_flags = update_block_group_flags(fs_info, cache->flags); 206026ce2095SJosef Bacik if (alloc_flags != cache->flags) { 206126ce2095SJosef Bacik ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 206226ce2095SJosef Bacik /* 206326ce2095SJosef Bacik * ENOSPC is allowed here, we may have enough space 206426ce2095SJosef Bacik * already allocated at the new raid level to 206526ce2095SJosef Bacik * carry on 206626ce2095SJosef Bacik */ 206726ce2095SJosef Bacik if (ret == -ENOSPC) 206826ce2095SJosef Bacik ret = 0; 206926ce2095SJosef Bacik if (ret < 0) 207026ce2095SJosef Bacik goto out; 207126ce2095SJosef Bacik } 207226ce2095SJosef Bacik 207326ce2095SJosef Bacik ret = __btrfs_inc_block_group_ro(cache, 0); 207426ce2095SJosef Bacik if (!ret) 207526ce2095SJosef Bacik goto out; 207626ce2095SJosef Bacik alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 207726ce2095SJosef Bacik ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 207826ce2095SJosef Bacik if (ret < 0) 207926ce2095SJosef Bacik goto out; 208026ce2095SJosef Bacik ret = __btrfs_inc_block_group_ro(cache, 0); 208126ce2095SJosef Bacik out: 208226ce2095SJosef Bacik if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 208326ce2095SJosef Bacik alloc_flags = update_block_group_flags(fs_info, cache->flags); 208426ce2095SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 208526ce2095SJosef Bacik check_system_chunk(trans, alloc_flags); 208626ce2095SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 208726ce2095SJosef Bacik } 208826ce2095SJosef Bacik mutex_unlock(&fs_info->ro_block_group_mutex); 208926ce2095SJosef Bacik 209026ce2095SJosef Bacik btrfs_end_transaction(trans); 209126ce2095SJosef Bacik return ret; 209226ce2095SJosef Bacik } 209326ce2095SJosef Bacik 209426ce2095SJosef Bacik void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) 209526ce2095SJosef Bacik { 209626ce2095SJosef Bacik struct btrfs_space_info *sinfo = cache->space_info; 209726ce2095SJosef Bacik u64 num_bytes; 209826ce2095SJosef Bacik 209926ce2095SJosef Bacik BUG_ON(!cache->ro); 210026ce2095SJosef Bacik 210126ce2095SJosef Bacik spin_lock(&sinfo->lock); 210226ce2095SJosef Bacik spin_lock(&cache->lock); 210326ce2095SJosef Bacik if (!--cache->ro) { 210426ce2095SJosef Bacik num_bytes = cache->key.offset - cache->reserved - 210526ce2095SJosef Bacik cache->pinned - cache->bytes_super - 210626ce2095SJosef Bacik btrfs_block_group_used(&cache->item); 210726ce2095SJosef Bacik sinfo->bytes_readonly -= num_bytes; 210826ce2095SJosef Bacik list_del_init(&cache->ro_list); 210926ce2095SJosef Bacik } 211026ce2095SJosef Bacik spin_unlock(&cache->lock); 211126ce2095SJosef Bacik spin_unlock(&sinfo->lock); 211226ce2095SJosef Bacik } 211377745c05SJosef Bacik 211477745c05SJosef Bacik static int write_one_cache_group(struct btrfs_trans_handle *trans, 211577745c05SJosef Bacik struct btrfs_path *path, 211677745c05SJosef Bacik struct btrfs_block_group_cache *cache) 211777745c05SJosef Bacik { 211877745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 211977745c05SJosef Bacik int ret; 212077745c05SJosef Bacik struct btrfs_root *extent_root = fs_info->extent_root; 212177745c05SJosef Bacik unsigned long bi; 212277745c05SJosef Bacik struct extent_buffer *leaf; 212377745c05SJosef Bacik 212477745c05SJosef Bacik ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 212577745c05SJosef Bacik if (ret) { 212677745c05SJosef Bacik if (ret > 0) 212777745c05SJosef Bacik ret = -ENOENT; 212877745c05SJosef Bacik goto fail; 212977745c05SJosef Bacik } 213077745c05SJosef Bacik 213177745c05SJosef Bacik leaf = path->nodes[0]; 213277745c05SJosef Bacik bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 213377745c05SJosef Bacik write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); 213477745c05SJosef Bacik btrfs_mark_buffer_dirty(leaf); 213577745c05SJosef Bacik fail: 213677745c05SJosef Bacik btrfs_release_path(path); 213777745c05SJosef Bacik return ret; 213877745c05SJosef Bacik 213977745c05SJosef Bacik } 214077745c05SJosef Bacik 214177745c05SJosef Bacik static int cache_save_setup(struct btrfs_block_group_cache *block_group, 214277745c05SJosef Bacik struct btrfs_trans_handle *trans, 214377745c05SJosef Bacik struct btrfs_path *path) 214477745c05SJosef Bacik { 214577745c05SJosef Bacik struct btrfs_fs_info *fs_info = block_group->fs_info; 214677745c05SJosef Bacik struct btrfs_root *root = fs_info->tree_root; 214777745c05SJosef Bacik struct inode *inode = NULL; 214877745c05SJosef Bacik struct extent_changeset *data_reserved = NULL; 214977745c05SJosef Bacik u64 alloc_hint = 0; 215077745c05SJosef Bacik int dcs = BTRFS_DC_ERROR; 215177745c05SJosef Bacik u64 num_pages = 0; 215277745c05SJosef Bacik int retries = 0; 215377745c05SJosef Bacik int ret = 0; 215477745c05SJosef Bacik 215577745c05SJosef Bacik /* 215677745c05SJosef Bacik * If this block group is smaller than 100 megs don't bother caching the 215777745c05SJosef Bacik * block group. 215877745c05SJosef Bacik */ 215977745c05SJosef Bacik if (block_group->key.offset < (100 * SZ_1M)) { 216077745c05SJosef Bacik spin_lock(&block_group->lock); 216177745c05SJosef Bacik block_group->disk_cache_state = BTRFS_DC_WRITTEN; 216277745c05SJosef Bacik spin_unlock(&block_group->lock); 216377745c05SJosef Bacik return 0; 216477745c05SJosef Bacik } 216577745c05SJosef Bacik 216677745c05SJosef Bacik if (trans->aborted) 216777745c05SJosef Bacik return 0; 216877745c05SJosef Bacik again: 216977745c05SJosef Bacik inode = lookup_free_space_inode(block_group, path); 217077745c05SJosef Bacik if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 217177745c05SJosef Bacik ret = PTR_ERR(inode); 217277745c05SJosef Bacik btrfs_release_path(path); 217377745c05SJosef Bacik goto out; 217477745c05SJosef Bacik } 217577745c05SJosef Bacik 217677745c05SJosef Bacik if (IS_ERR(inode)) { 217777745c05SJosef Bacik BUG_ON(retries); 217877745c05SJosef Bacik retries++; 217977745c05SJosef Bacik 218077745c05SJosef Bacik if (block_group->ro) 218177745c05SJosef Bacik goto out_free; 218277745c05SJosef Bacik 218377745c05SJosef Bacik ret = create_free_space_inode(trans, block_group, path); 218477745c05SJosef Bacik if (ret) 218577745c05SJosef Bacik goto out_free; 218677745c05SJosef Bacik goto again; 218777745c05SJosef Bacik } 218877745c05SJosef Bacik 218977745c05SJosef Bacik /* 219077745c05SJosef Bacik * We want to set the generation to 0, that way if anything goes wrong 219177745c05SJosef Bacik * from here on out we know not to trust this cache when we load up next 219277745c05SJosef Bacik * time. 219377745c05SJosef Bacik */ 219477745c05SJosef Bacik BTRFS_I(inode)->generation = 0; 219577745c05SJosef Bacik ret = btrfs_update_inode(trans, root, inode); 219677745c05SJosef Bacik if (ret) { 219777745c05SJosef Bacik /* 219877745c05SJosef Bacik * So theoretically we could recover from this, simply set the 219977745c05SJosef Bacik * super cache generation to 0 so we know to invalidate the 220077745c05SJosef Bacik * cache, but then we'd have to keep track of the block groups 220177745c05SJosef Bacik * that fail this way so we know we _have_ to reset this cache 220277745c05SJosef Bacik * before the next commit or risk reading stale cache. So to 220377745c05SJosef Bacik * limit our exposure to horrible edge cases lets just abort the 220477745c05SJosef Bacik * transaction, this only happens in really bad situations 220577745c05SJosef Bacik * anyway. 220677745c05SJosef Bacik */ 220777745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 220877745c05SJosef Bacik goto out_put; 220977745c05SJosef Bacik } 221077745c05SJosef Bacik WARN_ON(ret); 221177745c05SJosef Bacik 221277745c05SJosef Bacik /* We've already setup this transaction, go ahead and exit */ 221377745c05SJosef Bacik if (block_group->cache_generation == trans->transid && 221477745c05SJosef Bacik i_size_read(inode)) { 221577745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 221677745c05SJosef Bacik goto out_put; 221777745c05SJosef Bacik } 221877745c05SJosef Bacik 221977745c05SJosef Bacik if (i_size_read(inode) > 0) { 222077745c05SJosef Bacik ret = btrfs_check_trunc_cache_free_space(fs_info, 222177745c05SJosef Bacik &fs_info->global_block_rsv); 222277745c05SJosef Bacik if (ret) 222377745c05SJosef Bacik goto out_put; 222477745c05SJosef Bacik 222577745c05SJosef Bacik ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 222677745c05SJosef Bacik if (ret) 222777745c05SJosef Bacik goto out_put; 222877745c05SJosef Bacik } 222977745c05SJosef Bacik 223077745c05SJosef Bacik spin_lock(&block_group->lock); 223177745c05SJosef Bacik if (block_group->cached != BTRFS_CACHE_FINISHED || 223277745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) { 223377745c05SJosef Bacik /* 223477745c05SJosef Bacik * don't bother trying to write stuff out _if_ 223577745c05SJosef Bacik * a) we're not cached, 223677745c05SJosef Bacik * b) we're with nospace_cache mount option, 223777745c05SJosef Bacik * c) we're with v2 space_cache (FREE_SPACE_TREE). 223877745c05SJosef Bacik */ 223977745c05SJosef Bacik dcs = BTRFS_DC_WRITTEN; 224077745c05SJosef Bacik spin_unlock(&block_group->lock); 224177745c05SJosef Bacik goto out_put; 224277745c05SJosef Bacik } 224377745c05SJosef Bacik spin_unlock(&block_group->lock); 224477745c05SJosef Bacik 224577745c05SJosef Bacik /* 224677745c05SJosef Bacik * We hit an ENOSPC when setting up the cache in this transaction, just 224777745c05SJosef Bacik * skip doing the setup, we've already cleared the cache so we're safe. 224877745c05SJosef Bacik */ 224977745c05SJosef Bacik if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 225077745c05SJosef Bacik ret = -ENOSPC; 225177745c05SJosef Bacik goto out_put; 225277745c05SJosef Bacik } 225377745c05SJosef Bacik 225477745c05SJosef Bacik /* 225577745c05SJosef Bacik * Try to preallocate enough space based on how big the block group is. 225677745c05SJosef Bacik * Keep in mind this has to include any pinned space which could end up 225777745c05SJosef Bacik * taking up quite a bit since it's not folded into the other space 225877745c05SJosef Bacik * cache. 225977745c05SJosef Bacik */ 226077745c05SJosef Bacik num_pages = div_u64(block_group->key.offset, SZ_256M); 226177745c05SJosef Bacik if (!num_pages) 226277745c05SJosef Bacik num_pages = 1; 226377745c05SJosef Bacik 226477745c05SJosef Bacik num_pages *= 16; 226577745c05SJosef Bacik num_pages *= PAGE_SIZE; 226677745c05SJosef Bacik 226777745c05SJosef Bacik ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages); 226877745c05SJosef Bacik if (ret) 226977745c05SJosef Bacik goto out_put; 227077745c05SJosef Bacik 227177745c05SJosef Bacik ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, 227277745c05SJosef Bacik num_pages, num_pages, 227377745c05SJosef Bacik &alloc_hint); 227477745c05SJosef Bacik /* 227577745c05SJosef Bacik * Our cache requires contiguous chunks so that we don't modify a bunch 227677745c05SJosef Bacik * of metadata or split extents when writing the cache out, which means 227777745c05SJosef Bacik * we can enospc if we are heavily fragmented in addition to just normal 227877745c05SJosef Bacik * out of space conditions. So if we hit this just skip setting up any 227977745c05SJosef Bacik * other block groups for this transaction, maybe we'll unpin enough 228077745c05SJosef Bacik * space the next time around. 228177745c05SJosef Bacik */ 228277745c05SJosef Bacik if (!ret) 228377745c05SJosef Bacik dcs = BTRFS_DC_SETUP; 228477745c05SJosef Bacik else if (ret == -ENOSPC) 228577745c05SJosef Bacik set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 228677745c05SJosef Bacik 228777745c05SJosef Bacik out_put: 228877745c05SJosef Bacik iput(inode); 228977745c05SJosef Bacik out_free: 229077745c05SJosef Bacik btrfs_release_path(path); 229177745c05SJosef Bacik out: 229277745c05SJosef Bacik spin_lock(&block_group->lock); 229377745c05SJosef Bacik if (!ret && dcs == BTRFS_DC_SETUP) 229477745c05SJosef Bacik block_group->cache_generation = trans->transid; 229577745c05SJosef Bacik block_group->disk_cache_state = dcs; 229677745c05SJosef Bacik spin_unlock(&block_group->lock); 229777745c05SJosef Bacik 229877745c05SJosef Bacik extent_changeset_free(data_reserved); 229977745c05SJosef Bacik return ret; 230077745c05SJosef Bacik } 230177745c05SJosef Bacik 230277745c05SJosef Bacik int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 230377745c05SJosef Bacik { 230477745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 230577745c05SJosef Bacik struct btrfs_block_group_cache *cache, *tmp; 230677745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 230777745c05SJosef Bacik struct btrfs_path *path; 230877745c05SJosef Bacik 230977745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs) || 231077745c05SJosef Bacik !btrfs_test_opt(fs_info, SPACE_CACHE)) 231177745c05SJosef Bacik return 0; 231277745c05SJosef Bacik 231377745c05SJosef Bacik path = btrfs_alloc_path(); 231477745c05SJosef Bacik if (!path) 231577745c05SJosef Bacik return -ENOMEM; 231677745c05SJosef Bacik 231777745c05SJosef Bacik /* Could add new block groups, use _safe just in case */ 231877745c05SJosef Bacik list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 231977745c05SJosef Bacik dirty_list) { 232077745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_CLEAR) 232177745c05SJosef Bacik cache_save_setup(cache, trans, path); 232277745c05SJosef Bacik } 232377745c05SJosef Bacik 232477745c05SJosef Bacik btrfs_free_path(path); 232577745c05SJosef Bacik return 0; 232677745c05SJosef Bacik } 232777745c05SJosef Bacik 232877745c05SJosef Bacik /* 232977745c05SJosef Bacik * Transaction commit does final block group cache writeback during a critical 233077745c05SJosef Bacik * section where nothing is allowed to change the FS. This is required in 233177745c05SJosef Bacik * order for the cache to actually match the block group, but can introduce a 233277745c05SJosef Bacik * lot of latency into the commit. 233377745c05SJosef Bacik * 233477745c05SJosef Bacik * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 233577745c05SJosef Bacik * There's a chance we'll have to redo some of it if the block group changes 233677745c05SJosef Bacik * again during the commit, but it greatly reduces the commit latency by 233777745c05SJosef Bacik * getting rid of the easy block groups while we're still allowing others to 233877745c05SJosef Bacik * join the commit. 233977745c05SJosef Bacik */ 234077745c05SJosef Bacik int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 234177745c05SJosef Bacik { 234277745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 234377745c05SJosef Bacik struct btrfs_block_group_cache *cache; 234477745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 234577745c05SJosef Bacik int ret = 0; 234677745c05SJosef Bacik int should_put; 234777745c05SJosef Bacik struct btrfs_path *path = NULL; 234877745c05SJosef Bacik LIST_HEAD(dirty); 234977745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 235077745c05SJosef Bacik int num_started = 0; 235177745c05SJosef Bacik int loops = 0; 235277745c05SJosef Bacik 235377745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 235477745c05SJosef Bacik if (list_empty(&cur_trans->dirty_bgs)) { 235577745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 235677745c05SJosef Bacik return 0; 235777745c05SJosef Bacik } 235877745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 235977745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 236077745c05SJosef Bacik 236177745c05SJosef Bacik again: 236277745c05SJosef Bacik /* Make sure all the block groups on our dirty list actually exist */ 236377745c05SJosef Bacik btrfs_create_pending_block_groups(trans); 236477745c05SJosef Bacik 236577745c05SJosef Bacik if (!path) { 236677745c05SJosef Bacik path = btrfs_alloc_path(); 236777745c05SJosef Bacik if (!path) 236877745c05SJosef Bacik return -ENOMEM; 236977745c05SJosef Bacik } 237077745c05SJosef Bacik 237177745c05SJosef Bacik /* 237277745c05SJosef Bacik * cache_write_mutex is here only to save us from balance or automatic 237377745c05SJosef Bacik * removal of empty block groups deleting this block group while we are 237477745c05SJosef Bacik * writing out the cache 237577745c05SJosef Bacik */ 237677745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 237777745c05SJosef Bacik while (!list_empty(&dirty)) { 237877745c05SJosef Bacik bool drop_reserve = true; 237977745c05SJosef Bacik 238077745c05SJosef Bacik cache = list_first_entry(&dirty, 238177745c05SJosef Bacik struct btrfs_block_group_cache, 238277745c05SJosef Bacik dirty_list); 238377745c05SJosef Bacik /* 238477745c05SJosef Bacik * This can happen if something re-dirties a block group that 238577745c05SJosef Bacik * is already under IO. Just wait for it to finish and then do 238677745c05SJosef Bacik * it all again 238777745c05SJosef Bacik */ 238877745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 238977745c05SJosef Bacik list_del_init(&cache->io_list); 239077745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 239177745c05SJosef Bacik btrfs_put_block_group(cache); 239277745c05SJosef Bacik } 239377745c05SJosef Bacik 239477745c05SJosef Bacik 239577745c05SJosef Bacik /* 239677745c05SJosef Bacik * btrfs_wait_cache_io uses the cache->dirty_list to decide if 239777745c05SJosef Bacik * it should update the cache_state. Don't delete until after 239877745c05SJosef Bacik * we wait. 239977745c05SJosef Bacik * 240077745c05SJosef Bacik * Since we're not running in the commit critical section 240177745c05SJosef Bacik * we need the dirty_bgs_lock to protect from update_block_group 240277745c05SJosef Bacik */ 240377745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 240477745c05SJosef Bacik list_del_init(&cache->dirty_list); 240577745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 240677745c05SJosef Bacik 240777745c05SJosef Bacik should_put = 1; 240877745c05SJosef Bacik 240977745c05SJosef Bacik cache_save_setup(cache, trans, path); 241077745c05SJosef Bacik 241177745c05SJosef Bacik if (cache->disk_cache_state == BTRFS_DC_SETUP) { 241277745c05SJosef Bacik cache->io_ctl.inode = NULL; 241377745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 241477745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 241577745c05SJosef Bacik num_started++; 241677745c05SJosef Bacik should_put = 0; 241777745c05SJosef Bacik 241877745c05SJosef Bacik /* 241977745c05SJosef Bacik * The cache_write_mutex is protecting the 242077745c05SJosef Bacik * io_list, also refer to the definition of 242177745c05SJosef Bacik * btrfs_transaction::io_bgs for more details 242277745c05SJosef Bacik */ 242377745c05SJosef Bacik list_add_tail(&cache->io_list, io); 242477745c05SJosef Bacik } else { 242577745c05SJosef Bacik /* 242677745c05SJosef Bacik * If we failed to write the cache, the 242777745c05SJosef Bacik * generation will be bad and life goes on 242877745c05SJosef Bacik */ 242977745c05SJosef Bacik ret = 0; 243077745c05SJosef Bacik } 243177745c05SJosef Bacik } 243277745c05SJosef Bacik if (!ret) { 243377745c05SJosef Bacik ret = write_one_cache_group(trans, path, cache); 243477745c05SJosef Bacik /* 243577745c05SJosef Bacik * Our block group might still be attached to the list 243677745c05SJosef Bacik * of new block groups in the transaction handle of some 243777745c05SJosef Bacik * other task (struct btrfs_trans_handle->new_bgs). This 243877745c05SJosef Bacik * means its block group item isn't yet in the extent 243977745c05SJosef Bacik * tree. If this happens ignore the error, as we will 244077745c05SJosef Bacik * try again later in the critical section of the 244177745c05SJosef Bacik * transaction commit. 244277745c05SJosef Bacik */ 244377745c05SJosef Bacik if (ret == -ENOENT) { 244477745c05SJosef Bacik ret = 0; 244577745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 244677745c05SJosef Bacik if (list_empty(&cache->dirty_list)) { 244777745c05SJosef Bacik list_add_tail(&cache->dirty_list, 244877745c05SJosef Bacik &cur_trans->dirty_bgs); 244977745c05SJosef Bacik btrfs_get_block_group(cache); 245077745c05SJosef Bacik drop_reserve = false; 245177745c05SJosef Bacik } 245277745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 245377745c05SJosef Bacik } else if (ret) { 245477745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 245577745c05SJosef Bacik } 245677745c05SJosef Bacik } 245777745c05SJosef Bacik 245877745c05SJosef Bacik /* If it's not on the io list, we need to put the block group */ 245977745c05SJosef Bacik if (should_put) 246077745c05SJosef Bacik btrfs_put_block_group(cache); 246177745c05SJosef Bacik if (drop_reserve) 246277745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 246377745c05SJosef Bacik 246477745c05SJosef Bacik if (ret) 246577745c05SJosef Bacik break; 246677745c05SJosef Bacik 246777745c05SJosef Bacik /* 246877745c05SJosef Bacik * Avoid blocking other tasks for too long. It might even save 246977745c05SJosef Bacik * us from writing caches for block groups that are going to be 247077745c05SJosef Bacik * removed. 247177745c05SJosef Bacik */ 247277745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 247377745c05SJosef Bacik mutex_lock(&trans->transaction->cache_write_mutex); 247477745c05SJosef Bacik } 247577745c05SJosef Bacik mutex_unlock(&trans->transaction->cache_write_mutex); 247677745c05SJosef Bacik 247777745c05SJosef Bacik /* 247877745c05SJosef Bacik * Go through delayed refs for all the stuff we've just kicked off 247977745c05SJosef Bacik * and then loop back (just once) 248077745c05SJosef Bacik */ 248177745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 0); 248277745c05SJosef Bacik if (!ret && loops == 0) { 248377745c05SJosef Bacik loops++; 248477745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 248577745c05SJosef Bacik list_splice_init(&cur_trans->dirty_bgs, &dirty); 248677745c05SJosef Bacik /* 248777745c05SJosef Bacik * dirty_bgs_lock protects us from concurrent block group 248877745c05SJosef Bacik * deletes too (not just cache_write_mutex). 248977745c05SJosef Bacik */ 249077745c05SJosef Bacik if (!list_empty(&dirty)) { 249177745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 249277745c05SJosef Bacik goto again; 249377745c05SJosef Bacik } 249477745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 249577745c05SJosef Bacik } else if (ret < 0) { 249677745c05SJosef Bacik btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 249777745c05SJosef Bacik } 249877745c05SJosef Bacik 249977745c05SJosef Bacik btrfs_free_path(path); 250077745c05SJosef Bacik return ret; 250177745c05SJosef Bacik } 250277745c05SJosef Bacik 250377745c05SJosef Bacik int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 250477745c05SJosef Bacik { 250577745c05SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 250677745c05SJosef Bacik struct btrfs_block_group_cache *cache; 250777745c05SJosef Bacik struct btrfs_transaction *cur_trans = trans->transaction; 250877745c05SJosef Bacik int ret = 0; 250977745c05SJosef Bacik int should_put; 251077745c05SJosef Bacik struct btrfs_path *path; 251177745c05SJosef Bacik struct list_head *io = &cur_trans->io_bgs; 251277745c05SJosef Bacik int num_started = 0; 251377745c05SJosef Bacik 251477745c05SJosef Bacik path = btrfs_alloc_path(); 251577745c05SJosef Bacik if (!path) 251677745c05SJosef Bacik return -ENOMEM; 251777745c05SJosef Bacik 251877745c05SJosef Bacik /* 251977745c05SJosef Bacik * Even though we are in the critical section of the transaction commit, 252077745c05SJosef Bacik * we can still have concurrent tasks adding elements to this 252177745c05SJosef Bacik * transaction's list of dirty block groups. These tasks correspond to 252277745c05SJosef Bacik * endio free space workers started when writeback finishes for a 252377745c05SJosef Bacik * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 252477745c05SJosef Bacik * allocate new block groups as a result of COWing nodes of the root 252577745c05SJosef Bacik * tree when updating the free space inode. The writeback for the space 252677745c05SJosef Bacik * caches is triggered by an earlier call to 252777745c05SJosef Bacik * btrfs_start_dirty_block_groups() and iterations of the following 252877745c05SJosef Bacik * loop. 252977745c05SJosef Bacik * Also we want to do the cache_save_setup first and then run the 253077745c05SJosef Bacik * delayed refs to make sure we have the best chance at doing this all 253177745c05SJosef Bacik * in one shot. 253277745c05SJosef Bacik */ 253377745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 253477745c05SJosef Bacik while (!list_empty(&cur_trans->dirty_bgs)) { 253577745c05SJosef Bacik cache = list_first_entry(&cur_trans->dirty_bgs, 253677745c05SJosef Bacik struct btrfs_block_group_cache, 253777745c05SJosef Bacik dirty_list); 253877745c05SJosef Bacik 253977745c05SJosef Bacik /* 254077745c05SJosef Bacik * This can happen if cache_save_setup re-dirties a block group 254177745c05SJosef Bacik * that is already under IO. Just wait for it to finish and 254277745c05SJosef Bacik * then do it all again 254377745c05SJosef Bacik */ 254477745c05SJosef Bacik if (!list_empty(&cache->io_list)) { 254577745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 254677745c05SJosef Bacik list_del_init(&cache->io_list); 254777745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 254877745c05SJosef Bacik btrfs_put_block_group(cache); 254977745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 255077745c05SJosef Bacik } 255177745c05SJosef Bacik 255277745c05SJosef Bacik /* 255377745c05SJosef Bacik * Don't remove from the dirty list until after we've waited on 255477745c05SJosef Bacik * any pending IO 255577745c05SJosef Bacik */ 255677745c05SJosef Bacik list_del_init(&cache->dirty_list); 255777745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 255877745c05SJosef Bacik should_put = 1; 255977745c05SJosef Bacik 256077745c05SJosef Bacik cache_save_setup(cache, trans, path); 256177745c05SJosef Bacik 256277745c05SJosef Bacik if (!ret) 256377745c05SJosef Bacik ret = btrfs_run_delayed_refs(trans, 256477745c05SJosef Bacik (unsigned long) -1); 256577745c05SJosef Bacik 256677745c05SJosef Bacik if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 256777745c05SJosef Bacik cache->io_ctl.inode = NULL; 256877745c05SJosef Bacik ret = btrfs_write_out_cache(trans, cache, path); 256977745c05SJosef Bacik if (ret == 0 && cache->io_ctl.inode) { 257077745c05SJosef Bacik num_started++; 257177745c05SJosef Bacik should_put = 0; 257277745c05SJosef Bacik list_add_tail(&cache->io_list, io); 257377745c05SJosef Bacik } else { 257477745c05SJosef Bacik /* 257577745c05SJosef Bacik * If we failed to write the cache, the 257677745c05SJosef Bacik * generation will be bad and life goes on 257777745c05SJosef Bacik */ 257877745c05SJosef Bacik ret = 0; 257977745c05SJosef Bacik } 258077745c05SJosef Bacik } 258177745c05SJosef Bacik if (!ret) { 258277745c05SJosef Bacik ret = write_one_cache_group(trans, path, cache); 258377745c05SJosef Bacik /* 258477745c05SJosef Bacik * One of the free space endio workers might have 258577745c05SJosef Bacik * created a new block group while updating a free space 258677745c05SJosef Bacik * cache's inode (at inode.c:btrfs_finish_ordered_io()) 258777745c05SJosef Bacik * and hasn't released its transaction handle yet, in 258877745c05SJosef Bacik * which case the new block group is still attached to 258977745c05SJosef Bacik * its transaction handle and its creation has not 259077745c05SJosef Bacik * finished yet (no block group item in the extent tree 259177745c05SJosef Bacik * yet, etc). If this is the case, wait for all free 259277745c05SJosef Bacik * space endio workers to finish and retry. This is a 259377745c05SJosef Bacik * a very rare case so no need for a more efficient and 259477745c05SJosef Bacik * complex approach. 259577745c05SJosef Bacik */ 259677745c05SJosef Bacik if (ret == -ENOENT) { 259777745c05SJosef Bacik wait_event(cur_trans->writer_wait, 259877745c05SJosef Bacik atomic_read(&cur_trans->num_writers) == 1); 259977745c05SJosef Bacik ret = write_one_cache_group(trans, path, cache); 260077745c05SJosef Bacik } 260177745c05SJosef Bacik if (ret) 260277745c05SJosef Bacik btrfs_abort_transaction(trans, ret); 260377745c05SJosef Bacik } 260477745c05SJosef Bacik 260577745c05SJosef Bacik /* If its not on the io list, we need to put the block group */ 260677745c05SJosef Bacik if (should_put) 260777745c05SJosef Bacik btrfs_put_block_group(cache); 260877745c05SJosef Bacik btrfs_delayed_refs_rsv_release(fs_info, 1); 260977745c05SJosef Bacik spin_lock(&cur_trans->dirty_bgs_lock); 261077745c05SJosef Bacik } 261177745c05SJosef Bacik spin_unlock(&cur_trans->dirty_bgs_lock); 261277745c05SJosef Bacik 261377745c05SJosef Bacik /* 261477745c05SJosef Bacik * Refer to the definition of io_bgs member for details why it's safe 261577745c05SJosef Bacik * to use it without any locking 261677745c05SJosef Bacik */ 261777745c05SJosef Bacik while (!list_empty(io)) { 261877745c05SJosef Bacik cache = list_first_entry(io, struct btrfs_block_group_cache, 261977745c05SJosef Bacik io_list); 262077745c05SJosef Bacik list_del_init(&cache->io_list); 262177745c05SJosef Bacik btrfs_wait_cache_io(trans, cache, path); 262277745c05SJosef Bacik btrfs_put_block_group(cache); 262377745c05SJosef Bacik } 262477745c05SJosef Bacik 262577745c05SJosef Bacik btrfs_free_path(path); 262677745c05SJosef Bacik return ret; 262777745c05SJosef Bacik } 2628606d1bf1SJosef Bacik 2629606d1bf1SJosef Bacik int btrfs_update_block_group(struct btrfs_trans_handle *trans, 2630606d1bf1SJosef Bacik u64 bytenr, u64 num_bytes, int alloc) 2631606d1bf1SJosef Bacik { 2632606d1bf1SJosef Bacik struct btrfs_fs_info *info = trans->fs_info; 2633606d1bf1SJosef Bacik struct btrfs_block_group_cache *cache = NULL; 2634606d1bf1SJosef Bacik u64 total = num_bytes; 2635606d1bf1SJosef Bacik u64 old_val; 2636606d1bf1SJosef Bacik u64 byte_in_group; 2637606d1bf1SJosef Bacik int factor; 2638606d1bf1SJosef Bacik int ret = 0; 2639606d1bf1SJosef Bacik 2640606d1bf1SJosef Bacik /* Block accounting for super block */ 2641606d1bf1SJosef Bacik spin_lock(&info->delalloc_root_lock); 2642606d1bf1SJosef Bacik old_val = btrfs_super_bytes_used(info->super_copy); 2643606d1bf1SJosef Bacik if (alloc) 2644606d1bf1SJosef Bacik old_val += num_bytes; 2645606d1bf1SJosef Bacik else 2646606d1bf1SJosef Bacik old_val -= num_bytes; 2647606d1bf1SJosef Bacik btrfs_set_super_bytes_used(info->super_copy, old_val); 2648606d1bf1SJosef Bacik spin_unlock(&info->delalloc_root_lock); 2649606d1bf1SJosef Bacik 2650606d1bf1SJosef Bacik while (total) { 2651606d1bf1SJosef Bacik cache = btrfs_lookup_block_group(info, bytenr); 2652606d1bf1SJosef Bacik if (!cache) { 2653606d1bf1SJosef Bacik ret = -ENOENT; 2654606d1bf1SJosef Bacik break; 2655606d1bf1SJosef Bacik } 2656606d1bf1SJosef Bacik factor = btrfs_bg_type_to_factor(cache->flags); 2657606d1bf1SJosef Bacik 2658606d1bf1SJosef Bacik /* 2659606d1bf1SJosef Bacik * If this block group has free space cache written out, we 2660606d1bf1SJosef Bacik * need to make sure to load it if we are removing space. This 2661606d1bf1SJosef Bacik * is because we need the unpinning stage to actually add the 2662606d1bf1SJosef Bacik * space back to the block group, otherwise we will leak space. 2663606d1bf1SJosef Bacik */ 2664606d1bf1SJosef Bacik if (!alloc && cache->cached == BTRFS_CACHE_NO) 2665606d1bf1SJosef Bacik btrfs_cache_block_group(cache, 1); 2666606d1bf1SJosef Bacik 2667606d1bf1SJosef Bacik byte_in_group = bytenr - cache->key.objectid; 2668606d1bf1SJosef Bacik WARN_ON(byte_in_group > cache->key.offset); 2669606d1bf1SJosef Bacik 2670606d1bf1SJosef Bacik spin_lock(&cache->space_info->lock); 2671606d1bf1SJosef Bacik spin_lock(&cache->lock); 2672606d1bf1SJosef Bacik 2673606d1bf1SJosef Bacik if (btrfs_test_opt(info, SPACE_CACHE) && 2674606d1bf1SJosef Bacik cache->disk_cache_state < BTRFS_DC_CLEAR) 2675606d1bf1SJosef Bacik cache->disk_cache_state = BTRFS_DC_CLEAR; 2676606d1bf1SJosef Bacik 2677606d1bf1SJosef Bacik old_val = btrfs_block_group_used(&cache->item); 2678606d1bf1SJosef Bacik num_bytes = min(total, cache->key.offset - byte_in_group); 2679606d1bf1SJosef Bacik if (alloc) { 2680606d1bf1SJosef Bacik old_val += num_bytes; 2681606d1bf1SJosef Bacik btrfs_set_block_group_used(&cache->item, old_val); 2682606d1bf1SJosef Bacik cache->reserved -= num_bytes; 2683606d1bf1SJosef Bacik cache->space_info->bytes_reserved -= num_bytes; 2684606d1bf1SJosef Bacik cache->space_info->bytes_used += num_bytes; 2685606d1bf1SJosef Bacik cache->space_info->disk_used += num_bytes * factor; 2686606d1bf1SJosef Bacik spin_unlock(&cache->lock); 2687606d1bf1SJosef Bacik spin_unlock(&cache->space_info->lock); 2688606d1bf1SJosef Bacik } else { 2689606d1bf1SJosef Bacik old_val -= num_bytes; 2690606d1bf1SJosef Bacik btrfs_set_block_group_used(&cache->item, old_val); 2691606d1bf1SJosef Bacik cache->pinned += num_bytes; 2692606d1bf1SJosef Bacik btrfs_space_info_update_bytes_pinned(info, 2693606d1bf1SJosef Bacik cache->space_info, num_bytes); 2694606d1bf1SJosef Bacik cache->space_info->bytes_used -= num_bytes; 2695606d1bf1SJosef Bacik cache->space_info->disk_used -= num_bytes * factor; 2696606d1bf1SJosef Bacik spin_unlock(&cache->lock); 2697606d1bf1SJosef Bacik spin_unlock(&cache->space_info->lock); 2698606d1bf1SJosef Bacik 2699606d1bf1SJosef Bacik trace_btrfs_space_reservation(info, "pinned", 2700606d1bf1SJosef Bacik cache->space_info->flags, 2701606d1bf1SJosef Bacik num_bytes, 1); 2702606d1bf1SJosef Bacik percpu_counter_add_batch( 2703606d1bf1SJosef Bacik &cache->space_info->total_bytes_pinned, 2704606d1bf1SJosef Bacik num_bytes, 2705606d1bf1SJosef Bacik BTRFS_TOTAL_BYTES_PINNED_BATCH); 2706606d1bf1SJosef Bacik set_extent_dirty(info->pinned_extents, 2707606d1bf1SJosef Bacik bytenr, bytenr + num_bytes - 1, 2708606d1bf1SJosef Bacik GFP_NOFS | __GFP_NOFAIL); 2709606d1bf1SJosef Bacik } 2710606d1bf1SJosef Bacik 2711606d1bf1SJosef Bacik spin_lock(&trans->transaction->dirty_bgs_lock); 2712606d1bf1SJosef Bacik if (list_empty(&cache->dirty_list)) { 2713606d1bf1SJosef Bacik list_add_tail(&cache->dirty_list, 2714606d1bf1SJosef Bacik &trans->transaction->dirty_bgs); 2715606d1bf1SJosef Bacik trans->delayed_ref_updates++; 2716606d1bf1SJosef Bacik btrfs_get_block_group(cache); 2717606d1bf1SJosef Bacik } 2718606d1bf1SJosef Bacik spin_unlock(&trans->transaction->dirty_bgs_lock); 2719606d1bf1SJosef Bacik 2720606d1bf1SJosef Bacik /* 2721606d1bf1SJosef Bacik * No longer have used bytes in this block group, queue it for 2722606d1bf1SJosef Bacik * deletion. We do this after adding the block group to the 2723606d1bf1SJosef Bacik * dirty list to avoid races between cleaner kthread and space 2724606d1bf1SJosef Bacik * cache writeout. 2725606d1bf1SJosef Bacik */ 2726606d1bf1SJosef Bacik if (!alloc && old_val == 0) 2727606d1bf1SJosef Bacik btrfs_mark_bg_unused(cache); 2728606d1bf1SJosef Bacik 2729606d1bf1SJosef Bacik btrfs_put_block_group(cache); 2730606d1bf1SJosef Bacik total -= num_bytes; 2731606d1bf1SJosef Bacik bytenr += num_bytes; 2732606d1bf1SJosef Bacik } 2733606d1bf1SJosef Bacik 2734606d1bf1SJosef Bacik /* Modified block groups are accounted for in the delayed_refs_rsv. */ 2735606d1bf1SJosef Bacik btrfs_update_delayed_refs_rsv(trans); 2736606d1bf1SJosef Bacik return ret; 2737606d1bf1SJosef Bacik } 2738606d1bf1SJosef Bacik 2739606d1bf1SJosef Bacik /** 2740606d1bf1SJosef Bacik * btrfs_add_reserved_bytes - update the block_group and space info counters 2741606d1bf1SJosef Bacik * @cache: The cache we are manipulating 2742606d1bf1SJosef Bacik * @ram_bytes: The number of bytes of file content, and will be same to 2743606d1bf1SJosef Bacik * @num_bytes except for the compress path. 2744606d1bf1SJosef Bacik * @num_bytes: The number of bytes in question 2745606d1bf1SJosef Bacik * @delalloc: The blocks are allocated for the delalloc write 2746606d1bf1SJosef Bacik * 2747606d1bf1SJosef Bacik * This is called by the allocator when it reserves space. If this is a 2748606d1bf1SJosef Bacik * reservation and the block group has become read only we cannot make the 2749606d1bf1SJosef Bacik * reservation and return -EAGAIN, otherwise this function always succeeds. 2750606d1bf1SJosef Bacik */ 2751606d1bf1SJosef Bacik int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, 2752606d1bf1SJosef Bacik u64 ram_bytes, u64 num_bytes, int delalloc) 2753606d1bf1SJosef Bacik { 2754606d1bf1SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 2755606d1bf1SJosef Bacik int ret = 0; 2756606d1bf1SJosef Bacik 2757606d1bf1SJosef Bacik spin_lock(&space_info->lock); 2758606d1bf1SJosef Bacik spin_lock(&cache->lock); 2759606d1bf1SJosef Bacik if (cache->ro) { 2760606d1bf1SJosef Bacik ret = -EAGAIN; 2761606d1bf1SJosef Bacik } else { 2762606d1bf1SJosef Bacik cache->reserved += num_bytes; 2763606d1bf1SJosef Bacik space_info->bytes_reserved += num_bytes; 2764606d1bf1SJosef Bacik btrfs_space_info_update_bytes_may_use(cache->fs_info, 2765606d1bf1SJosef Bacik space_info, -ram_bytes); 2766606d1bf1SJosef Bacik if (delalloc) 2767606d1bf1SJosef Bacik cache->delalloc_bytes += num_bytes; 2768606d1bf1SJosef Bacik } 2769606d1bf1SJosef Bacik spin_unlock(&cache->lock); 2770606d1bf1SJosef Bacik spin_unlock(&space_info->lock); 2771606d1bf1SJosef Bacik return ret; 2772606d1bf1SJosef Bacik } 2773606d1bf1SJosef Bacik 2774606d1bf1SJosef Bacik /** 2775606d1bf1SJosef Bacik * btrfs_free_reserved_bytes - update the block_group and space info counters 2776606d1bf1SJosef Bacik * @cache: The cache we are manipulating 2777606d1bf1SJosef Bacik * @num_bytes: The number of bytes in question 2778606d1bf1SJosef Bacik * @delalloc: The blocks are allocated for the delalloc write 2779606d1bf1SJosef Bacik * 2780606d1bf1SJosef Bacik * This is called by somebody who is freeing space that was never actually used 2781606d1bf1SJosef Bacik * on disk. For example if you reserve some space for a new leaf in transaction 2782606d1bf1SJosef Bacik * A and before transaction A commits you free that leaf, you call this with 2783606d1bf1SJosef Bacik * reserve set to 0 in order to clear the reservation. 2784606d1bf1SJosef Bacik */ 2785606d1bf1SJosef Bacik void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, 2786606d1bf1SJosef Bacik u64 num_bytes, int delalloc) 2787606d1bf1SJosef Bacik { 2788606d1bf1SJosef Bacik struct btrfs_space_info *space_info = cache->space_info; 2789606d1bf1SJosef Bacik 2790606d1bf1SJosef Bacik spin_lock(&space_info->lock); 2791606d1bf1SJosef Bacik spin_lock(&cache->lock); 2792606d1bf1SJosef Bacik if (cache->ro) 2793606d1bf1SJosef Bacik space_info->bytes_readonly += num_bytes; 2794606d1bf1SJosef Bacik cache->reserved -= num_bytes; 2795606d1bf1SJosef Bacik space_info->bytes_reserved -= num_bytes; 2796606d1bf1SJosef Bacik space_info->max_extent_size = 0; 2797606d1bf1SJosef Bacik 2798606d1bf1SJosef Bacik if (delalloc) 2799606d1bf1SJosef Bacik cache->delalloc_bytes -= num_bytes; 2800606d1bf1SJosef Bacik spin_unlock(&cache->lock); 2801606d1bf1SJosef Bacik spin_unlock(&space_info->lock); 2802606d1bf1SJosef Bacik } 280307730d87SJosef Bacik 280407730d87SJosef Bacik static void force_metadata_allocation(struct btrfs_fs_info *info) 280507730d87SJosef Bacik { 280607730d87SJosef Bacik struct list_head *head = &info->space_info; 280707730d87SJosef Bacik struct btrfs_space_info *found; 280807730d87SJosef Bacik 280907730d87SJosef Bacik rcu_read_lock(); 281007730d87SJosef Bacik list_for_each_entry_rcu(found, head, list) { 281107730d87SJosef Bacik if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 281207730d87SJosef Bacik found->force_alloc = CHUNK_ALLOC_FORCE; 281307730d87SJosef Bacik } 281407730d87SJosef Bacik rcu_read_unlock(); 281507730d87SJosef Bacik } 281607730d87SJosef Bacik 281707730d87SJosef Bacik static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 281807730d87SJosef Bacik struct btrfs_space_info *sinfo, int force) 281907730d87SJosef Bacik { 282007730d87SJosef Bacik u64 bytes_used = btrfs_space_info_used(sinfo, false); 282107730d87SJosef Bacik u64 thresh; 282207730d87SJosef Bacik 282307730d87SJosef Bacik if (force == CHUNK_ALLOC_FORCE) 282407730d87SJosef Bacik return 1; 282507730d87SJosef Bacik 282607730d87SJosef Bacik /* 282707730d87SJosef Bacik * in limited mode, we want to have some free space up to 282807730d87SJosef Bacik * about 1% of the FS size. 282907730d87SJosef Bacik */ 283007730d87SJosef Bacik if (force == CHUNK_ALLOC_LIMITED) { 283107730d87SJosef Bacik thresh = btrfs_super_total_bytes(fs_info->super_copy); 283207730d87SJosef Bacik thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1)); 283307730d87SJosef Bacik 283407730d87SJosef Bacik if (sinfo->total_bytes - bytes_used < thresh) 283507730d87SJosef Bacik return 1; 283607730d87SJosef Bacik } 283707730d87SJosef Bacik 283807730d87SJosef Bacik if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8)) 283907730d87SJosef Bacik return 0; 284007730d87SJosef Bacik return 1; 284107730d87SJosef Bacik } 284207730d87SJosef Bacik 284307730d87SJosef Bacik int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) 284407730d87SJosef Bacik { 284507730d87SJosef Bacik u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); 284607730d87SJosef Bacik 284707730d87SJosef Bacik return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 284807730d87SJosef Bacik } 284907730d87SJosef Bacik 285007730d87SJosef Bacik /* 285107730d87SJosef Bacik * If force is CHUNK_ALLOC_FORCE: 285207730d87SJosef Bacik * - return 1 if it successfully allocates a chunk, 285307730d87SJosef Bacik * - return errors including -ENOSPC otherwise. 285407730d87SJosef Bacik * If force is NOT CHUNK_ALLOC_FORCE: 285507730d87SJosef Bacik * - return 0 if it doesn't need to allocate a new chunk, 285607730d87SJosef Bacik * - return 1 if it successfully allocates a chunk, 285707730d87SJosef Bacik * - return errors including -ENOSPC otherwise. 285807730d87SJosef Bacik */ 285907730d87SJosef Bacik int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, 286007730d87SJosef Bacik enum btrfs_chunk_alloc_enum force) 286107730d87SJosef Bacik { 286207730d87SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 286307730d87SJosef Bacik struct btrfs_space_info *space_info; 286407730d87SJosef Bacik bool wait_for_alloc = false; 286507730d87SJosef Bacik bool should_alloc = false; 286607730d87SJosef Bacik int ret = 0; 286707730d87SJosef Bacik 286807730d87SJosef Bacik /* Don't re-enter if we're already allocating a chunk */ 286907730d87SJosef Bacik if (trans->allocating_chunk) 287007730d87SJosef Bacik return -ENOSPC; 287107730d87SJosef Bacik 287207730d87SJosef Bacik space_info = btrfs_find_space_info(fs_info, flags); 287307730d87SJosef Bacik ASSERT(space_info); 287407730d87SJosef Bacik 287507730d87SJosef Bacik do { 287607730d87SJosef Bacik spin_lock(&space_info->lock); 287707730d87SJosef Bacik if (force < space_info->force_alloc) 287807730d87SJosef Bacik force = space_info->force_alloc; 287907730d87SJosef Bacik should_alloc = should_alloc_chunk(fs_info, space_info, force); 288007730d87SJosef Bacik if (space_info->full) { 288107730d87SJosef Bacik /* No more free physical space */ 288207730d87SJosef Bacik if (should_alloc) 288307730d87SJosef Bacik ret = -ENOSPC; 288407730d87SJosef Bacik else 288507730d87SJosef Bacik ret = 0; 288607730d87SJosef Bacik spin_unlock(&space_info->lock); 288707730d87SJosef Bacik return ret; 288807730d87SJosef Bacik } else if (!should_alloc) { 288907730d87SJosef Bacik spin_unlock(&space_info->lock); 289007730d87SJosef Bacik return 0; 289107730d87SJosef Bacik } else if (space_info->chunk_alloc) { 289207730d87SJosef Bacik /* 289307730d87SJosef Bacik * Someone is already allocating, so we need to block 289407730d87SJosef Bacik * until this someone is finished and then loop to 289507730d87SJosef Bacik * recheck if we should continue with our allocation 289607730d87SJosef Bacik * attempt. 289707730d87SJosef Bacik */ 289807730d87SJosef Bacik wait_for_alloc = true; 289907730d87SJosef Bacik spin_unlock(&space_info->lock); 290007730d87SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 290107730d87SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 290207730d87SJosef Bacik } else { 290307730d87SJosef Bacik /* Proceed with allocation */ 290407730d87SJosef Bacik space_info->chunk_alloc = 1; 290507730d87SJosef Bacik wait_for_alloc = false; 290607730d87SJosef Bacik spin_unlock(&space_info->lock); 290707730d87SJosef Bacik } 290807730d87SJosef Bacik 290907730d87SJosef Bacik cond_resched(); 291007730d87SJosef Bacik } while (wait_for_alloc); 291107730d87SJosef Bacik 291207730d87SJosef Bacik mutex_lock(&fs_info->chunk_mutex); 291307730d87SJosef Bacik trans->allocating_chunk = true; 291407730d87SJosef Bacik 291507730d87SJosef Bacik /* 291607730d87SJosef Bacik * If we have mixed data/metadata chunks we want to make sure we keep 291707730d87SJosef Bacik * allocating mixed chunks instead of individual chunks. 291807730d87SJosef Bacik */ 291907730d87SJosef Bacik if (btrfs_mixed_space_info(space_info)) 292007730d87SJosef Bacik flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 292107730d87SJosef Bacik 292207730d87SJosef Bacik /* 292307730d87SJosef Bacik * if we're doing a data chunk, go ahead and make sure that 292407730d87SJosef Bacik * we keep a reasonable number of metadata chunks allocated in the 292507730d87SJosef Bacik * FS as well. 292607730d87SJosef Bacik */ 292707730d87SJosef Bacik if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 292807730d87SJosef Bacik fs_info->data_chunk_allocations++; 292907730d87SJosef Bacik if (!(fs_info->data_chunk_allocations % 293007730d87SJosef Bacik fs_info->metadata_ratio)) 293107730d87SJosef Bacik force_metadata_allocation(fs_info); 293207730d87SJosef Bacik } 293307730d87SJosef Bacik 293407730d87SJosef Bacik /* 293507730d87SJosef Bacik * Check if we have enough space in SYSTEM chunk because we may need 293607730d87SJosef Bacik * to update devices. 293707730d87SJosef Bacik */ 293807730d87SJosef Bacik check_system_chunk(trans, flags); 293907730d87SJosef Bacik 294007730d87SJosef Bacik ret = btrfs_alloc_chunk(trans, flags); 294107730d87SJosef Bacik trans->allocating_chunk = false; 294207730d87SJosef Bacik 294307730d87SJosef Bacik spin_lock(&space_info->lock); 294407730d87SJosef Bacik if (ret < 0) { 294507730d87SJosef Bacik if (ret == -ENOSPC) 294607730d87SJosef Bacik space_info->full = 1; 294707730d87SJosef Bacik else 294807730d87SJosef Bacik goto out; 294907730d87SJosef Bacik } else { 295007730d87SJosef Bacik ret = 1; 295107730d87SJosef Bacik space_info->max_extent_size = 0; 295207730d87SJosef Bacik } 295307730d87SJosef Bacik 295407730d87SJosef Bacik space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 295507730d87SJosef Bacik out: 295607730d87SJosef Bacik space_info->chunk_alloc = 0; 295707730d87SJosef Bacik spin_unlock(&space_info->lock); 295807730d87SJosef Bacik mutex_unlock(&fs_info->chunk_mutex); 295907730d87SJosef Bacik /* 296007730d87SJosef Bacik * When we allocate a new chunk we reserve space in the chunk block 296107730d87SJosef Bacik * reserve to make sure we can COW nodes/leafs in the chunk tree or 296207730d87SJosef Bacik * add new nodes/leafs to it if we end up needing to do it when 296307730d87SJosef Bacik * inserting the chunk item and updating device items as part of the 296407730d87SJosef Bacik * second phase of chunk allocation, performed by 296507730d87SJosef Bacik * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a 296607730d87SJosef Bacik * large number of new block groups to create in our transaction 296707730d87SJosef Bacik * handle's new_bgs list to avoid exhausting the chunk block reserve 296807730d87SJosef Bacik * in extreme cases - like having a single transaction create many new 296907730d87SJosef Bacik * block groups when starting to write out the free space caches of all 297007730d87SJosef Bacik * the block groups that were made dirty during the lifetime of the 297107730d87SJosef Bacik * transaction. 297207730d87SJosef Bacik */ 297307730d87SJosef Bacik if (trans->chunk_bytes_reserved >= (u64)SZ_2M) 297407730d87SJosef Bacik btrfs_create_pending_block_groups(trans); 297507730d87SJosef Bacik 297607730d87SJosef Bacik return ret; 297707730d87SJosef Bacik } 297807730d87SJosef Bacik 297907730d87SJosef Bacik static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) 298007730d87SJosef Bacik { 298107730d87SJosef Bacik u64 num_dev; 298207730d87SJosef Bacik 298307730d87SJosef Bacik num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; 298407730d87SJosef Bacik if (!num_dev) 298507730d87SJosef Bacik num_dev = fs_info->fs_devices->rw_devices; 298607730d87SJosef Bacik 298707730d87SJosef Bacik return num_dev; 298807730d87SJosef Bacik } 298907730d87SJosef Bacik 299007730d87SJosef Bacik /* 299107730d87SJosef Bacik * If @is_allocation is true, reserve space in the system space info necessary 299207730d87SJosef Bacik * for allocating a chunk, otherwise if it's false, reserve space necessary for 299307730d87SJosef Bacik * removing a chunk. 299407730d87SJosef Bacik */ 299507730d87SJosef Bacik void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) 299607730d87SJosef Bacik { 299707730d87SJosef Bacik struct btrfs_fs_info *fs_info = trans->fs_info; 299807730d87SJosef Bacik struct btrfs_space_info *info; 299907730d87SJosef Bacik u64 left; 300007730d87SJosef Bacik u64 thresh; 300107730d87SJosef Bacik int ret = 0; 300207730d87SJosef Bacik u64 num_devs; 300307730d87SJosef Bacik 300407730d87SJosef Bacik /* 300507730d87SJosef Bacik * Needed because we can end up allocating a system chunk and for an 300607730d87SJosef Bacik * atomic and race free space reservation in the chunk block reserve. 300707730d87SJosef Bacik */ 300807730d87SJosef Bacik lockdep_assert_held(&fs_info->chunk_mutex); 300907730d87SJosef Bacik 301007730d87SJosef Bacik info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 301107730d87SJosef Bacik spin_lock(&info->lock); 301207730d87SJosef Bacik left = info->total_bytes - btrfs_space_info_used(info, true); 301307730d87SJosef Bacik spin_unlock(&info->lock); 301407730d87SJosef Bacik 301507730d87SJosef Bacik num_devs = get_profile_num_devs(fs_info, type); 301607730d87SJosef Bacik 301707730d87SJosef Bacik /* num_devs device items to update and 1 chunk item to add or remove */ 301807730d87SJosef Bacik thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) + 301907730d87SJosef Bacik btrfs_calc_trans_metadata_size(fs_info, 1); 302007730d87SJosef Bacik 302107730d87SJosef Bacik if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 302207730d87SJosef Bacik btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", 302307730d87SJosef Bacik left, thresh, type); 302407730d87SJosef Bacik btrfs_dump_space_info(fs_info, info, 0, 0); 302507730d87SJosef Bacik } 302607730d87SJosef Bacik 302707730d87SJosef Bacik if (left < thresh) { 302807730d87SJosef Bacik u64 flags = btrfs_system_alloc_profile(fs_info); 302907730d87SJosef Bacik 303007730d87SJosef Bacik /* 303107730d87SJosef Bacik * Ignore failure to create system chunk. We might end up not 303207730d87SJosef Bacik * needing it, as we might not need to COW all nodes/leafs from 303307730d87SJosef Bacik * the paths we visit in the chunk tree (they were already COWed 303407730d87SJosef Bacik * or created in the current transaction for example). 303507730d87SJosef Bacik */ 303607730d87SJosef Bacik ret = btrfs_alloc_chunk(trans, flags); 303707730d87SJosef Bacik } 303807730d87SJosef Bacik 303907730d87SJosef Bacik if (!ret) { 304007730d87SJosef Bacik ret = btrfs_block_rsv_add(fs_info->chunk_root, 304107730d87SJosef Bacik &fs_info->chunk_block_rsv, 304207730d87SJosef Bacik thresh, BTRFS_RESERVE_NO_FLUSH); 304307730d87SJosef Bacik if (!ret) 304407730d87SJosef Bacik trans->chunk_bytes_reserved += thresh; 304507730d87SJosef Bacik } 304607730d87SJosef Bacik } 304707730d87SJosef Bacik 3048*3e43c279SJosef Bacik void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 3049*3e43c279SJosef Bacik { 3050*3e43c279SJosef Bacik struct btrfs_block_group_cache *block_group; 3051*3e43c279SJosef Bacik u64 last = 0; 3052*3e43c279SJosef Bacik 3053*3e43c279SJosef Bacik while (1) { 3054*3e43c279SJosef Bacik struct inode *inode; 3055*3e43c279SJosef Bacik 3056*3e43c279SJosef Bacik block_group = btrfs_lookup_first_block_group(info, last); 3057*3e43c279SJosef Bacik while (block_group) { 3058*3e43c279SJosef Bacik btrfs_wait_block_group_cache_done(block_group); 3059*3e43c279SJosef Bacik spin_lock(&block_group->lock); 3060*3e43c279SJosef Bacik if (block_group->iref) 3061*3e43c279SJosef Bacik break; 3062*3e43c279SJosef Bacik spin_unlock(&block_group->lock); 3063*3e43c279SJosef Bacik block_group = btrfs_next_block_group(block_group); 3064*3e43c279SJosef Bacik } 3065*3e43c279SJosef Bacik if (!block_group) { 3066*3e43c279SJosef Bacik if (last == 0) 3067*3e43c279SJosef Bacik break; 3068*3e43c279SJosef Bacik last = 0; 3069*3e43c279SJosef Bacik continue; 3070*3e43c279SJosef Bacik } 3071*3e43c279SJosef Bacik 3072*3e43c279SJosef Bacik inode = block_group->inode; 3073*3e43c279SJosef Bacik block_group->iref = 0; 3074*3e43c279SJosef Bacik block_group->inode = NULL; 3075*3e43c279SJosef Bacik spin_unlock(&block_group->lock); 3076*3e43c279SJosef Bacik ASSERT(block_group->io_ctl.inode == NULL); 3077*3e43c279SJosef Bacik iput(inode); 3078*3e43c279SJosef Bacik last = block_group->key.objectid + block_group->key.offset; 3079*3e43c279SJosef Bacik btrfs_put_block_group(block_group); 3080*3e43c279SJosef Bacik } 3081*3e43c279SJosef Bacik } 3082*3e43c279SJosef Bacik 3083*3e43c279SJosef Bacik /* 3084*3e43c279SJosef Bacik * Must be called only after stopping all workers, since we could have block 3085*3e43c279SJosef Bacik * group caching kthreads running, and therefore they could race with us if we 3086*3e43c279SJosef Bacik * freed the block groups before stopping them. 3087*3e43c279SJosef Bacik */ 3088*3e43c279SJosef Bacik int btrfs_free_block_groups(struct btrfs_fs_info *info) 3089*3e43c279SJosef Bacik { 3090*3e43c279SJosef Bacik struct btrfs_block_group_cache *block_group; 3091*3e43c279SJosef Bacik struct btrfs_space_info *space_info; 3092*3e43c279SJosef Bacik struct btrfs_caching_control *caching_ctl; 3093*3e43c279SJosef Bacik struct rb_node *n; 3094*3e43c279SJosef Bacik 3095*3e43c279SJosef Bacik down_write(&info->commit_root_sem); 3096*3e43c279SJosef Bacik while (!list_empty(&info->caching_block_groups)) { 3097*3e43c279SJosef Bacik caching_ctl = list_entry(info->caching_block_groups.next, 3098*3e43c279SJosef Bacik struct btrfs_caching_control, list); 3099*3e43c279SJosef Bacik list_del(&caching_ctl->list); 3100*3e43c279SJosef Bacik btrfs_put_caching_control(caching_ctl); 3101*3e43c279SJosef Bacik } 3102*3e43c279SJosef Bacik up_write(&info->commit_root_sem); 3103*3e43c279SJosef Bacik 3104*3e43c279SJosef Bacik spin_lock(&info->unused_bgs_lock); 3105*3e43c279SJosef Bacik while (!list_empty(&info->unused_bgs)) { 3106*3e43c279SJosef Bacik block_group = list_first_entry(&info->unused_bgs, 3107*3e43c279SJosef Bacik struct btrfs_block_group_cache, 3108*3e43c279SJosef Bacik bg_list); 3109*3e43c279SJosef Bacik list_del_init(&block_group->bg_list); 3110*3e43c279SJosef Bacik btrfs_put_block_group(block_group); 3111*3e43c279SJosef Bacik } 3112*3e43c279SJosef Bacik spin_unlock(&info->unused_bgs_lock); 3113*3e43c279SJosef Bacik 3114*3e43c279SJosef Bacik spin_lock(&info->block_group_cache_lock); 3115*3e43c279SJosef Bacik while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { 3116*3e43c279SJosef Bacik block_group = rb_entry(n, struct btrfs_block_group_cache, 3117*3e43c279SJosef Bacik cache_node); 3118*3e43c279SJosef Bacik rb_erase(&block_group->cache_node, 3119*3e43c279SJosef Bacik &info->block_group_cache_tree); 3120*3e43c279SJosef Bacik RB_CLEAR_NODE(&block_group->cache_node); 3121*3e43c279SJosef Bacik spin_unlock(&info->block_group_cache_lock); 3122*3e43c279SJosef Bacik 3123*3e43c279SJosef Bacik down_write(&block_group->space_info->groups_sem); 3124*3e43c279SJosef Bacik list_del(&block_group->list); 3125*3e43c279SJosef Bacik up_write(&block_group->space_info->groups_sem); 3126*3e43c279SJosef Bacik 3127*3e43c279SJosef Bacik /* 3128*3e43c279SJosef Bacik * We haven't cached this block group, which means we could 3129*3e43c279SJosef Bacik * possibly have excluded extents on this block group. 3130*3e43c279SJosef Bacik */ 3131*3e43c279SJosef Bacik if (block_group->cached == BTRFS_CACHE_NO || 3132*3e43c279SJosef Bacik block_group->cached == BTRFS_CACHE_ERROR) 3133*3e43c279SJosef Bacik btrfs_free_excluded_extents(block_group); 3134*3e43c279SJosef Bacik 3135*3e43c279SJosef Bacik btrfs_remove_free_space_cache(block_group); 3136*3e43c279SJosef Bacik ASSERT(block_group->cached != BTRFS_CACHE_STARTED); 3137*3e43c279SJosef Bacik ASSERT(list_empty(&block_group->dirty_list)); 3138*3e43c279SJosef Bacik ASSERT(list_empty(&block_group->io_list)); 3139*3e43c279SJosef Bacik ASSERT(list_empty(&block_group->bg_list)); 3140*3e43c279SJosef Bacik ASSERT(atomic_read(&block_group->count) == 1); 3141*3e43c279SJosef Bacik btrfs_put_block_group(block_group); 3142*3e43c279SJosef Bacik 3143*3e43c279SJosef Bacik spin_lock(&info->block_group_cache_lock); 3144*3e43c279SJosef Bacik } 3145*3e43c279SJosef Bacik spin_unlock(&info->block_group_cache_lock); 3146*3e43c279SJosef Bacik 3147*3e43c279SJosef Bacik /* 3148*3e43c279SJosef Bacik * Now that all the block groups are freed, go through and free all the 3149*3e43c279SJosef Bacik * space_info structs. This is only called during the final stages of 3150*3e43c279SJosef Bacik * unmount, and so we know nobody is using them. We call 3151*3e43c279SJosef Bacik * synchronize_rcu() once before we start, just to be on the safe side. 3152*3e43c279SJosef Bacik */ 3153*3e43c279SJosef Bacik synchronize_rcu(); 3154*3e43c279SJosef Bacik 3155*3e43c279SJosef Bacik btrfs_release_global_block_rsv(info); 3156*3e43c279SJosef Bacik 3157*3e43c279SJosef Bacik while (!list_empty(&info->space_info)) { 3158*3e43c279SJosef Bacik space_info = list_entry(info->space_info.next, 3159*3e43c279SJosef Bacik struct btrfs_space_info, 3160*3e43c279SJosef Bacik list); 3161*3e43c279SJosef Bacik 3162*3e43c279SJosef Bacik /* 3163*3e43c279SJosef Bacik * Do not hide this behind enospc_debug, this is actually 3164*3e43c279SJosef Bacik * important and indicates a real bug if this happens. 3165*3e43c279SJosef Bacik */ 3166*3e43c279SJosef Bacik if (WARN_ON(space_info->bytes_pinned > 0 || 3167*3e43c279SJosef Bacik space_info->bytes_reserved > 0 || 3168*3e43c279SJosef Bacik space_info->bytes_may_use > 0)) 3169*3e43c279SJosef Bacik btrfs_dump_space_info(info, space_info, 0, 0); 3170*3e43c279SJosef Bacik list_del(&space_info->list); 3171*3e43c279SJosef Bacik btrfs_sysfs_remove_space_info(space_info); 3172*3e43c279SJosef Bacik } 3173*3e43c279SJosef Bacik return 0; 3174*3e43c279SJosef Bacik } 3175