1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/sizes.h> 4 #include <linux/list_sort.h> 5 #include "misc.h" 6 #include "ctree.h" 7 #include "block-group.h" 8 #include "space-info.h" 9 #include "disk-io.h" 10 #include "free-space-cache.h" 11 #include "free-space-tree.h" 12 #include "volumes.h" 13 #include "transaction.h" 14 #include "ref-verify.h" 15 #include "sysfs.h" 16 #include "tree-log.h" 17 #include "delalloc-space.h" 18 #include "discard.h" 19 #include "raid56.h" 20 #include "zoned.h" 21 #include "fs.h" 22 #include "accessors.h" 23 #include "extent-tree.h" 24 25 #ifdef CONFIG_BTRFS_DEBUG 26 int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group) 27 { 28 struct btrfs_fs_info *fs_info = block_group->fs_info; 29 30 return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) && 31 block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || 32 (btrfs_test_opt(fs_info, FRAGMENT_DATA) && 33 block_group->flags & BTRFS_BLOCK_GROUP_DATA); 34 } 35 #endif 36 37 static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group) 38 { 39 /* The meta_write_pointer is available only on the zoned setup. */ 40 if (!btrfs_is_zoned(block_group->fs_info)) 41 return false; 42 43 if (block_group->flags & BTRFS_BLOCK_GROUP_DATA) 44 return false; 45 46 return block_group->start + block_group->alloc_offset > 47 block_group->meta_write_pointer; 48 } 49 50 /* 51 * Return target flags in extended format or 0 if restripe for this chunk_type 52 * is not in progress 53 * 54 * Should be called with balance_lock held 55 */ 56 static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags) 57 { 58 const struct btrfs_balance_control *bctl = fs_info->balance_ctl; 59 u64 target = 0; 60 61 if (!bctl) 62 return 0; 63 64 if (flags & BTRFS_BLOCK_GROUP_DATA && 65 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { 66 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; 67 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM && 68 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 69 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; 70 } else if (flags & BTRFS_BLOCK_GROUP_METADATA && 71 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { 72 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; 73 } 74 75 return target; 76 } 77 78 /* 79 * @flags: available profiles in extended format (see ctree.h) 80 * 81 * Return reduced profile in chunk format. If profile changing is in progress 82 * (either running or paused) picks the target profile (if it's already 83 * available), otherwise falls back to plain reducing. 84 */ 85 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) 86 { 87 u64 num_devices = fs_info->fs_devices->rw_devices; 88 u64 target; 89 u64 raid_type; 90 u64 allowed = 0; 91 92 /* 93 * See if restripe for this chunk_type is in progress, if so try to 94 * reduce to the target profile 95 */ 96 spin_lock(&fs_info->balance_lock); 97 target = get_restripe_target(fs_info, flags); 98 if (target) { 99 spin_unlock(&fs_info->balance_lock); 100 return extended_to_chunk(target); 101 } 102 spin_unlock(&fs_info->balance_lock); 103 104 /* First, mask out the RAID levels which aren't possible */ 105 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { 106 if (num_devices >= btrfs_raid_array[raid_type].devs_min) 107 allowed |= btrfs_raid_array[raid_type].bg_flag; 108 } 109 allowed &= flags; 110 111 /* Select the highest-redundancy RAID level. 
*/ 112 if (allowed & BTRFS_BLOCK_GROUP_RAID1C4) 113 allowed = BTRFS_BLOCK_GROUP_RAID1C4; 114 else if (allowed & BTRFS_BLOCK_GROUP_RAID6) 115 allowed = BTRFS_BLOCK_GROUP_RAID6; 116 else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3) 117 allowed = BTRFS_BLOCK_GROUP_RAID1C3; 118 else if (allowed & BTRFS_BLOCK_GROUP_RAID5) 119 allowed = BTRFS_BLOCK_GROUP_RAID5; 120 else if (allowed & BTRFS_BLOCK_GROUP_RAID10) 121 allowed = BTRFS_BLOCK_GROUP_RAID10; 122 else if (allowed & BTRFS_BLOCK_GROUP_RAID1) 123 allowed = BTRFS_BLOCK_GROUP_RAID1; 124 else if (allowed & BTRFS_BLOCK_GROUP_DUP) 125 allowed = BTRFS_BLOCK_GROUP_DUP; 126 else if (allowed & BTRFS_BLOCK_GROUP_RAID0) 127 allowed = BTRFS_BLOCK_GROUP_RAID0; 128 129 flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK; 130 131 return extended_to_chunk(flags | allowed); 132 } 133 134 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) 135 { 136 unsigned seq; 137 u64 flags; 138 139 do { 140 flags = orig_flags; 141 seq = read_seqbegin(&fs_info->profiles_lock); 142 143 if (flags & BTRFS_BLOCK_GROUP_DATA) 144 flags |= fs_info->avail_data_alloc_bits; 145 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 146 flags |= fs_info->avail_system_alloc_bits; 147 else if (flags & BTRFS_BLOCK_GROUP_METADATA) 148 flags |= fs_info->avail_metadata_alloc_bits; 149 } while (read_seqretry(&fs_info->profiles_lock, seq)); 150 151 return btrfs_reduce_alloc_profile(fs_info, flags); 152 } 153 154 void btrfs_get_block_group(struct btrfs_block_group *cache) 155 { 156 refcount_inc(&cache->refs); 157 } 158 159 void btrfs_put_block_group(struct btrfs_block_group *cache) 160 { 161 if (refcount_dec_and_test(&cache->refs)) { 162 WARN_ON(cache->pinned > 0); 163 /* 164 * If there was a failure to cleanup a log tree, very likely due 165 * to an IO failure on a writeback attempt of one or more of its 166 * extent buffers, we could not do proper (and cheap) unaccounting 167 * of their reserved space, so don't warn on reserved > 0 in that 168 * case. 169 */ 170 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) || 171 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info)) 172 WARN_ON(cache->reserved > 0); 173 174 /* 175 * A block_group shouldn't be on the discard_list anymore. 176 * Remove the block_group from the discard_list to prevent us 177 * from causing a panic due to NULL pointer dereference. 
178 */ 179 if (WARN_ON(!list_empty(&cache->discard_list))) 180 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl, 181 cache); 182 183 kfree(cache->free_space_ctl); 184 btrfs_free_chunk_map(cache->physical_map); 185 kfree(cache); 186 } 187 } 188 189 static int btrfs_bg_start_cmp(const struct rb_node *new, 190 const struct rb_node *exist) 191 { 192 const struct btrfs_block_group *new_bg = 193 rb_entry(new, struct btrfs_block_group, cache_node); 194 const struct btrfs_block_group *exist_bg = 195 rb_entry(exist, struct btrfs_block_group, cache_node); 196 197 if (new_bg->start < exist_bg->start) 198 return -1; 199 if (new_bg->start > exist_bg->start) 200 return 1; 201 return 0; 202 } 203 204 /* 205 * This adds the block group to the fs_info rb tree for the block group cache 206 */ 207 static int btrfs_add_block_group_cache(struct btrfs_block_group *block_group) 208 { 209 struct btrfs_fs_info *fs_info = block_group->fs_info; 210 struct rb_node *exist; 211 int ret = 0; 212 213 ASSERT(block_group->length != 0); 214 215 write_lock(&fs_info->block_group_cache_lock); 216 217 exist = rb_find_add_cached(&block_group->cache_node, 218 &fs_info->block_group_cache_tree, btrfs_bg_start_cmp); 219 if (exist) 220 ret = -EEXIST; 221 write_unlock(&fs_info->block_group_cache_lock); 222 223 return ret; 224 } 225 226 /* 227 * This will return the block group at or after bytenr if contains is 0, else 228 * it will return the block group that contains the bytenr 229 */ 230 static struct btrfs_block_group *block_group_cache_tree_search( 231 struct btrfs_fs_info *info, u64 bytenr, int contains) 232 { 233 struct btrfs_block_group *cache, *ret = NULL; 234 struct rb_node *n; 235 u64 end, start; 236 237 read_lock(&info->block_group_cache_lock); 238 n = info->block_group_cache_tree.rb_root.rb_node; 239 240 while (n) { 241 cache = rb_entry(n, struct btrfs_block_group, cache_node); 242 end = btrfs_block_group_end(cache) - 1; 243 start = cache->start; 244 245 if (bytenr < start) { 246 if (!contains && (!ret || start < ret->start)) 247 ret = cache; 248 n = n->rb_left; 249 } else if (bytenr > start) { 250 if (contains && bytenr <= end) { 251 ret = cache; 252 break; 253 } 254 n = n->rb_right; 255 } else { 256 ret = cache; 257 break; 258 } 259 } 260 if (ret) 261 btrfs_get_block_group(ret); 262 read_unlock(&info->block_group_cache_lock); 263 264 return ret; 265 } 266 267 /* 268 * Return the block group that starts at or after bytenr 269 */ 270 struct btrfs_block_group *btrfs_lookup_first_block_group( 271 struct btrfs_fs_info *info, u64 bytenr) 272 { 273 return block_group_cache_tree_search(info, bytenr, 0); 274 } 275 276 /* 277 * Return the block group that contains the given bytenr 278 */ 279 struct btrfs_block_group *btrfs_lookup_block_group( 280 struct btrfs_fs_info *info, u64 bytenr) 281 { 282 return block_group_cache_tree_search(info, bytenr, 1); 283 } 284 285 struct btrfs_block_group *btrfs_next_block_group( 286 struct btrfs_block_group *cache) 287 { 288 struct btrfs_fs_info *fs_info = cache->fs_info; 289 struct rb_node *node; 290 291 read_lock(&fs_info->block_group_cache_lock); 292 293 /* If our block group was removed, we need a full search. 
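	 * Removal is detected via RB_EMPTY_NODE(): btrfs_remove_block_group() erases
	 * the group from the block group rbtree and clears its cache_node, so restart
	 * the lookup from this group's end offset instead of following a stale node.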
	 */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = btrfs_block_group_end(cache);

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:	The filesystem information object.
 * @bytenr:	Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increment the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
 * is responsible for calling btrfs_dec_nocow_writers() later.
 *
 * Or NULL if we cannot do a NOCOW write.
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it wants
 * to use it, then it should get a reference on it before calling this function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers().
*/ 370 btrfs_put_block_group(bg); 371 } 372 373 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg) 374 { 375 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); 376 } 377 378 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, 379 const u64 start) 380 { 381 struct btrfs_block_group *bg; 382 383 bg = btrfs_lookup_block_group(fs_info, start); 384 ASSERT(bg); 385 if (atomic_dec_and_test(&bg->reservations)) 386 wake_up_var(&bg->reservations); 387 btrfs_put_block_group(bg); 388 } 389 390 void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg) 391 { 392 struct btrfs_space_info *space_info = bg->space_info; 393 394 ASSERT(bg->ro); 395 396 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) 397 return; 398 399 /* 400 * Our block group is read only but before we set it to read only, 401 * some task might have had allocated an extent from it already, but it 402 * has not yet created a respective ordered extent (and added it to a 403 * root's list of ordered extents). 404 * Therefore wait for any task currently allocating extents, since the 405 * block group's reservations counter is incremented while a read lock 406 * on the groups' semaphore is held and decremented after releasing 407 * the read access on that semaphore and creating the ordered extent. 408 */ 409 down_write(&space_info->groups_sem); 410 up_write(&space_info->groups_sem); 411 412 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); 413 } 414 415 struct btrfs_caching_control *btrfs_get_caching_control( 416 struct btrfs_block_group *cache) 417 { 418 struct btrfs_caching_control *ctl; 419 420 spin_lock(&cache->lock); 421 if (!cache->caching_ctl) { 422 spin_unlock(&cache->lock); 423 return NULL; 424 } 425 426 ctl = cache->caching_ctl; 427 refcount_inc(&ctl->count); 428 spin_unlock(&cache->lock); 429 return ctl; 430 } 431 432 static void btrfs_put_caching_control(struct btrfs_caching_control *ctl) 433 { 434 if (refcount_dec_and_test(&ctl->count)) 435 kfree(ctl); 436 } 437 438 /* 439 * When we wait for progress in the block group caching, its because our 440 * allocation attempt failed at least once. So, we must sleep and let some 441 * progress happen before we try again. 442 * 443 * This function will sleep at least once waiting for new free space to show 444 * up, and then it will check the block group free space numbers for our min 445 * num_bytes. Another option is to have it go ahead and look in the rbtree for 446 * a free extent of a given size, but this is a good start. 447 * 448 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using 449 * any of the information in this block group. 450 */ 451 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, 452 u64 num_bytes) 453 { 454 struct btrfs_caching_control *caching_ctl; 455 int progress; 456 457 caching_ctl = btrfs_get_caching_control(cache); 458 if (!caching_ctl) 459 return; 460 461 /* 462 * We've already failed to allocate from this block group, so even if 463 * there's enough space in the block group it isn't contiguous enough to 464 * allow for an allocation, so wait for at least the next wakeup tick, 465 * or for the thing to be done. 
466 */ 467 progress = atomic_read(&caching_ctl->progress); 468 469 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || 470 (progress != atomic_read(&caching_ctl->progress) && 471 (cache->free_space_ctl->free_space >= num_bytes))); 472 473 btrfs_put_caching_control(caching_ctl); 474 } 475 476 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache, 477 struct btrfs_caching_control *caching_ctl) 478 { 479 wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); 480 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0; 481 } 482 483 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) 484 { 485 struct btrfs_caching_control *caching_ctl; 486 int ret; 487 488 caching_ctl = btrfs_get_caching_control(cache); 489 if (!caching_ctl) 490 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; 491 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); 492 btrfs_put_caching_control(caching_ctl); 493 return ret; 494 } 495 496 #ifdef CONFIG_BTRFS_DEBUG 497 static void fragment_free_space(struct btrfs_block_group *block_group) 498 { 499 struct btrfs_fs_info *fs_info = block_group->fs_info; 500 u64 start = block_group->start; 501 u64 len = block_group->length; 502 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? 503 fs_info->nodesize : fs_info->sectorsize; 504 u64 step = chunk << 1; 505 506 while (len > chunk) { 507 btrfs_remove_free_space(block_group, start, chunk); 508 start += step; 509 if (len < step) 510 len = 0; 511 else 512 len -= step; 513 } 514 } 515 #endif 516 517 /* 518 * Add a free space range to the in memory free space cache of a block group. 519 * This checks if the range contains super block locations and any such 520 * locations are not added to the free space cache. 521 * 522 * @block_group: The target block group. 523 * @start: Start offset of the range. 524 * @end: End offset of the range (exclusive). 525 * @total_added_ret: Optional pointer to return the total amount of space 526 * added to the block group's free space cache. 527 * 528 * Returns 0 on success or < 0 on error. 
529 */ 530 int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start, 531 u64 end, u64 *total_added_ret) 532 { 533 struct btrfs_fs_info *info = block_group->fs_info; 534 u64 extent_start, extent_end, size; 535 int ret; 536 537 if (total_added_ret) 538 *total_added_ret = 0; 539 540 while (start < end) { 541 if (!btrfs_find_first_extent_bit(&info->excluded_extents, start, 542 &extent_start, &extent_end, 543 EXTENT_DIRTY, NULL)) 544 break; 545 546 if (extent_start <= start) { 547 start = extent_end + 1; 548 } else if (extent_start > start && extent_start < end) { 549 size = extent_start - start; 550 ret = btrfs_add_free_space_async_trimmed(block_group, 551 start, size); 552 if (ret) 553 return ret; 554 if (total_added_ret) 555 *total_added_ret += size; 556 start = extent_end + 1; 557 } else { 558 break; 559 } 560 } 561 562 if (start < end) { 563 size = end - start; 564 ret = btrfs_add_free_space_async_trimmed(block_group, start, 565 size); 566 if (ret) 567 return ret; 568 if (total_added_ret) 569 *total_added_ret += size; 570 } 571 572 return 0; 573 } 574 575 /* 576 * Get an arbitrary extent item index / max_index through the block group 577 * 578 * @caching_ctl the caching control containing the block group to sample from 579 * @index: the integral step through the block group to grab from 580 * @max_index: the granularity of the sampling 581 * @key: return value parameter for the item we find 582 * @path: path to use for searching in the extent tree 583 * 584 * Pre-conditions on indices: 585 * 0 <= index <= max_index 586 * 0 < max_index 587 * 588 * Returns: 0 on success, 1 if the search didn't yield a useful item. 589 */ 590 static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl, 591 int index, int max_index, 592 struct btrfs_key *found_key, 593 struct btrfs_path *path) 594 { 595 struct btrfs_block_group *block_group = caching_ctl->block_group; 596 struct btrfs_fs_info *fs_info = block_group->fs_info; 597 struct btrfs_root *extent_root; 598 u64 search_offset; 599 const u64 search_end = btrfs_block_group_end(block_group); 600 struct btrfs_key search_key; 601 int ret = 0; 602 603 ASSERT(index >= 0); 604 ASSERT(index <= max_index); 605 ASSERT(max_index > 0); 606 lockdep_assert_held(&caching_ctl->mutex); 607 lockdep_assert_held_read(&fs_info->commit_root_sem); 608 609 extent_root = btrfs_extent_root(fs_info, block_group->start); 610 if (unlikely(!extent_root)) { 611 btrfs_err(fs_info, 612 "missing extent root for block group at offset %llu", 613 block_group->start); 614 return -EUCLEAN; 615 } 616 617 search_offset = index * div_u64(block_group->length, max_index); 618 search_key.objectid = block_group->start + search_offset; 619 search_key.type = BTRFS_EXTENT_ITEM_KEY; 620 search_key.offset = 0; 621 622 btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) { 623 /* Success; sampled an extent item in the block group */ 624 if (found_key->type == BTRFS_EXTENT_ITEM_KEY && 625 found_key->objectid >= block_group->start && 626 found_key->objectid + found_key->offset <= search_end) 627 break; 628 629 /* We can't possibly find a valid extent item anymore */ 630 if (found_key->objectid >= search_end) { 631 ret = 1; 632 break; 633 } 634 } 635 636 lockdep_assert_held(&caching_ctl->mutex); 637 lockdep_assert_held_read(&fs_info->commit_root_sem); 638 return ret; 639 } 640 641 /* 642 * Best effort attempt to compute a block group's size class while caching it. 
643 * 644 * @block_group: the block group we are caching 645 * 646 * We cannot infer the size class while adding free space extents, because that 647 * logic doesn't care about contiguous file extents (it doesn't differentiate 648 * between a 100M extent and 100 contiguous 1M extents). So we need to read the 649 * file extent items. Reading all of them is quite wasteful, because usually 650 * only a handful are enough to give a good answer. Therefore, we just grab 5 of 651 * them at even steps through the block group and pick the smallest size class 652 * we see. Since size class is best effort, and not guaranteed in general, 653 * inaccuracy is acceptable. 654 * 655 * To be more explicit about why this algorithm makes sense: 656 * 657 * If we are caching in a block group from disk, then there are three major cases 658 * to consider: 659 * 1. the block group is well behaved and all extents in it are the same size 660 * class. 661 * 2. the block group is mostly one size class with rare exceptions for last 662 * ditch allocations 663 * 3. the block group was populated before size classes and can have a totally 664 * arbitrary mix of size classes. 665 * 666 * In case 1, looking at any extent in the block group will yield the correct 667 * result. For the mixed cases, taking the minimum size class seems like a good 668 * approximation, since gaps from frees will be usable to the size class. For 669 * 2., a small handful of file extents is likely to yield the right answer. For 670 * 3, we can either read every file extent, or admit that this is best effort 671 * anyway and try to stay fast. 672 * 673 * No errors are returned since failing to determine the size class is not a 674 * critical error, size classes are just an optimization. 675 */ 676 static void load_block_group_size_class(struct btrfs_caching_control *caching_ctl) 677 { 678 BTRFS_PATH_AUTO_RELEASE(path); 679 struct btrfs_block_group *block_group = caching_ctl->block_group; 680 struct btrfs_fs_info *fs_info = block_group->fs_info; 681 struct btrfs_key key; 682 int i; 683 u64 min_size = block_group->length; 684 enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE; 685 686 /* 687 * Since we run in workqueue context, we allocate the path on stack to 688 * avoid memory allocation failure, as the stack in a work queue task 689 * is not deep. 
690 */ 691 ASSERT(current_work() == &caching_ctl->work.normal_work); 692 693 if (!btrfs_block_group_should_use_size_class(block_group)) 694 return; 695 696 path.skip_locking = true; 697 path.search_commit_root = true; 698 path.reada = READA_FORWARD; 699 700 lockdep_assert_held(&caching_ctl->mutex); 701 lockdep_assert_held_read(&fs_info->commit_root_sem); 702 for (i = 0; i < 5; ++i) { 703 int ret; 704 705 ret = sample_block_group_extent_item(caching_ctl, i, 5, &key, &path); 706 if (ret < 0) 707 return; 708 btrfs_release_path(&path); 709 if (ret > 0) 710 continue; 711 min_size = min_t(u64, min_size, key.offset); 712 size_class = btrfs_calc_block_group_size_class(min_size); 713 } 714 if (size_class != BTRFS_BG_SZ_NONE) { 715 spin_lock(&block_group->lock); 716 block_group->size_class = size_class; 717 spin_unlock(&block_group->lock); 718 } 719 } 720 721 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) 722 { 723 struct btrfs_block_group *block_group = caching_ctl->block_group; 724 const u64 block_group_end = btrfs_block_group_end(block_group); 725 struct btrfs_fs_info *fs_info = block_group->fs_info; 726 struct btrfs_root *extent_root; 727 BTRFS_PATH_AUTO_FREE(path); 728 struct extent_buffer *leaf; 729 struct btrfs_key key; 730 u64 total_found = 0; 731 u64 last = 0; 732 u32 nritems; 733 int ret; 734 bool wakeup = true; 735 736 path = btrfs_alloc_path(); 737 if (!path) 738 return -ENOMEM; 739 740 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); 741 extent_root = btrfs_extent_root(fs_info, last); 742 743 #ifdef CONFIG_BTRFS_DEBUG 744 /* 745 * If we're fragmenting we don't want to make anybody think we can 746 * allocate from this block group until we've had a chance to fragment 747 * the free space. 748 */ 749 if (btrfs_should_fragment_free_space(block_group)) 750 wakeup = false; 751 #endif 752 /* 753 * We don't want to deadlock with somebody trying to allocate a new 754 * extent for the extent root while also trying to search the extent 755 * root to add free space. 
So we skip locking and search the commit 756 * root, since its read-only 757 */ 758 path->skip_locking = true; 759 path->search_commit_root = true; 760 path->reada = READA_FORWARD; 761 762 key.objectid = last; 763 key.type = BTRFS_EXTENT_ITEM_KEY; 764 key.offset = 0; 765 766 next: 767 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 768 if (ret < 0) 769 return ret; 770 771 leaf = path->nodes[0]; 772 nritems = btrfs_header_nritems(leaf); 773 774 while (1) { 775 if (btrfs_fs_closing_done(fs_info)) { 776 last = (u64)-1; 777 break; 778 } 779 780 if (path->slots[0] < nritems) { 781 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 782 } else { 783 ret = btrfs_find_next_key(extent_root, path, &key, 0, 0); 784 if (ret) 785 break; 786 787 if (need_resched() || 788 rwsem_is_contended(&fs_info->commit_root_sem)) { 789 btrfs_release_path(path); 790 up_read(&fs_info->commit_root_sem); 791 mutex_unlock(&caching_ctl->mutex); 792 cond_resched(); 793 mutex_lock(&caching_ctl->mutex); 794 down_read(&fs_info->commit_root_sem); 795 goto next; 796 } 797 798 ret = btrfs_next_leaf(extent_root, path); 799 if (ret < 0) 800 return ret; 801 if (ret) 802 break; 803 leaf = path->nodes[0]; 804 nritems = btrfs_header_nritems(leaf); 805 continue; 806 } 807 808 if (key.objectid < last) { 809 key.objectid = last; 810 key.type = BTRFS_EXTENT_ITEM_KEY; 811 key.offset = 0; 812 btrfs_release_path(path); 813 goto next; 814 } 815 816 if (key.objectid < block_group->start) { 817 path->slots[0]++; 818 continue; 819 } 820 821 if (key.objectid >= block_group_end) 822 break; 823 824 if (key.type == BTRFS_EXTENT_ITEM_KEY || 825 key.type == BTRFS_METADATA_ITEM_KEY) { 826 u64 space_added; 827 828 ret = btrfs_add_new_free_space(block_group, last, 829 key.objectid, &space_added); 830 if (ret) 831 return ret; 832 total_found += space_added; 833 if (key.type == BTRFS_METADATA_ITEM_KEY) 834 last = key.objectid + 835 fs_info->nodesize; 836 else 837 last = key.objectid + key.offset; 838 839 if (total_found > CACHING_CTL_WAKE_UP) { 840 total_found = 0; 841 if (wakeup) { 842 atomic_inc(&caching_ctl->progress); 843 wake_up(&caching_ctl->wait); 844 } 845 } 846 } 847 path->slots[0]++; 848 } 849 850 return btrfs_add_new_free_space(block_group, last, block_group_end, NULL); 851 } 852 853 static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg) 854 { 855 btrfs_clear_extent_bit(&bg->fs_info->excluded_extents, bg->start, 856 btrfs_block_group_end(bg) - 1, EXTENT_DIRTY, NULL); 857 } 858 859 static noinline void caching_thread(struct btrfs_work *work) 860 { 861 struct btrfs_block_group *block_group; 862 struct btrfs_fs_info *fs_info; 863 struct btrfs_caching_control *caching_ctl; 864 int ret; 865 866 caching_ctl = container_of(work, struct btrfs_caching_control, work); 867 block_group = caching_ctl->block_group; 868 fs_info = block_group->fs_info; 869 870 mutex_lock(&caching_ctl->mutex); 871 down_read(&fs_info->commit_root_sem); 872 873 load_block_group_size_class(caching_ctl); 874 if (btrfs_test_opt(fs_info, SPACE_CACHE)) { 875 ret = load_free_space_cache(block_group); 876 if (ret == 1) { 877 ret = 0; 878 goto done; 879 } 880 881 /* 882 * We failed to load the space cache, set ourselves to 883 * CACHE_STARTED and carry on. 
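	 * The caching thread will still populate the free space for this block
	 * group below, either from the free space tree or by scanning the
	 * extent tree, it is just slower than loading the on-disk space cache.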
884 */ 885 spin_lock(&block_group->lock); 886 block_group->cached = BTRFS_CACHE_STARTED; 887 spin_unlock(&block_group->lock); 888 wake_up(&caching_ctl->wait); 889 } 890 891 /* 892 * If we are in the transaction that populated the free space tree we 893 * can't actually cache from the free space tree as our commit root and 894 * real root are the same, so we could change the contents of the blocks 895 * while caching. Instead do the slow caching in this case, and after 896 * the transaction has committed we will be safe. 897 */ 898 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && 899 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) 900 ret = btrfs_load_free_space_tree(caching_ctl); 901 else 902 ret = load_extent_tree_free(caching_ctl); 903 done: 904 spin_lock(&block_group->lock); 905 block_group->caching_ctl = NULL; 906 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; 907 spin_unlock(&block_group->lock); 908 909 #ifdef CONFIG_BTRFS_DEBUG 910 if (btrfs_should_fragment_free_space(block_group)) { 911 u64 bytes_used; 912 913 spin_lock(&block_group->space_info->lock); 914 spin_lock(&block_group->lock); 915 bytes_used = block_group->length - block_group->used; 916 block_group->space_info->bytes_used += bytes_used >> 1; 917 spin_unlock(&block_group->lock); 918 spin_unlock(&block_group->space_info->lock); 919 fragment_free_space(block_group); 920 } 921 #endif 922 923 up_read(&fs_info->commit_root_sem); 924 btrfs_free_excluded_extents(block_group); 925 mutex_unlock(&caching_ctl->mutex); 926 927 wake_up(&caching_ctl->wait); 928 929 btrfs_put_caching_control(caching_ctl); 930 btrfs_put_block_group(block_group); 931 } 932 933 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) 934 { 935 struct btrfs_fs_info *fs_info = cache->fs_info; 936 struct btrfs_caching_control *caching_ctl = NULL; 937 int ret = 0; 938 939 /* Allocator for zoned filesystems does not use the cache at all */ 940 if (btrfs_is_zoned(fs_info)) 941 return 0; 942 943 /* 944 * No allocations can be done from remapped block groups, so they have 945 * no entries in the free-space tree. 
946 */ 947 if (cache->flags & BTRFS_BLOCK_GROUP_REMAPPED) 948 return 0; 949 950 caching_ctl = kzalloc_obj(*caching_ctl, GFP_NOFS); 951 if (!caching_ctl) 952 return -ENOMEM; 953 954 INIT_LIST_HEAD(&caching_ctl->list); 955 mutex_init(&caching_ctl->mutex); 956 init_waitqueue_head(&caching_ctl->wait); 957 caching_ctl->block_group = cache; 958 refcount_set(&caching_ctl->count, 2); 959 atomic_set(&caching_ctl->progress, 0); 960 btrfs_init_work(&caching_ctl->work, caching_thread, NULL); 961 962 spin_lock(&cache->lock); 963 if (cache->cached != BTRFS_CACHE_NO) { 964 kfree(caching_ctl); 965 966 caching_ctl = cache->caching_ctl; 967 if (caching_ctl) 968 refcount_inc(&caching_ctl->count); 969 spin_unlock(&cache->lock); 970 goto out; 971 } 972 WARN_ON(cache->caching_ctl); 973 cache->caching_ctl = caching_ctl; 974 cache->cached = BTRFS_CACHE_STARTED; 975 spin_unlock(&cache->lock); 976 977 write_lock(&fs_info->block_group_cache_lock); 978 refcount_inc(&caching_ctl->count); 979 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); 980 write_unlock(&fs_info->block_group_cache_lock); 981 982 btrfs_get_block_group(cache); 983 984 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); 985 out: 986 if (wait && caching_ctl) 987 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); 988 if (caching_ctl) 989 btrfs_put_caching_control(caching_ctl); 990 991 return ret; 992 } 993 994 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 995 { 996 u64 extra_flags = chunk_to_extended(flags) & 997 BTRFS_EXTENDED_PROFILE_MASK; 998 999 write_seqlock(&fs_info->profiles_lock); 1000 if (flags & BTRFS_BLOCK_GROUP_DATA) 1001 fs_info->avail_data_alloc_bits &= ~extra_flags; 1002 if (flags & BTRFS_BLOCK_GROUP_METADATA) 1003 fs_info->avail_metadata_alloc_bits &= ~extra_flags; 1004 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 1005 fs_info->avail_system_alloc_bits &= ~extra_flags; 1006 write_sequnlock(&fs_info->profiles_lock); 1007 } 1008 1009 /* 1010 * Clear incompat bits for the following feature(s): 1011 * 1012 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group 1013 * in the whole filesystem 1014 * 1015 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups 1016 */ 1017 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) 1018 { 1019 bool found_raid56 = false; 1020 bool found_raid1c34 = false; 1021 1022 if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) || 1023 (flags & BTRFS_BLOCK_GROUP_RAID1C3) || 1024 (flags & BTRFS_BLOCK_GROUP_RAID1C4)) { 1025 struct list_head *head = &fs_info->space_info; 1026 struct btrfs_space_info *sinfo; 1027 1028 list_for_each_entry_rcu(sinfo, head, list) { 1029 down_read(&sinfo->groups_sem); 1030 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) 1031 found_raid56 = true; 1032 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) 1033 found_raid56 = true; 1034 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3])) 1035 found_raid1c34 = true; 1036 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4])) 1037 found_raid1c34 = true; 1038 up_read(&sinfo->groups_sem); 1039 } 1040 if (!found_raid56) 1041 btrfs_clear_fs_incompat(fs_info, RAID56); 1042 if (!found_raid1c34) 1043 btrfs_clear_fs_incompat(fs_info, RAID1C34); 1044 } 1045 } 1046 1047 static struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info) 1048 { 1049 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) 1050 return fs_info->block_group_root; 1051 return btrfs_extent_root(fs_info, 0); 1052 } 1053 1054 static int 
remove_block_group_item(struct btrfs_trans_handle *trans, 1055 struct btrfs_path *path, 1056 struct btrfs_block_group *block_group) 1057 { 1058 struct btrfs_fs_info *fs_info = trans->fs_info; 1059 struct btrfs_root *root; 1060 struct btrfs_key key; 1061 int ret; 1062 1063 root = btrfs_block_group_root(fs_info); 1064 key.objectid = block_group->start; 1065 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 1066 key.offset = block_group->length; 1067 1068 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1069 if (ret > 0) 1070 ret = -ENOENT; 1071 if (ret < 0) 1072 return ret; 1073 1074 return btrfs_del_item(trans, root, path); 1075 } 1076 1077 void btrfs_remove_bg_from_sinfo(struct btrfs_block_group *bg) 1078 { 1079 int factor = btrfs_bg_type_to_factor(bg->flags); 1080 1081 spin_lock(&bg->space_info->lock); 1082 if (btrfs_test_opt(bg->fs_info, ENOSPC_DEBUG)) { 1083 WARN_ON(bg->space_info->total_bytes < bg->length); 1084 WARN_ON(bg->space_info->bytes_readonly < bg->length - bg->zone_unusable); 1085 WARN_ON(bg->space_info->bytes_zone_unusable < bg->zone_unusable); 1086 WARN_ON(bg->space_info->disk_total < bg->length * factor); 1087 } 1088 bg->space_info->total_bytes -= bg->length; 1089 bg->space_info->bytes_readonly -= (bg->length - bg->zone_unusable); 1090 btrfs_space_info_update_bytes_zone_unusable(bg->space_info, -bg->zone_unusable); 1091 bg->space_info->disk_total -= bg->length * factor; 1092 spin_unlock(&bg->space_info->lock); 1093 } 1094 1095 int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 1096 struct btrfs_chunk_map *map) 1097 { 1098 struct btrfs_fs_info *fs_info = trans->fs_info; 1099 BTRFS_PATH_AUTO_FREE(path); 1100 struct btrfs_block_group *block_group; 1101 struct btrfs_free_cluster *cluster; 1102 struct inode *inode; 1103 struct kobject *kobj = NULL; 1104 int ret; 1105 int index; 1106 struct btrfs_caching_control *caching_ctl = NULL; 1107 bool remove_map; 1108 bool remove_rsv = false; 1109 1110 block_group = btrfs_lookup_block_group(fs_info, map->start); 1111 if (unlikely(!block_group)) { 1112 btrfs_abort_transaction(trans, -ENOENT); 1113 return -ENOENT; 1114 } 1115 1116 if (unlikely(!block_group->ro && 1117 !(block_group->flags & BTRFS_BLOCK_GROUP_REMAPPED))) { 1118 ret = -EUCLEAN; 1119 btrfs_abort_transaction(trans, ret); 1120 goto out; 1121 } 1122 1123 trace_btrfs_remove_block_group(block_group); 1124 /* 1125 * Free the reserved super bytes from this block group before 1126 * remove it. 
1127 */ 1128 btrfs_free_excluded_extents(block_group); 1129 btrfs_free_ref_tree_range(fs_info, block_group->start, 1130 block_group->length); 1131 1132 index = btrfs_bg_flags_to_raid_index(block_group->flags); 1133 1134 /* make sure this block group isn't part of an allocation cluster */ 1135 cluster = &fs_info->data_alloc_cluster; 1136 spin_lock(&cluster->refill_lock); 1137 btrfs_return_cluster_to_free_space(block_group, cluster); 1138 spin_unlock(&cluster->refill_lock); 1139 1140 /* 1141 * make sure this block group isn't part of a metadata 1142 * allocation cluster 1143 */ 1144 cluster = &fs_info->meta_alloc_cluster; 1145 spin_lock(&cluster->refill_lock); 1146 btrfs_return_cluster_to_free_space(block_group, cluster); 1147 spin_unlock(&cluster->refill_lock); 1148 1149 btrfs_clear_treelog_bg(block_group); 1150 btrfs_clear_data_reloc_bg(block_group); 1151 1152 path = btrfs_alloc_path(); 1153 if (unlikely(!path)) { 1154 ret = -ENOMEM; 1155 btrfs_abort_transaction(trans, ret); 1156 goto out; 1157 } 1158 1159 /* 1160 * get the inode first so any iput calls done for the io_list 1161 * aren't the final iput (no unlinks allowed now) 1162 */ 1163 inode = lookup_free_space_inode(block_group, path); 1164 1165 mutex_lock(&trans->transaction->cache_write_mutex); 1166 /* 1167 * Make sure our free space cache IO is done before removing the 1168 * free space inode 1169 */ 1170 spin_lock(&trans->transaction->dirty_bgs_lock); 1171 if (!list_empty(&block_group->io_list)) { 1172 list_del_init(&block_group->io_list); 1173 1174 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); 1175 1176 spin_unlock(&trans->transaction->dirty_bgs_lock); 1177 btrfs_wait_cache_io(trans, block_group, path); 1178 btrfs_put_block_group(block_group); 1179 spin_lock(&trans->transaction->dirty_bgs_lock); 1180 } 1181 1182 if (!list_empty(&block_group->dirty_list)) { 1183 list_del_init(&block_group->dirty_list); 1184 remove_rsv = true; 1185 btrfs_put_block_group(block_group); 1186 } 1187 spin_unlock(&trans->transaction->dirty_bgs_lock); 1188 mutex_unlock(&trans->transaction->cache_write_mutex); 1189 1190 ret = btrfs_remove_free_space_inode(trans, inode, block_group); 1191 if (unlikely(ret)) { 1192 btrfs_abort_transaction(trans, ret); 1193 goto out; 1194 } 1195 1196 write_lock(&fs_info->block_group_cache_lock); 1197 rb_erase_cached(&block_group->cache_node, 1198 &fs_info->block_group_cache_tree); 1199 RB_CLEAR_NODE(&block_group->cache_node); 1200 1201 /* Once for the block groups rbtree */ 1202 btrfs_put_block_group(block_group); 1203 1204 write_unlock(&fs_info->block_group_cache_lock); 1205 1206 down_write(&block_group->space_info->groups_sem); 1207 /* 1208 * we must use list_del_init so people can check to see if they 1209 * are still on the list after taking the semaphore 1210 */ 1211 list_del_init(&block_group->list); 1212 if (list_empty(&block_group->space_info->block_groups[index])) { 1213 kobj = block_group->space_info->block_group_kobjs[index]; 1214 block_group->space_info->block_group_kobjs[index] = NULL; 1215 clear_avail_alloc_bits(fs_info, block_group->flags); 1216 } 1217 up_write(&block_group->space_info->groups_sem); 1218 clear_incompat_bg_bits(fs_info, block_group->flags); 1219 if (kobj) { 1220 kobject_del(kobj); 1221 kobject_put(kobj); 1222 } 1223 1224 if (block_group->cached == BTRFS_CACHE_STARTED) 1225 btrfs_wait_block_group_cache_done(block_group); 1226 1227 write_lock(&fs_info->block_group_cache_lock); 1228 caching_ctl = btrfs_get_caching_control(block_group); 1229 if (!caching_ctl) { 1230 struct 
btrfs_caching_control *ctl;

		list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
			if (ctl->block_group == block_group) {
				caching_ctl = ctl;
				refcount_inc(&caching_ctl->count);
				break;
			}
		}
	}
	if (caching_ctl)
		list_del_init(&caching_ctl->list);
	write_unlock(&fs_info->block_group_cache_lock);

	if (caching_ctl) {
		/* Once for the caching bgs list and once for us. */
		btrfs_put_caching_control(caching_ctl);
		btrfs_put_caching_control(caching_ctl);
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);
	spin_unlock(&block_group->space_info->lock);

	if (!(block_group->flags & BTRFS_BLOCK_GROUP_REMAPPED))
		btrfs_remove_bg_from_sinfo(block_group);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 *
	 * If the REMAPPED flag has been set the block group's free space
	 * has already been removed, so we can skip the call to
	 * btrfs_remove_block_group_free_space().
	 */
	if (!(block_group->flags & BTRFS_BLOCK_GROUP_REMAPPED)) {
		ret = btrfs_remove_block_group_free_space(trans, block_group);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = remove_block_group_item(trans, path, block_group);
	if (unlikely(ret < 0)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	spin_lock(&block_group->lock);
	/*
	 * Hitting this WARN means we removed a block group with an unwritten
	 * region. It will cause "unable to find chunk map for logical" errors.
	 */
	if (WARN_ON(has_unwritten_metadata(block_group)))
		btrfs_warn(fs_info,
			   "block group %llu is removed before metadata write out",
			   block_group->start);

	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree, so no one can find it anymore, and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the chunk map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group.
	 * This is needed to avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_map = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_map)
		btrfs_remove_chunk_map(fs_info, map);

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
	return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	struct btrfs_chunk_map *map;
	unsigned int num_items;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(map != NULL);
	ASSERT(map->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	num_items = 3 + map->num_stripes;
	btrfs_free_chunk_map(map);

	return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}

/*
 * Mark block group @cache read-only, so later writes won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
1393 */ 1394 static int inc_block_group_ro(struct btrfs_block_group *cache, bool force) 1395 { 1396 struct btrfs_space_info *sinfo = cache->space_info; 1397 u64 num_bytes; 1398 int ret = -ENOSPC; 1399 1400 spin_lock(&sinfo->lock); 1401 spin_lock(&cache->lock); 1402 1403 if (cache->swap_extents) { 1404 ret = -ETXTBSY; 1405 goto out; 1406 } 1407 1408 if (cache->ro) { 1409 cache->ro++; 1410 ret = 0; 1411 goto out; 1412 } 1413 1414 num_bytes = btrfs_block_group_available_space(cache); 1415 1416 /* 1417 * Data never overcommits, even in mixed mode, so do just the straight 1418 * check of left over space in how much we have allocated. 1419 */ 1420 if (force) { 1421 ret = 0; 1422 } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { 1423 u64 sinfo_used = btrfs_space_info_used(sinfo, true); 1424 1425 /* 1426 * Here we make sure if we mark this bg RO, we still have enough 1427 * free space as buffer. 1428 */ 1429 if (sinfo_used + num_bytes <= sinfo->total_bytes) 1430 ret = 0; 1431 } else { 1432 /* 1433 * We overcommit metadata, so we need to do the 1434 * btrfs_can_overcommit check here, and we need to pass in 1435 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of 1436 * leeway to allow us to mark this block group as read only. 1437 */ 1438 if (btrfs_can_overcommit(sinfo, num_bytes, BTRFS_RESERVE_NO_FLUSH)) 1439 ret = 0; 1440 } 1441 1442 if (!ret) { 1443 sinfo->bytes_readonly += num_bytes; 1444 if (btrfs_is_zoned(cache->fs_info)) { 1445 /* Migrate zone_unusable bytes to readonly */ 1446 sinfo->bytes_readonly += cache->zone_unusable; 1447 btrfs_space_info_update_bytes_zone_unusable(sinfo, -cache->zone_unusable); 1448 cache->zone_unusable = 0; 1449 } 1450 cache->ro++; 1451 list_add_tail(&cache->ro_list, &sinfo->ro_bgs); 1452 } 1453 out: 1454 spin_unlock(&cache->lock); 1455 spin_unlock(&sinfo->lock); 1456 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { 1457 btrfs_info(cache->fs_info, 1458 "unable to make block group %llu ro", cache->start); 1459 btrfs_dump_space_info(cache->space_info, 0, false); 1460 } 1461 return ret; 1462 } 1463 1464 static bool clean_pinned_extents(struct btrfs_trans_handle *trans, 1465 const struct btrfs_block_group *bg) 1466 { 1467 struct btrfs_fs_info *fs_info = trans->fs_info; 1468 struct btrfs_transaction *prev_trans = NULL; 1469 const u64 start = bg->start; 1470 const u64 end = start + bg->length - 1; 1471 int ret; 1472 1473 spin_lock(&fs_info->trans_lock); 1474 if (!list_is_first(&trans->transaction->list, &fs_info->trans_list)) { 1475 prev_trans = list_prev_entry(trans->transaction, list); 1476 refcount_inc(&prev_trans->use_count); 1477 } 1478 spin_unlock(&fs_info->trans_lock); 1479 1480 /* 1481 * Hold the unused_bg_unpin_mutex lock to avoid racing with 1482 * btrfs_finish_extent_commit(). If we are at transaction N, another 1483 * task might be running finish_extent_commit() for the previous 1484 * transaction N - 1, and have seen a range belonging to the block 1485 * group in pinned_extents before we were able to clear the whole block 1486 * group range from pinned_extents. This means that task can lookup for 1487 * the block group after we unpinned it from pinned_extents and removed 1488 * it, leading to an error at unpin_extent_range(). 
1489 */ 1490 mutex_lock(&fs_info->unused_bg_unpin_mutex); 1491 if (prev_trans) { 1492 ret = btrfs_clear_extent_bit(&prev_trans->pinned_extents, start, end, 1493 EXTENT_DIRTY, NULL); 1494 if (ret) 1495 goto out; 1496 } 1497 1498 ret = btrfs_clear_extent_bit(&trans->transaction->pinned_extents, start, end, 1499 EXTENT_DIRTY, NULL); 1500 out: 1501 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1502 if (prev_trans) 1503 btrfs_put_transaction(prev_trans); 1504 1505 return ret == 0; 1506 } 1507 1508 /* 1509 * Link the block_group to a list via bg_list. 1510 * 1511 * @bg: The block_group to link to the list. 1512 * @list: The list to link it to. 1513 * 1514 * Use this rather than list_add_tail() directly to ensure proper respect 1515 * to locking and refcounting. 1516 * 1517 * Returns: true if the bg was linked with a refcount bump and false otherwise. 1518 */ 1519 static bool btrfs_link_bg_list(struct btrfs_block_group *bg, struct list_head *list) 1520 { 1521 struct btrfs_fs_info *fs_info = bg->fs_info; 1522 bool added = false; 1523 1524 spin_lock(&fs_info->unused_bgs_lock); 1525 if (list_empty(&bg->bg_list)) { 1526 btrfs_get_block_group(bg); 1527 list_add_tail(&bg->bg_list, list); 1528 added = true; 1529 } 1530 spin_unlock(&fs_info->unused_bgs_lock); 1531 return added; 1532 } 1533 1534 /* 1535 * Process the unused_bgs list and remove any that don't have any allocated 1536 * space inside of them. 1537 */ 1538 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1539 { 1540 LIST_HEAD(retry_list); 1541 struct btrfs_block_group *block_group; 1542 struct btrfs_space_info *space_info; 1543 struct btrfs_trans_handle *trans; 1544 const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); 1545 int ret = 0; 1546 1547 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1548 return; 1549 1550 if (btrfs_fs_closing(fs_info)) 1551 return; 1552 1553 /* 1554 * Long running balances can keep us blocked here for eternity, so 1555 * simply skip deletion if we're unable to get the mutex. 1556 */ 1557 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1558 return; 1559 1560 spin_lock(&fs_info->unused_bgs_lock); 1561 while (!list_empty(&fs_info->unused_bgs)) { 1562 u64 used; 1563 int trimming; 1564 1565 block_group = list_first_entry(&fs_info->unused_bgs, 1566 struct btrfs_block_group, 1567 bg_list); 1568 list_del_init(&block_group->bg_list); 1569 1570 space_info = block_group->space_info; 1571 1572 if (ret || btrfs_mixed_space_info(space_info)) { 1573 btrfs_put_block_group(block_group); 1574 continue; 1575 } 1576 spin_unlock(&fs_info->unused_bgs_lock); 1577 1578 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 1579 1580 /* Don't want to race with allocators so take the groups_sem */ 1581 down_write(&space_info->groups_sem); 1582 1583 /* 1584 * Async discard moves the final block group discard to be prior 1585 * to the unused_bgs code path. Therefore, if it's not fully 1586 * trimmed, punt it back to the async discard lists. 
1587 */ 1588 if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && 1589 !btrfs_is_free_space_trimmed(block_group)) { 1590 trace_btrfs_skip_unused_block_group(block_group); 1591 up_write(&space_info->groups_sem); 1592 /* Requeue if we failed because of async discard */ 1593 btrfs_discard_queue_work(&fs_info->discard_ctl, 1594 block_group); 1595 goto next; 1596 } 1597 1598 spin_lock(&space_info->lock); 1599 spin_lock(&block_group->lock); 1600 if (btrfs_is_block_group_used(block_group) || 1601 (block_group->ro && !(block_group->flags & BTRFS_BLOCK_GROUP_REMAPPED)) || 1602 list_is_singular(&block_group->list) || 1603 test_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &block_group->runtime_flags)) { 1604 /* 1605 * We want to bail if we made new allocations or have 1606 * outstanding allocations in this block group. We do 1607 * the ro check in case balance is currently acting on 1608 * this block group. 1609 * 1610 * Also bail out if this is the only block group for its 1611 * type, because otherwise we would lose profile 1612 * information from fs_info->avail_*_alloc_bits and the 1613 * next block group of this type would be created with a 1614 * "single" profile (even if we're in a raid fs) because 1615 * fs_info->avail_*_alloc_bits would be 0. 1616 */ 1617 trace_btrfs_skip_unused_block_group(block_group); 1618 spin_unlock(&block_group->lock); 1619 spin_unlock(&space_info->lock); 1620 up_write(&space_info->groups_sem); 1621 goto next; 1622 } 1623 1624 /* 1625 * The block group may be unused but there may be space reserved 1626 * accounting with the existence of that block group, that is, 1627 * space_info->bytes_may_use was incremented by a task but no 1628 * space was yet allocated from the block group by the task. 1629 * That space may or may not be allocated, as we are generally 1630 * pessimistic about space reservation for metadata as well as 1631 * for data when using compression (as we reserve space based on 1632 * the worst case, when data can't be compressed, and before 1633 * actually attempting compression, before starting writeback). 1634 * 1635 * So check if the total space of the space_info minus the size 1636 * of this block group is less than the used space of the 1637 * space_info - if that's the case, then it means we have tasks 1638 * that might be relying on the block group in order to allocate 1639 * extents, and add back the block group to the unused list when 1640 * we finish, so that we retry later in case no tasks ended up 1641 * needing to allocate extents from the block group. 1642 */ 1643 used = btrfs_space_info_used(space_info, true); 1644 if (((space_info->total_bytes - block_group->length < used && 1645 block_group->zone_unusable < block_group->length) || 1646 has_unwritten_metadata(block_group)) && 1647 !(block_group->flags & BTRFS_BLOCK_GROUP_REMAPPED)) { 1648 /* 1649 * Add a reference for the list, compensate for the ref 1650 * drop under the "next" label for the 1651 * fs_info->unused_bgs list. 1652 */ 1653 btrfs_link_bg_list(block_group, &retry_list); 1654 1655 trace_btrfs_skip_unused_block_group(block_group); 1656 spin_unlock(&block_group->lock); 1657 spin_unlock(&space_info->lock); 1658 up_write(&space_info->groups_sem); 1659 goto next; 1660 } 1661 1662 spin_unlock(&block_group->lock); 1663 spin_unlock(&space_info->lock); 1664 1665 /* We don't want to force the issue, only flip if it's ok. 
*/ 1666 ret = inc_block_group_ro(block_group, 0); 1667 up_write(&space_info->groups_sem); 1668 if (ret < 0) { 1669 ret = 0; 1670 goto next; 1671 } 1672 1673 ret = btrfs_zone_finish(block_group); 1674 if (ret < 0) { 1675 btrfs_dec_block_group_ro(block_group); 1676 if (ret == -EAGAIN) { 1677 btrfs_link_bg_list(block_group, &retry_list); 1678 ret = 0; 1679 } 1680 goto next; 1681 } 1682 1683 /* 1684 * Want to do this before we do anything else so we can recover 1685 * properly if we fail to join the transaction. 1686 */ 1687 trans = btrfs_start_trans_remove_block_group(fs_info, 1688 block_group->start); 1689 if (IS_ERR(trans)) { 1690 btrfs_dec_block_group_ro(block_group); 1691 ret = PTR_ERR(trans); 1692 goto next; 1693 } 1694 1695 /* 1696 * We could have pending pinned extents for this block group, 1697 * just delete them, we don't care about them anymore. 1698 */ 1699 if (!clean_pinned_extents(trans, block_group)) { 1700 btrfs_dec_block_group_ro(block_group); 1701 goto end_trans; 1702 } 1703 1704 /* 1705 * At this point, the block_group is read only and should fail 1706 * new allocations. However, btrfs_finish_extent_commit() can 1707 * cause this block_group to be placed back on the discard 1708 * lists because now the block_group isn't fully discarded. 1709 * Bail here and try again later after discarding everything. 1710 */ 1711 spin_lock(&fs_info->discard_ctl.lock); 1712 if (!list_empty(&block_group->discard_list)) { 1713 spin_unlock(&fs_info->discard_ctl.lock); 1714 btrfs_dec_block_group_ro(block_group); 1715 btrfs_discard_queue_work(&fs_info->discard_ctl, 1716 block_group); 1717 goto end_trans; 1718 } 1719 spin_unlock(&fs_info->discard_ctl.lock); 1720 1721 /* Reset pinned so btrfs_put_block_group doesn't complain */ 1722 spin_lock(&space_info->lock); 1723 spin_lock(&block_group->lock); 1724 1725 btrfs_space_info_update_bytes_pinned(space_info, -block_group->pinned); 1726 space_info->bytes_readonly += block_group->pinned; 1727 block_group->pinned = 0; 1728 1729 spin_unlock(&block_group->lock); 1730 spin_unlock(&space_info->lock); 1731 1732 /* 1733 * The normal path here is an unused block group is passed here, 1734 * then trimming is handled in the transaction commit path. 1735 * Async discard interposes before this to do the trimming 1736 * before coming down the unused block group path as trimming 1737 * will no longer be done later in the transaction commit path. 1738 */ 1739 if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1740 goto flip_async; 1741 1742 /* 1743 * DISCARD can flip during remount. On zoned filesystems, we 1744 * need to reset sequential-required zones. 1745 */ 1746 trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) || 1747 btrfs_is_zoned(fs_info); 1748 1749 /* Implicit trim during transaction commit. */ 1750 if (trimming) 1751 btrfs_freeze_block_group(block_group); 1752 1753 /* 1754 * Btrfs_remove_chunk will abort the transaction if things go 1755 * horribly wrong. 1756 */ 1757 ret = btrfs_remove_chunk(trans, block_group->start); 1758 1759 if (ret) { 1760 if (trimming) 1761 btrfs_unfreeze_block_group(block_group); 1762 goto end_trans; 1763 } 1764 1765 /* 1766 * If we're not mounted with -odiscard, we can just forget 1767 * about this block group. Otherwise we'll need to wait 1768 * until transaction commit to do the actual discard. 
1769 */ 1770 if (trimming) { 1771 spin_lock(&fs_info->unused_bgs_lock); 1772 /* 1773 * A concurrent scrub might have added us to the list 1774 * fs_info->unused_bgs, so use a list_move operation 1775 * to add the block group to the deleted_bgs list. 1776 */ 1777 list_move(&block_group->bg_list, 1778 &trans->transaction->deleted_bgs); 1779 spin_unlock(&fs_info->unused_bgs_lock); 1780 btrfs_get_block_group(block_group); 1781 } 1782 end_trans: 1783 btrfs_end_transaction(trans); 1784 next: 1785 btrfs_put_block_group(block_group); 1786 spin_lock(&fs_info->unused_bgs_lock); 1787 } 1788 list_splice_tail(&retry_list, &fs_info->unused_bgs); 1789 spin_unlock(&fs_info->unused_bgs_lock); 1790 mutex_unlock(&fs_info->reclaim_bgs_lock); 1791 return; 1792 1793 flip_async: 1794 btrfs_end_transaction(trans); 1795 spin_lock(&fs_info->unused_bgs_lock); 1796 list_splice_tail(&retry_list, &fs_info->unused_bgs); 1797 spin_unlock(&fs_info->unused_bgs_lock); 1798 mutex_unlock(&fs_info->reclaim_bgs_lock); 1799 btrfs_put_block_group(block_group); 1800 btrfs_discard_punt_unused_bgs_list(fs_info); 1801 } 1802 1803 void btrfs_mark_bg_unused(struct btrfs_block_group *bg) 1804 { 1805 struct btrfs_fs_info *fs_info = bg->fs_info; 1806 1807 spin_lock(&fs_info->unused_bgs_lock); 1808 if (list_empty(&bg->bg_list)) { 1809 btrfs_get_block_group(bg); 1810 trace_btrfs_add_unused_block_group(bg); 1811 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 1812 } else if (bg->flags & BTRFS_BLOCK_GROUP_REMAPPED && 1813 bg->identity_remap_count == 0) { 1814 /* Leave fully remapped block groups on the fully_remapped_bgs list. */ 1815 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { 1816 /* Pull out the block group from the reclaim_bgs list. */ 1817 trace_btrfs_add_unused_block_group(bg); 1818 list_move_tail(&bg->bg_list, &fs_info->unused_bgs); 1819 } 1820 spin_unlock(&fs_info->unused_bgs_lock); 1821 } 1822 1823 /* 1824 * We want block groups with a low number of used bytes to be in the beginning 1825 * of the list, so they will get reclaimed first. 1826 */ 1827 static int reclaim_bgs_cmp(void *unused, const struct list_head *a, 1828 const struct list_head *b) 1829 { 1830 const struct btrfs_block_group *bg1, *bg2; 1831 1832 bg1 = list_entry(a, struct btrfs_block_group, bg_list); 1833 bg2 = list_entry(b, struct btrfs_block_group, bg_list); 1834 1835 /* 1836 * Some other task may be updating the ->used field concurrently, but it 1837 * is not serious if we get a stale value or load/store tearing issues, 1838 * as sorting the list of block groups to reclaim is not critical and an 1839 * occasional imperfect order is ok. So silence KCSAN and avoid the 1840 * overhead of locking or any other synchronization. 
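 * Note that list_sort() treats a non-zero return as "a sorts after b",
 * so returning (bg1->used > bg2->used) yields ascending order by used
 * bytes, i.e. the emptiest block groups end up first in the list.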
1841 */ 1842 return data_race(bg1->used > bg2->used); 1843 } 1844 1845 static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info) 1846 { 1847 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1848 return false; 1849 1850 if (btrfs_fs_closing(fs_info)) 1851 return false; 1852 1853 if (btrfs_is_zoned(fs_info)) 1854 return btrfs_zoned_should_reclaim(fs_info); 1855 return true; 1856 } 1857 1858 static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed) 1859 { 1860 const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info); 1861 u64 thresh_bytes = mult_perc(bg->length, thresh_pct); 1862 const u64 new_val = bg->used; 1863 const u64 old_val = new_val + bytes_freed; 1864 1865 if (thresh_bytes == 0) 1866 return false; 1867 1868 /* 1869 * If we were below the threshold before don't reclaim, we are likely a 1870 * brand new block group and we don't want to relocate new block groups. 1871 */ 1872 if (old_val < thresh_bytes) 1873 return false; 1874 if (new_val >= thresh_bytes) 1875 return false; 1876 return true; 1877 } 1878 1879 void btrfs_reclaim_bgs_work(struct work_struct *work) 1880 { 1881 struct btrfs_fs_info *fs_info = 1882 container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 1883 struct btrfs_block_group *bg; 1884 struct btrfs_space_info *space_info; 1885 LIST_HEAD(retry_list); 1886 1887 if (!btrfs_should_reclaim(fs_info)) 1888 return; 1889 1890 guard(super_write)(fs_info->sb); 1891 1892 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 1893 return; 1894 1895 /* 1896 * Long running balances can keep us blocked here for eternity, so 1897 * simply skip reclaim if we're unable to get the mutex. 1898 */ 1899 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { 1900 btrfs_exclop_finish(fs_info); 1901 return; 1902 } 1903 1904 spin_lock(&fs_info->unused_bgs_lock); 1905 /* 1906 * Sort happens under lock because we can't simply splice it and sort. 1907 * The block groups might still be in use and reachable via bg_list, 1908 * and their presence in the reclaim_bgs list must be preserved. 1909 */ 1910 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); 1911 while (!list_empty(&fs_info->reclaim_bgs)) { 1912 u64 used; 1913 u64 reserved; 1914 u64 old_total; 1915 int ret = 0; 1916 1917 bg = list_first_entry(&fs_info->reclaim_bgs, 1918 struct btrfs_block_group, 1919 bg_list); 1920 list_del_init(&bg->bg_list); 1921 1922 space_info = bg->space_info; 1923 spin_unlock(&fs_info->unused_bgs_lock); 1924 1925 /* Don't race with allocators so take the groups_sem */ 1926 down_write(&space_info->groups_sem); 1927 1928 spin_lock(&space_info->lock); 1929 spin_lock(&bg->lock); 1930 if (bg->reserved || bg->pinned || bg->ro) { 1931 /* 1932 * We want to bail if we made new allocations or have 1933 * outstanding allocations in this block group. We do 1934 * the ro check in case balance is currently acting on 1935 * this block group. 1936 */ 1937 spin_unlock(&bg->lock); 1938 spin_unlock(&space_info->lock); 1939 up_write(&space_info->groups_sem); 1940 goto next; 1941 } 1942 if (bg->used == 0) { 1943 /* 1944 * It is possible that we trigger relocation on a block 1945 * group as its extents are deleted and it first goes 1946 * below the threshold, then shortly after goes empty. 
1947 * 1948 * In this case, relocating it does delete it, but has 1949 * some overhead in relocation specific metadata, looking 1950 * for the non-existent extents and running some extra 1951 * transactions, which we can avoid by using one of the 1952 * other mechanisms for dealing with empty block groups. 1953 */ 1954 if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1955 btrfs_mark_bg_unused(bg); 1956 spin_unlock(&bg->lock); 1957 spin_unlock(&space_info->lock); 1958 up_write(&space_info->groups_sem); 1959 goto next; 1960 1961 } 1962 /* 1963 * The block group might no longer meet the reclaim condition by 1964 * the time we get around to reclaiming it, so to avoid 1965 * reclaiming overly full block_groups, skip reclaiming them. 1966 * 1967 * Since the decision making process also depends on the amount 1968 * being freed, pass in a fake giant value to skip that extra 1969 * check, which is more meaningful when adding to the list in 1970 * the first place. 1971 */ 1972 if (!should_reclaim_block_group(bg, bg->length)) { 1973 spin_unlock(&bg->lock); 1974 spin_unlock(&space_info->lock); 1975 up_write(&space_info->groups_sem); 1976 goto next; 1977 } 1978 1979 spin_unlock(&bg->lock); 1980 old_total = space_info->total_bytes; 1981 spin_unlock(&space_info->lock); 1982 1983 /* 1984 * Get out fast, in case we're read-only or unmounting the 1985 * filesystem. It is OK to drop block groups from the list even 1986 * for the read-only case. As we did take the super write lock, 1987 * "mount -o remount,ro" won't happen and read-only filesystem 1988 * means it is forced read-only due to a fatal error. So, it 1989 * never gets back to read-write to let us reclaim again. 1990 */ 1991 if (btrfs_need_cleaner_sleep(fs_info)) { 1992 up_write(&space_info->groups_sem); 1993 goto next; 1994 } 1995 1996 ret = inc_block_group_ro(bg, 0); 1997 up_write(&space_info->groups_sem); 1998 if (ret < 0) 1999 goto next; 2000 2001 /* 2002 * The amount of bytes reclaimed corresponds to the sum of the 2003 * "used" and "reserved" counters. We have set the block group 2004 * to RO above, which prevents reservations from happening but 2005 * we may have existing reservations for which allocation has 2006 * not yet been done - btrfs_update_block_group() was not yet 2007 * called, which is where we will transfer a reserved extent's 2008 * size from the "reserved" counter to the "used" counter - this 2009 * happens when running delayed references. When we relocate the 2010 * chunk below, relocation first flushes delalloc, waits for 2011 * ordered extent completion (which is where we create delayed 2012 * references for data extents) and commits the current 2013 * transaction (which runs delayed references), and only after 2014 * it does the actual work to move extents out of the block 2015 * group. So the reported amount of reclaimed bytes is 2016 * effectively the sum of the 'used' and 'reserved' counters. 
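 * For example, a block group with 600M used and 24M reserved at this
 * point is reported as 624M reclaimed, even though part of the reserved
 * amount only migrates to "used" while the relocation below runs.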
2017 */ 2018 spin_lock(&bg->lock); 2019 used = bg->used; 2020 reserved = bg->reserved; 2021 spin_unlock(&bg->lock); 2022 2023 trace_btrfs_reclaim_block_group(bg); 2024 ret = btrfs_relocate_chunk(fs_info, bg->start, false); 2025 if (ret) { 2026 btrfs_dec_block_group_ro(bg); 2027 btrfs_err(fs_info, "error relocating chunk %llu", 2028 bg->start); 2029 used = 0; 2030 reserved = 0; 2031 spin_lock(&space_info->lock); 2032 space_info->reclaim_errors++; 2033 spin_unlock(&space_info->lock); 2034 } 2035 spin_lock(&space_info->lock); 2036 space_info->reclaim_count++; 2037 space_info->reclaim_bytes += used; 2038 space_info->reclaim_bytes += reserved; 2039 if (space_info->total_bytes < old_total) 2040 btrfs_set_periodic_reclaim_ready(space_info, true); 2041 spin_unlock(&space_info->lock); 2042 2043 next: 2044 if (ret && !READ_ONCE(space_info->periodic_reclaim)) 2045 btrfs_link_bg_list(bg, &retry_list); 2046 btrfs_put_block_group(bg); 2047 2048 mutex_unlock(&fs_info->reclaim_bgs_lock); 2049 /* 2050 * Reclaiming all the block groups in the list can take really 2051 * long. Prioritize cleaning up unused block groups. 2052 */ 2053 btrfs_delete_unused_bgs(fs_info); 2054 /* 2055 * If we are interrupted by a balance, we can just bail out. The 2056 * cleaner thread restart again if necessary. 2057 */ 2058 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 2059 goto end; 2060 spin_lock(&fs_info->unused_bgs_lock); 2061 } 2062 spin_unlock(&fs_info->unused_bgs_lock); 2063 mutex_unlock(&fs_info->reclaim_bgs_lock); 2064 end: 2065 spin_lock(&fs_info->unused_bgs_lock); 2066 list_splice_tail(&retry_list, &fs_info->reclaim_bgs); 2067 spin_unlock(&fs_info->unused_bgs_lock); 2068 btrfs_exclop_finish(fs_info); 2069 } 2070 2071 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 2072 { 2073 btrfs_reclaim_sweep(fs_info); 2074 spin_lock(&fs_info->unused_bgs_lock); 2075 if (!list_empty(&fs_info->reclaim_bgs)) 2076 queue_work(system_dfl_wq, &fs_info->reclaim_bgs_work); 2077 spin_unlock(&fs_info->unused_bgs_lock); 2078 } 2079 2080 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 2081 { 2082 struct btrfs_fs_info *fs_info = bg->fs_info; 2083 2084 if (btrfs_link_bg_list(bg, &fs_info->reclaim_bgs)) 2085 trace_btrfs_add_reclaim_block_group(bg); 2086 } 2087 2088 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key, 2089 const struct btrfs_path *path) 2090 { 2091 struct btrfs_chunk_map *map; 2092 struct btrfs_block_group_item bg; 2093 struct extent_buffer *leaf; 2094 int slot; 2095 u64 flags; 2096 int ret = 0; 2097 2098 slot = path->slots[0]; 2099 leaf = path->nodes[0]; 2100 2101 map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset); 2102 if (!map) { 2103 btrfs_err(fs_info, 2104 "logical %llu len %llu found bg but no related chunk", 2105 key->objectid, key->offset); 2106 return -ENOENT; 2107 } 2108 2109 if (unlikely(map->start != key->objectid || map->chunk_len != key->offset)) { 2110 btrfs_err(fs_info, 2111 "block group %llu len %llu mismatch with chunk %llu len %llu", 2112 key->objectid, key->offset, map->start, map->chunk_len); 2113 ret = -EUCLEAN; 2114 goto out_free_map; 2115 } 2116 2117 read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 2118 sizeof(bg)); 2119 flags = btrfs_stack_block_group_flags(&bg) & 2120 BTRFS_BLOCK_GROUP_TYPE_MASK; 2121 2122 if (unlikely(flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) { 2123 btrfs_err(fs_info, 2124 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 2125 key->objectid, key->offset, flags, 
2126 (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type)); 2127 ret = -EUCLEAN; 2128 } 2129 2130 out_free_map: 2131 btrfs_free_chunk_map(map); 2132 return ret; 2133 } 2134 2135 static int find_first_block_group(struct btrfs_fs_info *fs_info, 2136 struct btrfs_path *path, 2137 const struct btrfs_key *key) 2138 { 2139 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2140 int ret; 2141 struct btrfs_key found_key; 2142 2143 btrfs_for_each_slot(root, key, &found_key, path, ret) { 2144 if (found_key.objectid >= key->objectid && 2145 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 2146 return read_bg_from_eb(fs_info, &found_key, path); 2147 } 2148 } 2149 return ret; 2150 } 2151 2152 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 2153 { 2154 u64 extra_flags = chunk_to_extended(flags) & 2155 BTRFS_EXTENDED_PROFILE_MASK; 2156 2157 write_seqlock(&fs_info->profiles_lock); 2158 if (flags & BTRFS_BLOCK_GROUP_DATA) 2159 fs_info->avail_data_alloc_bits |= extra_flags; 2160 if (flags & BTRFS_BLOCK_GROUP_METADATA) 2161 fs_info->avail_metadata_alloc_bits |= extra_flags; 2162 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 2163 fs_info->avail_system_alloc_bits |= extra_flags; 2164 write_sequnlock(&fs_info->profiles_lock); 2165 } 2166 2167 /* 2168 * Map a physical disk address to a list of logical addresses. 2169 * 2170 * @fs_info: the filesystem 2171 * @chunk_start: logical address of block group 2172 * @physical: physical address to map to logical addresses 2173 * @logical: return array of logical addresses which map to @physical 2174 * @naddrs: length of @logical 2175 * @stripe_len: size of IO stripe for the given block group 2176 * 2177 * Maps a particular @physical disk address to a list of @logical addresses. 2178 * Used primarily to exclude those portions of a block group that contain super 2179 * block copies. 2180 */ 2181 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 2182 u64 physical, u64 **logical, int *naddrs, int *stripe_len) 2183 { 2184 struct btrfs_chunk_map *map; 2185 u64 *buf; 2186 u64 bytenr; 2187 u64 data_stripe_length; 2188 u64 io_stripe_size; 2189 int i, nr = 0; 2190 int ret = 0; 2191 2192 map = btrfs_get_chunk_map(fs_info, chunk_start, 1); 2193 if (IS_ERR(map)) 2194 return -EIO; 2195 2196 data_stripe_length = map->stripe_size; 2197 io_stripe_size = BTRFS_STRIPE_LEN; 2198 chunk_start = map->start; 2199 2200 /* For RAID5/6 adjust to a full IO stripe length */ 2201 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2202 io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 2203 2204 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 2205 if (!buf) { 2206 ret = -ENOMEM; 2207 goto out; 2208 } 2209 2210 for (i = 0; i < map->num_stripes; i++) { 2211 bool already_inserted = false; 2212 u32 stripe_nr; 2213 u32 offset; 2214 int j; 2215 2216 if (!in_range(physical, map->stripes[i].physical, 2217 data_stripe_length)) 2218 continue; 2219 2220 stripe_nr = (physical - map->stripes[i].physical) >> 2221 BTRFS_STRIPE_LEN_SHIFT; 2222 offset = (physical - map->stripes[i].physical) & 2223 BTRFS_STRIPE_LEN_MASK; 2224 2225 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2226 BTRFS_BLOCK_GROUP_RAID10)) 2227 stripe_nr = div_u64(stripe_nr * map->num_stripes + i, 2228 map->sub_stripes); 2229 /* 2230 * The remaining case would be for RAID56, multiply by 2231 * nr_data_stripes(). 
Alternatively, just use rmap_len below 2232 * instead of map->stripe_len 2233 */ 2234 bytenr = chunk_start + stripe_nr * io_stripe_size + offset; 2235 2236 /* Ensure we don't add duplicate addresses */ 2237 for (j = 0; j < nr; j++) { 2238 if (buf[j] == bytenr) { 2239 already_inserted = true; 2240 break; 2241 } 2242 } 2243 2244 if (!already_inserted) 2245 buf[nr++] = bytenr; 2246 } 2247 2248 *logical = buf; 2249 *naddrs = nr; 2250 *stripe_len = io_stripe_size; 2251 out: 2252 btrfs_free_chunk_map(map); 2253 return ret; 2254 } 2255 2256 static int exclude_super_stripes(struct btrfs_block_group *cache) 2257 { 2258 struct btrfs_fs_info *fs_info = cache->fs_info; 2259 const bool zoned = btrfs_is_zoned(fs_info); 2260 u64 bytenr; 2261 u64 *logical; 2262 int stripe_len; 2263 int i, nr, ret; 2264 2265 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 2266 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 2267 cache->bytes_super += stripe_len; 2268 ret = btrfs_set_extent_bit(&fs_info->excluded_extents, cache->start, 2269 cache->start + stripe_len - 1, 2270 EXTENT_DIRTY, NULL); 2271 if (ret) 2272 return ret; 2273 } 2274 2275 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2276 bytenr = btrfs_sb_offset(i); 2277 ret = btrfs_rmap_block(fs_info, cache->start, 2278 bytenr, &logical, &nr, &stripe_len); 2279 if (ret) 2280 return ret; 2281 2282 /* Shouldn't have super stripes in sequential zones */ 2283 if (unlikely(zoned && nr)) { 2284 kfree(logical); 2285 btrfs_err(fs_info, 2286 "zoned: block group %llu must not contain super block", 2287 cache->start); 2288 return -EUCLEAN; 2289 } 2290 2291 while (nr--) { 2292 u64 len = min_t(u64, stripe_len, 2293 btrfs_block_group_end(cache) - logical[nr]); 2294 2295 cache->bytes_super += len; 2296 ret = btrfs_set_extent_bit(&fs_info->excluded_extents, 2297 logical[nr], logical[nr] + len - 1, 2298 EXTENT_DIRTY, NULL); 2299 if (ret) { 2300 kfree(logical); 2301 return ret; 2302 } 2303 } 2304 2305 kfree(logical); 2306 } 2307 return 0; 2308 } 2309 2310 static struct btrfs_block_group *btrfs_create_block_group( 2311 struct btrfs_fs_info *fs_info, u64 start) 2312 { 2313 struct btrfs_block_group *cache; 2314 2315 cache = kzalloc_obj(*cache, GFP_NOFS); 2316 if (!cache) 2317 return NULL; 2318 2319 cache->free_space_ctl = kzalloc_obj(*cache->free_space_ctl, GFP_NOFS); 2320 if (!cache->free_space_ctl) { 2321 kfree(cache); 2322 return NULL; 2323 } 2324 2325 cache->start = start; 2326 2327 cache->fs_info = fs_info; 2328 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 2329 2330 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 2331 2332 refcount_set(&cache->refs, 1); 2333 spin_lock_init(&cache->lock); 2334 init_rwsem(&cache->data_rwsem); 2335 INIT_LIST_HEAD(&cache->list); 2336 INIT_LIST_HEAD(&cache->cluster_list); 2337 INIT_LIST_HEAD(&cache->bg_list); 2338 INIT_LIST_HEAD(&cache->ro_list); 2339 INIT_LIST_HEAD(&cache->discard_list); 2340 INIT_LIST_HEAD(&cache->dirty_list); 2341 INIT_LIST_HEAD(&cache->io_list); 2342 INIT_LIST_HEAD(&cache->active_bg_list); 2343 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 2344 atomic_set(&cache->frozen, 0); 2345 mutex_init(&cache->free_space_lock); 2346 2347 return cache; 2348 } 2349 2350 /* 2351 * Iterate all chunks and verify that each of them has the corresponding block 2352 * group 2353 */ 2354 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 2355 { 2356 u64 start = 0; 2357 int ret = 0; 2358 2359 while (1) { 2360 struct btrfs_chunk_map *map; 2361 struct btrfs_block_group *bg; 2362 2363 /* 2364 * 
btrfs_find_chunk_map() will return the first chunk map 2365 * intersecting the range, so setting @length to 1 is enough to 2366 * get the first chunk. 2367 */ 2368 map = btrfs_find_chunk_map(fs_info, start, 1); 2369 if (!map) 2370 break; 2371 2372 bg = btrfs_lookup_block_group(fs_info, map->start); 2373 if (unlikely(!bg)) { 2374 btrfs_err(fs_info, 2375 "chunk start=%llu len=%llu doesn't have corresponding block group", 2376 map->start, map->chunk_len); 2377 ret = -EUCLEAN; 2378 btrfs_free_chunk_map(map); 2379 break; 2380 } 2381 if (unlikely(bg->start != map->start || bg->length != map->chunk_len || 2382 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 2383 (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) { 2384 btrfs_err(fs_info, 2385 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 2386 map->start, map->chunk_len, 2387 map->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 2388 bg->start, bg->length, 2389 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 2390 ret = -EUCLEAN; 2391 btrfs_free_chunk_map(map); 2392 btrfs_put_block_group(bg); 2393 break; 2394 } 2395 start = map->start + map->chunk_len; 2396 btrfs_free_chunk_map(map); 2397 btrfs_put_block_group(bg); 2398 } 2399 return ret; 2400 } 2401 2402 static int read_one_block_group(struct btrfs_fs_info *info, 2403 struct btrfs_block_group_item_v2 *bgi, 2404 const struct btrfs_key *key, 2405 int need_clear) 2406 { 2407 struct btrfs_block_group *cache; 2408 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 2409 int ret; 2410 2411 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 2412 2413 cache = btrfs_create_block_group(info, key->objectid); 2414 if (!cache) 2415 return -ENOMEM; 2416 2417 cache->length = key->offset; 2418 cache->used = btrfs_stack_block_group_v2_used(bgi); 2419 cache->last_used = cache->used; 2420 cache->flags = btrfs_stack_block_group_v2_flags(bgi); 2421 cache->last_flags = cache->flags; 2422 cache->global_root_id = btrfs_stack_block_group_v2_chunk_objectid(bgi); 2423 cache->space_info = btrfs_find_space_info(info, cache->flags); 2424 cache->remap_bytes = btrfs_stack_block_group_v2_remap_bytes(bgi); 2425 cache->last_remap_bytes = cache->remap_bytes; 2426 cache->identity_remap_count = btrfs_stack_block_group_v2_identity_remap_count(bgi); 2427 cache->last_identity_remap_count = cache->identity_remap_count; 2428 2429 btrfs_set_free_space_tree_thresholds(cache); 2430 2431 if (need_clear) { 2432 /* 2433 * When we mount with old space cache, we need to 2434 * set BTRFS_DC_CLEAR and set dirty flag. 2435 * 2436 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 2437 * truncate the old free space cache inode and 2438 * setup a new one. 2439 * b) Setting 'dirty flag' makes sure that we flush 2440 * the new space cache info onto disk. 2441 */ 2442 if (btrfs_test_opt(info, SPACE_CACHE)) 2443 cache->disk_cache_state = BTRFS_DC_CLEAR; 2444 } 2445 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 2446 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 2447 btrfs_err(info, 2448 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 2449 cache->start); 2450 ret = -EINVAL; 2451 goto error; 2452 } 2453 2454 ret = btrfs_load_block_group_zone_info(cache, false); 2455 if (ret) { 2456 btrfs_err(info, "zoned: failed to load zone info of bg %llu", 2457 cache->start); 2458 goto error; 2459 } 2460 2461 /* 2462 * We need to exclude the super stripes now so that the space info has 2463 * super bytes accounted for, otherwise we'll think we have more space 2464 * than we actually do. 
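 * The excluded ranges are also skipped later when the group's free space
 * is populated, so the superblock mirrors are never handed out by the
 * allocator.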
2465 */ 2466 ret = exclude_super_stripes(cache); 2467 if (ret) { 2468 /* We may have excluded something, so call this just in case. */ 2469 btrfs_free_excluded_extents(cache); 2470 goto error; 2471 } 2472 2473 /* 2474 * For zoned filesystem, space after the allocation offset is the only 2475 * free space for a block group. So, we don't need any caching work. 2476 * btrfs_calc_zone_unusable() will set the amount of free space and 2477 * zone_unusable space. 2478 * 2479 * For regular filesystem, check for two cases, either we are full, and 2480 * therefore don't need to bother with the caching work since we won't 2481 * find any space, or we are empty, and we can just add all the space 2482 * in and be done with it. This saves us _a_lot_ of time, particularly 2483 * in the full case. 2484 */ 2485 if (btrfs_is_zoned(info)) { 2486 btrfs_calc_zone_unusable(cache); 2487 /* Should not have any excluded extents. Just in case, though. */ 2488 btrfs_free_excluded_extents(cache); 2489 } else if (cache->length == cache->used) { 2490 cache->cached = BTRFS_CACHE_FINISHED; 2491 btrfs_free_excluded_extents(cache); 2492 } else if (cache->used == 0 && cache->remap_bytes == 0) { 2493 cache->cached = BTRFS_CACHE_FINISHED; 2494 ret = btrfs_add_new_free_space(cache, cache->start, 2495 btrfs_block_group_end(cache), NULL); 2496 btrfs_free_excluded_extents(cache); 2497 if (ret) 2498 goto error; 2499 } 2500 2501 ret = btrfs_add_block_group_cache(cache); 2502 if (ret) { 2503 btrfs_remove_free_space_cache(cache); 2504 goto error; 2505 } 2506 2507 trace_btrfs_add_block_group(info, cache, 0); 2508 btrfs_add_bg_to_space_info(info, cache); 2509 2510 set_avail_alloc_bits(info, cache->flags); 2511 if (btrfs_chunk_writeable(info, cache->start)) { 2512 if (cache->used == 0 && cache->remap_bytes == 0) { 2513 ASSERT(list_empty(&cache->bg_list)); 2514 if (btrfs_test_opt(info, DISCARD_ASYNC)) 2515 btrfs_discard_queue_work(&info->discard_ctl, cache); 2516 else 2517 btrfs_mark_bg_unused(cache); 2518 } 2519 } else { 2520 inc_block_group_ro(cache, 1); 2521 } 2522 2523 return 0; 2524 error: 2525 btrfs_put_block_group(cache); 2526 return ret; 2527 } 2528 2529 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 2530 { 2531 struct rb_node *node; 2532 int ret = 0; 2533 2534 for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) { 2535 struct btrfs_chunk_map *map; 2536 struct btrfs_block_group *bg; 2537 2538 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 2539 bg = btrfs_create_block_group(fs_info, map->start); 2540 if (!bg) { 2541 ret = -ENOMEM; 2542 break; 2543 } 2544 2545 /* Fill dummy cache as FULL */ 2546 bg->length = map->chunk_len; 2547 bg->flags = map->type; 2548 bg->cached = BTRFS_CACHE_FINISHED; 2549 bg->used = map->chunk_len; 2550 bg->flags = map->type; 2551 bg->space_info = btrfs_find_space_info(fs_info, bg->flags); 2552 ret = btrfs_add_block_group_cache(bg); 2553 /* 2554 * We may have some valid block group cache added already, in 2555 * that case we skip to the next one. 
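 * (btrfs_add_block_group_cache() returns -EEXIST when the rbtree of
 * block groups already has an entry starting at this offset.)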
2556 */ 2557 if (ret == -EEXIST) { 2558 ret = 0; 2559 btrfs_put_block_group(bg); 2560 continue; 2561 } 2562 2563 if (ret) { 2564 btrfs_remove_free_space_cache(bg); 2565 btrfs_put_block_group(bg); 2566 break; 2567 } 2568 2569 btrfs_add_bg_to_space_info(fs_info, bg); 2570 2571 set_avail_alloc_bits(fs_info, bg->flags); 2572 } 2573 if (!ret) 2574 btrfs_init_global_block_rsv(fs_info); 2575 return ret; 2576 } 2577 2578 int btrfs_read_block_groups(struct btrfs_fs_info *info) 2579 { 2580 struct btrfs_root *root = btrfs_block_group_root(info); 2581 struct btrfs_path *path; 2582 int ret; 2583 struct btrfs_block_group *cache; 2584 struct btrfs_space_info *space_info; 2585 struct btrfs_key key; 2586 int need_clear = 0; 2587 u64 cache_gen; 2588 2589 /* 2590 * Either no extent root (with ibadroots rescue option) or we have 2591 * unsupported RO options. The fs can never be mounted read-write, so no 2592 * need to waste time searching block group items. 2593 * 2594 * This also allows new extent tree related changes to be RO compat, 2595 * no need for a full incompat flag. 2596 */ 2597 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & 2598 ~BTRFS_FEATURE_COMPAT_RO_SUPP)) 2599 return fill_dummy_bgs(info); 2600 2601 key.objectid = 0; 2602 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2603 key.offset = 0; 2604 path = btrfs_alloc_path(); 2605 if (!path) 2606 return -ENOMEM; 2607 2608 cache_gen = btrfs_super_cache_generation(info->super_copy); 2609 if (btrfs_test_opt(info, SPACE_CACHE) && 2610 btrfs_super_generation(info->super_copy) != cache_gen) 2611 need_clear = 1; 2612 if (btrfs_test_opt(info, CLEAR_CACHE)) 2613 need_clear = 1; 2614 2615 while (1) { 2616 struct btrfs_block_group_item_v2 bgi; 2617 struct extent_buffer *leaf; 2618 int slot; 2619 size_t size; 2620 2621 ret = find_first_block_group(info, path, &key); 2622 if (ret > 0) 2623 break; 2624 if (ret != 0) 2625 goto error; 2626 2627 leaf = path->nodes[0]; 2628 slot = path->slots[0]; 2629 2630 if (btrfs_fs_incompat(info, REMAP_TREE)) { 2631 size = sizeof(struct btrfs_block_group_item_v2); 2632 } else { 2633 size = sizeof(struct btrfs_block_group_item); 2634 btrfs_set_stack_block_group_v2_remap_bytes(&bgi, 0); 2635 btrfs_set_stack_block_group_v2_identity_remap_count(&bgi, 0); 2636 } 2637 2638 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 2639 size); 2640 2641 btrfs_item_key_to_cpu(leaf, &key, slot); 2642 btrfs_release_path(path); 2643 ret = read_one_block_group(info, &bgi, &key, need_clear); 2644 if (ret < 0) 2645 goto error; 2646 key.objectid += key.offset; 2647 key.offset = 0; 2648 } 2649 btrfs_release_path(path); 2650 2651 list_for_each_entry(space_info, &info->space_info, list) { 2652 int i; 2653 2654 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2655 if (list_empty(&space_info->block_groups[i])) 2656 continue; 2657 cache = list_first_entry(&space_info->block_groups[i], 2658 struct btrfs_block_group, 2659 list); 2660 btrfs_sysfs_add_block_group_type(cache); 2661 } 2662 2663 if (!(btrfs_get_alloc_profile(info, space_info->flags) & 2664 (BTRFS_BLOCK_GROUP_RAID10 | 2665 BTRFS_BLOCK_GROUP_RAID1_MASK | 2666 BTRFS_BLOCK_GROUP_RAID56_MASK | 2667 BTRFS_BLOCK_GROUP_DUP))) 2668 continue; 2669 /* 2670 * Avoid allocating from un-mirrored block group if there are 2671 * mirrored block groups. 
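 * Marking the RAID0/SINGLE groups read-only below only prevents new
 * allocations from landing in them; the extents already there remain
 * readable and can still be converted to a mirrored profile by balance.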
2672 */ 2673 list_for_each_entry(cache, 2674 &space_info->block_groups[BTRFS_RAID_RAID0], 2675 list) 2676 inc_block_group_ro(cache, 1); 2677 list_for_each_entry(cache, 2678 &space_info->block_groups[BTRFS_RAID_SINGLE], 2679 list) 2680 inc_block_group_ro(cache, 1); 2681 } 2682 2683 btrfs_init_global_block_rsv(info); 2684 ret = check_chunk_block_group_mappings(info); 2685 error: 2686 btrfs_free_path(path); 2687 /* 2688 * We've hit some error while reading the extent tree, and have 2689 * rescue=ibadroots mount option. 2690 * Try to fill the tree using dummy block groups so that the user can 2691 * continue to mount and grab their data. 2692 */ 2693 if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) 2694 ret = fill_dummy_bgs(info); 2695 return ret; 2696 } 2697 2698 /* 2699 * This function, insert_block_group_item(), belongs to the phase 2 of chunk 2700 * allocation. 2701 * 2702 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2703 * phases. 2704 */ 2705 static int insert_block_group_item(struct btrfs_trans_handle *trans, 2706 struct btrfs_block_group *block_group) 2707 { 2708 struct btrfs_fs_info *fs_info = trans->fs_info; 2709 struct btrfs_block_group_item_v2 bgi; 2710 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2711 struct btrfs_key key; 2712 u64 old_last_used; 2713 size_t size; 2714 int ret; 2715 2716 spin_lock(&block_group->lock); 2717 btrfs_set_stack_block_group_v2_used(&bgi, block_group->used); 2718 btrfs_set_stack_block_group_v2_chunk_objectid(&bgi, block_group->global_root_id); 2719 btrfs_set_stack_block_group_v2_flags(&bgi, block_group->flags); 2720 btrfs_set_stack_block_group_v2_remap_bytes(&bgi, block_group->remap_bytes); 2721 btrfs_set_stack_block_group_v2_identity_remap_count(&bgi, block_group->identity_remap_count); 2722 old_last_used = block_group->last_used; 2723 block_group->last_used = block_group->used; 2724 block_group->last_remap_bytes = block_group->remap_bytes; 2725 block_group->last_identity_remap_count = block_group->identity_remap_count; 2726 block_group->last_flags = block_group->flags; 2727 key.objectid = block_group->start; 2728 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2729 key.offset = block_group->length; 2730 spin_unlock(&block_group->lock); 2731 2732 if (btrfs_fs_incompat(fs_info, REMAP_TREE)) 2733 size = sizeof(struct btrfs_block_group_item_v2); 2734 else 2735 size = sizeof(struct btrfs_block_group_item); 2736 2737 ret = btrfs_insert_item(trans, root, &key, &bgi, size); 2738 if (ret < 0) { 2739 spin_lock(&block_group->lock); 2740 block_group->last_used = old_last_used; 2741 spin_unlock(&block_group->lock); 2742 } 2743 2744 return ret; 2745 } 2746 2747 static int insert_dev_extent(struct btrfs_trans_handle *trans, 2748 const struct btrfs_device *device, u64 chunk_offset, 2749 u64 start, u64 num_bytes) 2750 { 2751 struct btrfs_fs_info *fs_info = device->fs_info; 2752 struct btrfs_root *root = fs_info->dev_root; 2753 BTRFS_PATH_AUTO_FREE(path); 2754 struct btrfs_dev_extent *extent; 2755 struct extent_buffer *leaf; 2756 struct btrfs_key key; 2757 int ret; 2758 2759 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 2760 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 2761 path = btrfs_alloc_path(); 2762 if (!path) 2763 return -ENOMEM; 2764 2765 key.objectid = device->devid; 2766 key.type = BTRFS_DEV_EXTENT_KEY; 2767 key.offset = start; 2768 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); 2769 if (ret) 2770 return ret; 2771 2772 leaf = path->nodes[0]; 2773 
extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); 2774 btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); 2775 btrfs_set_dev_extent_chunk_objectid(leaf, extent, 2776 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2777 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 2778 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 2779 2780 return ret; 2781 } 2782 2783 /* 2784 * This function belongs to phase 2. 2785 * 2786 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2787 * phases. 2788 */ 2789 static int insert_dev_extents(struct btrfs_trans_handle *trans, 2790 u64 chunk_offset, u64 chunk_size) 2791 { 2792 struct btrfs_fs_info *fs_info = trans->fs_info; 2793 struct btrfs_device *device; 2794 struct btrfs_chunk_map *map; 2795 u64 dev_offset; 2796 int i; 2797 int ret = 0; 2798 2799 map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 2800 if (IS_ERR(map)) 2801 return PTR_ERR(map); 2802 2803 /* 2804 * Take the device list mutex to prevent races with the final phase of 2805 * a device replace operation that replaces the device object associated 2806 * with the map's stripes, because the device object's id can change 2807 * at any time during that final phase of the device replace operation 2808 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 2809 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 2810 * resulting in persisting a device extent item with such ID. 2811 */ 2812 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2813 for (i = 0; i < map->num_stripes; i++) { 2814 device = map->stripes[i].dev; 2815 dev_offset = map->stripes[i].physical; 2816 2817 ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, 2818 map->stripe_size); 2819 if (ret) 2820 break; 2821 } 2822 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2823 2824 btrfs_free_chunk_map(map); 2825 return ret; 2826 } 2827 2828 /* 2829 * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of 2830 * chunk allocation. 2831 * 2832 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2833 * phases. 2834 */ 2835 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 2836 { 2837 struct btrfs_fs_info *fs_info = trans->fs_info; 2838 struct btrfs_block_group *block_group; 2839 int ret = 0; 2840 2841 while (!list_empty(&trans->new_bgs)) { 2842 int index; 2843 2844 block_group = list_first_entry(&trans->new_bgs, 2845 struct btrfs_block_group, 2846 bg_list); 2847 if (ret) 2848 goto next; 2849 2850 index = btrfs_bg_flags_to_raid_index(block_group->flags); 2851 2852 ret = insert_block_group_item(trans, block_group); 2853 if (ret) 2854 btrfs_abort_transaction(trans, ret); 2855 if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, 2856 &block_group->runtime_flags)) { 2857 mutex_lock(&fs_info->chunk_mutex); 2858 ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); 2859 mutex_unlock(&fs_info->chunk_mutex); 2860 if (ret) 2861 btrfs_abort_transaction(trans, ret); 2862 } 2863 ret = insert_dev_extents(trans, block_group->start, 2864 block_group->length); 2865 if (ret) 2866 btrfs_abort_transaction(trans, ret); 2867 btrfs_add_block_group_free_space(trans, block_group); 2868 2869 /* 2870 * If we restriped during balance, we may have added a new raid 2871 * type, so now add the sysfs entries when it is safe to do so. 2872 * We don't have to worry about locking here as it's handled in 2873 * btrfs_sysfs_add_block_group_type. 
2874 */ 2875 if (block_group->space_info->block_group_kobjs[index] == NULL) 2876 btrfs_sysfs_add_block_group_type(block_group); 2877 2878 /* Already aborted the transaction if it failed. */ 2879 next: 2880 btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); 2881 2882 spin_lock(&fs_info->unused_bgs_lock); 2883 list_del_init(&block_group->bg_list); 2884 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); 2885 btrfs_put_block_group(block_group); 2886 spin_unlock(&fs_info->unused_bgs_lock); 2887 2888 /* 2889 * If the block group is still unused, add it to the list of 2890 * unused block groups. The block group may have been created in 2891 * order to satisfy a space reservation, in which case the 2892 * extent allocation only happens later. But often we don't 2893 * actually need to allocate space that we previously reserved, 2894 * so the block group may become unused for a long time. For 2895 * example for metadata we generally reserve space for a worst 2896 * possible scenario, but then don't end up allocating all that 2897 * space or none at all (due to no need to COW, extent buffers 2898 * were already COWed in the current transaction and still 2899 * unwritten, tree heights lower than the maximum possible 2900 * height, etc). For data we generally reserve the exact amount 2901 * of space we are going to allocate later, the exception is 2902 * when using compression, as we must reserve space based on the 2903 * uncompressed data size, because the compression is only done 2904 * when writeback triggered and we don't know how much space we 2905 * are actually going to need, so we reserve the uncompressed 2906 * size because the data may be incompressible in the worst case. 2907 */ 2908 if (ret == 0) { 2909 bool used; 2910 2911 spin_lock(&block_group->lock); 2912 used = btrfs_is_block_group_used(block_group); 2913 spin_unlock(&block_group->lock); 2914 2915 if (!used) 2916 btrfs_mark_bg_unused(block_group); 2917 } 2918 } 2919 btrfs_trans_release_chunk_metadata(trans); 2920 } 2921 2922 /* 2923 * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2924 * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 2925 */ 2926 static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset) 2927 { 2928 u64 div = SZ_1G; 2929 u64 index; 2930 2931 if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) 2932 return BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2933 2934 /* If we have a smaller fs index based on 128MiB. */ 2935 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) 2936 div = SZ_128M; 2937 2938 offset = div64_u64(offset, div); 2939 div64_u64_rem(offset, fs_info->nr_global_roots, &index); 2940 return index; 2941 } 2942 2943 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, 2944 struct btrfs_space_info *space_info, 2945 u64 type, u64 chunk_offset, u64 size) 2946 { 2947 struct btrfs_fs_info *fs_info = trans->fs_info; 2948 struct btrfs_block_group *cache; 2949 int ret; 2950 2951 btrfs_set_log_full_commit(trans); 2952 2953 cache = btrfs_create_block_group(fs_info, chunk_offset); 2954 if (!cache) 2955 return ERR_PTR(-ENOMEM); 2956 2957 /* 2958 * Mark it as new before adding it to the rbtree of block groups or any 2959 * list, so that no other task finds it and calls btrfs_mark_bg_unused() 2960 * before the new flag is set. 
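 * The flag is cleared again in btrfs_create_pending_block_groups() once
 * the block group item has been inserted and the group is removed from
 * the transaction's list of new block groups.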
2961 */ 2962 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); 2963 2964 cache->length = size; 2965 btrfs_set_free_space_tree_thresholds(cache); 2966 cache->flags = type; 2967 cache->cached = BTRFS_CACHE_FINISHED; 2968 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2969 2970 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 2971 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); 2972 2973 ret = btrfs_load_block_group_zone_info(cache, true); 2974 if (ret) { 2975 btrfs_put_block_group(cache); 2976 return ERR_PTR(ret); 2977 } 2978 2979 ret = exclude_super_stripes(cache); 2980 if (ret) { 2981 /* We may have excluded something, so call this just in case */ 2982 btrfs_free_excluded_extents(cache); 2983 btrfs_put_block_group(cache); 2984 return ERR_PTR(ret); 2985 } 2986 2987 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); 2988 btrfs_free_excluded_extents(cache); 2989 if (ret) { 2990 btrfs_put_block_group(cache); 2991 return ERR_PTR(ret); 2992 } 2993 2994 /* 2995 * Ensure the corresponding space_info object is created and 2996 * assigned to our block group. We want our bg to be added to the rbtree 2997 * with its ->space_info set. 2998 */ 2999 cache->space_info = space_info; 3000 ASSERT(cache->space_info); 3001 3002 ret = btrfs_add_block_group_cache(cache); 3003 if (ret) { 3004 btrfs_remove_free_space_cache(cache); 3005 btrfs_put_block_group(cache); 3006 return ERR_PTR(ret); 3007 } 3008 3009 /* 3010 * Now that our block group has its ->space_info set and is inserted in 3011 * the rbtree, update the space info's counters. 3012 */ 3013 trace_btrfs_add_block_group(fs_info, cache, 1); 3014 btrfs_add_bg_to_space_info(fs_info, cache); 3015 btrfs_update_global_block_rsv(fs_info); 3016 3017 #ifdef CONFIG_BTRFS_DEBUG 3018 if (btrfs_should_fragment_free_space(cache)) { 3019 cache->space_info->bytes_used += size >> 1; 3020 fragment_free_space(cache); 3021 } 3022 #endif 3023 3024 btrfs_link_bg_list(cache, &trans->new_bgs); 3025 btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info); 3026 3027 set_avail_alloc_bits(fs_info, type); 3028 return cache; 3029 } 3030 3031 /* 3032 * Mark one block group RO, can be called several times for the same block 3033 * group. 3034 * 3035 * @cache: the destination block group 3036 * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to 3037 * ensure we still have some free space after marking this 3038 * block group RO. 3039 */ 3040 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 3041 bool do_chunk_alloc) 3042 { 3043 struct btrfs_fs_info *fs_info = cache->fs_info; 3044 struct btrfs_space_info *space_info = cache->space_info; 3045 struct btrfs_trans_handle *trans; 3046 struct btrfs_root *root = btrfs_block_group_root(fs_info); 3047 u64 alloc_flags; 3048 int ret; 3049 bool dirty_bg_running; 3050 3051 /* 3052 * This can only happen when we are doing read-only scrub on read-only 3053 * mount. 3054 * In that case we should not start a new transaction on read-only fs. 3055 * Thus here we skip all chunk allocations. 
3056 */ 3057 if (sb_rdonly(fs_info->sb)) { 3058 mutex_lock(&fs_info->ro_block_group_mutex); 3059 ret = inc_block_group_ro(cache, 0); 3060 mutex_unlock(&fs_info->ro_block_group_mutex); 3061 return ret; 3062 } 3063 3064 do { 3065 trans = btrfs_join_transaction(root); 3066 if (IS_ERR(trans)) 3067 return PTR_ERR(trans); 3068 3069 dirty_bg_running = false; 3070 3071 /* 3072 * We're not allowed to set block groups readonly after the dirty 3073 * block group cache has started writing. If it already started, 3074 * back off and let this transaction commit. 3075 */ 3076 mutex_lock(&fs_info->ro_block_group_mutex); 3077 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 3078 u64 transid = trans->transid; 3079 3080 mutex_unlock(&fs_info->ro_block_group_mutex); 3081 btrfs_end_transaction(trans); 3082 3083 ret = btrfs_wait_for_commit(fs_info, transid); 3084 if (ret) 3085 return ret; 3086 dirty_bg_running = true; 3087 } 3088 } while (dirty_bg_running); 3089 3090 if (do_chunk_alloc) { 3091 /* 3092 * If we are changing raid levels, try to allocate a 3093 * corresponding block group with the new raid level. 3094 */ 3095 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 3096 if (alloc_flags != cache->flags) { 3097 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, 3098 CHUNK_ALLOC_FORCE); 3099 /* 3100 * ENOSPC is allowed here, we may have enough space 3101 * already allocated at the new raid level to carry on 3102 */ 3103 if (ret == -ENOSPC) 3104 ret = 0; 3105 if (ret < 0) 3106 goto out; 3107 } 3108 } 3109 3110 ret = inc_block_group_ro(cache, 0); 3111 if (!ret) 3112 goto out; 3113 if (ret == -ETXTBSY) 3114 goto unlock_out; 3115 3116 /* 3117 * Skip chunk allocation if the bg is SYSTEM, this is to avoid system 3118 * chunk allocation storm to exhaust the system chunk array. Otherwise 3119 * we still want to try our best to mark the block group read-only. 3120 */ 3121 if (!do_chunk_alloc && ret == -ENOSPC && 3122 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) 3123 goto unlock_out; 3124 3125 alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags); 3126 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); 3127 if (ret < 0) 3128 goto out; 3129 /* 3130 * We have allocated a new chunk. We also need to activate that chunk to 3131 * grant metadata tickets for zoned filesystem. 
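 * On zoned devices a block group's zone must be explicitly activated
 * before space in it can back reservations, which is what
 * btrfs_zoned_activate_one_bg() below takes care of; on regular devices
 * it is effectively a no-op.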
3132 */ 3133 ret = btrfs_zoned_activate_one_bg(space_info, true); 3134 if (ret < 0) 3135 goto out; 3136 3137 ret = inc_block_group_ro(cache, 0); 3138 if (ret == -ETXTBSY) 3139 goto unlock_out; 3140 out: 3141 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 3142 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 3143 mutex_lock(&fs_info->chunk_mutex); 3144 check_system_chunk(trans, alloc_flags); 3145 mutex_unlock(&fs_info->chunk_mutex); 3146 } 3147 unlock_out: 3148 mutex_unlock(&fs_info->ro_block_group_mutex); 3149 3150 btrfs_end_transaction(trans); 3151 return ret; 3152 } 3153 3154 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 3155 { 3156 struct btrfs_space_info *sinfo = cache->space_info; 3157 3158 BUG_ON(!cache->ro); 3159 3160 spin_lock(&sinfo->lock); 3161 spin_lock(&cache->lock); 3162 if (!--cache->ro) { 3163 if (btrfs_is_zoned(cache->fs_info)) { 3164 /* Migrate zone_unusable bytes back */ 3165 cache->zone_unusable = 3166 (cache->alloc_offset - cache->used - cache->pinned - 3167 cache->reserved) + 3168 (cache->length - cache->zone_capacity); 3169 btrfs_space_info_update_bytes_zone_unusable(sinfo, cache->zone_unusable); 3170 sinfo->bytes_readonly -= cache->zone_unusable; 3171 } 3172 sinfo->bytes_readonly -= btrfs_block_group_available_space(cache); 3173 list_del_init(&cache->ro_list); 3174 } 3175 spin_unlock(&cache->lock); 3176 spin_unlock(&sinfo->lock); 3177 } 3178 3179 static int update_block_group_item(struct btrfs_trans_handle *trans, 3180 struct btrfs_path *path, 3181 struct btrfs_block_group *cache) 3182 { 3183 struct btrfs_fs_info *fs_info = trans->fs_info; 3184 int ret; 3185 struct btrfs_root *root = btrfs_block_group_root(fs_info); 3186 unsigned long bi; 3187 struct extent_buffer *leaf; 3188 struct btrfs_block_group_item_v2 bgi; 3189 struct btrfs_key key; 3190 u64 old_last_used, old_last_remap_bytes; 3191 u32 old_last_identity_remap_count; 3192 u64 used, remap_bytes; 3193 u32 identity_remap_count; 3194 3195 /* 3196 * Block group items update can be triggered out of commit transaction 3197 * critical section, thus we need a consistent view of used bytes. 3198 * We cannot use cache->used directly outside of the spin lock, as it 3199 * may be changed. 3200 */ 3201 spin_lock(&cache->lock); 3202 old_last_used = cache->last_used; 3203 old_last_remap_bytes = cache->last_remap_bytes; 3204 old_last_identity_remap_count = cache->last_identity_remap_count; 3205 used = cache->used; 3206 remap_bytes = cache->remap_bytes; 3207 identity_remap_count = cache->identity_remap_count; 3208 /* No change in values, can safely skip it. 
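 * Skipping the update avoids searching and COWing a leaf of the block
 * group root for groups whose on-disk item already matches what we would
 * write.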
*/ 3209 if (cache->last_used == used && 3210 cache->last_remap_bytes == remap_bytes && 3211 cache->last_identity_remap_count == identity_remap_count && 3212 cache->last_flags == cache->flags) { 3213 spin_unlock(&cache->lock); 3214 return 0; 3215 } 3216 cache->last_used = used; 3217 cache->last_remap_bytes = remap_bytes; 3218 cache->last_identity_remap_count = identity_remap_count; 3219 cache->last_flags = cache->flags; 3220 spin_unlock(&cache->lock); 3221 3222 key.objectid = cache->start; 3223 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 3224 key.offset = cache->length; 3225 3226 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3227 if (ret) { 3228 if (ret > 0) 3229 ret = -ENOENT; 3230 goto fail; 3231 } 3232 3233 leaf = path->nodes[0]; 3234 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 3235 btrfs_set_stack_block_group_v2_used(&bgi, used); 3236 btrfs_set_stack_block_group_v2_chunk_objectid(&bgi, cache->global_root_id); 3237 btrfs_set_stack_block_group_v2_flags(&bgi, cache->flags); 3238 3239 if (btrfs_fs_incompat(fs_info, REMAP_TREE)) { 3240 btrfs_set_stack_block_group_v2_remap_bytes(&bgi, cache->remap_bytes); 3241 btrfs_set_stack_block_group_v2_identity_remap_count(&bgi, 3242 cache->identity_remap_count); 3243 write_extent_buffer(leaf, &bgi, bi, 3244 sizeof(struct btrfs_block_group_item_v2)); 3245 } else { 3246 write_extent_buffer(leaf, &bgi, bi, 3247 sizeof(struct btrfs_block_group_item)); 3248 } 3249 3250 fail: 3251 btrfs_release_path(path); 3252 /* 3253 * We didn't update the block group item, need to revert last_used 3254 * unless the block group item didn't exist yet - this is to prevent a 3255 * race with a concurrent insertion of the block group item, with 3256 * insert_block_group_item(), that happened just after we attempted to 3257 * update. In that case we would reset last_used to 0 just after the 3258 * insertion set it to a value greater than 0 - if the block group later 3259 * becomes with 0 used bytes, we would incorrectly skip its update. 3260 */ 3261 if (ret < 0 && ret != -ENOENT) { 3262 spin_lock(&cache->lock); 3263 cache->last_used = old_last_used; 3264 cache->last_remap_bytes = old_last_remap_bytes; 3265 cache->last_identity_remap_count = old_last_identity_remap_count; 3266 spin_unlock(&cache->lock); 3267 } 3268 return ret; 3269 3270 } 3271 3272 static int cache_save_setup(struct btrfs_block_group *block_group, 3273 struct btrfs_trans_handle *trans, 3274 struct btrfs_path *path) 3275 { 3276 struct btrfs_fs_info *fs_info = block_group->fs_info; 3277 struct inode *inode = NULL; 3278 struct extent_changeset *data_reserved = NULL; 3279 u64 alloc_hint = 0; 3280 int dcs = BTRFS_DC_ERROR; 3281 u64 cache_size = 0; 3282 int retries = 0; 3283 int ret = 0; 3284 3285 if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 3286 return 0; 3287 3288 /* 3289 * If this block group is smaller than 100 megs don't bother caching the 3290 * block group. 
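 * A group that small is cheap enough to populate from the extent tree at
 * mount time, so maintaining an on-disk cache for it would likely cost
 * more than it saves.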
3291 */ 3292 if (block_group->length < (100 * SZ_1M)) { 3293 spin_lock(&block_group->lock); 3294 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 3295 spin_unlock(&block_group->lock); 3296 return 0; 3297 } 3298 3299 if (TRANS_ABORTED(trans)) 3300 return 0; 3301 again: 3302 inode = lookup_free_space_inode(block_group, path); 3303 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 3304 ret = PTR_ERR(inode); 3305 btrfs_release_path(path); 3306 goto out; 3307 } 3308 3309 if (IS_ERR(inode)) { 3310 BUG_ON(retries); 3311 retries++; 3312 3313 if (block_group->ro) 3314 goto out_free; 3315 3316 ret = create_free_space_inode(trans, block_group, path); 3317 if (ret) 3318 goto out_free; 3319 goto again; 3320 } 3321 3322 /* 3323 * We want to set the generation to 0, that way if anything goes wrong 3324 * from here on out we know not to trust this cache when we load up next 3325 * time. 3326 */ 3327 BTRFS_I(inode)->generation = 0; 3328 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 3329 if (unlikely(ret)) { 3330 /* 3331 * So theoretically we could recover from this, simply set the 3332 * super cache generation to 0 so we know to invalidate the 3333 * cache, but then we'd have to keep track of the block groups 3334 * that fail this way so we know we _have_ to reset this cache 3335 * before the next commit or risk reading stale cache. So to 3336 * limit our exposure to horrible edge cases lets just abort the 3337 * transaction, this only happens in really bad situations 3338 * anyway. 3339 */ 3340 btrfs_abort_transaction(trans, ret); 3341 goto out_put; 3342 } 3343 WARN_ON(ret); 3344 3345 /* We've already setup this transaction, go ahead and exit */ 3346 if (block_group->cache_generation == trans->transid && 3347 i_size_read(inode)) { 3348 dcs = BTRFS_DC_SETUP; 3349 goto out_put; 3350 } 3351 3352 if (i_size_read(inode) > 0) { 3353 ret = btrfs_check_trunc_cache_free_space(fs_info, 3354 &fs_info->global_block_rsv); 3355 if (ret) 3356 goto out_put; 3357 3358 ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 3359 if (ret) 3360 goto out_put; 3361 } 3362 3363 spin_lock(&block_group->lock); 3364 if (block_group->cached != BTRFS_CACHE_FINISHED || 3365 !btrfs_test_opt(fs_info, SPACE_CACHE)) { 3366 /* 3367 * don't bother trying to write stuff out _if_ 3368 * a) we're not cached, 3369 * b) we're with nospace_cache mount option, 3370 * c) we're with v2 space_cache (FREE_SPACE_TREE). 3371 */ 3372 dcs = BTRFS_DC_WRITTEN; 3373 spin_unlock(&block_group->lock); 3374 goto out_put; 3375 } 3376 spin_unlock(&block_group->lock); 3377 3378 /* 3379 * We hit an ENOSPC when setting up the cache in this transaction, just 3380 * skip doing the setup, we've already cleared the cache so we're safe. 3381 */ 3382 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 3383 ret = -ENOSPC; 3384 goto out_put; 3385 } 3386 3387 /* 3388 * Try to preallocate enough space based on how big the block group is. 3389 * Keep in mind this has to include any pinned space which could end up 3390 * taking up quite a bit since it's not folded into the other space 3391 * cache. 
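 * The sizing below works out to 16 sectors per 256M of block group
 * length: e.g. a 1G block group with 4K sectors preallocates
 * 4 * 16 * 4K = 256K for the cache file.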
3392 */ 3393 cache_size = div_u64(block_group->length, SZ_256M); 3394 if (!cache_size) 3395 cache_size = 1; 3396 3397 cache_size *= 16; 3398 cache_size *= fs_info->sectorsize; 3399 3400 ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 3401 cache_size, false); 3402 if (ret) 3403 goto out_put; 3404 3405 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, 3406 cache_size, cache_size, 3407 &alloc_hint); 3408 /* 3409 * Our cache requires contiguous chunks so that we don't modify a bunch 3410 * of metadata or split extents when writing the cache out, which means 3411 * we can enospc if we are heavily fragmented in addition to just normal 3412 * out of space conditions. So if we hit this just skip setting up any 3413 * other block groups for this transaction, maybe we'll unpin enough 3414 * space the next time around. 3415 */ 3416 if (!ret) 3417 dcs = BTRFS_DC_SETUP; 3418 else if (ret == -ENOSPC) 3419 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 3420 3421 out_put: 3422 iput(inode); 3423 out_free: 3424 btrfs_release_path(path); 3425 out: 3426 spin_lock(&block_group->lock); 3427 if (!ret && dcs == BTRFS_DC_SETUP) 3428 block_group->cache_generation = trans->transid; 3429 block_group->disk_cache_state = dcs; 3430 spin_unlock(&block_group->lock); 3431 3432 extent_changeset_free(data_reserved); 3433 return ret; 3434 } 3435 3436 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 3437 { 3438 struct btrfs_fs_info *fs_info = trans->fs_info; 3439 struct btrfs_block_group *cache, *tmp; 3440 struct btrfs_transaction *cur_trans = trans->transaction; 3441 BTRFS_PATH_AUTO_FREE(path); 3442 3443 if (list_empty(&cur_trans->dirty_bgs) || 3444 !btrfs_test_opt(fs_info, SPACE_CACHE)) 3445 return 0; 3446 3447 path = btrfs_alloc_path(); 3448 if (!path) 3449 return -ENOMEM; 3450 3451 /* Could add new block groups, use _safe just in case */ 3452 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 3453 dirty_list) { 3454 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 3455 cache_save_setup(cache, trans, path); 3456 } 3457 3458 return 0; 3459 } 3460 3461 /* 3462 * Transaction commit does final block group cache writeback during a critical 3463 * section where nothing is allowed to change the FS. This is required in 3464 * order for the cache to actually match the block group, but can introduce a 3465 * lot of latency into the commit. 3466 * 3467 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 3468 * There's a chance we'll have to redo some of it if the block group changes 3469 * again during the commit, but it greatly reduces the commit latency by 3470 * getting rid of the easy block groups while we're still allowing others to 3471 * join the commit. 
3472 */ 3473 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 3474 { 3475 struct btrfs_fs_info *fs_info = trans->fs_info; 3476 struct btrfs_block_group *cache; 3477 struct btrfs_transaction *cur_trans = trans->transaction; 3478 int ret = 0; 3479 int should_put; 3480 BTRFS_PATH_AUTO_FREE(path); 3481 LIST_HEAD(dirty); 3482 struct list_head *io = &cur_trans->io_bgs; 3483 int loops = 0; 3484 3485 spin_lock(&cur_trans->dirty_bgs_lock); 3486 if (list_empty(&cur_trans->dirty_bgs)) { 3487 spin_unlock(&cur_trans->dirty_bgs_lock); 3488 return 0; 3489 } 3490 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3491 spin_unlock(&cur_trans->dirty_bgs_lock); 3492 3493 again: 3494 /* Make sure all the block groups on our dirty list actually exist */ 3495 btrfs_create_pending_block_groups(trans); 3496 3497 if (!path) { 3498 path = btrfs_alloc_path(); 3499 if (!path) { 3500 ret = -ENOMEM; 3501 goto out; 3502 } 3503 } 3504 3505 /* 3506 * cache_write_mutex is here only to save us from balance or automatic 3507 * removal of empty block groups deleting this block group while we are 3508 * writing out the cache 3509 */ 3510 mutex_lock(&trans->transaction->cache_write_mutex); 3511 while (!list_empty(&dirty)) { 3512 bool drop_reserve = true; 3513 3514 cache = list_first_entry(&dirty, struct btrfs_block_group, 3515 dirty_list); 3516 /* 3517 * This can happen if something re-dirties a block group that 3518 * is already under IO. Just wait for it to finish and then do 3519 * it all again 3520 */ 3521 if (!list_empty(&cache->io_list)) { 3522 list_del_init(&cache->io_list); 3523 btrfs_wait_cache_io(trans, cache, path); 3524 btrfs_put_block_group(cache); 3525 } 3526 3527 3528 /* 3529 * btrfs_wait_cache_io uses the cache->dirty_list to decide if 3530 * it should update the cache_state. Don't delete until after 3531 * we wait. 3532 * 3533 * Since we're not running in the commit critical section 3534 * we need the dirty_bgs_lock to protect from update_block_group 3535 */ 3536 spin_lock(&cur_trans->dirty_bgs_lock); 3537 list_del_init(&cache->dirty_list); 3538 spin_unlock(&cur_trans->dirty_bgs_lock); 3539 3540 should_put = 1; 3541 3542 cache_save_setup(cache, trans, path); 3543 3544 if (cache->disk_cache_state == BTRFS_DC_SETUP) { 3545 cache->io_ctl.inode = NULL; 3546 ret = btrfs_write_out_cache(trans, cache, path); 3547 if (ret == 0 && cache->io_ctl.inode) { 3548 should_put = 0; 3549 3550 /* 3551 * The cache_write_mutex is protecting the 3552 * io_list, also refer to the definition of 3553 * btrfs_transaction::io_bgs for more details 3554 */ 3555 list_add_tail(&cache->io_list, io); 3556 } else { 3557 /* 3558 * If we failed to write the cache, the 3559 * generation will be bad and life goes on 3560 */ 3561 ret = 0; 3562 } 3563 } 3564 if (!ret) { 3565 ret = update_block_group_item(trans, path, cache); 3566 /* 3567 * Our block group might still be attached to the list 3568 * of new block groups in the transaction handle of some 3569 * other task (struct btrfs_trans_handle->new_bgs). This 3570 * means its block group item isn't yet in the extent 3571 * tree. If this happens ignore the error, as we will 3572 * try again later in the critical section of the 3573 * transaction commit. 
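 * That retry works because we re-add the group to the transaction's
 * dirty_bgs list below (taking an extra reference), so the final pass in
 * btrfs_write_dirty_block_groups() during the commit picks it up again.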
3574 */ 3575 if (ret == -ENOENT) { 3576 ret = 0; 3577 spin_lock(&cur_trans->dirty_bgs_lock); 3578 if (list_empty(&cache->dirty_list)) { 3579 list_add_tail(&cache->dirty_list, 3580 &cur_trans->dirty_bgs); 3581 btrfs_get_block_group(cache); 3582 drop_reserve = false; 3583 } 3584 spin_unlock(&cur_trans->dirty_bgs_lock); 3585 } else if (ret) { 3586 btrfs_abort_transaction(trans, ret); 3587 } 3588 } 3589 3590 /* If it's not on the io list, we need to put the block group */ 3591 if (should_put) 3592 btrfs_put_block_group(cache); 3593 if (drop_reserve) 3594 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 3595 /* 3596 * Avoid blocking other tasks for too long. It might even save 3597 * us from writing caches for block groups that are going to be 3598 * removed. 3599 */ 3600 mutex_unlock(&trans->transaction->cache_write_mutex); 3601 if (ret) 3602 goto out; 3603 mutex_lock(&trans->transaction->cache_write_mutex); 3604 } 3605 mutex_unlock(&trans->transaction->cache_write_mutex); 3606 3607 /* 3608 * Go through delayed refs for all the stuff we've just kicked off 3609 * and then loop back (just once) 3610 */ 3611 if (!ret) 3612 ret = btrfs_run_delayed_refs(trans, 0); 3613 if (!ret && loops == 0) { 3614 loops++; 3615 spin_lock(&cur_trans->dirty_bgs_lock); 3616 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3617 /* 3618 * dirty_bgs_lock protects us from concurrent block group 3619 * deletes too (not just cache_write_mutex). 3620 */ 3621 if (!list_empty(&dirty)) { 3622 spin_unlock(&cur_trans->dirty_bgs_lock); 3623 goto again; 3624 } 3625 spin_unlock(&cur_trans->dirty_bgs_lock); 3626 } 3627 out: 3628 if (ret < 0) { 3629 spin_lock(&cur_trans->dirty_bgs_lock); 3630 list_splice_init(&dirty, &cur_trans->dirty_bgs); 3631 spin_unlock(&cur_trans->dirty_bgs_lock); 3632 btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 3633 } 3634 3635 return ret; 3636 } 3637 3638 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 3639 { 3640 struct btrfs_fs_info *fs_info = trans->fs_info; 3641 struct btrfs_block_group *cache; 3642 struct btrfs_transaction *cur_trans = trans->transaction; 3643 int ret = 0; 3644 int should_put; 3645 BTRFS_PATH_AUTO_FREE(path); 3646 struct list_head *io = &cur_trans->io_bgs; 3647 3648 path = btrfs_alloc_path(); 3649 if (!path) 3650 return -ENOMEM; 3651 3652 /* 3653 * Even though we are in the critical section of the transaction commit, 3654 * we can still have concurrent tasks adding elements to this 3655 * transaction's list of dirty block groups. These tasks correspond to 3656 * endio free space workers started when writeback finishes for a 3657 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 3658 * allocate new block groups as a result of COWing nodes of the root 3659 * tree when updating the free space inode. The writeback for the space 3660 * caches is triggered by an earlier call to 3661 * btrfs_start_dirty_block_groups() and iterations of the following 3662 * loop. 3663 * Also we want to do the cache_save_setup first and then run the 3664 * delayed refs to make sure we have the best chance at doing this all 3665 * in one shot. 3666 */ 3667 spin_lock(&cur_trans->dirty_bgs_lock); 3668 while (!list_empty(&cur_trans->dirty_bgs)) { 3669 cache = list_first_entry(&cur_trans->dirty_bgs, 3670 struct btrfs_block_group, 3671 dirty_list); 3672 3673 /* 3674 * This can happen if cache_save_setup re-dirties a block group 3675 * that is already under IO. 
Just wait for it to finish and 3676 * then do it all again 3677 */ 3678 if (!list_empty(&cache->io_list)) { 3679 spin_unlock(&cur_trans->dirty_bgs_lock); 3680 list_del_init(&cache->io_list); 3681 btrfs_wait_cache_io(trans, cache, path); 3682 btrfs_put_block_group(cache); 3683 spin_lock(&cur_trans->dirty_bgs_lock); 3684 } 3685 3686 /* 3687 * Don't remove from the dirty list until after we've waited on 3688 * any pending IO 3689 */ 3690 list_del_init(&cache->dirty_list); 3691 spin_unlock(&cur_trans->dirty_bgs_lock); 3692 should_put = 1; 3693 3694 cache_save_setup(cache, trans, path); 3695 3696 if (!ret) 3697 ret = btrfs_run_delayed_refs(trans, U64_MAX); 3698 3699 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 3700 cache->io_ctl.inode = NULL; 3701 ret = btrfs_write_out_cache(trans, cache, path); 3702 if (ret == 0 && cache->io_ctl.inode) { 3703 should_put = 0; 3704 list_add_tail(&cache->io_list, io); 3705 } else { 3706 /* 3707 * If we failed to write the cache, the 3708 * generation will be bad and life goes on 3709 */ 3710 ret = 0; 3711 } 3712 } 3713 if (!ret) { 3714 ret = update_block_group_item(trans, path, cache); 3715 /* 3716 * One of the free space endio workers might have 3717 * created a new block group while updating a free space 3718 * cache's inode (at inode.c:btrfs_finish_ordered_io()) 3719 * and hasn't released its transaction handle yet, in 3720 * which case the new block group is still attached to 3721 * its transaction handle and its creation has not 3722 * finished yet (no block group item in the extent tree 3723 * yet, etc). If this is the case, wait for all free 3724 * space endio workers to finish and retry. This is a 3725 * very rare case so no need for a more efficient and 3726 * complex approach. 3727 */ 3728 if (ret == -ENOENT) { 3729 wait_event(cur_trans->writer_wait, 3730 atomic_read(&cur_trans->num_writers) == 1); 3731 ret = update_block_group_item(trans, path, cache); 3732 if (ret) 3733 btrfs_abort_transaction(trans, ret); 3734 } else if (ret) { 3735 btrfs_abort_transaction(trans, ret); 3736 } 3737 } 3738 3739 /* If its not on the io list, we need to put the block group */ 3740 if (should_put) 3741 btrfs_put_block_group(cache); 3742 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 3743 spin_lock(&cur_trans->dirty_bgs_lock); 3744 } 3745 spin_unlock(&cur_trans->dirty_bgs_lock); 3746 3747 /* 3748 * Refer to the definition of io_bgs member for details why it's safe 3749 * to use it without any locking 3750 */ 3751 while (!list_empty(io)) { 3752 cache = list_first_entry(io, struct btrfs_block_group, 3753 io_list); 3754 list_del_init(&cache->io_list); 3755 btrfs_wait_cache_io(trans, cache, path); 3756 btrfs_put_block_group(cache); 3757 } 3758 3759 return ret; 3760 } 3761 3762 static void btrfs_maybe_reset_size_class(struct btrfs_block_group *bg) 3763 { 3764 lockdep_assert_held(&bg->lock); 3765 if (btrfs_block_group_should_use_size_class(bg) && 3766 bg->used == 0 && bg->reserved == 0) 3767 bg->size_class = BTRFS_BG_SZ_NONE; 3768 } 3769 3770 int btrfs_update_block_group(struct btrfs_trans_handle *trans, 3771 u64 bytenr, u64 num_bytes, bool alloc) 3772 { 3773 struct btrfs_fs_info *info = trans->fs_info; 3774 struct btrfs_space_info *space_info; 3775 struct btrfs_block_group *cache; 3776 u64 old_val; 3777 bool reclaim = false; 3778 bool bg_already_dirty = true; 3779 int factor; 3780 3781 /* Block accounting for super block */ 3782 spin_lock(&info->delalloc_root_lock); 3783 old_val = btrfs_super_bytes_used(info->super_copy); 3784 if (alloc) 3785 old_val += num_bytes; 
3786 else 3787 old_val -= num_bytes; 3788 btrfs_set_super_bytes_used(info->super_copy, old_val); 3789 spin_unlock(&info->delalloc_root_lock); 3790 3791 cache = btrfs_lookup_block_group(info, bytenr); 3792 if (!cache) 3793 return -ENOENT; 3794 3795 /* An extent can not span multiple block groups. */ 3796 ASSERT(bytenr + num_bytes <= btrfs_block_group_end(cache)); 3797 3798 space_info = cache->space_info; 3799 factor = btrfs_bg_type_to_factor(cache->flags); 3800 3801 /* 3802 * If this block group has free space cache written out, we need to make 3803 * sure to load it if we are removing space. This is because we need 3804 * the unpinning stage to actually add the space back to the block group, 3805 * otherwise we will leak space. 3806 */ 3807 if (!alloc && !btrfs_block_group_done(cache)) 3808 btrfs_cache_block_group(cache, true); 3809 3810 spin_lock(&space_info->lock); 3811 spin_lock(&cache->lock); 3812 3813 if (btrfs_test_opt(info, SPACE_CACHE) && 3814 cache->disk_cache_state < BTRFS_DC_CLEAR) 3815 cache->disk_cache_state = BTRFS_DC_CLEAR; 3816 3817 old_val = cache->used; 3818 if (alloc) { 3819 old_val += num_bytes; 3820 cache->used = old_val; 3821 cache->reserved -= num_bytes; 3822 cache->reclaim_mark = 0; 3823 space_info->bytes_reserved -= num_bytes; 3824 space_info->bytes_used += num_bytes; 3825 space_info->disk_used += num_bytes * factor; 3826 if (READ_ONCE(space_info->periodic_reclaim)) 3827 btrfs_space_info_update_reclaimable(space_info, -num_bytes); 3828 spin_unlock(&cache->lock); 3829 spin_unlock(&space_info->lock); 3830 } else { 3831 old_val -= num_bytes; 3832 cache->used = old_val; 3833 cache->pinned += num_bytes; 3834 btrfs_maybe_reset_size_class(cache); 3835 btrfs_space_info_update_bytes_pinned(space_info, num_bytes); 3836 space_info->bytes_used -= num_bytes; 3837 space_info->disk_used -= num_bytes * factor; 3838 if (READ_ONCE(space_info->periodic_reclaim)) 3839 btrfs_space_info_update_reclaimable(space_info, num_bytes); 3840 else 3841 reclaim = should_reclaim_block_group(cache, num_bytes); 3842 3843 spin_unlock(&cache->lock); 3844 spin_unlock(&space_info->lock); 3845 3846 btrfs_set_extent_bit(&trans->transaction->pinned_extents, bytenr, 3847 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); 3848 } 3849 3850 spin_lock(&trans->transaction->dirty_bgs_lock); 3851 if (list_empty(&cache->dirty_list)) { 3852 list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs); 3853 bg_already_dirty = false; 3854 btrfs_get_block_group(cache); 3855 } 3856 spin_unlock(&trans->transaction->dirty_bgs_lock); 3857 3858 /* 3859 * No longer have used bytes in this block group, queue it for deletion. 3860 * We do this after adding the block group to the dirty list to avoid 3861 * races between cleaner kthread and space cache writeout. 3862 */ 3863 if (!alloc && old_val == 0) { 3864 if (!btrfs_test_opt(info, DISCARD_ASYNC)) 3865 btrfs_mark_bg_unused(cache); 3866 } else if (!alloc && reclaim) { 3867 btrfs_mark_bg_to_reclaim(cache); 3868 } 3869 3870 btrfs_put_block_group(cache); 3871 3872 /* Modified block groups are accounted for in the delayed_refs_rsv. */ 3873 if (!bg_already_dirty) 3874 btrfs_inc_delayed_refs_rsv_bg_updates(info); 3875 3876 return 0; 3877 } 3878 3879 /* 3880 * Update the block_group and space info counters. 3881 * 3882 * @cache: The cache we are manipulating 3883 * @ram_bytes: The number of bytes of file content, and will be same to 3884 * @num_bytes except for the compress path. 
3885 * @num_bytes: The number of bytes in question
3886 * @delalloc: Whether the blocks are allocated for a delalloc write
3887 *
3888 * This is called by the allocator when it reserves space. If this is a
3889 * reservation and the block group has become read only, we cannot make the
3890 * reservation and return -EAGAIN; otherwise this function always succeeds.
3891 */
3892 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3893 u64 ram_bytes, u64 num_bytes, bool delalloc,
3894 bool force_wrong_size_class)
3895 {
3896 struct btrfs_space_info *space_info = cache->space_info;
3897 enum btrfs_block_group_size_class size_class;
3898 int ret = 0;
3899
3900 spin_lock(&space_info->lock);
3901 spin_lock(&cache->lock);
3902 if (cache->ro) {
3903 ret = -EAGAIN;
3904 goto out_error;
3905 }
3906
3907 if (btrfs_block_group_should_use_size_class(cache)) {
3908 size_class = btrfs_calc_block_group_size_class(num_bytes);
3909 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class);
3910 if (ret)
3911 goto out_error;
3912 }
3913
3914 cache->reserved += num_bytes;
3915 if (delalloc)
3916 cache->delalloc_bytes += num_bytes;
3917
3918 trace_btrfs_space_reservation(cache->fs_info, "space_info",
3919 space_info->flags, num_bytes, 1);
3920 spin_unlock(&cache->lock);
3921
3922 space_info->bytes_reserved += num_bytes;
3923 btrfs_space_info_update_bytes_may_use(space_info, -ram_bytes);
3924
3925 /*
3926 * Compression can use less space than we reserved, so wake tickets if
3927 * that happens.
3928 */
3929 if (num_bytes < ram_bytes)
3930 btrfs_try_granting_tickets(space_info);
3931 spin_unlock(&space_info->lock);
3932
3933 return 0;
3934
3935 out_error:
3936 spin_unlock(&cache->lock);
3937 spin_unlock(&space_info->lock);
3938 return ret;
3939 }
3940
3941 /*
3942 * Update the block_group and space info counters.
3943 *
3944 * @cache: The cache we are manipulating.
3945 * @num_bytes: The number of bytes in question.
3946 * @is_delalloc: Whether the blocks are allocated for a delalloc write.
3947 *
3948 * This is called by somebody who is freeing space that was never actually used
3949 * on disk. For example if you reserve some space for a new leaf in transaction
3950 * A and before transaction A commits you free that leaf, you call this
3951 * function to clear the reservation.
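 *
 * Rough pairing sketch (illustrative only, not a complete code path, 'len' is
 * a placeholder): space taken with btrfs_add_reserved_bytes() that never ends
 * up written is returned with a matching call here:
 *
 *   btrfs_add_reserved_bytes(cache, len, len, false, false);
 *   ... the allocation is abandoned before the extent is used ...
 *   btrfs_free_reserved_bytes(cache, len, false);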
3952 */ 3953 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes, 3954 bool is_delalloc) 3955 { 3956 struct btrfs_space_info *space_info = cache->space_info; 3957 bool bg_ro; 3958 3959 spin_lock(&space_info->lock); 3960 spin_lock(&cache->lock); 3961 bg_ro = cache->ro; 3962 cache->reserved -= num_bytes; 3963 btrfs_maybe_reset_size_class(cache); 3964 if (is_delalloc) 3965 cache->delalloc_bytes -= num_bytes; 3966 spin_unlock(&cache->lock); 3967 3968 if (bg_ro) 3969 space_info->bytes_readonly += num_bytes; 3970 else if (btrfs_is_zoned(cache->fs_info)) 3971 space_info->bytes_zone_unusable += num_bytes; 3972 3973 space_info->bytes_reserved -= num_bytes; 3974 space_info->max_extent_size = 0; 3975 3976 btrfs_try_granting_tickets(space_info); 3977 spin_unlock(&space_info->lock); 3978 } 3979 3980 static void force_metadata_allocation(struct btrfs_fs_info *info) 3981 { 3982 struct list_head *head = &info->space_info; 3983 struct btrfs_space_info *found; 3984 3985 list_for_each_entry(found, head, list) { 3986 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3987 found->force_alloc = CHUNK_ALLOC_FORCE; 3988 } 3989 } 3990 3991 static bool should_alloc_chunk(const struct btrfs_fs_info *fs_info, 3992 const struct btrfs_space_info *sinfo, int force) 3993 { 3994 u64 bytes_used = btrfs_space_info_used(sinfo, false); 3995 u64 thresh; 3996 3997 if (force == CHUNK_ALLOC_FORCE) 3998 return true; 3999 4000 /* 4001 * in limited mode, we want to have some free space up to 4002 * about 1% of the FS size. 4003 */ 4004 if (force == CHUNK_ALLOC_LIMITED) { 4005 thresh = btrfs_super_total_bytes(fs_info->super_copy); 4006 thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1)); 4007 4008 if (sinfo->total_bytes - bytes_used < thresh) 4009 return true; 4010 } 4011 4012 if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) 4013 return false; 4014 return true; 4015 } 4016 4017 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) 4018 { 4019 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); 4020 struct btrfs_space_info *space_info; 4021 4022 space_info = btrfs_find_space_info(trans->fs_info, type); 4023 if (!space_info) { 4024 DEBUG_WARN(); 4025 return -EINVAL; 4026 } 4027 4028 return btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); 4029 } 4030 4031 static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, 4032 struct btrfs_space_info *space_info, 4033 u64 flags) 4034 { 4035 struct btrfs_block_group *bg; 4036 int ret; 4037 4038 /* 4039 * Check if we have enough space in the system space info because we 4040 * will need to update device items in the chunk btree and insert a new 4041 * chunk item in the chunk btree as well. This will allocate a new 4042 * system block group if needed. 4043 */ 4044 check_system_chunk(trans, flags); 4045 4046 bg = btrfs_create_chunk(trans, space_info, flags); 4047 if (IS_ERR(bg)) { 4048 ret = PTR_ERR(bg); 4049 goto out; 4050 } 4051 4052 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 4053 /* 4054 * Normally we are not expected to fail with -ENOSPC here, since we have 4055 * previously reserved space in the system space_info and allocated one 4056 * new system chunk if necessary. However there are three exceptions: 4057 * 4058 * 1) We may have enough free space in the system space_info but all the 4059 * existing system block groups have a profile which can not be used 4060 * for extent allocation. 4061 * 4062 * This happens when mounting in degraded mode. 
For example we have a
4063 * RAID1 filesystem with 2 devices, lose one device and mount the fs
4064 * using the other device in degraded mode. If we then allocate a chunk,
4065 * we may have enough free space in the existing system space_info, but
4066 * none of the block groups can be used for extent allocation since they
4067 * have a RAID1 profile, and because we are in degraded mode with a
4068 * single device, we are forced to allocate a new system chunk with a
4069 * SINGLE profile. Making check_system_chunk() iterate over all system
4070 * block groups and check if they have a usable profile and enough space
4071 * can be slow on very large filesystems, so we tolerate the -ENOSPC and
4072 * try again after forcing allocation of a new system chunk. This way
4073 * we avoid paying the cost of that search in normal circumstances, when
4074 * we are not mounted in degraded mode;
4075 *
4076 * 2) We had enough free space in the system space_info, and one suitable
4077 * block group to allocate from when we called check_system_chunk()
4078 * above. However right after we called it, the only system block group
4079 * with enough free space got turned into RO mode by a running scrub,
4080 * and in this case we have to allocate a new one and retry. We only
4081 * need to do this allocation and retry once, since we have a transaction
4082 * handle and scrub uses the commit root to search for block groups;
4083 *
4084 * 3) We had one system block group with enough free space when we called
4085 * check_system_chunk(), but after that, right before we tried to
4086 * allocate the last extent buffer we needed, a discard operation came
4087 * in and it temporarily removed the last free space entry from the
4088 * block group (discard removes a free space entry, discards it, and
4089 * then adds back the entry to the block group cache).
4090 */
4091 if (ret == -ENOSPC) {
4092 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
4093 struct btrfs_block_group *sys_bg;
4094 struct btrfs_space_info *sys_space_info;
4095
4096 sys_space_info = btrfs_find_space_info(trans->fs_info, sys_flags);
4097 if (unlikely(!sys_space_info)) {
4098 ret = -EINVAL;
4099 btrfs_abort_transaction(trans, ret);
4100 goto out;
4101 }
4102
4103 sys_bg = btrfs_create_chunk(trans, sys_space_info, sys_flags);
4104 if (IS_ERR(sys_bg)) {
4105 ret = PTR_ERR(sys_bg);
4106 btrfs_abort_transaction(trans, ret);
4107 goto out;
4108 }
4109
4110 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
4111 if (unlikely(ret)) {
4112 btrfs_abort_transaction(trans, ret);
4113 goto out;
4114 }
4115
4116 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
4117 if (unlikely(ret)) {
4118 btrfs_abort_transaction(trans, ret);
4119 goto out;
4120 }
4121 } else if (unlikely(ret)) {
4122 btrfs_abort_transaction(trans, ret);
4123 goto out;
4124 }
4125 out:
4126 btrfs_trans_release_chunk_metadata(trans);
4127
4128 if (ret)
4129 return ERR_PTR(ret);
4130
4131 btrfs_get_block_group(bg);
4132 return bg;
4133 }
4134
4135 /*
4136 * Chunk allocation is done in 2 phases:
4137 *
4138 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
4139 * the chunk, the chunk mapping, create its block group and add the items
4140 * that belong in the chunk btree to it - more specifically, we need to
4141 * update device items in the chunk btree and add a new chunk item to it.
4142 *
4143 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
4144 * group item to the extent btree and the device extent items to the devices
4145 * btree.
4146 *
4147 * This is done to prevent deadlocks. For example when COWing a node from the
4148 * extent btree we are holding a write lock on the node's parent and if we
4149 * trigger chunk allocation and attempt to insert the new block group item
4150 * in the extent btree right away, we could deadlock because the path for the
4151 * insertion can include that parent node. At first glance it seems impossible
4152 * to trigger chunk allocation after starting a transaction since tasks should
4153 * reserve enough transaction units (metadata space), however while that is true
4154 * most of the time, chunk allocation may still be triggered for several reasons:
4155 *
4156 * 1) When reserving metadata, we check if there is enough free space in the
4157 * metadata space_info and therefore don't trigger allocation of a new chunk.
4158 * However later when the task actually tries to COW an extent buffer from
4159 * the extent btree or from the device btree for example, it is forced to
4160 * allocate a new block group (chunk) because the only one that had enough
4161 * free space was just turned to RO mode by a running scrub for example (or
4162 * device replace, block group reclaim thread, etc), so we cannot use it
4163 * for allocating an extent and end up being forced to allocate a new one;
4164 *
4165 * 2) Because we only check that the metadata space_info has enough free bytes,
4166 * we end up not allocating a new metadata chunk in that case. However if
4167 * the filesystem was mounted in degraded mode, none of the existing block
4168 * groups might be suitable for extent allocation due to their incompatible
4169 * profile (e.g. mounting a 2-device filesystem, where all block groups
4170 * use a RAID1 profile, in degraded mode using a single device). In this case
4171 * when the task attempts to COW some extent buffer of the extent btree for
4172 * example, it will trigger allocation of a new metadata block group with a
4173 * suitable profile (SINGLE profile in the example of the degraded mount of
4174 * the RAID1 filesystem);
4175 *
4176 * 3) The task has reserved enough transaction units / metadata space, but when
4177 * it attempts to COW an extent buffer from the extent or device btree for
4178 * example, it does not find any free extent in any metadata block group, and
4179 * is therefore forced to try to allocate a new metadata block group.
4180 * This is because some other task allocated all available extents in the
4181 * meantime - this typically happens with tasks that don't reserve space
4182 * properly, either intentionally or as a bug. One example where this is
4183 * done intentionally is fsync, as it does not reserve any transaction units
4184 * and ends up allocating a variable number of metadata extents for log
4185 * tree extent buffers;
4186 *
4187 * 4) The task has reserved enough transaction units / metadata space, but right
4188 * before it tries to allocate the last extent buffer it needs, a discard
4189 * operation comes in and, temporarily, removes the last free space entry from
4190 * the only metadata block group that had free space (discard starts by
4191 * removing a free space entry from a block group, then does the discard
4192 * operation and, once it's done, it adds back the free space entry to the
4193 * block group).
4194 *
4195 * We also need this two-phase setup when adding a device to a filesystem with
4196 * a seed device - we must create new metadata and system chunks without adding
4197 * any of the block group items to the chunk, extent and device btrees. If we
4198 * did not do it this way, we would get ENOSPC when attempting to update those
4199 * btrees, since all the chunks from the seed device are read-only.
4200 *
4201 * Phase 1 does the updates and insertions to the chunk btree because if we had
4202 * it done in phase 2 and have a thundering herd of tasks allocating chunks in
4203 * parallel, we risk having too many system chunks allocated by many tasks if
4204 * many tasks reach phase 1 without the previous ones completing phase 2. In the
4205 * extreme case this leads to exhaustion of the system chunk array in the
4206 * superblock. This is easier to trigger if using a btree node/leaf size of 64K
4207 * and with RAID filesystems (so we have more device items in the chunk btree).
4208 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
4209 * the system chunk array due to concurrent allocations") provides more details.
4210 *
4211 * Allocation of system chunks does not happen through this function. A task that
4212 * needs to update the chunk btree (the only btree that uses system chunks) must
4213 * preallocate chunk space by calling either check_system_chunk() or
4214 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
4215 * metadata chunk or when removing a chunk, while the latter is used before doing
4216 * a modification to the chunk btree - use cases for the latter are adding,
4217 * removing and resizing a device as well as relocation of a system chunk.
4218 * See the comment below for more details.
4219 *
4220 * The reservation of system space, done through check_system_chunk(), as well
4221 * as all the updates and insertions into the chunk btree must be done while
4222 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
4223 * an extent buffer from the chunks btree we never trigger allocation of a new
4224 * system chunk, which would result in a deadlock (trying to lock twice an
4225 * extent buffer of the chunk btree, first time before triggering the chunk
4226 * allocation and the second time during chunk allocation while attempting to
4227 * update the chunks btree). The system chunk array is also updated while holding
4228 * that mutex. The same logic applies to removing chunks - we must reserve system
4229 * space, update the chunk btree and the system chunk array in the superblock
4230 * while holding fs_info->chunk_mutex.
4231 *
4232 * This function, btrfs_chunk_alloc(), belongs to phase 1.
4233 *
4234 * @space_info: specify which space_info the new chunk should belong to.
4235 *
4236 * If @force is CHUNK_ALLOC_FORCE:
4237 * - return 1 if it successfully allocates a chunk,
4238 * - return errors including -ENOSPC otherwise.
4239 * If @force is NOT CHUNK_ALLOC_FORCE:
4240 * - return 0 if it doesn't need to allocate a new chunk,
4241 * - return 1 if it successfully allocates a chunk,
4242 * - return errors including -ENOSPC otherwise.
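 *
 * Rough usage sketch (illustrative only, error handling elided): a caller that
 * wants a data chunk available would do something like:
 *
 *   flags = btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
 *   space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
 *   ret = btrfs_chunk_alloc(trans, space_info, flags, CHUNK_ALLOC_NO_FORCE);
 *
 * with 0 meaning no new chunk was needed, 1 meaning a chunk was allocated and
 * negative values being errors (including -ENOSPC).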
4243 */ 4244 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, 4245 struct btrfs_space_info *space_info, u64 flags, 4246 enum btrfs_chunk_alloc_enum force) 4247 { 4248 struct btrfs_fs_info *fs_info = trans->fs_info; 4249 struct btrfs_block_group *ret_bg; 4250 bool wait_for_alloc = false; 4251 bool should_alloc = false; 4252 bool from_extent_allocation = false; 4253 int ret = 0; 4254 4255 if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) { 4256 from_extent_allocation = true; 4257 force = CHUNK_ALLOC_FORCE; 4258 } 4259 4260 /* Don't re-enter if we're already allocating a chunk */ 4261 if (trans->allocating_chunk) 4262 return -ENOSPC; 4263 /* 4264 * Allocation of system chunks can not happen through this path, as we 4265 * could end up in a deadlock if we are allocating a data or metadata 4266 * chunk and there is another task modifying the chunk btree. 4267 * 4268 * This is because while we are holding the chunk mutex, we will attempt 4269 * to add the new chunk item to the chunk btree or update an existing 4270 * device item in the chunk btree, while the other task that is modifying 4271 * the chunk btree is attempting to COW an extent buffer while holding a 4272 * lock on it and on its parent - if the COW operation triggers a system 4273 * chunk allocation, then we can deadlock because we are holding the 4274 * chunk mutex and we may need to access that extent buffer or its parent 4275 * in order to add the chunk item or update a device item. 4276 * 4277 * Tasks that want to modify the chunk tree should reserve system space 4278 * before updating the chunk btree, by calling either 4279 * btrfs_reserve_chunk_metadata() or check_system_chunk(). 4280 * It's possible that after a task reserves the space, it still ends up 4281 * here - this happens in the cases described above at do_chunk_alloc(). 4282 * The task will have to either retry or fail. 4283 */ 4284 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 4285 return -ENOSPC; 4286 4287 do { 4288 spin_lock(&space_info->lock); 4289 if (force < space_info->force_alloc) 4290 force = space_info->force_alloc; 4291 should_alloc = should_alloc_chunk(fs_info, space_info, force); 4292 if (space_info->full) { 4293 /* No more free physical space */ 4294 spin_unlock(&space_info->lock); 4295 if (should_alloc) 4296 ret = -ENOSPC; 4297 else 4298 ret = 0; 4299 return ret; 4300 } else if (!should_alloc) { 4301 spin_unlock(&space_info->lock); 4302 return 0; 4303 } else if (space_info->chunk_alloc) { 4304 /* 4305 * Someone is already allocating, so we need to block 4306 * until this someone is finished and then loop to 4307 * recheck if we should continue with our allocation 4308 * attempt. 4309 */ 4310 spin_unlock(&space_info->lock); 4311 wait_for_alloc = true; 4312 force = CHUNK_ALLOC_NO_FORCE; 4313 mutex_lock(&fs_info->chunk_mutex); 4314 mutex_unlock(&fs_info->chunk_mutex); 4315 } else { 4316 /* Proceed with allocation */ 4317 space_info->chunk_alloc = true; 4318 spin_unlock(&space_info->lock); 4319 wait_for_alloc = false; 4320 } 4321 4322 cond_resched(); 4323 } while (wait_for_alloc); 4324 4325 mutex_lock(&fs_info->chunk_mutex); 4326 trans->allocating_chunk = true; 4327 4328 /* 4329 * If we have mixed data/metadata chunks we want to make sure we keep 4330 * allocating mixed chunks instead of individual chunks. 
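 * (Mixed block groups, where the DATA and METADATA bits are combined in the
 * same chunks, are typically only created for very small filesystems; once
 * present, new chunks must keep both bits set, which is what the flags
 * adjustment below does.)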
4331 */ 4332 if (btrfs_mixed_space_info(space_info)) 4333 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 4334 4335 /* 4336 * if we're doing a data chunk, go ahead and make sure that 4337 * we keep a reasonable number of metadata chunks allocated in the 4338 * FS as well. 4339 */ 4340 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 4341 fs_info->data_chunk_allocations++; 4342 if (!(fs_info->data_chunk_allocations % 4343 fs_info->metadata_ratio)) 4344 force_metadata_allocation(fs_info); 4345 } 4346 4347 ret_bg = do_chunk_alloc(trans, space_info, flags); 4348 trans->allocating_chunk = false; 4349 4350 if (IS_ERR(ret_bg)) { 4351 ret = PTR_ERR(ret_bg); 4352 } else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) { 4353 /* 4354 * New block group is likely to be used soon. Try to activate 4355 * it now. Failure is OK for now. 4356 */ 4357 btrfs_zone_activate(ret_bg); 4358 } 4359 4360 if (!ret) 4361 btrfs_put_block_group(ret_bg); 4362 4363 spin_lock(&space_info->lock); 4364 if (ret < 0) { 4365 if (ret == -ENOSPC) 4366 space_info->full = true; 4367 else 4368 goto out; 4369 } else { 4370 ret = 1; 4371 space_info->max_extent_size = 0; 4372 } 4373 4374 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 4375 out: 4376 space_info->chunk_alloc = false; 4377 spin_unlock(&space_info->lock); 4378 mutex_unlock(&fs_info->chunk_mutex); 4379 4380 return ret; 4381 } 4382 4383 static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type) 4384 { 4385 u64 num_dev; 4386 4387 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; 4388 if (!num_dev) 4389 num_dev = fs_info->fs_devices->rw_devices; 4390 4391 return num_dev; 4392 } 4393 4394 static void reserve_chunk_space(struct btrfs_trans_handle *trans, 4395 u64 bytes, 4396 u64 type) 4397 { 4398 struct btrfs_fs_info *fs_info = trans->fs_info; 4399 struct btrfs_space_info *info; 4400 u64 left; 4401 int ret = 0; 4402 4403 /* 4404 * Needed because we can end up allocating a system chunk and for an 4405 * atomic and race free space reservation in the chunk block reserve. 4406 */ 4407 lockdep_assert_held(&fs_info->chunk_mutex); 4408 4409 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 4410 spin_lock(&info->lock); 4411 left = info->total_bytes - btrfs_space_info_used(info, true); 4412 spin_unlock(&info->lock); 4413 4414 if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4415 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", 4416 left, bytes, type); 4417 btrfs_dump_space_info(info, 0, false); 4418 } 4419 4420 if (left < bytes) { 4421 u64 flags = btrfs_system_alloc_profile(fs_info); 4422 struct btrfs_block_group *bg; 4423 struct btrfs_space_info *space_info; 4424 4425 space_info = btrfs_find_space_info(fs_info, flags); 4426 ASSERT(space_info); 4427 4428 /* 4429 * Ignore failure to create system chunk. We might end up not 4430 * needing it, as we might not need to COW all nodes/leafs from 4431 * the paths we visit in the chunk tree (they were already COWed 4432 * or created in the current transaction for example). 4433 */ 4434 bg = btrfs_create_chunk(trans, space_info, flags); 4435 if (IS_ERR(bg)) { 4436 ret = PTR_ERR(bg); 4437 } else { 4438 /* 4439 * We have a new chunk. We also need to activate it for 4440 * zoned filesystem. 
4441 */ 4442 ret = btrfs_zoned_activate_one_bg(info, true); 4443 if (ret < 0) 4444 return; 4445 4446 /* 4447 * If we fail to add the chunk item here, we end up 4448 * trying again at phase 2 of chunk allocation, at 4449 * btrfs_create_pending_block_groups(). So ignore 4450 * any error here. An ENOSPC here could happen, due to 4451 * the cases described at do_chunk_alloc() - the system 4452 * block group we just created was just turned into RO 4453 * mode by a scrub for example, or a running discard 4454 * temporarily removed its free space entries, etc. 4455 */ 4456 btrfs_chunk_alloc_add_chunk_item(trans, bg); 4457 } 4458 } 4459 4460 if (!ret) { 4461 ret = btrfs_block_rsv_add(fs_info, 4462 &fs_info->chunk_block_rsv, 4463 bytes, BTRFS_RESERVE_NO_FLUSH); 4464 if (!ret) 4465 trans->chunk_bytes_reserved += bytes; 4466 } 4467 } 4468 4469 /* 4470 * Reserve space in the system space for allocating or removing a chunk. 4471 * The caller must be holding fs_info->chunk_mutex. 4472 */ 4473 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) 4474 { 4475 struct btrfs_fs_info *fs_info = trans->fs_info; 4476 const u64 num_devs = get_profile_num_devs(fs_info, type); 4477 u64 bytes; 4478 4479 /* num_devs device items to update and 1 chunk item to add or remove. */ 4480 bytes = btrfs_calc_metadata_size(fs_info, num_devs) + 4481 btrfs_calc_insert_metadata_size(fs_info, 1); 4482 4483 reserve_chunk_space(trans, bytes, type); 4484 } 4485 4486 /* 4487 * Reserve space in the system space, if needed, for doing a modification to the 4488 * chunk btree. 4489 * 4490 * @trans: A transaction handle. 4491 * @is_item_insertion: Indicate if the modification is for inserting a new item 4492 * in the chunk btree or if it's for the deletion or update 4493 * of an existing item. 4494 * 4495 * This is used in a context where we need to update the chunk btree outside 4496 * block group allocation and removal, to avoid a deadlock with a concurrent 4497 * task that is allocating a metadata or data block group and therefore needs to 4498 * update the chunk btree while holding the chunk mutex. After the update to the 4499 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called. 
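 *
 * Rough call pattern (illustrative only): a chunk btree modification done
 * outside of block group allocation/removal is bracketed like:
 *
 *   btrfs_reserve_chunk_metadata(trans, false);
 *   ... update or delete the chunk btree item ...
 *   btrfs_trans_release_chunk_metadata(trans);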
4500 * 4501 */ 4502 void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans, 4503 bool is_item_insertion) 4504 { 4505 struct btrfs_fs_info *fs_info = trans->fs_info; 4506 u64 bytes; 4507 4508 if (is_item_insertion) 4509 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 4510 else 4511 bytes = btrfs_calc_metadata_size(fs_info, 1); 4512 4513 mutex_lock(&fs_info->chunk_mutex); 4514 reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM); 4515 mutex_unlock(&fs_info->chunk_mutex); 4516 } 4517 4518 void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 4519 { 4520 struct btrfs_block_group *block_group; 4521 4522 block_group = btrfs_lookup_first_block_group(info, 0); 4523 while (block_group) { 4524 btrfs_wait_block_group_cache_done(block_group); 4525 spin_lock(&block_group->lock); 4526 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, 4527 &block_group->runtime_flags)) { 4528 struct btrfs_inode *inode = block_group->inode; 4529 4530 block_group->inode = NULL; 4531 spin_unlock(&block_group->lock); 4532 4533 ASSERT(block_group->io_ctl.inode == NULL); 4534 iput(&inode->vfs_inode); 4535 } else { 4536 spin_unlock(&block_group->lock); 4537 } 4538 block_group = btrfs_next_block_group(block_group); 4539 } 4540 } 4541 4542 static void check_removing_space_info(struct btrfs_space_info *space_info) 4543 { 4544 struct btrfs_fs_info *info = space_info->fs_info; 4545 4546 if (space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY) { 4547 /* This is a top space_info, proceed with its children first. */ 4548 for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) { 4549 if (space_info->sub_group[i]) { 4550 check_removing_space_info(space_info->sub_group[i]); 4551 kfree(space_info->sub_group[i]); 4552 space_info->sub_group[i] = NULL; 4553 } 4554 } 4555 } 4556 4557 /* 4558 * Do not hide this behind enospc_debug, this is actually important and 4559 * indicates a real bug if this happens. 4560 */ 4561 if (WARN_ON(space_info->bytes_pinned > 0 || space_info->bytes_may_use > 0)) 4562 btrfs_dump_space_info(space_info, 0, false); 4563 4564 /* 4565 * If there was a failure to cleanup a log tree, very likely due to an 4566 * IO failure on a writeback attempt of one or more of its extent 4567 * buffers, we could not do proper (and cheap) unaccounting of their 4568 * reserved space, so don't warn on bytes_reserved > 0 in that case. 4569 */ 4570 if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || 4571 !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { 4572 if (WARN_ON(space_info->bytes_reserved > 0)) 4573 btrfs_dump_space_info(space_info, 0, false); 4574 } 4575 4576 WARN_ON(space_info->reclaim_size > 0); 4577 } 4578 4579 /* 4580 * Must be called only after stopping all workers, since we could have block 4581 * group caching kthreads running, and therefore they could race with us if we 4582 * freed the block groups before stopping them. 
4583 */ 4584 int btrfs_free_block_groups(struct btrfs_fs_info *info) 4585 { 4586 struct btrfs_block_group *block_group; 4587 struct btrfs_space_info *space_info; 4588 struct btrfs_caching_control *caching_ctl; 4589 struct rb_node *n; 4590 4591 if (btrfs_is_zoned(info)) { 4592 if (info->active_meta_bg) { 4593 btrfs_put_block_group(info->active_meta_bg); 4594 info->active_meta_bg = NULL; 4595 } 4596 if (info->active_system_bg) { 4597 btrfs_put_block_group(info->active_system_bg); 4598 info->active_system_bg = NULL; 4599 } 4600 } 4601 4602 write_lock(&info->block_group_cache_lock); 4603 while (!list_empty(&info->caching_block_groups)) { 4604 caching_ctl = list_first_entry(&info->caching_block_groups, 4605 struct btrfs_caching_control, list); 4606 list_del(&caching_ctl->list); 4607 btrfs_put_caching_control(caching_ctl); 4608 } 4609 write_unlock(&info->block_group_cache_lock); 4610 4611 spin_lock(&info->unused_bgs_lock); 4612 while (!list_empty(&info->unused_bgs)) { 4613 block_group = list_first_entry(&info->unused_bgs, 4614 struct btrfs_block_group, 4615 bg_list); 4616 list_del_init(&block_group->bg_list); 4617 btrfs_put_block_group(block_group); 4618 } 4619 4620 while (!list_empty(&info->reclaim_bgs)) { 4621 block_group = list_first_entry(&info->reclaim_bgs, 4622 struct btrfs_block_group, 4623 bg_list); 4624 list_del_init(&block_group->bg_list); 4625 btrfs_put_block_group(block_group); 4626 } 4627 4628 while (!list_empty(&info->fully_remapped_bgs)) { 4629 block_group = list_first_entry(&info->fully_remapped_bgs, 4630 struct btrfs_block_group, bg_list); 4631 list_del_init(&block_group->bg_list); 4632 btrfs_put_block_group(block_group); 4633 } 4634 spin_unlock(&info->unused_bgs_lock); 4635 4636 spin_lock(&info->zone_active_bgs_lock); 4637 while (!list_empty(&info->zone_active_bgs)) { 4638 block_group = list_first_entry(&info->zone_active_bgs, 4639 struct btrfs_block_group, 4640 active_bg_list); 4641 list_del_init(&block_group->active_bg_list); 4642 btrfs_put_block_group(block_group); 4643 } 4644 spin_unlock(&info->zone_active_bgs_lock); 4645 4646 write_lock(&info->block_group_cache_lock); 4647 while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { 4648 block_group = rb_entry(n, struct btrfs_block_group, 4649 cache_node); 4650 rb_erase_cached(&block_group->cache_node, 4651 &info->block_group_cache_tree); 4652 RB_CLEAR_NODE(&block_group->cache_node); 4653 write_unlock(&info->block_group_cache_lock); 4654 4655 down_write(&block_group->space_info->groups_sem); 4656 list_del(&block_group->list); 4657 up_write(&block_group->space_info->groups_sem); 4658 4659 /* 4660 * We haven't cached this block group, which means we could 4661 * possibly have excluded extents on this block group. 
4662 */ 4663 if (block_group->cached == BTRFS_CACHE_NO || 4664 block_group->cached == BTRFS_CACHE_ERROR) 4665 btrfs_free_excluded_extents(block_group); 4666 4667 btrfs_remove_free_space_cache(block_group); 4668 ASSERT(block_group->cached != BTRFS_CACHE_STARTED); 4669 ASSERT(list_empty(&block_group->dirty_list)); 4670 ASSERT(list_empty(&block_group->io_list)); 4671 ASSERT(list_empty(&block_group->bg_list)); 4672 ASSERT(refcount_read(&block_group->refs) == 1); 4673 ASSERT(block_group->swap_extents == 0); 4674 btrfs_put_block_group(block_group); 4675 4676 write_lock(&info->block_group_cache_lock); 4677 } 4678 write_unlock(&info->block_group_cache_lock); 4679 4680 btrfs_release_global_block_rsv(info); 4681 4682 while (!list_empty(&info->space_info)) { 4683 space_info = list_first_entry(&info->space_info, 4684 struct btrfs_space_info, list); 4685 4686 check_removing_space_info(space_info); 4687 list_del(&space_info->list); 4688 btrfs_sysfs_remove_space_info(space_info); 4689 } 4690 return 0; 4691 } 4692 4693 void btrfs_freeze_block_group(struct btrfs_block_group *cache) 4694 { 4695 atomic_inc(&cache->frozen); 4696 } 4697 4698 void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) 4699 { 4700 struct btrfs_fs_info *fs_info = block_group->fs_info; 4701 bool cleanup; 4702 4703 spin_lock(&block_group->lock); 4704 cleanup = (atomic_dec_and_test(&block_group->frozen) && 4705 test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)); 4706 spin_unlock(&block_group->lock); 4707 4708 if (cleanup) { 4709 struct btrfs_chunk_map *map; 4710 4711 map = btrfs_find_chunk_map(fs_info, block_group->start, 1); 4712 /* Logic error, can't happen. */ 4713 ASSERT(map); 4714 4715 btrfs_remove_chunk_map(fs_info, map); 4716 4717 /* Once for our lookup reference. */ 4718 btrfs_free_chunk_map(map); 4719 4720 /* 4721 * We may have left one free space entry and other possible 4722 * tasks trimming this block group have left 1 entry each one. 4723 * Free them if any. 4724 */ 4725 btrfs_remove_free_space_cache(block_group); 4726 } 4727 } 4728 4729 bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg) 4730 { 4731 bool ret = true; 4732 4733 spin_lock(&bg->lock); 4734 if (bg->ro) 4735 ret = false; 4736 else 4737 bg->swap_extents++; 4738 spin_unlock(&bg->lock); 4739 4740 return ret; 4741 } 4742 4743 void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount) 4744 { 4745 spin_lock(&bg->lock); 4746 ASSERT(!bg->ro); 4747 ASSERT(bg->swap_extents >= amount); 4748 bg->swap_extents -= amount; 4749 spin_unlock(&bg->lock); 4750 } 4751 4752 enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size) 4753 { 4754 if (size <= SZ_128K) 4755 return BTRFS_BG_SZ_SMALL; 4756 if (size <= SZ_8M) 4757 return BTRFS_BG_SZ_MEDIUM; 4758 return BTRFS_BG_SZ_LARGE; 4759 } 4760 4761 /* 4762 * Handle a block group allocating an extent in a size class 4763 * 4764 * @bg: The block group we allocated in. 4765 * @size_class: The size class of the allocation. 4766 * @force_wrong_size_class: Whether we are desperate enough to allow 4767 * mismatched size classes. 4768 * 4769 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the 4770 * case of a race that leads to the wrong size class without 4771 * force_wrong_size_class set. 4772 * 4773 * find_free_extent will skip block groups with a mismatched size class until 4774 * it really needs to avoid ENOSPC. In that case it will set 4775 * force_wrong_size_class. 
However, if a block group is newly allocated and 4776 * doesn't yet have a size class, then it is possible for two allocations of 4777 * different sizes to race and both try to use it. The loser is caught here and 4778 * has to retry. 4779 */ 4780 int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, 4781 enum btrfs_block_group_size_class size_class, 4782 bool force_wrong_size_class) 4783 { 4784 lockdep_assert_held(&bg->lock); 4785 ASSERT(size_class != BTRFS_BG_SZ_NONE); 4786 4787 /* The new allocation is in the right size class, do nothing */ 4788 if (bg->size_class == size_class) 4789 return 0; 4790 /* 4791 * The new allocation is in a mismatched size class. 4792 * This means one of two things: 4793 * 4794 * 1. Two tasks in find_free_extent for different size_classes raced 4795 * and hit the same empty block_group. Make the loser try again. 4796 * 2. A call to find_free_extent got desperate enough to set 4797 * 'force_wrong_slab'. Don't change the size_class, but allow the 4798 * allocation. 4799 */ 4800 if (bg->size_class != BTRFS_BG_SZ_NONE) { 4801 if (force_wrong_size_class) 4802 return 0; 4803 return -EAGAIN; 4804 } 4805 /* 4806 * The happy new block group case: the new allocation is the first 4807 * one in the block_group so we set size_class. 4808 */ 4809 bg->size_class = size_class; 4810 4811 return 0; 4812 } 4813 4814 bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg) 4815 { 4816 if (btrfs_is_zoned(bg->fs_info)) 4817 return false; 4818 if (!btrfs_is_block_group_data_only(bg)) 4819 return false; 4820 return true; 4821 } 4822 4823 void btrfs_mark_bg_fully_remapped(struct btrfs_block_group *bg, 4824 struct btrfs_trans_handle *trans) 4825 { 4826 struct btrfs_fs_info *fs_info = trans->fs_info; 4827 4828 4829 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { 4830 spin_lock(&bg->lock); 4831 set_bit(BLOCK_GROUP_FLAG_STRIPE_REMOVAL_PENDING, &bg->runtime_flags); 4832 spin_unlock(&bg->lock); 4833 4834 btrfs_discard_queue_work(&fs_info->discard_ctl, bg); 4835 } else { 4836 spin_lock(&fs_info->unused_bgs_lock); 4837 /* 4838 * The block group might already be on the unused_bgs list, 4839 * remove it if it is. It'll get readded after 4840 * btrfs_handle_fully_remapped_bgs() finishes. 4841 */ 4842 if (!list_empty(&bg->bg_list)) 4843 list_del(&bg->bg_list); 4844 else 4845 btrfs_get_block_group(bg); 4846 4847 list_add_tail(&bg->bg_list, &fs_info->fully_remapped_bgs); 4848 spin_unlock(&fs_info->unused_bgs_lock); 4849 } 4850 } 4851 4852 /* 4853 * Compare the block group and chunk trees, and find any fully-remapped block 4854 * groups which haven't yet had their chunk stripes and device extents removed, 4855 * and put them on the fully_remapped_bgs list so this gets done. 4856 * 4857 * This happens when a block group becomes fully remapped, i.e. its last 4858 * identity mapping is removed, and the volume is unmounted before async 4859 * discard has finished. It's important this gets done as until it is the 4860 * chunk's stripes are dead space. 
4861 */ 4862 int btrfs_populate_fully_remapped_bgs_list(struct btrfs_fs_info *fs_info) 4863 { 4864 struct rb_node *node_bg, *node_chunk; 4865 4866 node_bg = rb_first_cached(&fs_info->block_group_cache_tree); 4867 node_chunk = rb_first_cached(&fs_info->mapping_tree); 4868 4869 while (node_bg && node_chunk) { 4870 struct btrfs_block_group *bg; 4871 struct btrfs_chunk_map *map; 4872 4873 bg = rb_entry(node_bg, struct btrfs_block_group, cache_node); 4874 map = rb_entry(node_chunk, struct btrfs_chunk_map, rb_node); 4875 4876 ASSERT(bg->start == map->start); 4877 4878 if (!(bg->flags & BTRFS_BLOCK_GROUP_REMAPPED)) 4879 goto next; 4880 4881 if (bg->identity_remap_count != 0) 4882 goto next; 4883 4884 if (map->num_stripes == 0) 4885 goto next; 4886 4887 spin_lock(&fs_info->unused_bgs_lock); 4888 4889 if (list_empty(&bg->bg_list)) { 4890 btrfs_get_block_group(bg); 4891 list_add_tail(&bg->bg_list, &fs_info->fully_remapped_bgs); 4892 } else { 4893 list_move_tail(&bg->bg_list, &fs_info->fully_remapped_bgs); 4894 } 4895 4896 spin_unlock(&fs_info->unused_bgs_lock); 4897 4898 /* 4899 * Ideally we'd want to call btrfs_discard_queue_work() here, 4900 * but it'd do nothing as the discard worker hasn't been 4901 * started yet. 4902 * 4903 * The block group will get added to the discard list when 4904 * btrfs_handle_fully_remapped_bgs() gets called, when we 4905 * commit the first transaction. 4906 */ 4907 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { 4908 spin_lock(&bg->lock); 4909 set_bit(BLOCK_GROUP_FLAG_STRIPE_REMOVAL_PENDING, &bg->runtime_flags); 4910 spin_unlock(&bg->lock); 4911 } 4912 4913 next: 4914 node_bg = rb_next(node_bg); 4915 node_chunk = rb_next(node_chunk); 4916 } 4917 4918 ASSERT(!node_bg && !node_chunk); 4919 4920 return 0; 4921 } 4922