// SPDX-License-Identifier: GPL-2.0

#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

static inline bool has_unwritten_metadata(struct btrfs_block_group *block_group)
{
	/* The meta_write_pointer is available only on the zoned setup. */
	if (!btrfs_is_zoned(block_group->fs_info))
		return false;

	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
		return false;

	return block_group->start + block_group->alloc_offset >
		block_group->meta_write_pointer;
}

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress.
 *
 * Should be called with balance_lock held.
 */
static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags)
{
	const struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile.
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible. */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	/* Select the highest-redundancy RAID level. */
	if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
		allowed = BTRFS_BLOCK_GROUP_RAID1C4;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
		allowed = BTRFS_BLOCK_GROUP_RAID1C3;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_DUP)
		allowed = BTRFS_BLOCK_GROUP_DUP;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
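
/*
 * Illustrative example (not from the original source): on a filesystem
 * with two rw devices and extended flags
 * BTRFS_BLOCK_GROUP_DATA | RAID1 | RAID0, the higher-redundancy profiles
 * are either masked out by the devs_min check or absent from the flags,
 * so the ladder above picks RAID1 and the result is
 * extended_to_chunk(BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1).
 */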

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		/*
		 * If there was a failure to cleanup a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on reserved > 0 in that
		 * case.
		 */
		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
			WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		kfree(cache->free_space_ctl);
		btrfs_free_chunk_map(cache->physical_map);
		kfree(cache);
	}
}

static int btrfs_bg_start_cmp(const struct rb_node *new,
			      const struct rb_node *exist)
{
	const struct btrfs_block_group *new_bg =
		rb_entry(new, struct btrfs_block_group, cache_node);
	const struct btrfs_block_group *exist_bg =
		rb_entry(exist, struct btrfs_block_group, cache_node);

	if (new_bg->start < exist_bg->start)
		return -1;
	if (new_bg->start > exist_bg->start)
		return 1;
	return 0;
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache.
 */
static int btrfs_add_block_group_cache(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct rb_node *exist;
	int ret = 0;

	ASSERT(block_group->length != 0);

	write_lock(&fs_info->block_group_cache_lock);

	exist = rb_find_add_cached(&block_group->cache_node,
			&fs_info->block_group_cache_tree, btrfs_bg_start_cmp);
	if (exist)
		ret = -EEXIST;
	write_unlock(&fs_info->block_group_cache_lock);

	return ret;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	read_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_root.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	read_unlock(&info->block_group_cache_lock);

	return ret;
}
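
/*
 * Illustrative semantics for the search above (not from the original
 * source): with two block groups starting at 0 and 1G, each 1G long, a
 * lookup of bytenr 512M with contains=1 returns the group at 0, since
 * it contains that address, while contains=0 returns the group at 1G,
 * the first one whose start is at or after 512M.
 */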

/*
 * Return the block group that starts at or after bytenr.
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr.
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	read_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:	The filesystem information object.
 * @bytenr:	Logical start address of the extent.
 *
 * Checks if we can do a NOCOW write for the given extent, and if so increments
 * the number of NOCOW writers in the block group that contains the extent, as
 * long as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
 * is responsible for calling btrfs_dec_nocow_writers() later.
 *
 * Or NULL if we cannot do a NOCOW write.
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it
 * wants to use it, then it should get a reference on it before calling this
 * function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
	btrfs_put_block_group(bg);
}
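
/*
 * Typical usage of the pair above (an illustrative sketch, not from the
 * original source):
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
 *	if (!bg)
 *		fall back to a COW write;
 *	create the ordered extent for the NOCOW write;
 *	btrfs_dec_nocow_writers(bg);
 */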

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	int progress;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	/*
	 * We've already failed to allocate from this block group, so even if
	 * there's enough space in the block group it isn't contiguous enough to
	 * allow for an allocation, so wait for at least the next wakeup tick,
	 * or for the thing to be done.
	 */
	progress = atomic_read(&caching_ctl->progress);

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (progress != atomic_read(&caching_ctl->progress) &&
		    (cache->free_space_ctl->free_space >= num_bytes)));

	btrfs_put_caching_control(caching_ctl);
}

static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
				       struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif
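
/*
 * Illustrative walk of fragment_free_space() above (not from the
 * original source): for a metadata block group with a 16K nodesize,
 * chunk = 16K and step = 32K, so each iteration removes the first 16K
 * of every 32K window, deliberately leaving the free space fragmented
 * into alternating 16K holes and 16K free extents.
 */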

/*
 * Add a free space range to the in memory free space cache of a block group.
 * This checks if the range contains super block locations, so that such
 * locations are not added to the free space cache.
 *
 * @block_group:      The target block group.
 * @start:            Start offset of the range.
 * @end:              End offset of the range (exclusive).
 * @total_added_ret:  Optional pointer to return the total amount of space
 *                    added to the block group's free space cache.
 *
 * Returns 0 on success or < 0 on error.
 */
int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
			     u64 end, u64 *total_added_ret)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size;
	int ret;

	if (total_added_ret)
		*total_added_ret = 0;

	while (start < end) {
		if (!btrfs_find_first_extent_bit(&info->excluded_extents, start,
						 &extent_start, &extent_end,
						 EXTENT_DIRTY, NULL))
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			if (ret)
				return ret;
			if (total_added_ret)
				*total_added_ret += size;
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		if (ret)
			return ret;
		if (total_added_ret)
			*total_added_ret += size;
	}

	return 0;
}
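
/*
 * Worked example for btrfs_add_new_free_space() (illustrative, not from
 * the original source): if [start, end) overlaps an excluded extent
 * recording a superblock mirror, the space before the excluded range is
 * added to the cache, start then jumps to just past the excluded range
 * and the loop continues, so the superblock location itself never ends
 * up in the free space cache.
 */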

/*
 * Sample an extent item at step @index out of @max_index through the block
 * group.
 *
 * @block_group: the block group to sample from
 * @index:       the integral step through the block group to grab from
 * @max_index:   the granularity of the sampling
 * @key:         return value parameter for the item we find
 *
 * Pre-conditions on indices:
 * 0 <= index <= max_index
 * 0 < max_index
 *
 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative
 * error code on error.
 */
static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
					  struct btrfs_block_group *block_group,
					  int index, int max_index,
					  struct btrfs_key *found_key)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	u64 search_offset;
	u64 search_end = block_group->start + block_group->length;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key search_key;
	int ret = 0;

	ASSERT(index >= 0);
	ASSERT(index <= max_index);
	ASSERT(max_index > 0);
	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
						       BTRFS_SUPER_INFO_OFFSET));

	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	search_offset = index * div_u64(block_group->length, max_index);
	search_key.objectid = block_group->start + search_offset;
	search_key.type = BTRFS_EXTENT_ITEM_KEY;
	search_key.offset = 0;

	btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
		/* Success; sampled an extent item in the block group */
		if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key->objectid >= block_group->start &&
		    found_key->objectid + found_key->offset <= search_end)
			break;

		/* We can't possibly find a valid extent item anymore */
		if (found_key->objectid >= search_end) {
			ret = 1;
			break;
		}
	}

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	return ret;
}

/*
 * Best effort attempt to compute a block group's size class while caching it.
 *
 * @block_group: the block group we are caching
 *
 * We cannot infer the size class while adding free space extents, because that
 * logic doesn't care about contiguous file extents (it doesn't differentiate
 * between a 100M extent and 100 contiguous 1M extents). So we need to read the
 * file extent items. Reading all of them is quite wasteful, because usually
 * only a handful are enough to give a good answer. Therefore, we just grab 5 of
 * them at even steps through the block group and pick the smallest size class
 * we see. Since size class is best effort, and not guaranteed in general,
 * inaccuracy is acceptable.
 *
 * To be more explicit about why this algorithm makes sense:
 *
 * If we are caching in a block group from disk, then there are three major cases
 * to consider:
 * 1. the block group is well behaved and all extents in it are the same size
 *    class.
 * 2. the block group is mostly one size class with rare exceptions for last
 *    ditch allocations.
 * 3. the block group was populated before size classes and can have a totally
 *    arbitrary mix of size classes.
 *
 * In case 1, looking at any extent in the block group will yield the correct
 * result. For the mixed cases, taking the minimum size class seems like a good
 * approximation, since gaps from frees will be usable by the size class. For
 * case 2, a small handful of file extents is likely to yield the right answer.
 * For case 3, we can either read every file extent, or admit that this is best
 * effort anyway and try to stay fast.
 *
 * Returns: 0 on success, negative error code on error.
 */
static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
				       struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_key key;
	int i;
	u64 min_size = block_group->length;
	enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
	int ret;

	if (!btrfs_block_group_should_use_size_class(block_group))
		return 0;

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	for (i = 0; i < 5; ++i) {
		ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
		if (ret < 0)
			goto out;
		if (ret > 0)
			continue;
		min_size = min_t(u64, min_size, key.offset);
		size_class = btrfs_calc_block_group_size_class(min_size);
	}
	if (size_class != BTRFS_BG_SZ_NONE) {
		spin_lock(&block_group->lock);
		block_group->size_class = size_class;
		spin_unlock(&block_group->lock);
	}
out:
	return ret;
}

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
	extent_root = btrfs_extent_root(fs_info, last);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = 0;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = 0;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			u64 space_added;

			ret = btrfs_add_new_free_space(block_group, last,
						       key.objectid, &space_added);
			if (ret)
				goto out;
			total_found += space_added;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid + fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup) {
					atomic_inc(&caching_ctl->progress);
					wake_up(&caching_ctl->wait);
				}
			}
		}
		path->slots[0]++;
	}

	ret = btrfs_add_new_free_space(block_group, last,
				       block_group->start + block_group->length,
				       NULL);
out:
	return ret;
}

static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
{
	btrfs_clear_extent_bit(&bg->fs_info->excluded_extents, bg->start,
			       bg->start + bg->length - 1, EXTENT_DIRTY, NULL);
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	load_block_group_size_class(caching_ctl, block_group);
	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching. Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = btrfs_load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all. */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	refcount_set(&caching_ctl->count, 2);
	atomic_set(&caching_ctl->progress, 0);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	write_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	write_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (wait && caching_ctl)
		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}
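
/*
 * Usage sketch for btrfs_cache_block_group() above (illustrative, not
 * from the original source): calling it with wait == true blocks until
 * caching finishes, returning -EIO if it ended in BTRFS_CACHE_ERROR,
 * while wait == false only queues the caching work and callers can
 * later poll btrfs_block_group_done() on the block group.
 */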

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *   in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
{
	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
		return fs_info->block_group_root;
	return btrfs_extent_root(fs_info, 0);
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = btrfs_block_group_root(fs_info);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}
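
/*
 * Key layout note for remove_block_group_item() above (illustrative,
 * not from the original source): a block group item is keyed as
 * (objectid = block group start, type = BTRFS_BLOCK_GROUP_ITEM_KEY,
 * offset = block group length), so a 256M group starting at 1G is
 * looked up with the key (1073741824, BLOCK_GROUP_ITEM, 268435456).
 */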

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_map;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, map->start);
	if (!block_group)
		return -ENOENT;

	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* Make sure this block group isn't part of an allocation cluster. */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * Make sure this block group isn't part of a metadata
	 * allocation cluster.
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now).
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode.
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;

	write_lock(&fs_info->block_group_cache_lock);
	rb_erase_cached(&block_group->cache_node,
			&fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	write_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * We must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore.
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);

	write_lock(&fs_info->block_group_cache_lock);
	caching_ctl = btrfs_get_caching_control(block_group);
	if (!caching_ctl) {
		struct btrfs_caching_control *ctl;

		list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
			if (ctl->block_group == block_group) {
				caching_ctl = ctl;
				refcount_inc(&caching_ctl->count);
				break;
			}
		}
	}
	if (caching_ctl)
		list_del_init(&caching_ctl->list);
	write_unlock(&fs_info->block_group_cache_lock);

	if (caching_ctl) {
		/* Once for the caching bgs list and once for us. */
		btrfs_put_caching_control(caching_ctl);
		btrfs_put_caching_control(caching_ctl);
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length - block_group->zone_unusable);
		WARN_ON(block_group->space_info->bytes_zone_unusable
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
	btrfs_space_info_update_bytes_zone_unusable(block_group->space_info,
						    -block_group->zone_unusable);
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = btrfs_remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	/*
	 * Hitting this WARN means we removed a block group with an unwritten
	 * region. It will cause "unable to find chunk map for logical" errors.
	 */
	if (WARN_ON(has_unwritten_metadata(block_group)))
		btrfs_warn(fs_info,
			   "block group %llu is removed before metadata write out",
			   block_group->start);

	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the chunk map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_map = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_map)
		btrfs_remove_chunk_map(fs_info, map);

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	struct btrfs_chunk_map *map;
	unsigned int num_items;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(map != NULL);
	ASSERT(map->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	num_items = 3 + map->num_stripes;
	btrfs_free_chunk_map(map);

	return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}
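
/*
 * Worked reservation example (illustrative, not from the original
 * source): removing a RAID1 block group whose chunk map has two stripes
 * reserves num_items = 3 + 2 = 5 metadata units: the orphan item, the
 * block group item, the free space item and one device extent item per
 * stripe.
 */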

/*
 * Mark block group @cache read-only, so later writes won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, bool force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->swap_extents) {
		ret = -ETXTBSY;
		goto out;
	}

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->zone_unusable - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		if (btrfs_is_zoned(cache->fs_info)) {
			/* Migrate zone_unusable bytes to readonly */
			sinfo->bytes_readonly += cache->zone_unusable;
			btrfs_space_info_update_bytes_zone_unusable(sinfo, -cache->zone_unusable);
			cache->zone_unusable = 0;
		}
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, false);
	}
	return ret;
}
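
/*
 * Worked example for the num_bytes computation above (illustrative, not
 * from the original source): a 1G data block group with 100M used, 50M
 * reserved, 20M pinned and no super or zone_unusable bytes yields
 * num_bytes = 1G - 50M - 20M - 0 - 0 - 100M = 854M of still-unallocated
 * space that marking the group read-only would take away from the
 * space_info.
 */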
1458 */ 1459 mutex_lock(&fs_info->unused_bg_unpin_mutex); 1460 if (prev_trans) { 1461 ret = btrfs_clear_extent_bit(&prev_trans->pinned_extents, start, end, 1462 EXTENT_DIRTY, NULL); 1463 if (ret) 1464 goto out; 1465 } 1466 1467 ret = btrfs_clear_extent_bit(&trans->transaction->pinned_extents, start, end, 1468 EXTENT_DIRTY, NULL); 1469 out: 1470 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1471 if (prev_trans) 1472 btrfs_put_transaction(prev_trans); 1473 1474 return ret == 0; 1475 } 1476 1477 /* 1478 * Link the block_group to a list via bg_list. 1479 * 1480 * @bg: The block_group to link to the list. 1481 * @list: The list to link it to. 1482 * 1483 * Use this rather than list_add_tail() directly to ensure proper respect 1484 * to locking and refcounting. 1485 * 1486 * Returns: true if the bg was linked with a refcount bump and false otherwise. 1487 */ 1488 static bool btrfs_link_bg_list(struct btrfs_block_group *bg, struct list_head *list) 1489 { 1490 struct btrfs_fs_info *fs_info = bg->fs_info; 1491 bool added = false; 1492 1493 spin_lock(&fs_info->unused_bgs_lock); 1494 if (list_empty(&bg->bg_list)) { 1495 btrfs_get_block_group(bg); 1496 list_add_tail(&bg->bg_list, list); 1497 added = true; 1498 } 1499 spin_unlock(&fs_info->unused_bgs_lock); 1500 return added; 1501 } 1502 1503 /* 1504 * Process the unused_bgs list and remove any that don't have any allocated 1505 * space inside of them. 1506 */ 1507 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1508 { 1509 LIST_HEAD(retry_list); 1510 struct btrfs_block_group *block_group; 1511 struct btrfs_space_info *space_info; 1512 struct btrfs_trans_handle *trans; 1513 const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); 1514 int ret = 0; 1515 1516 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1517 return; 1518 1519 if (btrfs_fs_closing(fs_info)) 1520 return; 1521 1522 /* 1523 * Long running balances can keep us blocked here for eternity, so 1524 * simply skip deletion if we're unable to get the mutex. 1525 */ 1526 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1527 return; 1528 1529 spin_lock(&fs_info->unused_bgs_lock); 1530 while (!list_empty(&fs_info->unused_bgs)) { 1531 u64 used; 1532 int trimming; 1533 1534 block_group = list_first_entry(&fs_info->unused_bgs, 1535 struct btrfs_block_group, 1536 bg_list); 1537 list_del_init(&block_group->bg_list); 1538 1539 space_info = block_group->space_info; 1540 1541 if (ret || btrfs_mixed_space_info(space_info)) { 1542 btrfs_put_block_group(block_group); 1543 continue; 1544 } 1545 spin_unlock(&fs_info->unused_bgs_lock); 1546 1547 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 1548 1549 /* Don't want to race with allocators so take the groups_sem */ 1550 down_write(&space_info->groups_sem); 1551 1552 /* 1553 * Async discard moves the final block group discard to be prior 1554 * to the unused_bgs code path. Therefore, if it's not fully 1555 * trimmed, punt it back to the async discard lists. 
1556 */ 1557 if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && 1558 !btrfs_is_free_space_trimmed(block_group)) { 1559 trace_btrfs_skip_unused_block_group(block_group); 1560 up_write(&space_info->groups_sem); 1561 /* Requeue if we failed because of async discard */ 1562 btrfs_discard_queue_work(&fs_info->discard_ctl, 1563 block_group); 1564 goto next; 1565 } 1566 1567 spin_lock(&space_info->lock); 1568 spin_lock(&block_group->lock); 1569 if (btrfs_is_block_group_used(block_group) || block_group->ro || 1570 list_is_singular(&block_group->list)) { 1571 /* 1572 * We want to bail if we made new allocations or have 1573 * outstanding allocations in this block group. We do 1574 * the ro check in case balance is currently acting on 1575 * this block group. 1576 * 1577 * Also bail out if this is the only block group for its 1578 * type, because otherwise we would lose profile 1579 * information from fs_info->avail_*_alloc_bits and the 1580 * next block group of this type would be created with a 1581 * "single" profile (even if we're in a raid fs) because 1582 * fs_info->avail_*_alloc_bits would be 0. 1583 */ 1584 trace_btrfs_skip_unused_block_group(block_group); 1585 spin_unlock(&block_group->lock); 1586 spin_unlock(&space_info->lock); 1587 up_write(&space_info->groups_sem); 1588 goto next; 1589 } 1590 1591 /* 1592 * The block group may be unused but there may be space reserved 1593 * accounting with the existence of that block group, that is, 1594 * space_info->bytes_may_use was incremented by a task but no 1595 * space was yet allocated from the block group by the task. 1596 * That space may or may not be allocated, as we are generally 1597 * pessimistic about space reservation for metadata as well as 1598 * for data when using compression (as we reserve space based on 1599 * the worst case, when data can't be compressed, and before 1600 * actually attempting compression, before starting writeback). 1601 * 1602 * So check if the total space of the space_info minus the size 1603 * of this block group is less than the used space of the 1604 * space_info - if that's the case, then it means we have tasks 1605 * that might be relying on the block group in order to allocate 1606 * extents, and add back the block group to the unused list when 1607 * we finish, so that we retry later in case no tasks ended up 1608 * needing to allocate extents from the block group. 1609 */ 1610 used = btrfs_space_info_used(space_info, true); 1611 if ((space_info->total_bytes - block_group->length < used && 1612 block_group->zone_unusable < block_group->length) || 1613 has_unwritten_metadata(block_group)) { 1614 /* 1615 * Add a reference for the list, compensate for the ref 1616 * drop under the "next" label for the 1617 * fs_info->unused_bgs list. 1618 */ 1619 btrfs_link_bg_list(block_group, &retry_list); 1620 1621 trace_btrfs_skip_unused_block_group(block_group); 1622 spin_unlock(&block_group->lock); 1623 spin_unlock(&space_info->lock); 1624 up_write(&space_info->groups_sem); 1625 goto next; 1626 } 1627 1628 spin_unlock(&block_group->lock); 1629 spin_unlock(&space_info->lock); 1630 1631 /* We don't want to force the issue, only flip if it's ok. 
*/ 1632 ret = inc_block_group_ro(block_group, 0); 1633 up_write(&space_info->groups_sem); 1634 if (ret < 0) { 1635 ret = 0; 1636 goto next; 1637 } 1638 1639 ret = btrfs_zone_finish(block_group); 1640 if (ret < 0) { 1641 btrfs_dec_block_group_ro(block_group); 1642 if (ret == -EAGAIN) { 1643 btrfs_link_bg_list(block_group, &retry_list); 1644 ret = 0; 1645 } 1646 goto next; 1647 } 1648 1649 /* 1650 * Want to do this before we do anything else so we can recover 1651 * properly if we fail to join the transaction. 1652 */ 1653 trans = btrfs_start_trans_remove_block_group(fs_info, 1654 block_group->start); 1655 if (IS_ERR(trans)) { 1656 btrfs_dec_block_group_ro(block_group); 1657 ret = PTR_ERR(trans); 1658 goto next; 1659 } 1660 1661 /* 1662 * We could have pending pinned extents for this block group, 1663 * just delete them, we don't care about them anymore. 1664 */ 1665 if (!clean_pinned_extents(trans, block_group)) { 1666 btrfs_dec_block_group_ro(block_group); 1667 goto end_trans; 1668 } 1669 1670 /* 1671 * At this point, the block_group is read only and should fail 1672 * new allocations. However, btrfs_finish_extent_commit() can 1673 * cause this block_group to be placed back on the discard 1674 * lists because now the block_group isn't fully discarded. 1675 * Bail here and try again later after discarding everything. 1676 */ 1677 spin_lock(&fs_info->discard_ctl.lock); 1678 if (!list_empty(&block_group->discard_list)) { 1679 spin_unlock(&fs_info->discard_ctl.lock); 1680 btrfs_dec_block_group_ro(block_group); 1681 btrfs_discard_queue_work(&fs_info->discard_ctl, 1682 block_group); 1683 goto end_trans; 1684 } 1685 spin_unlock(&fs_info->discard_ctl.lock); 1686 1687 /* Reset pinned so btrfs_put_block_group doesn't complain */ 1688 spin_lock(&space_info->lock); 1689 spin_lock(&block_group->lock); 1690 1691 btrfs_space_info_update_bytes_pinned(space_info, -block_group->pinned); 1692 space_info->bytes_readonly += block_group->pinned; 1693 block_group->pinned = 0; 1694 1695 spin_unlock(&block_group->lock); 1696 spin_unlock(&space_info->lock); 1697 1698 /* 1699 * The normal path here is an unused block group is passed here, 1700 * then trimming is handled in the transaction commit path. 1701 * Async discard interposes before this to do the trimming 1702 * before coming down the unused block group path as trimming 1703 * will no longer be done later in the transaction commit path. 1704 */ 1705 if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1706 goto flip_async; 1707 1708 /* 1709 * DISCARD can flip during remount. On zoned filesystems, we 1710 * need to reset sequential-required zones. 1711 */ 1712 trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) || 1713 btrfs_is_zoned(fs_info); 1714 1715 /* Implicit trim during transaction commit. */ 1716 if (trimming) 1717 btrfs_freeze_block_group(block_group); 1718 1719 /* 1720 * Btrfs_remove_chunk will abort the transaction if things go 1721 * horribly wrong. 1722 */ 1723 ret = btrfs_remove_chunk(trans, block_group->start); 1724 1725 if (ret) { 1726 if (trimming) 1727 btrfs_unfreeze_block_group(block_group); 1728 goto end_trans; 1729 } 1730 1731 /* 1732 * If we're not mounted with -odiscard, we can just forget 1733 * about this block group. Otherwise we'll need to wait 1734 * until transaction commit to do the actual discard. 
1735 */ 1736 if (trimming) { 1737 spin_lock(&fs_info->unused_bgs_lock); 1738 /* 1739 * A concurrent scrub might have added us to the list 1740 * fs_info->unused_bgs, so use a list_move operation 1741 * to add the block group to the deleted_bgs list. 1742 */ 1743 list_move(&block_group->bg_list, 1744 &trans->transaction->deleted_bgs); 1745 spin_unlock(&fs_info->unused_bgs_lock); 1746 btrfs_get_block_group(block_group); 1747 } 1748 end_trans: 1749 btrfs_end_transaction(trans); 1750 next: 1751 btrfs_put_block_group(block_group); 1752 spin_lock(&fs_info->unused_bgs_lock); 1753 } 1754 list_splice_tail(&retry_list, &fs_info->unused_bgs); 1755 spin_unlock(&fs_info->unused_bgs_lock); 1756 mutex_unlock(&fs_info->reclaim_bgs_lock); 1757 return; 1758 1759 flip_async: 1760 btrfs_end_transaction(trans); 1761 spin_lock(&fs_info->unused_bgs_lock); 1762 list_splice_tail(&retry_list, &fs_info->unused_bgs); 1763 spin_unlock(&fs_info->unused_bgs_lock); 1764 mutex_unlock(&fs_info->reclaim_bgs_lock); 1765 btrfs_put_block_group(block_group); 1766 btrfs_discard_punt_unused_bgs_list(fs_info); 1767 } 1768 1769 void btrfs_mark_bg_unused(struct btrfs_block_group *bg) 1770 { 1771 struct btrfs_fs_info *fs_info = bg->fs_info; 1772 1773 spin_lock(&fs_info->unused_bgs_lock); 1774 if (list_empty(&bg->bg_list)) { 1775 btrfs_get_block_group(bg); 1776 trace_btrfs_add_unused_block_group(bg); 1777 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 1778 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { 1779 /* Pull out the block group from the reclaim_bgs list. */ 1780 trace_btrfs_add_unused_block_group(bg); 1781 list_move_tail(&bg->bg_list, &fs_info->unused_bgs); 1782 } 1783 spin_unlock(&fs_info->unused_bgs_lock); 1784 } 1785 1786 /* 1787 * We want block groups with a low number of used bytes to be in the beginning 1788 * of the list, so they will get reclaimed first. 1789 */ 1790 static int reclaim_bgs_cmp(void *unused, const struct list_head *a, 1791 const struct list_head *b) 1792 { 1793 const struct btrfs_block_group *bg1, *bg2; 1794 1795 bg1 = list_entry(a, struct btrfs_block_group, bg_list); 1796 bg2 = list_entry(b, struct btrfs_block_group, bg_list); 1797 1798 /* 1799 * Some other task may be updating the ->used field concurrently, but it 1800 * is not serious if we get a stale value or load/store tearing issues, 1801 * as sorting the list of block groups to reclaim is not critical and an 1802 * occasional imperfect order is ok. So silence KCSAN and avoid the 1803 * overhead of locking or any other synchronization. 1804 */ 1805 return data_race(bg1->used > bg2->used); 1806 } 1807 1808 static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info) 1809 { 1810 if (btrfs_is_zoned(fs_info)) 1811 return btrfs_zoned_should_reclaim(fs_info); 1812 return true; 1813 } 1814 1815 static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed) 1816 { 1817 const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info); 1818 u64 thresh_bytes = mult_perc(bg->length, thresh_pct); 1819 const u64 new_val = bg->used; 1820 const u64 old_val = new_val + bytes_freed; 1821 1822 if (thresh_bytes == 0) 1823 return false; 1824 1825 /* 1826 * If we were below the threshold before don't reclaim, we are likely a 1827 * brand new block group and we don't want to relocate new block groups. 
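 * As a rough example (numbers made up): with a 1GiB block group and a 75%
 * reclaim threshold, thresh_bytes is 768MiB. A group going from 900MiB used
 * down to 700MiB used passes both checks below and is reclaimed, while a
 * group that never reached 768MiB used is skipped.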
1828 */ 1829 if (old_val < thresh_bytes) 1830 return false; 1831 if (new_val >= thresh_bytes) 1832 return false; 1833 return true; 1834 } 1835 1836 void btrfs_reclaim_bgs_work(struct work_struct *work) 1837 { 1838 struct btrfs_fs_info *fs_info = 1839 container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 1840 struct btrfs_block_group *bg; 1841 struct btrfs_space_info *space_info; 1842 LIST_HEAD(retry_list); 1843 1844 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1845 return; 1846 1847 if (btrfs_fs_closing(fs_info)) 1848 return; 1849 1850 if (!btrfs_should_reclaim(fs_info)) 1851 return; 1852 1853 guard(super_write)(fs_info->sb); 1854 1855 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) 1856 return; 1857 1858 /* 1859 * Long running balances can keep us blocked here for eternity, so 1860 * simply skip reclaim if we're unable to get the mutex. 1861 */ 1862 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { 1863 btrfs_exclop_finish(fs_info); 1864 return; 1865 } 1866 1867 spin_lock(&fs_info->unused_bgs_lock); 1868 /* 1869 * Sort happens under lock because we can't simply splice it and sort. 1870 * The block groups might still be in use and reachable via bg_list, 1871 * and their presence in the reclaim_bgs list must be preserved. 1872 */ 1873 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); 1874 while (!list_empty(&fs_info->reclaim_bgs)) { 1875 u64 used; 1876 u64 reserved; 1877 int ret = 0; 1878 1879 bg = list_first_entry(&fs_info->reclaim_bgs, 1880 struct btrfs_block_group, 1881 bg_list); 1882 list_del_init(&bg->bg_list); 1883 1884 space_info = bg->space_info; 1885 spin_unlock(&fs_info->unused_bgs_lock); 1886 1887 /* Don't race with allocators so take the groups_sem */ 1888 down_write(&space_info->groups_sem); 1889 1890 spin_lock(&space_info->lock); 1891 spin_lock(&bg->lock); 1892 if (bg->reserved || bg->pinned || bg->ro) { 1893 /* 1894 * We want to bail if we made new allocations or have 1895 * outstanding allocations in this block group. We do 1896 * the ro check in case balance is currently acting on 1897 * this block group. 1898 */ 1899 spin_unlock(&bg->lock); 1900 spin_unlock(&space_info->lock); 1901 up_write(&space_info->groups_sem); 1902 goto next; 1903 } 1904 if (bg->used == 0) { 1905 /* 1906 * It is possible that we trigger relocation on a block 1907 * group as its extents are deleted and it first goes 1908 * below the threshold, then shortly after goes empty. 1909 * 1910 * In this case, relocating it does delete it, but has 1911 * some overhead in relocation specific metadata, looking 1912 * for the non-existent extents and running some extra 1913 * transactions, which we can avoid by using one of the 1914 * other mechanisms for dealing with empty block groups. 1915 */ 1916 if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1917 btrfs_mark_bg_unused(bg); 1918 spin_unlock(&bg->lock); 1919 spin_unlock(&space_info->lock); 1920 up_write(&space_info->groups_sem); 1921 goto next; 1922 1923 } 1924 /* 1925 * The block group might no longer meet the reclaim condition by 1926 * the time we get around to reclaiming it, so to avoid 1927 * reclaiming overly full block_groups, skip reclaiming them. 1928 * 1929 * Since the decision making process also depends on the amount 1930 * being freed, pass in a fake giant value to skip that extra 1931 * check, which is more meaningful when adding to the list in 1932 * the first place. 
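 * Passing bg->length as bytes_freed below makes old_val equal to used plus
 * the full block group length, which can never be below the percentage
 * based threshold, so effectively only the "still below the threshold now"
 * check in should_reclaim_block_group() applies here.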
1933 */ 1934 if (!should_reclaim_block_group(bg, bg->length)) { 1935 spin_unlock(&bg->lock); 1936 spin_unlock(&space_info->lock); 1937 up_write(&space_info->groups_sem); 1938 goto next; 1939 } 1940 1941 spin_unlock(&bg->lock); 1942 spin_unlock(&space_info->lock); 1943
1944 /* 1945 * Get out fast, in case we're read-only or unmounting the 1946 * filesystem. It is OK to drop block groups from the list even 1947 * for the read-only case. As we did take the super write lock, 1948 * "mount -o remount,ro" won't happen and read-only filesystem 1949 * means it is forced read-only due to a fatal error. So, it 1950 * never gets back to read-write to let us reclaim again. 1951 */
1952 if (btrfs_need_cleaner_sleep(fs_info)) { 1953 up_write(&space_info->groups_sem); 1954 goto next; 1955 } 1956 1957 ret = inc_block_group_ro(bg, 0); 1958 up_write(&space_info->groups_sem); 1959 if (ret < 0) 1960 goto next; 1961
1962 /* 1963 * The amount of bytes reclaimed corresponds to the sum of the 1964 * "used" and "reserved" counters. We have set the block group 1965 * to RO above, which prevents reservations from happening but 1966 * we may have existing reservations for which allocation has 1967 * not yet been done - btrfs_update_block_group() was not yet 1968 * called, which is where we will transfer a reserved extent's 1969 * size from the "reserved" counter to the "used" counter - this 1970 * happens when running delayed references. When we relocate the 1971 * chunk below, relocation first flushes delalloc, waits for 1972 * ordered extent completion (which is where we create delayed 1973 * references for data extents) and commits the current 1974 * transaction (which runs delayed references), and only after 1975 * it does the actual work to move extents out of the block 1976 * group. So the reported amount of reclaimed bytes is 1977 * effectively the sum of the 'used' and 'reserved' counters. 1978 */
1979 spin_lock(&bg->lock); 1980 used = bg->used; 1981 reserved = bg->reserved; 1982 spin_unlock(&bg->lock); 1983 1984 trace_btrfs_reclaim_block_group(bg); 1985 ret = btrfs_relocate_chunk(fs_info, bg->start, false); 1986 if (ret) { 1987 btrfs_dec_block_group_ro(bg); 1988 btrfs_err(fs_info, "error relocating chunk %llu", 1989 bg->start); 1990 used = 0; 1991 reserved = 0; 1992 spin_lock(&space_info->lock); 1993 space_info->reclaim_errors++; 1994 if (READ_ONCE(space_info->periodic_reclaim)) 1995 space_info->periodic_reclaim_ready = false; 1996 spin_unlock(&space_info->lock); 1997 }
1998 spin_lock(&space_info->lock); 1999 space_info->reclaim_count++; 2000 space_info->reclaim_bytes += used; 2001 space_info->reclaim_bytes += reserved; 2002 spin_unlock(&space_info->lock); 2003 2004 next: 2005 if (ret && !READ_ONCE(space_info->periodic_reclaim)) 2006 btrfs_link_bg_list(bg, &retry_list); 2007 btrfs_put_block_group(bg); 2008 2009 mutex_unlock(&fs_info->reclaim_bgs_lock);
2010 /* 2011 * Reclaiming all the block groups in the list can take really 2012 * long. Prioritize cleaning up unused block groups. 2013 */ 2014 btrfs_delete_unused_bgs(fs_info); 2015 /* 2016 * If we are interrupted by a balance, we can just bail out. The 2017 * cleaner thread will restart this work again if necessary.
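 * (A trylock failure here most likely means a balance is holding
 * reclaim_bgs_lock, which is why it is treated as an interruption.)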
2018 */ 2019 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 2020 goto end; 2021 spin_lock(&fs_info->unused_bgs_lock); 2022 } 2023 spin_unlock(&fs_info->unused_bgs_lock); 2024 mutex_unlock(&fs_info->reclaim_bgs_lock); 2025 end: 2026 spin_lock(&fs_info->unused_bgs_lock); 2027 list_splice_tail(&retry_list, &fs_info->reclaim_bgs); 2028 spin_unlock(&fs_info->unused_bgs_lock); 2029 btrfs_exclop_finish(fs_info); 2030 } 2031 2032 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 2033 { 2034 btrfs_reclaim_sweep(fs_info); 2035 spin_lock(&fs_info->unused_bgs_lock); 2036 if (!list_empty(&fs_info->reclaim_bgs)) 2037 queue_work(system_dfl_wq, &fs_info->reclaim_bgs_work); 2038 spin_unlock(&fs_info->unused_bgs_lock); 2039 } 2040 2041 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 2042 { 2043 struct btrfs_fs_info *fs_info = bg->fs_info; 2044 2045 if (btrfs_link_bg_list(bg, &fs_info->reclaim_bgs)) 2046 trace_btrfs_add_reclaim_block_group(bg); 2047 } 2048 2049 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key, 2050 const struct btrfs_path *path) 2051 { 2052 struct btrfs_chunk_map *map; 2053 struct btrfs_block_group_item bg; 2054 struct extent_buffer *leaf; 2055 int slot; 2056 u64 flags; 2057 int ret = 0; 2058 2059 slot = path->slots[0]; 2060 leaf = path->nodes[0]; 2061 2062 map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset); 2063 if (!map) { 2064 btrfs_err(fs_info, 2065 "logical %llu len %llu found bg but no related chunk", 2066 key->objectid, key->offset); 2067 return -ENOENT; 2068 } 2069 2070 if (unlikely(map->start != key->objectid || map->chunk_len != key->offset)) { 2071 btrfs_err(fs_info, 2072 "block group %llu len %llu mismatch with chunk %llu len %llu", 2073 key->objectid, key->offset, map->start, map->chunk_len); 2074 ret = -EUCLEAN; 2075 goto out_free_map; 2076 } 2077 2078 read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 2079 sizeof(bg)); 2080 flags = btrfs_stack_block_group_flags(&bg) & 2081 BTRFS_BLOCK_GROUP_TYPE_MASK; 2082 2083 if (unlikely(flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) { 2084 btrfs_err(fs_info, 2085 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 2086 key->objectid, key->offset, flags, 2087 (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type)); 2088 ret = -EUCLEAN; 2089 } 2090 2091 out_free_map: 2092 btrfs_free_chunk_map(map); 2093 return ret; 2094 } 2095 2096 static int find_first_block_group(struct btrfs_fs_info *fs_info, 2097 struct btrfs_path *path, 2098 const struct btrfs_key *key) 2099 { 2100 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2101 int ret; 2102 struct btrfs_key found_key; 2103 2104 btrfs_for_each_slot(root, key, &found_key, path, ret) { 2105 if (found_key.objectid >= key->objectid && 2106 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 2107 return read_bg_from_eb(fs_info, &found_key, path); 2108 } 2109 } 2110 return ret; 2111 } 2112 2113 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 2114 { 2115 u64 extra_flags = chunk_to_extended(flags) & 2116 BTRFS_EXTENDED_PROFILE_MASK; 2117 2118 write_seqlock(&fs_info->profiles_lock); 2119 if (flags & BTRFS_BLOCK_GROUP_DATA) 2120 fs_info->avail_data_alloc_bits |= extra_flags; 2121 if (flags & BTRFS_BLOCK_GROUP_METADATA) 2122 fs_info->avail_metadata_alloc_bits |= extra_flags; 2123 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 2124 fs_info->avail_system_alloc_bits |= extra_flags; 2125 write_sequnlock(&fs_info->profiles_lock); 2126 } 2127 2128 /* 2129 * Map a physical disk address 
to a list of logical addresses. 2130 * 2131 * @fs_info: the filesystem 2132 * @chunk_start: logical address of block group 2133 * @physical: physical address to map to logical addresses 2134 * @logical: return array of logical addresses which map to @physical 2135 * @naddrs: length of @logical 2136 * @stripe_len: size of IO stripe for the given block group 2137 * 2138 * Maps a particular @physical disk address to a list of @logical addresses. 2139 * Used primarily to exclude those portions of a block group that contain super 2140 * block copies. 2141 */ 2142 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 2143 u64 physical, u64 **logical, int *naddrs, int *stripe_len) 2144 { 2145 struct btrfs_chunk_map *map; 2146 u64 *buf; 2147 u64 bytenr; 2148 u64 data_stripe_length; 2149 u64 io_stripe_size; 2150 int i, nr = 0; 2151 int ret = 0; 2152 2153 map = btrfs_get_chunk_map(fs_info, chunk_start, 1); 2154 if (IS_ERR(map)) 2155 return -EIO; 2156 2157 data_stripe_length = map->stripe_size; 2158 io_stripe_size = BTRFS_STRIPE_LEN; 2159 chunk_start = map->start; 2160 2161 /* For RAID5/6 adjust to a full IO stripe length */ 2162 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2163 io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 2164 2165 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 2166 if (!buf) { 2167 ret = -ENOMEM; 2168 goto out; 2169 } 2170 2171 for (i = 0; i < map->num_stripes; i++) { 2172 bool already_inserted = false; 2173 u32 stripe_nr; 2174 u32 offset; 2175 int j; 2176 2177 if (!in_range(physical, map->stripes[i].physical, 2178 data_stripe_length)) 2179 continue; 2180 2181 stripe_nr = (physical - map->stripes[i].physical) >> 2182 BTRFS_STRIPE_LEN_SHIFT; 2183 offset = (physical - map->stripes[i].physical) & 2184 BTRFS_STRIPE_LEN_MASK; 2185 2186 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2187 BTRFS_BLOCK_GROUP_RAID10)) 2188 stripe_nr = div_u64(stripe_nr * map->num_stripes + i, 2189 map->sub_stripes); 2190 /* 2191 * The remaining case would be for RAID56, multiply by 2192 * nr_data_stripes(). 
Alternatively, just use rmap_len below 2193 * instead of map->stripe_len 2194 */ 2195 bytenr = chunk_start + stripe_nr * io_stripe_size + offset; 2196 2197 /* Ensure we don't add duplicate addresses */ 2198 for (j = 0; j < nr; j++) { 2199 if (buf[j] == bytenr) { 2200 already_inserted = true; 2201 break; 2202 } 2203 } 2204 2205 if (!already_inserted) 2206 buf[nr++] = bytenr; 2207 } 2208 2209 *logical = buf; 2210 *naddrs = nr; 2211 *stripe_len = io_stripe_size; 2212 out: 2213 btrfs_free_chunk_map(map); 2214 return ret; 2215 } 2216 2217 static int exclude_super_stripes(struct btrfs_block_group *cache) 2218 { 2219 struct btrfs_fs_info *fs_info = cache->fs_info; 2220 const bool zoned = btrfs_is_zoned(fs_info); 2221 u64 bytenr; 2222 u64 *logical; 2223 int stripe_len; 2224 int i, nr, ret; 2225 2226 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 2227 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 2228 cache->bytes_super += stripe_len; 2229 ret = btrfs_set_extent_bit(&fs_info->excluded_extents, cache->start, 2230 cache->start + stripe_len - 1, 2231 EXTENT_DIRTY, NULL); 2232 if (ret) 2233 return ret; 2234 } 2235 2236 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2237 bytenr = btrfs_sb_offset(i); 2238 ret = btrfs_rmap_block(fs_info, cache->start, 2239 bytenr, &logical, &nr, &stripe_len); 2240 if (ret) 2241 return ret; 2242 2243 /* Shouldn't have super stripes in sequential zones */ 2244 if (unlikely(zoned && nr)) { 2245 kfree(logical); 2246 btrfs_err(fs_info, 2247 "zoned: block group %llu must not contain super block", 2248 cache->start); 2249 return -EUCLEAN; 2250 } 2251 2252 while (nr--) { 2253 u64 len = min_t(u64, stripe_len, 2254 cache->start + cache->length - logical[nr]); 2255 2256 cache->bytes_super += len; 2257 ret = btrfs_set_extent_bit(&fs_info->excluded_extents, 2258 logical[nr], logical[nr] + len - 1, 2259 EXTENT_DIRTY, NULL); 2260 if (ret) { 2261 kfree(logical); 2262 return ret; 2263 } 2264 } 2265 2266 kfree(logical); 2267 } 2268 return 0; 2269 } 2270 2271 static struct btrfs_block_group *btrfs_create_block_group_cache( 2272 struct btrfs_fs_info *fs_info, u64 start) 2273 { 2274 struct btrfs_block_group *cache; 2275 2276 cache = kzalloc(sizeof(*cache), GFP_NOFS); 2277 if (!cache) 2278 return NULL; 2279 2280 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 2281 GFP_NOFS); 2282 if (!cache->free_space_ctl) { 2283 kfree(cache); 2284 return NULL; 2285 } 2286 2287 cache->start = start; 2288 2289 cache->fs_info = fs_info; 2290 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 2291 2292 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 2293 2294 refcount_set(&cache->refs, 1); 2295 spin_lock_init(&cache->lock); 2296 init_rwsem(&cache->data_rwsem); 2297 INIT_LIST_HEAD(&cache->list); 2298 INIT_LIST_HEAD(&cache->cluster_list); 2299 INIT_LIST_HEAD(&cache->bg_list); 2300 INIT_LIST_HEAD(&cache->ro_list); 2301 INIT_LIST_HEAD(&cache->discard_list); 2302 INIT_LIST_HEAD(&cache->dirty_list); 2303 INIT_LIST_HEAD(&cache->io_list); 2304 INIT_LIST_HEAD(&cache->active_bg_list); 2305 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 2306 atomic_set(&cache->frozen, 0); 2307 mutex_init(&cache->free_space_lock); 2308 2309 return cache; 2310 } 2311 2312 /* 2313 * Iterate all chunks and verify that each of them has the corresponding block 2314 * group 2315 */ 2316 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 2317 { 2318 u64 start = 0; 2319 int ret = 0; 2320 2321 while (1) { 2322 struct btrfs_chunk_map *map; 2323 struct btrfs_block_group *bg; 2324 
2325 /* 2326 * btrfs_find_chunk_map() will return the first chunk map 2327 * intersecting the range, so setting @length to 1 is enough to 2328 * get the first chunk. 2329 */ 2330 map = btrfs_find_chunk_map(fs_info, start, 1); 2331 if (!map) 2332 break; 2333 2334 bg = btrfs_lookup_block_group(fs_info, map->start); 2335 if (unlikely(!bg)) { 2336 btrfs_err(fs_info, 2337 "chunk start=%llu len=%llu doesn't have corresponding block group", 2338 map->start, map->chunk_len); 2339 ret = -EUCLEAN; 2340 btrfs_free_chunk_map(map); 2341 break; 2342 } 2343 if (unlikely(bg->start != map->start || bg->length != map->chunk_len || 2344 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 2345 (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK))) { 2346 btrfs_err(fs_info, 2347 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 2348 map->start, map->chunk_len, 2349 map->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 2350 bg->start, bg->length, 2351 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 2352 ret = -EUCLEAN; 2353 btrfs_free_chunk_map(map); 2354 btrfs_put_block_group(bg); 2355 break; 2356 } 2357 start = map->start + map->chunk_len; 2358 btrfs_free_chunk_map(map); 2359 btrfs_put_block_group(bg); 2360 } 2361 return ret; 2362 } 2363 2364 static int read_one_block_group(struct btrfs_fs_info *info, 2365 struct btrfs_block_group_item *bgi, 2366 const struct btrfs_key *key, 2367 int need_clear) 2368 { 2369 struct btrfs_block_group *cache; 2370 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 2371 int ret; 2372 2373 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 2374 2375 cache = btrfs_create_block_group_cache(info, key->objectid); 2376 if (!cache) 2377 return -ENOMEM; 2378 2379 cache->length = key->offset; 2380 cache->used = btrfs_stack_block_group_used(bgi); 2381 cache->commit_used = cache->used; 2382 cache->flags = btrfs_stack_block_group_flags(bgi); 2383 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); 2384 cache->space_info = btrfs_find_space_info(info, cache->flags); 2385 2386 btrfs_set_free_space_tree_thresholds(cache); 2387 2388 if (need_clear) { 2389 /* 2390 * When we mount with old space cache, we need to 2391 * set BTRFS_DC_CLEAR and set dirty flag. 2392 * 2393 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 2394 * truncate the old free space cache inode and 2395 * setup a new one. 2396 * b) Setting 'dirty flag' makes sure that we flush 2397 * the new space cache info onto disk. 2398 */ 2399 if (btrfs_test_opt(info, SPACE_CACHE)) 2400 cache->disk_cache_state = BTRFS_DC_CLEAR; 2401 } 2402 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 2403 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 2404 btrfs_err(info, 2405 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 2406 cache->start); 2407 ret = -EINVAL; 2408 goto error; 2409 } 2410 2411 ret = btrfs_load_block_group_zone_info(cache, false); 2412 if (ret) { 2413 btrfs_err(info, "zoned: failed to load zone info of bg %llu", 2414 cache->start); 2415 goto error; 2416 } 2417 2418 /* 2419 * We need to exclude the super stripes now so that the space info has 2420 * super bytes accounted for, otherwise we'll think we have more space 2421 * than we actually do. 2422 */ 2423 ret = exclude_super_stripes(cache); 2424 if (ret) { 2425 /* We may have excluded something, so call this just in case. 
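 * (exclude_super_stripes() may already have marked ranges EXTENT_DIRTY in
 * fs_info->excluded_extents before failing, and those bits have to be
 * dropped again on this error path.)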
*/ 2426 btrfs_free_excluded_extents(cache); 2427 goto error; 2428 } 2429 2430 /* 2431 * For zoned filesystem, space after the allocation offset is the only 2432 * free space for a block group. So, we don't need any caching work. 2433 * btrfs_calc_zone_unusable() will set the amount of free space and 2434 * zone_unusable space. 2435 * 2436 * For regular filesystem, check for two cases, either we are full, and 2437 * therefore don't need to bother with the caching work since we won't 2438 * find any space, or we are empty, and we can just add all the space 2439 * in and be done with it. This saves us _a_lot_ of time, particularly 2440 * in the full case. 2441 */ 2442 if (btrfs_is_zoned(info)) { 2443 btrfs_calc_zone_unusable(cache); 2444 /* Should not have any excluded extents. Just in case, though. */ 2445 btrfs_free_excluded_extents(cache); 2446 } else if (cache->length == cache->used) { 2447 cache->cached = BTRFS_CACHE_FINISHED; 2448 btrfs_free_excluded_extents(cache); 2449 } else if (cache->used == 0) { 2450 cache->cached = BTRFS_CACHE_FINISHED; 2451 ret = btrfs_add_new_free_space(cache, cache->start, 2452 cache->start + cache->length, NULL); 2453 btrfs_free_excluded_extents(cache); 2454 if (ret) 2455 goto error; 2456 } 2457 2458 ret = btrfs_add_block_group_cache(cache); 2459 if (ret) { 2460 btrfs_remove_free_space_cache(cache); 2461 goto error; 2462 } 2463 2464 trace_btrfs_add_block_group(info, cache, 0); 2465 btrfs_add_bg_to_space_info(info, cache); 2466 2467 set_avail_alloc_bits(info, cache->flags); 2468 if (btrfs_chunk_writeable(info, cache->start)) { 2469 if (cache->used == 0) { 2470 ASSERT(list_empty(&cache->bg_list)); 2471 if (btrfs_test_opt(info, DISCARD_ASYNC)) 2472 btrfs_discard_queue_work(&info->discard_ctl, cache); 2473 else 2474 btrfs_mark_bg_unused(cache); 2475 } 2476 } else { 2477 inc_block_group_ro(cache, 1); 2478 } 2479 2480 return 0; 2481 error: 2482 btrfs_put_block_group(cache); 2483 return ret; 2484 } 2485 2486 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 2487 { 2488 struct rb_node *node; 2489 int ret = 0; 2490 2491 for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) { 2492 struct btrfs_chunk_map *map; 2493 struct btrfs_block_group *bg; 2494 2495 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 2496 bg = btrfs_create_block_group_cache(fs_info, map->start); 2497 if (!bg) { 2498 ret = -ENOMEM; 2499 break; 2500 } 2501 2502 /* Fill dummy cache as FULL */ 2503 bg->length = map->chunk_len; 2504 bg->flags = map->type; 2505 bg->cached = BTRFS_CACHE_FINISHED; 2506 bg->used = map->chunk_len; 2507 bg->flags = map->type; 2508 bg->space_info = btrfs_find_space_info(fs_info, bg->flags); 2509 ret = btrfs_add_block_group_cache(bg); 2510 /* 2511 * We may have some valid block group cache added already, in 2512 * that case we skip to the next one. 
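 * (That case shows up as -EEXIST from btrfs_add_block_group_cache() and is
 * handled right below by dropping the dummy block group and continuing.)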
2513 */ 2514 if (ret == -EEXIST) { 2515 ret = 0; 2516 btrfs_put_block_group(bg); 2517 continue; 2518 } 2519 2520 if (ret) { 2521 btrfs_remove_free_space_cache(bg); 2522 btrfs_put_block_group(bg); 2523 break; 2524 } 2525 2526 btrfs_add_bg_to_space_info(fs_info, bg); 2527 2528 set_avail_alloc_bits(fs_info, bg->flags); 2529 } 2530 if (!ret) 2531 btrfs_init_global_block_rsv(fs_info); 2532 return ret; 2533 } 2534 2535 int btrfs_read_block_groups(struct btrfs_fs_info *info) 2536 { 2537 struct btrfs_root *root = btrfs_block_group_root(info); 2538 struct btrfs_path *path; 2539 int ret; 2540 struct btrfs_block_group *cache; 2541 struct btrfs_space_info *space_info; 2542 struct btrfs_key key; 2543 int need_clear = 0; 2544 u64 cache_gen; 2545 2546 /* 2547 * Either no extent root (with ibadroots rescue option) or we have 2548 * unsupported RO options. The fs can never be mounted read-write, so no 2549 * need to waste time searching block group items. 2550 * 2551 * This also allows new extent tree related changes to be RO compat, 2552 * no need for a full incompat flag. 2553 */ 2554 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & 2555 ~BTRFS_FEATURE_COMPAT_RO_SUPP)) 2556 return fill_dummy_bgs(info); 2557 2558 key.objectid = 0; 2559 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2560 key.offset = 0; 2561 path = btrfs_alloc_path(); 2562 if (!path) 2563 return -ENOMEM; 2564 2565 cache_gen = btrfs_super_cache_generation(info->super_copy); 2566 if (btrfs_test_opt(info, SPACE_CACHE) && 2567 btrfs_super_generation(info->super_copy) != cache_gen) 2568 need_clear = 1; 2569 if (btrfs_test_opt(info, CLEAR_CACHE)) 2570 need_clear = 1; 2571 2572 while (1) { 2573 struct btrfs_block_group_item bgi; 2574 struct extent_buffer *leaf; 2575 int slot; 2576 2577 ret = find_first_block_group(info, path, &key); 2578 if (ret > 0) 2579 break; 2580 if (ret != 0) 2581 goto error; 2582 2583 leaf = path->nodes[0]; 2584 slot = path->slots[0]; 2585 2586 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 2587 sizeof(bgi)); 2588 2589 btrfs_item_key_to_cpu(leaf, &key, slot); 2590 btrfs_release_path(path); 2591 ret = read_one_block_group(info, &bgi, &key, need_clear); 2592 if (ret < 0) 2593 goto error; 2594 key.objectid += key.offset; 2595 key.offset = 0; 2596 } 2597 btrfs_release_path(path); 2598 2599 list_for_each_entry(space_info, &info->space_info, list) { 2600 int i; 2601 2602 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2603 if (list_empty(&space_info->block_groups[i])) 2604 continue; 2605 cache = list_first_entry(&space_info->block_groups[i], 2606 struct btrfs_block_group, 2607 list); 2608 btrfs_sysfs_add_block_group_type(cache); 2609 } 2610 2611 if (!(btrfs_get_alloc_profile(info, space_info->flags) & 2612 (BTRFS_BLOCK_GROUP_RAID10 | 2613 BTRFS_BLOCK_GROUP_RAID1_MASK | 2614 BTRFS_BLOCK_GROUP_RAID56_MASK | 2615 BTRFS_BLOCK_GROUP_DUP))) 2616 continue; 2617 /* 2618 * Avoid allocating from un-mirrored block group if there are 2619 * mirrored block groups. 2620 */ 2621 list_for_each_entry(cache, 2622 &space_info->block_groups[BTRFS_RAID_RAID0], 2623 list) 2624 inc_block_group_ro(cache, 1); 2625 list_for_each_entry(cache, 2626 &space_info->block_groups[BTRFS_RAID_SINGLE], 2627 list) 2628 inc_block_group_ro(cache, 1); 2629 } 2630 2631 btrfs_init_global_block_rsv(info); 2632 ret = check_chunk_block_group_mappings(info); 2633 error: 2634 btrfs_free_path(path); 2635 /* 2636 * We've hit some error while reading the extent tree, and have 2637 * rescue=ibadroots mount option. 
2638 * Try to fill the tree using dummy block groups so that the user can 2639 * continue to mount and grab their data. 2640 */ 2641 if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) 2642 ret = fill_dummy_bgs(info); 2643 return ret; 2644 } 2645 2646 /* 2647 * This function, insert_block_group_item(), belongs to the phase 2 of chunk 2648 * allocation. 2649 * 2650 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2651 * phases. 2652 */ 2653 static int insert_block_group_item(struct btrfs_trans_handle *trans, 2654 struct btrfs_block_group *block_group) 2655 { 2656 struct btrfs_fs_info *fs_info = trans->fs_info; 2657 struct btrfs_block_group_item bgi; 2658 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2659 struct btrfs_key key; 2660 u64 old_commit_used; 2661 int ret; 2662 2663 spin_lock(&block_group->lock); 2664 btrfs_set_stack_block_group_used(&bgi, block_group->used); 2665 btrfs_set_stack_block_group_chunk_objectid(&bgi, 2666 block_group->global_root_id); 2667 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 2668 old_commit_used = block_group->commit_used; 2669 block_group->commit_used = block_group->used; 2670 key.objectid = block_group->start; 2671 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2672 key.offset = block_group->length; 2673 spin_unlock(&block_group->lock); 2674 2675 ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 2676 if (ret < 0) { 2677 spin_lock(&block_group->lock); 2678 block_group->commit_used = old_commit_used; 2679 spin_unlock(&block_group->lock); 2680 } 2681 2682 return ret; 2683 } 2684 2685 static int insert_dev_extent(struct btrfs_trans_handle *trans, 2686 const struct btrfs_device *device, u64 chunk_offset, 2687 u64 start, u64 num_bytes) 2688 { 2689 struct btrfs_fs_info *fs_info = device->fs_info; 2690 struct btrfs_root *root = fs_info->dev_root; 2691 BTRFS_PATH_AUTO_FREE(path); 2692 struct btrfs_dev_extent *extent; 2693 struct extent_buffer *leaf; 2694 struct btrfs_key key; 2695 int ret; 2696 2697 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 2698 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 2699 path = btrfs_alloc_path(); 2700 if (!path) 2701 return -ENOMEM; 2702 2703 key.objectid = device->devid; 2704 key.type = BTRFS_DEV_EXTENT_KEY; 2705 key.offset = start; 2706 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); 2707 if (ret) 2708 return ret; 2709 2710 leaf = path->nodes[0]; 2711 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); 2712 btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); 2713 btrfs_set_dev_extent_chunk_objectid(leaf, extent, 2714 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2715 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 2716 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 2717 2718 return ret; 2719 } 2720 2721 /* 2722 * This function belongs to phase 2. 2723 * 2724 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2725 * phases. 
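 * For this helper the phase 2 work is persisting one BTRFS_DEV_EXTENT_KEY
 * item per stripe of the new chunk into the device tree, via
 * insert_dev_extent() above.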
2726 */ 2727 static int insert_dev_extents(struct btrfs_trans_handle *trans, 2728 u64 chunk_offset, u64 chunk_size) 2729 { 2730 struct btrfs_fs_info *fs_info = trans->fs_info; 2731 struct btrfs_device *device; 2732 struct btrfs_chunk_map *map; 2733 u64 dev_offset; 2734 int i; 2735 int ret = 0; 2736 2737 map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 2738 if (IS_ERR(map)) 2739 return PTR_ERR(map); 2740 2741 /* 2742 * Take the device list mutex to prevent races with the final phase of 2743 * a device replace operation that replaces the device object associated 2744 * with the map's stripes, because the device object's id can change 2745 * at any time during that final phase of the device replace operation 2746 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 2747 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 2748 * resulting in persisting a device extent item with such ID. 2749 */ 2750 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2751 for (i = 0; i < map->num_stripes; i++) { 2752 device = map->stripes[i].dev; 2753 dev_offset = map->stripes[i].physical; 2754 2755 ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, 2756 map->stripe_size); 2757 if (ret) 2758 break; 2759 } 2760 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2761 2762 btrfs_free_chunk_map(map); 2763 return ret; 2764 } 2765 2766 /* 2767 * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of 2768 * chunk allocation. 2769 * 2770 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2771 * phases. 2772 */ 2773 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 2774 { 2775 struct btrfs_fs_info *fs_info = trans->fs_info; 2776 struct btrfs_block_group *block_group; 2777 int ret = 0; 2778 2779 while (!list_empty(&trans->new_bgs)) { 2780 int index; 2781 2782 block_group = list_first_entry(&trans->new_bgs, 2783 struct btrfs_block_group, 2784 bg_list); 2785 if (ret) 2786 goto next; 2787 2788 index = btrfs_bg_flags_to_raid_index(block_group->flags); 2789 2790 ret = insert_block_group_item(trans, block_group); 2791 if (ret) 2792 btrfs_abort_transaction(trans, ret); 2793 if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, 2794 &block_group->runtime_flags)) { 2795 mutex_lock(&fs_info->chunk_mutex); 2796 ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); 2797 mutex_unlock(&fs_info->chunk_mutex); 2798 if (ret) 2799 btrfs_abort_transaction(trans, ret); 2800 } 2801 ret = insert_dev_extents(trans, block_group->start, 2802 block_group->length); 2803 if (ret) 2804 btrfs_abort_transaction(trans, ret); 2805 btrfs_add_block_group_free_space(trans, block_group); 2806 2807 /* 2808 * If we restriped during balance, we may have added a new raid 2809 * type, so now add the sysfs entries when it is safe to do so. 2810 * We don't have to worry about locking here as it's handled in 2811 * btrfs_sysfs_add_block_group_type. 2812 */ 2813 if (block_group->space_info->block_group_kobjs[index] == NULL) 2814 btrfs_sysfs_add_block_group_type(block_group); 2815 2816 /* Already aborted the transaction if it failed. 
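 * (Every failure path above called btrfs_abort_transaction(), so it is fine
 * to fall through to the cleanup under the "next" label with a non-zero ret
 * and let the remaining iterations skip their work.)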
*/ 2817 next: 2818 btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); 2819 2820 spin_lock(&fs_info->unused_bgs_lock); 2821 list_del_init(&block_group->bg_list); 2822 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); 2823 btrfs_put_block_group(block_group); 2824 spin_unlock(&fs_info->unused_bgs_lock); 2825 2826 /* 2827 * If the block group is still unused, add it to the list of 2828 * unused block groups. The block group may have been created in 2829 * order to satisfy a space reservation, in which case the 2830 * extent allocation only happens later. But often we don't 2831 * actually need to allocate space that we previously reserved, 2832 * so the block group may become unused for a long time. For 2833 * example for metadata we generally reserve space for a worst 2834 * possible scenario, but then don't end up allocating all that 2835 * space or none at all (due to no need to COW, extent buffers 2836 * were already COWed in the current transaction and still 2837 * unwritten, tree heights lower than the maximum possible 2838 * height, etc). For data we generally reserve the exact amount 2839 * of space we are going to allocate later, the exception is 2840 * when using compression, as we must reserve space based on the 2841 * uncompressed data size, because the compression is only done 2842 * when writeback triggered and we don't know how much space we 2843 * are actually going to need, so we reserve the uncompressed 2844 * size because the data may be incompressible in the worst case. 2845 */ 2846 if (ret == 0) { 2847 bool used; 2848 2849 spin_lock(&block_group->lock); 2850 used = btrfs_is_block_group_used(block_group); 2851 spin_unlock(&block_group->lock); 2852 2853 if (!used) 2854 btrfs_mark_bg_unused(block_group); 2855 } 2856 } 2857 btrfs_trans_release_chunk_metadata(trans); 2858 } 2859 2860 /* 2861 * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2862 * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 2863 */ 2864 static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset) 2865 { 2866 u64 div = SZ_1G; 2867 u64 index; 2868 2869 if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) 2870 return BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2871 2872 /* If we have a smaller fs index based on 128MiB. */ 2873 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) 2874 div = SZ_128M; 2875 2876 offset = div64_u64(offset, div); 2877 div64_u64_rem(offset, fs_info->nr_global_roots, &index); 2878 return index; 2879 } 2880 2881 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, 2882 struct btrfs_space_info *space_info, 2883 u64 type, u64 chunk_offset, u64 size) 2884 { 2885 struct btrfs_fs_info *fs_info = trans->fs_info; 2886 struct btrfs_block_group *cache; 2887 int ret; 2888 2889 btrfs_set_log_full_commit(trans); 2890 2891 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 2892 if (!cache) 2893 return ERR_PTR(-ENOMEM); 2894 2895 /* 2896 * Mark it as new before adding it to the rbtree of block groups or any 2897 * list, so that no other task finds it and calls btrfs_mark_bg_unused() 2898 * before the new flag is set. 
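 * (btrfs_mark_bg_unused() tests BLOCK_GROUP_FLAG_NEW and will not move a
 * block group that still carries the flag off its current list, e.g. off
 * the transaction's new_bgs list, onto the unused_bgs list.)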
2899 */ 2900 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); 2901 2902 cache->length = size; 2903 btrfs_set_free_space_tree_thresholds(cache); 2904 cache->flags = type; 2905 cache->cached = BTRFS_CACHE_FINISHED; 2906 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2907 2908 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 2909 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); 2910 2911 ret = btrfs_load_block_group_zone_info(cache, true); 2912 if (ret) { 2913 btrfs_put_block_group(cache); 2914 return ERR_PTR(ret); 2915 } 2916 2917 ret = exclude_super_stripes(cache); 2918 if (ret) { 2919 /* We may have excluded something, so call this just in case */ 2920 btrfs_free_excluded_extents(cache); 2921 btrfs_put_block_group(cache); 2922 return ERR_PTR(ret); 2923 } 2924 2925 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); 2926 btrfs_free_excluded_extents(cache); 2927 if (ret) { 2928 btrfs_put_block_group(cache); 2929 return ERR_PTR(ret); 2930 } 2931 2932 /* 2933 * Ensure the corresponding space_info object is created and 2934 * assigned to our block group. We want our bg to be added to the rbtree 2935 * with its ->space_info set. 2936 */ 2937 cache->space_info = space_info; 2938 ASSERT(cache->space_info); 2939 2940 ret = btrfs_add_block_group_cache(cache); 2941 if (ret) { 2942 btrfs_remove_free_space_cache(cache); 2943 btrfs_put_block_group(cache); 2944 return ERR_PTR(ret); 2945 } 2946 2947 /* 2948 * Now that our block group has its ->space_info set and is inserted in 2949 * the rbtree, update the space info's counters. 2950 */ 2951 trace_btrfs_add_block_group(fs_info, cache, 1); 2952 btrfs_add_bg_to_space_info(fs_info, cache); 2953 btrfs_update_global_block_rsv(fs_info); 2954 2955 #ifdef CONFIG_BTRFS_DEBUG 2956 if (btrfs_should_fragment_free_space(cache)) { 2957 cache->space_info->bytes_used += size >> 1; 2958 fragment_free_space(cache); 2959 } 2960 #endif 2961 2962 btrfs_link_bg_list(cache, &trans->new_bgs); 2963 btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info); 2964 2965 set_avail_alloc_bits(fs_info, type); 2966 return cache; 2967 } 2968 2969 /* 2970 * Mark one block group RO, can be called several times for the same block 2971 * group. 2972 * 2973 * @cache: the destination block group 2974 * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to 2975 * ensure we still have some free space after marking this 2976 * block group RO. 2977 */ 2978 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2979 bool do_chunk_alloc) 2980 { 2981 struct btrfs_fs_info *fs_info = cache->fs_info; 2982 struct btrfs_space_info *space_info = cache->space_info; 2983 struct btrfs_trans_handle *trans; 2984 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2985 u64 alloc_flags; 2986 int ret; 2987 bool dirty_bg_running; 2988 2989 /* 2990 * This can only happen when we are doing read-only scrub on read-only 2991 * mount. 2992 * In that case we should not start a new transaction on read-only fs. 2993 * Thus here we skip all chunk allocations. 
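 * (In that read-only case below we only take ro_block_group_mutex and call
 * inc_block_group_ro() with force == 0, no transaction is joined and no new
 * chunk is allocated.)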
2994 */ 2995 if (sb_rdonly(fs_info->sb)) { 2996 mutex_lock(&fs_info->ro_block_group_mutex); 2997 ret = inc_block_group_ro(cache, 0); 2998 mutex_unlock(&fs_info->ro_block_group_mutex); 2999 return ret; 3000 } 3001 3002 do { 3003 trans = btrfs_join_transaction(root); 3004 if (IS_ERR(trans)) 3005 return PTR_ERR(trans); 3006 3007 dirty_bg_running = false; 3008 3009 /* 3010 * We're not allowed to set block groups readonly after the dirty 3011 * block group cache has started writing. If it already started, 3012 * back off and let this transaction commit. 3013 */ 3014 mutex_lock(&fs_info->ro_block_group_mutex); 3015 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 3016 u64 transid = trans->transid; 3017 3018 mutex_unlock(&fs_info->ro_block_group_mutex); 3019 btrfs_end_transaction(trans); 3020 3021 ret = btrfs_wait_for_commit(fs_info, transid); 3022 if (ret) 3023 return ret; 3024 dirty_bg_running = true; 3025 } 3026 } while (dirty_bg_running); 3027 3028 if (do_chunk_alloc) { 3029 /* 3030 * If we are changing raid levels, try to allocate a 3031 * corresponding block group with the new raid level. 3032 */ 3033 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 3034 if (alloc_flags != cache->flags) { 3035 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, 3036 CHUNK_ALLOC_FORCE); 3037 /* 3038 * ENOSPC is allowed here, we may have enough space 3039 * already allocated at the new raid level to carry on 3040 */ 3041 if (ret == -ENOSPC) 3042 ret = 0; 3043 if (ret < 0) 3044 goto out; 3045 } 3046 } 3047 3048 ret = inc_block_group_ro(cache, 0); 3049 if (!ret) 3050 goto out; 3051 if (ret == -ETXTBSY) 3052 goto unlock_out; 3053 3054 /* 3055 * Skip chunk allocation if the bg is SYSTEM, this is to avoid system 3056 * chunk allocation storm to exhaust the system chunk array. Otherwise 3057 * we still want to try our best to mark the block group read-only. 3058 */ 3059 if (!do_chunk_alloc && ret == -ENOSPC && 3060 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) 3061 goto unlock_out; 3062 3063 alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags); 3064 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); 3065 if (ret < 0) 3066 goto out; 3067 /* 3068 * We have allocated a new chunk. We also need to activate that chunk to 3069 * grant metadata tickets for zoned filesystem. 
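 * (On a regular, non-zoned filesystem this activation is presumably a
 * no-op that returns 0, so the call below should be harmless there.)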
3070 */ 3071 ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true); 3072 if (ret < 0) 3073 goto out; 3074 3075 ret = inc_block_group_ro(cache, 0); 3076 if (ret == -ETXTBSY) 3077 goto unlock_out; 3078 out: 3079 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 3080 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 3081 mutex_lock(&fs_info->chunk_mutex); 3082 check_system_chunk(trans, alloc_flags); 3083 mutex_unlock(&fs_info->chunk_mutex); 3084 } 3085 unlock_out: 3086 mutex_unlock(&fs_info->ro_block_group_mutex); 3087 3088 btrfs_end_transaction(trans); 3089 return ret; 3090 } 3091 3092 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 3093 { 3094 struct btrfs_space_info *sinfo = cache->space_info; 3095 u64 num_bytes; 3096 3097 BUG_ON(!cache->ro); 3098 3099 spin_lock(&sinfo->lock); 3100 spin_lock(&cache->lock); 3101 if (!--cache->ro) { 3102 if (btrfs_is_zoned(cache->fs_info)) { 3103 /* Migrate zone_unusable bytes back */ 3104 cache->zone_unusable = 3105 (cache->alloc_offset - cache->used - cache->pinned - 3106 cache->reserved) + 3107 (cache->length - cache->zone_capacity); 3108 btrfs_space_info_update_bytes_zone_unusable(sinfo, cache->zone_unusable); 3109 sinfo->bytes_readonly -= cache->zone_unusable; 3110 } 3111 num_bytes = cache->length - cache->reserved - 3112 cache->pinned - cache->bytes_super - 3113 cache->zone_unusable - cache->used; 3114 sinfo->bytes_readonly -= num_bytes; 3115 list_del_init(&cache->ro_list); 3116 } 3117 spin_unlock(&cache->lock); 3118 spin_unlock(&sinfo->lock); 3119 } 3120 3121 static int update_block_group_item(struct btrfs_trans_handle *trans, 3122 struct btrfs_path *path, 3123 struct btrfs_block_group *cache) 3124 { 3125 struct btrfs_fs_info *fs_info = trans->fs_info; 3126 int ret; 3127 struct btrfs_root *root = btrfs_block_group_root(fs_info); 3128 unsigned long bi; 3129 struct extent_buffer *leaf; 3130 struct btrfs_block_group_item bgi; 3131 struct btrfs_key key; 3132 u64 old_commit_used; 3133 u64 used; 3134 3135 /* 3136 * Block group items update can be triggered out of commit transaction 3137 * critical section, thus we need a consistent view of used bytes. 3138 * We cannot use cache->used directly outside of the spin lock, as it 3139 * may be changed. 3140 */ 3141 spin_lock(&cache->lock); 3142 old_commit_used = cache->commit_used; 3143 used = cache->used; 3144 /* No change in used bytes, can safely skip it. */ 3145 if (cache->commit_used == used) { 3146 spin_unlock(&cache->lock); 3147 return 0; 3148 } 3149 cache->commit_used = used; 3150 spin_unlock(&cache->lock); 3151 3152 key.objectid = cache->start; 3153 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 3154 key.offset = cache->length; 3155 3156 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3157 if (ret) { 3158 if (ret > 0) 3159 ret = -ENOENT; 3160 goto fail; 3161 } 3162 3163 leaf = path->nodes[0]; 3164 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 3165 btrfs_set_stack_block_group_used(&bgi, used); 3166 btrfs_set_stack_block_group_chunk_objectid(&bgi, 3167 cache->global_root_id); 3168 btrfs_set_stack_block_group_flags(&bgi, cache->flags); 3169 write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 3170 fail: 3171 btrfs_release_path(path); 3172 /* 3173 * We didn't update the block group item, need to revert commit_used 3174 * unless the block group item didn't exist yet - this is to prevent a 3175 * race with a concurrent insertion of the block group item, with 3176 * insert_block_group_item(), that happened just after we attempted to 3177 * update. 
In that case we would reset commit_used to 0 just after the 3178 * insertion set it to a value greater than 0 - if the block group later 3179 * becomes with 0 used bytes, we would incorrectly skip its update. 3180 */ 3181 if (ret < 0 && ret != -ENOENT) { 3182 spin_lock(&cache->lock); 3183 cache->commit_used = old_commit_used; 3184 spin_unlock(&cache->lock); 3185 } 3186 return ret; 3187 3188 } 3189 3190 static int cache_save_setup(struct btrfs_block_group *block_group, 3191 struct btrfs_trans_handle *trans, 3192 struct btrfs_path *path) 3193 { 3194 struct btrfs_fs_info *fs_info = block_group->fs_info; 3195 struct inode *inode = NULL; 3196 struct extent_changeset *data_reserved = NULL; 3197 u64 alloc_hint = 0; 3198 int dcs = BTRFS_DC_ERROR; 3199 u64 cache_size = 0; 3200 int retries = 0; 3201 int ret = 0; 3202 3203 if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 3204 return 0; 3205 3206 /* 3207 * If this block group is smaller than 100 megs don't bother caching the 3208 * block group. 3209 */ 3210 if (block_group->length < (100 * SZ_1M)) { 3211 spin_lock(&block_group->lock); 3212 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 3213 spin_unlock(&block_group->lock); 3214 return 0; 3215 } 3216 3217 if (TRANS_ABORTED(trans)) 3218 return 0; 3219 again: 3220 inode = lookup_free_space_inode(block_group, path); 3221 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 3222 ret = PTR_ERR(inode); 3223 btrfs_release_path(path); 3224 goto out; 3225 } 3226 3227 if (IS_ERR(inode)) { 3228 BUG_ON(retries); 3229 retries++; 3230 3231 if (block_group->ro) 3232 goto out_free; 3233 3234 ret = create_free_space_inode(trans, block_group, path); 3235 if (ret) 3236 goto out_free; 3237 goto again; 3238 } 3239 3240 /* 3241 * We want to set the generation to 0, that way if anything goes wrong 3242 * from here on out we know not to trust this cache when we load up next 3243 * time. 3244 */ 3245 BTRFS_I(inode)->generation = 0; 3246 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 3247 if (unlikely(ret)) { 3248 /* 3249 * So theoretically we could recover from this, simply set the 3250 * super cache generation to 0 so we know to invalidate the 3251 * cache, but then we'd have to keep track of the block groups 3252 * that fail this way so we know we _have_ to reset this cache 3253 * before the next commit or risk reading stale cache. So to 3254 * limit our exposure to horrible edge cases lets just abort the 3255 * transaction, this only happens in really bad situations 3256 * anyway. 3257 */ 3258 btrfs_abort_transaction(trans, ret); 3259 goto out_put; 3260 } 3261 WARN_ON(ret); 3262 3263 /* We've already setup this transaction, go ahead and exit */ 3264 if (block_group->cache_generation == trans->transid && 3265 i_size_read(inode)) { 3266 dcs = BTRFS_DC_SETUP; 3267 goto out_put; 3268 } 3269 3270 if (i_size_read(inode) > 0) { 3271 ret = btrfs_check_trunc_cache_free_space(fs_info, 3272 &fs_info->global_block_rsv); 3273 if (ret) 3274 goto out_put; 3275 3276 ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 3277 if (ret) 3278 goto out_put; 3279 } 3280 3281 spin_lock(&block_group->lock); 3282 if (block_group->cached != BTRFS_CACHE_FINISHED || 3283 !btrfs_test_opt(fs_info, SPACE_CACHE)) { 3284 /* 3285 * don't bother trying to write stuff out _if_ 3286 * a) we're not cached, 3287 * b) we're with nospace_cache mount option, 3288 * c) we're with v2 space_cache (FREE_SPACE_TREE). 
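 * (Case c) is presumably covered because enabling the free space tree
 * clears the v1 SPACE_CACHE mount option, so the btrfs_test_opt() check
 * above already fails for it.)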
3289 */ 3290 dcs = BTRFS_DC_WRITTEN; 3291 spin_unlock(&block_group->lock); 3292 goto out_put; 3293 } 3294 spin_unlock(&block_group->lock); 3295 3296 /* 3297 * We hit an ENOSPC when setting up the cache in this transaction, just 3298 * skip doing the setup, we've already cleared the cache so we're safe. 3299 */ 3300 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 3301 ret = -ENOSPC; 3302 goto out_put; 3303 } 3304 3305 /* 3306 * Try to preallocate enough space based on how big the block group is. 3307 * Keep in mind this has to include any pinned space which could end up 3308 * taking up quite a bit since it's not folded into the other space 3309 * cache. 3310 */ 3311 cache_size = div_u64(block_group->length, SZ_256M); 3312 if (!cache_size) 3313 cache_size = 1; 3314 3315 cache_size *= 16; 3316 cache_size *= fs_info->sectorsize; 3317 3318 ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 3319 cache_size, false); 3320 if (ret) 3321 goto out_put; 3322 3323 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, 3324 cache_size, cache_size, 3325 &alloc_hint); 3326 /* 3327 * Our cache requires contiguous chunks so that we don't modify a bunch 3328 * of metadata or split extents when writing the cache out, which means 3329 * we can enospc if we are heavily fragmented in addition to just normal 3330 * out of space conditions. So if we hit this just skip setting up any 3331 * other block groups for this transaction, maybe we'll unpin enough 3332 * space the next time around. 3333 */ 3334 if (!ret) 3335 dcs = BTRFS_DC_SETUP; 3336 else if (ret == -ENOSPC) 3337 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 3338 3339 out_put: 3340 iput(inode); 3341 out_free: 3342 btrfs_release_path(path); 3343 out: 3344 spin_lock(&block_group->lock); 3345 if (!ret && dcs == BTRFS_DC_SETUP) 3346 block_group->cache_generation = trans->transid; 3347 block_group->disk_cache_state = dcs; 3348 spin_unlock(&block_group->lock); 3349 3350 extent_changeset_free(data_reserved); 3351 return ret; 3352 } 3353 3354 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 3355 { 3356 struct btrfs_fs_info *fs_info = trans->fs_info; 3357 struct btrfs_block_group *cache, *tmp; 3358 struct btrfs_transaction *cur_trans = trans->transaction; 3359 BTRFS_PATH_AUTO_FREE(path); 3360 3361 if (list_empty(&cur_trans->dirty_bgs) || 3362 !btrfs_test_opt(fs_info, SPACE_CACHE)) 3363 return 0; 3364 3365 path = btrfs_alloc_path(); 3366 if (!path) 3367 return -ENOMEM; 3368 3369 /* Could add new block groups, use _safe just in case */ 3370 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 3371 dirty_list) { 3372 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 3373 cache_save_setup(cache, trans, path); 3374 } 3375 3376 return 0; 3377 } 3378 3379 /* 3380 * Transaction commit does final block group cache writeback during a critical 3381 * section where nothing is allowed to change the FS. This is required in 3382 * order for the cache to actually match the block group, but can introduce a 3383 * lot of latency into the commit. 3384 * 3385 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 3386 * There's a chance we'll have to redo some of it if the block group changes 3387 * again during the commit, but it greatly reduces the commit latency by 3388 * getting rid of the easy block groups while we're still allowing others to 3389 * join the commit. 
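 * Any block group that is dirtied again after this early pass is simply
 * picked up once more by btrfs_write_dirty_block_groups() in the commit
 * critical section, so writing it out early loses nothing.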
3390 */ 3391 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 3392 { 3393 struct btrfs_fs_info *fs_info = trans->fs_info; 3394 struct btrfs_block_group *cache; 3395 struct btrfs_transaction *cur_trans = trans->transaction; 3396 int ret = 0; 3397 int should_put; 3398 BTRFS_PATH_AUTO_FREE(path); 3399 LIST_HEAD(dirty); 3400 struct list_head *io = &cur_trans->io_bgs; 3401 int loops = 0; 3402 3403 spin_lock(&cur_trans->dirty_bgs_lock); 3404 if (list_empty(&cur_trans->dirty_bgs)) { 3405 spin_unlock(&cur_trans->dirty_bgs_lock); 3406 return 0; 3407 } 3408 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3409 spin_unlock(&cur_trans->dirty_bgs_lock); 3410 3411 again: 3412 /* Make sure all the block groups on our dirty list actually exist */ 3413 btrfs_create_pending_block_groups(trans); 3414 3415 if (!path) { 3416 path = btrfs_alloc_path(); 3417 if (!path) { 3418 ret = -ENOMEM; 3419 goto out; 3420 } 3421 } 3422 3423 /* 3424 * cache_write_mutex is here only to save us from balance or automatic 3425 * removal of empty block groups deleting this block group while we are 3426 * writing out the cache 3427 */ 3428 mutex_lock(&trans->transaction->cache_write_mutex); 3429 while (!list_empty(&dirty)) { 3430 bool drop_reserve = true; 3431 3432 cache = list_first_entry(&dirty, struct btrfs_block_group, 3433 dirty_list); 3434 /* 3435 * This can happen if something re-dirties a block group that 3436 * is already under IO. Just wait for it to finish and then do 3437 * it all again 3438 */ 3439 if (!list_empty(&cache->io_list)) { 3440 list_del_init(&cache->io_list); 3441 btrfs_wait_cache_io(trans, cache, path); 3442 btrfs_put_block_group(cache); 3443 } 3444 3445 3446 /* 3447 * btrfs_wait_cache_io uses the cache->dirty_list to decide if 3448 * it should update the cache_state. Don't delete until after 3449 * we wait. 3450 * 3451 * Since we're not running in the commit critical section 3452 * we need the dirty_bgs_lock to protect from update_block_group 3453 */ 3454 spin_lock(&cur_trans->dirty_bgs_lock); 3455 list_del_init(&cache->dirty_list); 3456 spin_unlock(&cur_trans->dirty_bgs_lock); 3457 3458 should_put = 1; 3459 3460 cache_save_setup(cache, trans, path); 3461 3462 if (cache->disk_cache_state == BTRFS_DC_SETUP) { 3463 cache->io_ctl.inode = NULL; 3464 ret = btrfs_write_out_cache(trans, cache, path); 3465 if (ret == 0 && cache->io_ctl.inode) { 3466 should_put = 0; 3467 3468 /* 3469 * The cache_write_mutex is protecting the 3470 * io_list, also refer to the definition of 3471 * btrfs_transaction::io_bgs for more details 3472 */ 3473 list_add_tail(&cache->io_list, io); 3474 } else { 3475 /* 3476 * If we failed to write the cache, the 3477 * generation will be bad and life goes on 3478 */ 3479 ret = 0; 3480 } 3481 } 3482 if (!ret) { 3483 ret = update_block_group_item(trans, path, cache); 3484 /* 3485 * Our block group might still be attached to the list 3486 * of new block groups in the transaction handle of some 3487 * other task (struct btrfs_trans_handle->new_bgs). This 3488 * means its block group item isn't yet in the extent 3489 * tree. If this happens ignore the error, as we will 3490 * try again later in the critical section of the 3491 * transaction commit. 
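 * (The retry works by putting the group back on cur_trans->dirty_bgs below,
 * so btrfs_write_dirty_block_groups() updates the item once the pending
 * block group items have been inserted.)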
3492 */
3493 if (ret == -ENOENT) {
3494 ret = 0;
3495 spin_lock(&cur_trans->dirty_bgs_lock);
3496 if (list_empty(&cache->dirty_list)) {
3497 list_add_tail(&cache->dirty_list,
3498 &cur_trans->dirty_bgs);
3499 btrfs_get_block_group(cache);
3500 drop_reserve = false;
3501 }
3502 spin_unlock(&cur_trans->dirty_bgs_lock);
3503 } else if (ret) {
3504 btrfs_abort_transaction(trans, ret);
3505 }
3506 }
3507
3508 /* If it's not on the io list, we need to put the block group */
3509 if (should_put)
3510 btrfs_put_block_group(cache);
3511 if (drop_reserve)
3512 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
3513 /*
3514 * Avoid blocking other tasks for too long. It might even save
3515 * us from writing caches for block groups that are going to be
3516 * removed.
3517 */
3518 mutex_unlock(&trans->transaction->cache_write_mutex);
3519 if (ret)
3520 goto out;
3521 mutex_lock(&trans->transaction->cache_write_mutex);
3522 }
3523 mutex_unlock(&trans->transaction->cache_write_mutex);
3524
3525 /*
3526 * Go through delayed refs for all the stuff we've just kicked off
3527 * and then loop back (just once)
3528 */
3529 if (!ret)
3530 ret = btrfs_run_delayed_refs(trans, 0);
3531 if (!ret && loops == 0) {
3532 loops++;
3533 spin_lock(&cur_trans->dirty_bgs_lock);
3534 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3535 /*
3536 * dirty_bgs_lock protects us from concurrent block group
3537 * deletes too (not just cache_write_mutex).
3538 */
3539 if (!list_empty(&dirty)) {
3540 spin_unlock(&cur_trans->dirty_bgs_lock);
3541 goto again;
3542 }
3543 spin_unlock(&cur_trans->dirty_bgs_lock);
3544 }
3545 out:
3546 if (ret < 0) {
3547 spin_lock(&cur_trans->dirty_bgs_lock);
3548 list_splice_init(&dirty, &cur_trans->dirty_bgs);
3549 spin_unlock(&cur_trans->dirty_bgs_lock);
3550 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3551 }
3552
3553 return ret;
3554 }
3555
3556 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
3557 {
3558 struct btrfs_fs_info *fs_info = trans->fs_info;
3559 struct btrfs_block_group *cache;
3560 struct btrfs_transaction *cur_trans = trans->transaction;
3561 int ret = 0;
3562 int should_put;
3563 BTRFS_PATH_AUTO_FREE(path);
3564 struct list_head *io = &cur_trans->io_bgs;
3565
3566 path = btrfs_alloc_path();
3567 if (!path)
3568 return -ENOMEM;
3569
3570 /*
3571 * Even though we are in the critical section of the transaction commit,
3572 * we can still have concurrent tasks adding elements to this
3573 * transaction's list of dirty block groups. These tasks correspond to
3574 * endio free space workers started when writeback finishes for a
3575 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3576 * allocate new block groups as a result of COWing nodes of the root
3577 * tree when updating the free space inode. The writeback for the space
3578 * caches is triggered by an earlier call to
3579 * btrfs_start_dirty_block_groups() and iterations of the following
3580 * loop.
3581 * Also we want to do the cache_save_setup first and then run the
3582 * delayed refs to make sure we have the best chance at doing this all
3583 * in one shot.
3584 */
3585 spin_lock(&cur_trans->dirty_bgs_lock);
3586 while (!list_empty(&cur_trans->dirty_bgs)) {
3587 cache = list_first_entry(&cur_trans->dirty_bgs,
3588 struct btrfs_block_group,
3589 dirty_list);
3590
3591 /*
3592 * This can happen if cache_save_setup re-dirties a block group
3593 * that is already under IO. Just wait for it to finish and
3594 * then do it all again
3595 */
3596 if (!list_empty(&cache->io_list)) {
3597 spin_unlock(&cur_trans->dirty_bgs_lock);
3598 list_del_init(&cache->io_list);
3599 btrfs_wait_cache_io(trans, cache, path);
3600 btrfs_put_block_group(cache);
3601 spin_lock(&cur_trans->dirty_bgs_lock);
3602 }
3603
3604 /*
3605 * Don't remove from the dirty list until after we've waited on
3606 * any pending IO
3607 */
3608 list_del_init(&cache->dirty_list);
3609 spin_unlock(&cur_trans->dirty_bgs_lock);
3610 should_put = 1;
3611
3612 cache_save_setup(cache, trans, path);
3613
3614 if (!ret)
3615 ret = btrfs_run_delayed_refs(trans, U64_MAX);
3616
3617 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3618 cache->io_ctl.inode = NULL;
3619 ret = btrfs_write_out_cache(trans, cache, path);
3620 if (ret == 0 && cache->io_ctl.inode) {
3621 should_put = 0;
3622 list_add_tail(&cache->io_list, io);
3623 } else {
3624 /*
3625 * If we failed to write the cache, the
3626 * generation will be bad and life goes on
3627 */
3628 ret = 0;
3629 }
3630 }
3631 if (!ret) {
3632 ret = update_block_group_item(trans, path, cache);
3633 /*
3634 * One of the free space endio workers might have
3635 * created a new block group while updating a free space
3636 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3637 * and hasn't released its transaction handle yet, in
3638 * which case the new block group is still attached to
3639 * its transaction handle and its creation has not
3640 * finished yet (no block group item in the extent tree
3641 * yet, etc). If this is the case, wait for all free
3642 * space endio workers to finish and retry. This is a
3643 * very rare case so no need for a more efficient and
3644 * complex approach.
3645 */
3646 if (ret == -ENOENT) {
3647 wait_event(cur_trans->writer_wait,
3648 atomic_read(&cur_trans->num_writers) == 1);
3649 ret = update_block_group_item(trans, path, cache);
3650 if (ret)
3651 btrfs_abort_transaction(trans, ret);
3652 } else if (ret) {
3653 btrfs_abort_transaction(trans, ret);
3654 }
3655 }
3656
3657 /* If it's not on the io list, we need to put the block group */
3658 if (should_put)
3659 btrfs_put_block_group(cache);
3660 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
3661 spin_lock(&cur_trans->dirty_bgs_lock);
3662 }
3663 spin_unlock(&cur_trans->dirty_bgs_lock);
3664
3665 /*
3666 * Refer to the definition of io_bgs member for details why it's safe
3667 * to use it without any locking
3668 */
3669 while (!list_empty(io)) {
3670 cache = list_first_entry(io, struct btrfs_block_group,
3671 io_list);
3672 list_del_init(&cache->io_list);
3673 btrfs_wait_cache_io(trans, cache, path);
3674 btrfs_put_block_group(cache);
3675 }
3676
3677 return ret;
3678 }
3679
3680 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
3681 u64 bytenr, u64 num_bytes, bool alloc)
3682 {
3683 struct btrfs_fs_info *info = trans->fs_info;
3684 struct btrfs_space_info *space_info;
3685 struct btrfs_block_group *cache;
3686 u64 old_val;
3687 bool reclaim = false;
3688 bool bg_already_dirty = true;
3689 int factor;
3690
3691 /* Block accounting for super block */
3692 spin_lock(&info->delalloc_root_lock);
3693 old_val = btrfs_super_bytes_used(info->super_copy);
3694 if (alloc)
3695 old_val += num_bytes;
3696 else
3697 old_val -= num_bytes;
3698 btrfs_set_super_bytes_used(info->super_copy, old_val);
3699 spin_unlock(&info->delalloc_root_lock);
3700
3701 cache = btrfs_lookup_block_group(info, bytenr);
3702 if (!cache)
3703 return -ENOENT;
3704
3705 /* An extent cannot span multiple block groups. */
3706 ASSERT(bytenr + num_bytes <= cache->start + cache->length);
3707
3708 space_info = cache->space_info;
3709 factor = btrfs_bg_type_to_factor(cache->flags);
3710
3711 /*
3712 * If this block group has free space cache written out, we need to make
3713 * sure to load it if we are removing space. This is because we need
3714 * the unpinning stage to actually add the space back to the block group,
3715 * otherwise we will leak space.
3716 */
3717 if (!alloc && !btrfs_block_group_done(cache))
3718 btrfs_cache_block_group(cache, true);
3719
3720 spin_lock(&space_info->lock);
3721 spin_lock(&cache->lock);
3722
3723 if (btrfs_test_opt(info, SPACE_CACHE) &&
3724 cache->disk_cache_state < BTRFS_DC_CLEAR)
3725 cache->disk_cache_state = BTRFS_DC_CLEAR;
3726
3727 old_val = cache->used;
3728 if (alloc) {
3729 old_val += num_bytes;
3730 cache->used = old_val;
3731 cache->reserved -= num_bytes;
3732 cache->reclaim_mark = 0;
3733 space_info->bytes_reserved -= num_bytes;
3734 space_info->bytes_used += num_bytes;
3735 space_info->disk_used += num_bytes * factor;
3736 if (READ_ONCE(space_info->periodic_reclaim))
3737 btrfs_space_info_update_reclaimable(space_info, -num_bytes);
3738 spin_unlock(&cache->lock);
3739 spin_unlock(&space_info->lock);
3740 } else {
3741 old_val -= num_bytes;
3742 cache->used = old_val;
3743 cache->pinned += num_bytes;
3744 btrfs_space_info_update_bytes_pinned(space_info, num_bytes);
3745 space_info->bytes_used -= num_bytes;
3746 space_info->disk_used -= num_bytes * factor;
3747 if (READ_ONCE(space_info->periodic_reclaim))
3748 btrfs_space_info_update_reclaimable(space_info, num_bytes);
3749 else
3750 reclaim = should_reclaim_block_group(cache, num_bytes);
3751
3752 spin_unlock(&cache->lock);
3753 spin_unlock(&space_info->lock);
3754
3755 btrfs_set_extent_bit(&trans->transaction->pinned_extents, bytenr,
3756 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
3757 }
3758
3759 spin_lock(&trans->transaction->dirty_bgs_lock);
3760 if (list_empty(&cache->dirty_list)) {
3761 list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
3762 bg_already_dirty = false;
3763 btrfs_get_block_group(cache);
3764 }
3765 spin_unlock(&trans->transaction->dirty_bgs_lock);
3766
3767 /*
3768 * No longer have used bytes in this block group, queue it for deletion.
3769 * We do this after adding the block group to the dirty list to avoid
3770 * races between cleaner kthread and space cache writeout.
3771 */
3772 if (!alloc && old_val == 0) {
3773 if (!btrfs_test_opt(info, DISCARD_ASYNC))
3774 btrfs_mark_bg_unused(cache);
3775 } else if (!alloc && reclaim) {
3776 btrfs_mark_bg_to_reclaim(cache);
3777 }
3778
3779 btrfs_put_block_group(cache);
3780
3781 /* Modified block groups are accounted for in the delayed_refs_rsv. */
3782 if (!bg_already_dirty)
3783 btrfs_inc_delayed_refs_rsv_bg_updates(info);
3784
3785 return 0;
3786 }
3787
3788 /*
3789 * Update the block_group and space info counters.
3790 *
3791 * @cache: The cache we are manipulating
3792 * @ram_bytes: The number of bytes of file content; this is the same as
3793 * @num_bytes except for the compression path.
3794 * @num_bytes: The number of bytes in question
3795 * @delalloc: The blocks are allocated for the delalloc write
3796 *
3797 * This is called by the allocator when it reserves space. If this is a
3798 * reservation and the block group has become read only we cannot make the
3799 * reservation and return -EAGAIN, otherwise this function always succeeds.
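*
* A minimal usage sketch (hypothetical caller, error handling elided),
* assuming a failed allocation is later backed out with
* btrfs_free_reserved_bytes():
*
*   ret = btrfs_add_reserved_bytes(bg, ram_bytes, num_bytes, delalloc, false);
*   if (ret == -EAGAIN) {
*           // raced with the block group turning read-only (or a size
*           // class mismatch), try another block group
*   } else if (ret == 0 && allocation_fails_later) {
*           btrfs_free_reserved_bytes(bg, num_bytes, delalloc);
*   }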
3800 */
3801 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3802 u64 ram_bytes, u64 num_bytes, int delalloc,
3803 bool force_wrong_size_class)
3804 {
3805 struct btrfs_space_info *space_info = cache->space_info;
3806 enum btrfs_block_group_size_class size_class;
3807 int ret = 0;
3808
3809 spin_lock(&space_info->lock);
3810 spin_lock(&cache->lock);
3811 if (cache->ro) {
3812 ret = -EAGAIN;
3813 goto out;
3814 }
3815
3816 if (btrfs_block_group_should_use_size_class(cache)) {
3817 size_class = btrfs_calc_block_group_size_class(num_bytes);
3818 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class);
3819 if (ret)
3820 goto out;
3821 }
3822 cache->reserved += num_bytes;
3823 space_info->bytes_reserved += num_bytes;
3824 trace_btrfs_space_reservation(cache->fs_info, "space_info",
3825 space_info->flags, num_bytes, 1);
3826 btrfs_space_info_update_bytes_may_use(space_info, -ram_bytes);
3827 if (delalloc)
3828 cache->delalloc_bytes += num_bytes;
3829
3830 /*
3831 * Compression can use less space than we reserved, so wake tickets if
3832 * that happens.
3833 */
3834 if (num_bytes < ram_bytes)
3835 btrfs_try_granting_tickets(cache->fs_info, space_info);
3836 out:
3837 spin_unlock(&cache->lock);
3838 spin_unlock(&space_info->lock);
3839 return ret;
3840 }
3841
3842 /*
3843 * Update the block_group and space info counters.
3844 *
3845 * @cache: The cache we are manipulating.
3846 * @num_bytes: The number of bytes in question.
3847 * @is_delalloc: Whether the blocks are allocated for a delalloc write.
3848 *
3849 * This is called by somebody who is freeing space that was never actually used
3850 * on disk. For example if you reserve some space for a new leaf in transaction
3851 * A and before transaction A commits you free that leaf, you call this to
3852 * clear the reservation.
3853 */
3854 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes,
3855 bool is_delalloc)
3856 {
3857 struct btrfs_space_info *space_info = cache->space_info;
3858
3859 spin_lock(&space_info->lock);
3860 spin_lock(&cache->lock);
3861 if (cache->ro)
3862 space_info->bytes_readonly += num_bytes;
3863 else if (btrfs_is_zoned(cache->fs_info))
3864 space_info->bytes_zone_unusable += num_bytes;
3865 cache->reserved -= num_bytes;
3866 space_info->bytes_reserved -= num_bytes;
3867 space_info->max_extent_size = 0;
3868
3869 if (is_delalloc)
3870 cache->delalloc_bytes -= num_bytes;
3871 spin_unlock(&cache->lock);
3872
3873 btrfs_try_granting_tickets(cache->fs_info, space_info);
3874 spin_unlock(&space_info->lock);
3875 }
3876
3877 static void force_metadata_allocation(struct btrfs_fs_info *info)
3878 {
3879 struct list_head *head = &info->space_info;
3880 struct btrfs_space_info *found;
3881
3882 list_for_each_entry(found, head, list) {
3883 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3884 found->force_alloc = CHUNK_ALLOC_FORCE;
3885 }
3886 }
3887
3888 static bool should_alloc_chunk(const struct btrfs_fs_info *fs_info,
3889 const struct btrfs_space_info *sinfo, int force)
3890 {
3891 u64 bytes_used = btrfs_space_info_used(sinfo, false);
3892 u64 thresh;
3893
3894 if (force == CHUNK_ALLOC_FORCE)
3895 return true;
3896
3897 /*
3898 * In limited mode, we want to have some free space up to
3899 * about 1% of the FS size.
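*
* As a rough worked example (assuming mult_perc() computes a plain
* percentage): on a 1 TiB filesystem the threshold is
* max(64M, 1% of 1 TiB) ~= 10 GiB, so a limited-mode allocation is
* triggered once less than ~10 GiB of this space_info remains
* unallocated; on a 2 GiB filesystem the 64M floor wins instead.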
3900 */
3901 if (force == CHUNK_ALLOC_LIMITED) {
3902 thresh = btrfs_super_total_bytes(fs_info->super_copy);
3903 thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1));
3904
3905 if (sinfo->total_bytes - bytes_used < thresh)
3906 return true;
3907 }
3908
3909 if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
3910 return false;
3911 return true;
3912 }
3913
3914 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
3915 {
3916 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
3917 struct btrfs_space_info *space_info;
3918
3919 space_info = btrfs_find_space_info(trans->fs_info, type);
3920 if (!space_info) {
3921 DEBUG_WARN();
3922 return -EINVAL;
3923 }
3924
3925 return btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE);
3926 }
3927
3928 static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans,
3929 struct btrfs_space_info *space_info,
3930 u64 flags)
3931 {
3932 struct btrfs_block_group *bg;
3933 int ret;
3934
3935 /*
3936 * Check if we have enough space in the system space info because we
3937 * will need to update device items in the chunk btree and insert a new
3938 * chunk item in the chunk btree as well. This will allocate a new
3939 * system block group if needed.
3940 */
3941 check_system_chunk(trans, flags);
3942
3943 bg = btrfs_create_chunk(trans, space_info, flags);
3944 if (IS_ERR(bg)) {
3945 ret = PTR_ERR(bg);
3946 goto out;
3947 }
3948
3949 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3950 /*
3951 * Normally we are not expected to fail with -ENOSPC here, since we have
3952 * previously reserved space in the system space_info and allocated one
3953 * new system chunk if necessary. However there are three exceptions:
3954 *
3955 * 1) We may have enough free space in the system space_info but all the
3956 * existing system block groups have a profile which cannot be used
3957 * for extent allocation.
3958 *
3959 * This happens when mounting in degraded mode. For example we have a
3960 * RAID1 filesystem with 2 devices, lose one device and mount the fs
3961 * using the other device in degraded mode. If we then allocate a chunk,
3962 * we may have enough free space in the existing system space_info, but
3963 * none of the block groups can be used for extent allocation since they
3964 * have a RAID1 profile, and because we are in degraded mode with a
3965 * single device, we are forced to allocate a new system chunk with a
3966 * SINGLE profile. Making check_system_chunk() iterate over all system
3967 * block groups and check if they have a usable profile and enough space
3968 * can be slow on very large filesystems, so we tolerate the -ENOSPC and
3969 * try again after forcing allocation of a new system chunk. Like this
3970 * we avoid paying the cost of that search in normal circumstances, when
3971 * we were not mounted in degraded mode;
3972 *
3973 * 2) We had enough free space in the system space_info, and one suitable
3974 * block group to allocate from when we called check_system_chunk()
3975 * above. However right after we called it, the only system block group
3976 * with enough free space got turned into RO mode by a running scrub,
3977 * and in this case we have to allocate a new one and retry. We only
3978 * need to do this allocation and retry once, since we have a transaction
3979 * handle and scrub uses the commit root to search for block groups;
3980 *
3981 * 3) We had one system block group with enough free space when we called
3982 * check_system_chunk(), but after that, right before we tried to
3983 * allocate the last extent buffer we needed, a discard operation came
3984 * in and it temporarily removed the last free space entry from the
3985 * block group (discard removes a free space entry, discards it, and
3986 * then adds back the entry to the block group cache).
3987 */
3988 if (ret == -ENOSPC) {
3989 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
3990 struct btrfs_block_group *sys_bg;
3991 struct btrfs_space_info *sys_space_info;
3992
3993 sys_space_info = btrfs_find_space_info(trans->fs_info, sys_flags);
3994 if (unlikely(!sys_space_info)) {
3995 ret = -EINVAL;
3996 btrfs_abort_transaction(trans, ret);
3997 goto out;
3998 }
3999
4000 sys_bg = btrfs_create_chunk(trans, sys_space_info, sys_flags);
4001 if (IS_ERR(sys_bg)) {
4002 ret = PTR_ERR(sys_bg);
4003 btrfs_abort_transaction(trans, ret);
4004 goto out;
4005 }
4006
4007 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
4008 if (unlikely(ret)) {
4009 btrfs_abort_transaction(trans, ret);
4010 goto out;
4011 }
4012
4013 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
4014 if (unlikely(ret)) {
4015 btrfs_abort_transaction(trans, ret);
4016 goto out;
4017 }
4018 } else if (unlikely(ret)) {
4019 btrfs_abort_transaction(trans, ret);
4020 goto out;
4021 }
4022 out:
4023 btrfs_trans_release_chunk_metadata(trans);
4024
4025 if (ret)
4026 return ERR_PTR(ret);
4027
4028 btrfs_get_block_group(bg);
4029 return bg;
4030 }
4031
4032 /*
4033 * Chunk allocation is done in 2 phases:
4034 *
4035 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
4036 * the chunk, the chunk mapping, create its block group and add the items
4037 * that belong in the chunk btree to it - more specifically, we need to
4038 * update device items in the chunk btree and add a new chunk item to it.
4039 *
4040 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
4041 * group item to the extent btree and the device extent items to the devices
4042 * btree.
4043 *
4044 * This is done to prevent deadlocks. For example when COWing a node from the
4045 * extent btree we are holding a write lock on the node's parent and if we
4046 * trigger chunk allocation and attempt to insert the new block group item
4047 * in the extent btree right away, we could deadlock because the path for the
4048 * insertion can include that parent node. At first glance it seems impossible
4049 * to trigger chunk allocation after starting a transaction since tasks should
4050 * reserve enough transaction units (metadata space), however while that is true
4051 * most of the time, chunk allocation may still be triggered for several reasons:
4052 *
4053 * 1) When reserving metadata, we check if there is enough free space in the
4054 * metadata space_info and therefore don't trigger allocation of a new chunk.
4055 * However, later when the task actually tries to COW an extent buffer from
4056 * the extent btree or from the device btree for example, it is forced to
4057 * allocate a new block group (chunk) because the only one that had enough
4058 * free space was just turned into RO mode by a running scrub for example (or
4059 * device replace, block group reclaim thread, etc), so we cannot use it
4060 * for allocating an extent and end up being forced to allocate a new one;
4061 *
4062 * 2) Because we only check that the metadata space_info has enough free bytes,
4063 * we end up not allocating a new metadata chunk in that case. However if
4064 * the filesystem was mounted in degraded mode, none of the existing block
4065 * groups might be suitable for extent allocation due to their incompatible
4066 * profile (e.g. mounting a filesystem with 2 devices, where all block groups
4067 * use a RAID1 profile, in degraded mode using a single device). In this case
4068 * when the task attempts to COW some extent buffer of the extent btree for
4069 * example, it will trigger allocation of a new metadata block group with a
4070 * suitable profile (SINGLE profile in the example of the degraded mount of
4071 * the RAID1 filesystem);
4072 *
4073 * 3) The task has reserved enough transaction units / metadata space, but when
4074 * it attempts to COW an extent buffer from the extent or device btree for
4075 * example, it does not find any free extent in any metadata block group,
4076 * and is therefore forced to try to allocate a new metadata block group.
4077 * This is because some other task allocated all available extents in the
4078 * meantime - this typically happens with tasks that don't reserve space
4079 * properly, either intentionally or as a bug. One example where this is
4080 * done intentionally is fsync, as it does not reserve any transaction units
4081 * and ends up allocating a variable number of metadata extents for log
4082 * tree extent buffers;
4083 *
4084 * 4) The task has reserved enough transaction units / metadata space, but right
4085 * before it tries to allocate the last extent buffer it needs, a discard
4086 * operation comes in and, temporarily, removes the last free space entry from
4087 * the only metadata block group that had free space (discard starts by
4088 * removing a free space entry from a block group, then does the discard
4089 * operation and, once it's done, it adds back the free space entry to the
4090 * block group).
4091 *
4092 * We also need this 2-phase setup when adding a device to a filesystem with
4093 * a seed device - we must create new metadata and system chunks without adding
4094 * any of the block group items to the chunk, extent and device btrees. If we
4095 * did not do it this way, we would get ENOSPC when attempting to update those
4096 * btrees, since all the chunks from the seed device are read-only.
4097 *
4098 * Phase 1 does the updates and insertions to the chunk btree because if we had
4099 * it done in phase 2 and have a thundering herd of tasks allocating chunks in
4100 * parallel, we risk having too many system chunks allocated by many tasks if
4101 * many tasks reach phase 1 without the previous ones completing phase 2. In the
4102 * extreme case this leads to exhaustion of the system chunk array in the
4103 * superblock. This is easier to trigger if using a btree node/leaf size of 64K
4104 * and with RAID filesystems (so we have more device items in the chunk btree).
4105 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
4106 * the system chunk array due to concurrent allocations") provides more details.
4107 *
4108 * Allocation of system chunks does not happen through this function. A task that
4109 * needs to update the chunk btree (the only btree that uses system chunks) must
4110 * preallocate chunk space by calling either check_system_chunk() or
4111 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
4112 * metadata chunk or when removing a chunk, while the latter is used before doing
4113 * a modification to the chunk btree - use cases for the latter are adding,
4114 * removing and resizing a device as well as relocation of a system chunk.
4115 * See the comment below for more details.
4116 *
4117 * The reservation of system space, done through check_system_chunk(), as well
4118 * as all the updates and insertions into the chunk btree must be done while
4119 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
4120 * an extent buffer from the chunk btree we never trigger allocation of a new
4121 * system chunk, which would result in a deadlock (trying to lock an extent
4122 * buffer of the chunk btree twice, the first time before triggering the chunk
4123 * allocation and the second time during chunk allocation while attempting to
4124 * update the chunk btree). The system chunk array is also updated while holding
4125 * that mutex. The same logic applies to removing chunks - we must reserve system
4126 * space, update the chunk btree and the system chunk array in the superblock
4127 * while holding fs_info->chunk_mutex.
4128 *
4129 * This function, btrfs_chunk_alloc(), belongs to phase 1.
4130 *
4131 * @space_info: specify which space_info the new chunk should belong to.
4132 *
4133 * If @force is CHUNK_ALLOC_FORCE:
4134 * - return 1 if it successfully allocates a chunk,
4135 * - return errors including -ENOSPC otherwise.
4136 * If @force is NOT CHUNK_ALLOC_FORCE:
4137 * - return 0 if it doesn't need to allocate a new chunk,
4138 * - return 1 if it successfully allocates a chunk,
4139 * - return errors including -ENOSPC otherwise.
4140 */
4141 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans,
4142 struct btrfs_space_info *space_info, u64 flags,
4143 enum btrfs_chunk_alloc_enum force)
4144 {
4145 struct btrfs_fs_info *fs_info = trans->fs_info;
4146 struct btrfs_block_group *ret_bg;
4147 bool wait_for_alloc = false;
4148 bool should_alloc = false;
4149 bool from_extent_allocation = false;
4150 int ret = 0;
4151
4152 if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
4153 from_extent_allocation = true;
4154 force = CHUNK_ALLOC_FORCE;
4155 }
4156
4157 /* Don't re-enter if we're already allocating a chunk */
4158 if (trans->allocating_chunk)
4159 return -ENOSPC;
4160 /*
4161 * Allocation of system chunks cannot happen through this path, as we
4162 * could end up in a deadlock if we are allocating a data or metadata
4163 * chunk and there is another task modifying the chunk btree.
4164 *
4165 * This is because while we are holding the chunk mutex, we will attempt
4166 * to add the new chunk item to the chunk btree or update an existing
4167 * device item in the chunk btree, while the other task that is modifying
4168 * the chunk btree is attempting to COW an extent buffer while holding a
4169 * lock on it and on its parent - if the COW operation triggers a system
4170 * chunk allocation, then we can deadlock because we are holding the
4171 * chunk mutex and we may need to access that extent buffer or its parent
4172 * in order to add the chunk item or update a device item.
4173 *
4174 * Tasks that want to modify the chunk tree should reserve system space
4175 * before updating the chunk btree, by calling either
4176 * btrfs_reserve_chunk_metadata() or check_system_chunk().
4177 * It's possible that after a task reserves the space, it still ends up
4178 * here - this happens in the cases described above at do_chunk_alloc().
4179 * The task will have to either retry or fail.
4180 */
4181 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4182 return -ENOSPC;
4183
4184 do {
4185 spin_lock(&space_info->lock);
4186 if (force < space_info->force_alloc)
4187 force = space_info->force_alloc;
4188 should_alloc = should_alloc_chunk(fs_info, space_info, force);
4189 if (space_info->full) {
4190 /* No more free physical space */
4191 if (should_alloc)
4192 ret = -ENOSPC;
4193 else
4194 ret = 0;
4195 spin_unlock(&space_info->lock);
4196 return ret;
4197 } else if (!should_alloc) {
4198 spin_unlock(&space_info->lock);
4199 return 0;
4200 } else if (space_info->chunk_alloc) {
4201 /*
4202 * Someone is already allocating, so we need to block
4203 * until this someone is finished and then loop to
4204 * recheck if we should continue with our allocation
4205 * attempt.
4206 */
4207 wait_for_alloc = true;
4208 force = CHUNK_ALLOC_NO_FORCE;
4209 spin_unlock(&space_info->lock);
4210 mutex_lock(&fs_info->chunk_mutex);
4211 mutex_unlock(&fs_info->chunk_mutex);
4212 } else {
4213 /* Proceed with allocation */
4214 space_info->chunk_alloc = 1;
4215 wait_for_alloc = false;
4216 spin_unlock(&space_info->lock);
4217 }
4218
4219 cond_resched();
4220 } while (wait_for_alloc);
4221
4222 mutex_lock(&fs_info->chunk_mutex);
4223 trans->allocating_chunk = true;
4224
4225 /*
4226 * If we have mixed data/metadata chunks we want to make sure we keep
4227 * allocating mixed chunks instead of individual chunks.
4228 */
4229 if (btrfs_mixed_space_info(space_info))
4230 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4231
4232 /*
4233 * If we're doing a data chunk, go ahead and make sure that
4234 * we keep a reasonable number of metadata chunks allocated in the
4235 * FS as well.
4236 */
4237 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4238 fs_info->data_chunk_allocations++;
4239 if (!(fs_info->data_chunk_allocations %
4240 fs_info->metadata_ratio))
4241 force_metadata_allocation(fs_info);
4242 }
4243
4244 ret_bg = do_chunk_alloc(trans, space_info, flags);
4245 trans->allocating_chunk = false;
4246
4247 if (IS_ERR(ret_bg)) {
4248 ret = PTR_ERR(ret_bg);
4249 } else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) {
4250 /*
4251 * New block group is likely to be used soon. Try to activate
4252 * it now. Failure is OK for now.
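* (On a non-zoned filesystem btrfs_zone_activate() is expected to return
* true immediately, so calling it unconditionally here is safe.)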
4253 */
4254 btrfs_zone_activate(ret_bg);
4255 }
4256
4257 if (!ret)
4258 btrfs_put_block_group(ret_bg);
4259
4260 spin_lock(&space_info->lock);
4261 if (ret < 0) {
4262 if (ret == -ENOSPC)
4263 space_info->full = 1;
4264 else
4265 goto out;
4266 } else {
4267 ret = 1;
4268 space_info->max_extent_size = 0;
4269 }
4270
4271 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4272 out:
4273 space_info->chunk_alloc = 0;
4274 spin_unlock(&space_info->lock);
4275 mutex_unlock(&fs_info->chunk_mutex);
4276
4277 return ret;
4278 }
4279
4280 static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type)
4281 {
4282 u64 num_dev;
4283
4284 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
4285 if (!num_dev)
4286 num_dev = fs_info->fs_devices->rw_devices;
4287
4288 return num_dev;
4289 }
4290
4291 static void reserve_chunk_space(struct btrfs_trans_handle *trans,
4292 u64 bytes,
4293 u64 type)
4294 {
4295 struct btrfs_fs_info *fs_info = trans->fs_info;
4296 struct btrfs_space_info *info;
4297 u64 left;
4298 int ret = 0;
4299
4300 /*
4301 * Needed because we can end up allocating a system chunk, and we need
4302 * an atomic and race-free space reservation in the chunk block reserve.
4303 */
4304 lockdep_assert_held(&fs_info->chunk_mutex);
4305
4306 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4307 spin_lock(&info->lock);
4308 left = info->total_bytes - btrfs_space_info_used(info, true);
4309 spin_unlock(&info->lock);
4310
4311 if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4312 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
4313 left, bytes, type);
4314 btrfs_dump_space_info(fs_info, info, 0, false);
4315 }
4316
4317 if (left < bytes) {
4318 u64 flags = btrfs_system_alloc_profile(fs_info);
4319 struct btrfs_block_group *bg;
4320 struct btrfs_space_info *space_info;
4321
4322 space_info = btrfs_find_space_info(fs_info, flags);
4323 ASSERT(space_info);
4324
4325 /*
4326 * Ignore failure to create system chunk. We might end up not
4327 * needing it, as we might not need to COW all nodes/leaves from
4328 * the paths we visit in the chunk tree (they were already COWed
4329 * or created in the current transaction for example).
4330 */
4331 bg = btrfs_create_chunk(trans, space_info, flags);
4332 if (IS_ERR(bg)) {
4333 ret = PTR_ERR(bg);
4334 } else {
4335 /*
4336 * We have a new chunk. We also need to activate it on
4337 * a zoned filesystem.
4338 */
4339 ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
4340 if (ret < 0)
4341 return;
4342
4343 /*
4344 * If we fail to add the chunk item here, we end up
4345 * trying again at phase 2 of chunk allocation, at
4346 * btrfs_create_pending_block_groups(). So ignore
4347 * any error here. An ENOSPC here could happen, due to
4348 * the cases described at do_chunk_alloc() - the system
4349 * block group we just created was just turned into RO
4350 * mode by a scrub for example, or a running discard
4351 * temporarily removed its free space entries, etc.
4352 */
4353 btrfs_chunk_alloc_add_chunk_item(trans, bg);
4354 }
4355 }
4356
4357 if (!ret) {
4358 ret = btrfs_block_rsv_add(fs_info,
4359 &fs_info->chunk_block_rsv,
4360 bytes, BTRFS_RESERVE_NO_FLUSH);
4361 if (!ret)
4362 trans->chunk_bytes_reserved += bytes;
4363 }
4364 }
4365
4366 /*
4367 * Reserve space in the system space for allocating or removing a chunk.
4368 * The caller must be holding fs_info->chunk_mutex.
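*
* As a rough worked example, assuming the usual cost helpers (an insertion
* reserving nodesize * BTRFS_MAX_LEVEL * 2 bytes per item, an update half
* of that): with a 16K nodesize and a profile spanning 2 devices this asks
* for 2 * 16K * 8 (device item updates) + 16K * 8 * 2 (chunk item
* insertion), i.e. 512K, in the chunk block reserve.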
4369 */
4370 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
4371 {
4372 struct btrfs_fs_info *fs_info = trans->fs_info;
4373 const u64 num_devs = get_profile_num_devs(fs_info, type);
4374 u64 bytes;
4375
4376 /* num_devs device items to update and 1 chunk item to add or remove. */
4377 bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
4378 btrfs_calc_insert_metadata_size(fs_info, 1);
4379
4380 reserve_chunk_space(trans, bytes, type);
4381 }
4382
4383 /*
4384 * Reserve space in the system space, if needed, for doing a modification to the
4385 * chunk btree.
4386 *
4387 * @trans: A transaction handle.
4388 * @is_item_insertion: Indicate if the modification is for inserting a new item
4389 * in the chunk btree or if it's for the deletion or update
4390 * of an existing item.
4391 *
4392 * This is used in a context where we need to update the chunk btree outside
4393 * block group allocation and removal, to avoid a deadlock with a concurrent
4394 * task that is allocating a metadata or data block group and therefore needs to
4395 * update the chunk btree while holding the chunk mutex. After the update to the
4396 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called.
4397 *
4398 */
4399 void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
4400 bool is_item_insertion)
4401 {
4402 struct btrfs_fs_info *fs_info = trans->fs_info;
4403 u64 bytes;
4404
4405 if (is_item_insertion)
4406 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
4407 else
4408 bytes = btrfs_calc_metadata_size(fs_info, 1);
4409
4410 mutex_lock(&fs_info->chunk_mutex);
4411 reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
4412 mutex_unlock(&fs_info->chunk_mutex);
4413 }
4414
4415 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
4416 {
4417 struct btrfs_block_group *block_group;
4418
4419 block_group = btrfs_lookup_first_block_group(info, 0);
4420 while (block_group) {
4421 btrfs_wait_block_group_cache_done(block_group);
4422 spin_lock(&block_group->lock);
4423 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
4424 &block_group->runtime_flags)) {
4425 struct btrfs_inode *inode = block_group->inode;
4426
4427 block_group->inode = NULL;
4428 spin_unlock(&block_group->lock);
4429
4430 ASSERT(block_group->io_ctl.inode == NULL);
4431 iput(&inode->vfs_inode);
4432 } else {
4433 spin_unlock(&block_group->lock);
4434 }
4435 block_group = btrfs_next_block_group(block_group);
4436 }
4437 }
4438
4439 static void check_removing_space_info(struct btrfs_space_info *space_info)
4440 {
4441 struct btrfs_fs_info *info = space_info->fs_info;
4442
4443 if (space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY) {
4444 /* This is a top space_info, proceed with its children first. */
4445 for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) {
4446 if (space_info->sub_group[i]) {
4447 check_removing_space_info(space_info->sub_group[i]);
4448 kfree(space_info->sub_group[i]);
4449 space_info->sub_group[i] = NULL;
4450 }
4451 }
4452 }
4453
4454 /*
4455 * Do not hide this behind enospc_debug, this is actually important and
4456 * indicates a real bug if this happens.
4457 */
4458 if (WARN_ON(space_info->bytes_pinned > 0 || space_info->bytes_may_use > 0))
4459 btrfs_dump_space_info(info, space_info, 0, false);
4460
4461 /*
4462 * If there was a failure to cleanup a log tree, very likely due to an
4463 * IO failure on a writeback attempt of one or more of its extent
4464 * buffers, we could not do proper (and cheap) unaccounting of their
4465 * reserved space, so don't warn on bytes_reserved > 0 in that case.
4466 */
4467 if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
4468 !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
4469 if (WARN_ON(space_info->bytes_reserved > 0))
4470 btrfs_dump_space_info(info, space_info, 0, false);
4471 }
4472
4473 WARN_ON(space_info->reclaim_size > 0);
4474 }
4475
4476 /*
4477 * Must be called only after stopping all workers, since we could have block
4478 * group caching kthreads running, and therefore they could race with us if we
4479 * freed the block groups before stopping them.
4480 */
4481 int btrfs_free_block_groups(struct btrfs_fs_info *info)
4482 {
4483 struct btrfs_block_group *block_group;
4484 struct btrfs_space_info *space_info;
4485 struct btrfs_caching_control *caching_ctl;
4486 struct rb_node *n;
4487
4488 if (btrfs_is_zoned(info)) {
4489 if (info->active_meta_bg) {
4490 btrfs_put_block_group(info->active_meta_bg);
4491 info->active_meta_bg = NULL;
4492 }
4493 if (info->active_system_bg) {
4494 btrfs_put_block_group(info->active_system_bg);
4495 info->active_system_bg = NULL;
4496 }
4497 }
4498
4499 write_lock(&info->block_group_cache_lock);
4500 while (!list_empty(&info->caching_block_groups)) {
4501 caching_ctl = list_first_entry(&info->caching_block_groups,
4502 struct btrfs_caching_control, list);
4503 list_del(&caching_ctl->list);
4504 btrfs_put_caching_control(caching_ctl);
4505 }
4506 write_unlock(&info->block_group_cache_lock);
4507
4508 spin_lock(&info->unused_bgs_lock);
4509 while (!list_empty(&info->unused_bgs)) {
4510 block_group = list_first_entry(&info->unused_bgs,
4511 struct btrfs_block_group,
4512 bg_list);
4513 list_del_init(&block_group->bg_list);
4514 btrfs_put_block_group(block_group);
4515 }
4516
4517 while (!list_empty(&info->reclaim_bgs)) {
4518 block_group = list_first_entry(&info->reclaim_bgs,
4519 struct btrfs_block_group,
4520 bg_list);
4521 list_del_init(&block_group->bg_list);
4522 btrfs_put_block_group(block_group);
4523 }
4524 spin_unlock(&info->unused_bgs_lock);
4525
4526 spin_lock(&info->zone_active_bgs_lock);
4527 while (!list_empty(&info->zone_active_bgs)) {
4528 block_group = list_first_entry(&info->zone_active_bgs,
4529 struct btrfs_block_group,
4530 active_bg_list);
4531 list_del_init(&block_group->active_bg_list);
4532 btrfs_put_block_group(block_group);
4533 }
4534 spin_unlock(&info->zone_active_bgs_lock);
4535
4536 write_lock(&info->block_group_cache_lock);
4537 while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
4538 block_group = rb_entry(n, struct btrfs_block_group,
4539 cache_node);
4540 rb_erase_cached(&block_group->cache_node,
4541 &info->block_group_cache_tree);
4542 RB_CLEAR_NODE(&block_group->cache_node);
4543 write_unlock(&info->block_group_cache_lock);
4544
4545 down_write(&block_group->space_info->groups_sem);
4546 list_del(&block_group->list);
4547 up_write(&block_group->space_info->groups_sem);
4548
4549 /*
4550 * We haven't cached this block group, which means we could
4551 * possibly have excluded extents on this block group.
4552 */
4553 if (block_group->cached == BTRFS_CACHE_NO ||
4554 block_group->cached == BTRFS_CACHE_ERROR)
4555 btrfs_free_excluded_extents(block_group);
4556
4557 btrfs_remove_free_space_cache(block_group);
4558 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
4559 ASSERT(list_empty(&block_group->dirty_list));
4560 ASSERT(list_empty(&block_group->io_list));
4561 ASSERT(list_empty(&block_group->bg_list));
4562 ASSERT(refcount_read(&block_group->refs) == 1);
4563 ASSERT(block_group->swap_extents == 0);
4564 btrfs_put_block_group(block_group);
4565
4566 write_lock(&info->block_group_cache_lock);
4567 }
4568 write_unlock(&info->block_group_cache_lock);
4569
4570 btrfs_release_global_block_rsv(info);
4571
4572 while (!list_empty(&info->space_info)) {
4573 space_info = list_first_entry(&info->space_info,
4574 struct btrfs_space_info, list);
4575
4576 check_removing_space_info(space_info);
4577 list_del(&space_info->list);
4578 btrfs_sysfs_remove_space_info(space_info);
4579 }
4580 return 0;
4581 }
4582
4583 void btrfs_freeze_block_group(struct btrfs_block_group *cache)
4584 {
4585 atomic_inc(&cache->frozen);
4586 }
4587
4588 void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
4589 {
4590 struct btrfs_fs_info *fs_info = block_group->fs_info;
4591 bool cleanup;
4592
4593 spin_lock(&block_group->lock);
4594 cleanup = (atomic_dec_and_test(&block_group->frozen) &&
4595 test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
4596 spin_unlock(&block_group->lock);
4597
4598 if (cleanup) {
4599 struct btrfs_chunk_map *map;
4600
4601 map = btrfs_find_chunk_map(fs_info, block_group->start, 1);
4602 /* Logic error, can't happen. */
4603 ASSERT(map);
4604
4605 btrfs_remove_chunk_map(fs_info, map);
4606
4607 /* Once for our lookup reference. */
4608 btrfs_free_chunk_map(map);
4609
4610 /*
4611 * We may have left one free space entry, and other tasks
4612 * trimming this block group may have left one entry each.
4613 * Free them if any.
4614 */
4615 btrfs_remove_free_space_cache(block_group);
4616 }
4617 }
4618
4619 bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
4620 {
4621 bool ret = true;
4622
4623 spin_lock(&bg->lock);
4624 if (bg->ro)
4625 ret = false;
4626 else
4627 bg->swap_extents++;
4628 spin_unlock(&bg->lock);
4629
4630 return ret;
4631 }
4632
4633 void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
4634 {
4635 spin_lock(&bg->lock);
4636 ASSERT(!bg->ro);
4637 ASSERT(bg->swap_extents >= amount);
4638 bg->swap_extents -= amount;
4639 spin_unlock(&bg->lock);
4640 }
4641
4642 enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size)
4643 {
4644 if (size <= SZ_128K)
4645 return BTRFS_BG_SZ_SMALL;
4646 if (size <= SZ_8M)
4647 return BTRFS_BG_SZ_MEDIUM;
4648 return BTRFS_BG_SZ_LARGE;
4649 }
4650
4651 /*
4652 * Handle a block group allocating an extent in a size class
4653 *
4654 * @bg: The block group we allocated in.
4655 * @size_class: The size class of the allocation.
4656 * @force_wrong_size_class: Whether we are desperate enough to allow
4657 * mismatched size classes.
4658 *
4659 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
4660 * case of a race that leads to the wrong size class without
4661 * force_wrong_size_class set.
4662 *
4663 * find_free_extent will skip block groups with a mismatched size class until
4664 * it really needs to avoid ENOSPC. In that case it will set
4665 * force_wrong_size_class. However, if a block group is newly allocated and
4666 * doesn't yet have a size class, then it is possible for two allocations of
4667 * different sizes to race and both try to use it. The loser is caught here and
4668 * has to retry.
4669 */
4670 int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
4671 enum btrfs_block_group_size_class size_class,
4672 bool force_wrong_size_class)
4673 {
4674 ASSERT(size_class != BTRFS_BG_SZ_NONE);
4675
4676 /* The new allocation is in the right size class, do nothing */
4677 if (bg->size_class == size_class)
4678 return 0;
4679 /*
4680 * The new allocation is in a mismatched size class.
4681 * This means one of two things:
4682 *
4683 * 1. Two tasks in find_free_extent for different size_classes raced
4684 * and hit the same empty block_group. Make the loser try again.
4685 * 2. A call to find_free_extent got desperate enough to set
4686 * 'force_wrong_size_class'. Don't change the size_class, but allow the
4687 * allocation.
4688 */
4689 if (bg->size_class != BTRFS_BG_SZ_NONE) {
4690 if (force_wrong_size_class)
4691 return 0;
4692 return -EAGAIN;
4693 }
4694 /*
4695 * The happy new block group case: the new allocation is the first
4696 * one in the block_group so we set size_class.
4697 */
4698 bg->size_class = size_class;
4699
4700 return 0;
4701 }
4702
4703 bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg)
4704 {
4705 if (btrfs_is_zoned(bg->fs_info))
4706 return false;
4707 if (!btrfs_is_block_group_data_only(bg))
4708 return false;
4709 return true;
4710 }
4711
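/*
* A minimal sketch of how the size class helpers above fit together for a
* hypothetical caller (the real user is the extent allocator, via
* btrfs_add_reserved_bytes()):
*
*   enum btrfs_block_group_size_class sc;
*
*   sc = btrfs_calc_block_group_size_class(num_bytes);
*   if (btrfs_block_group_should_use_size_class(bg) &&
*       btrfs_use_block_group_size_class(bg, sc, false) == -EAGAIN) {
*           // lost the race for an empty block group, try another one
*   }
*/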