1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/sizes.h> 4 #include <linux/list_sort.h> 5 #include "misc.h" 6 #include "ctree.h" 7 #include "block-group.h" 8 #include "space-info.h" 9 #include "disk-io.h" 10 #include "free-space-cache.h" 11 #include "free-space-tree.h" 12 #include "volumes.h" 13 #include "transaction.h" 14 #include "ref-verify.h" 15 #include "sysfs.h" 16 #include "tree-log.h" 17 #include "delalloc-space.h" 18 #include "discard.h" 19 #include "raid56.h" 20 #include "zoned.h" 21 #include "fs.h" 22 #include "accessors.h" 23 #include "extent-tree.h" 24 25 #ifdef CONFIG_BTRFS_DEBUG 26 int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group) 27 { 28 struct btrfs_fs_info *fs_info = block_group->fs_info; 29 30 return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) && 31 block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || 32 (btrfs_test_opt(fs_info, FRAGMENT_DATA) && 33 block_group->flags & BTRFS_BLOCK_GROUP_DATA); 34 } 35 #endif 36 37 /* 38 * Return target flags in extended format or 0 if restripe for this chunk_type 39 * is not in progress 40 * 41 * Should be called with balance_lock held 42 */ 43 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) 44 { 45 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 46 u64 target = 0; 47 48 if (!bctl) 49 return 0; 50 51 if (flags & BTRFS_BLOCK_GROUP_DATA && 52 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { 53 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; 54 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM && 55 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 56 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; 57 } else if (flags & BTRFS_BLOCK_GROUP_METADATA && 58 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { 59 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; 60 } 61 62 return target; 63 } 64 65 /* 66 * @flags: available profiles in extended format (see ctree.h) 67 * 68 * Return reduced profile in chunk format. If profile changing is in progress 69 * (either running or paused) picks the target profile (if it's already 70 * available), otherwise falls back to plain reducing. 71 */ 72 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) 73 { 74 u64 num_devices = fs_info->fs_devices->rw_devices; 75 u64 target; 76 u64 raid_type; 77 u64 allowed = 0; 78 79 /* 80 * See if restripe for this chunk_type is in progress, if so try to 81 * reduce to the target profile 82 */ 83 spin_lock(&fs_info->balance_lock); 84 target = get_restripe_target(fs_info, flags); 85 if (target) { 86 spin_unlock(&fs_info->balance_lock); 87 return extended_to_chunk(target); 88 } 89 spin_unlock(&fs_info->balance_lock); 90 91 /* First, mask out the RAID levels which aren't possible */ 92 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { 93 if (num_devices >= btrfs_raid_array[raid_type].devs_min) 94 allowed |= btrfs_raid_array[raid_type].bg_flag; 95 } 96 allowed &= flags; 97 98 /* Select the highest-redundancy RAID level. */ 99 if (allowed & BTRFS_BLOCK_GROUP_RAID1C4) 100 allowed = BTRFS_BLOCK_GROUP_RAID1C4; 101 else if (allowed & BTRFS_BLOCK_GROUP_RAID6) 102 allowed = BTRFS_BLOCK_GROUP_RAID6; 103 else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3) 104 allowed = BTRFS_BLOCK_GROUP_RAID1C3; 105 else if (allowed & BTRFS_BLOCK_GROUP_RAID5) 106 allowed = BTRFS_BLOCK_GROUP_RAID5; 107 else if (allowed & BTRFS_BLOCK_GROUP_RAID10) 108 allowed = BTRFS_BLOCK_GROUP_RAID10; 109 else if (allowed & BTRFS_BLOCK_GROUP_RAID1) 110 allowed = BTRFS_BLOCK_GROUP_RAID1; 111 else if (allowed & BTRFS_BLOCK_GROUP_DUP) 112 allowed = BTRFS_BLOCK_GROUP_DUP; 113 else if (allowed & BTRFS_BLOCK_GROUP_RAID0) 114 allowed = BTRFS_BLOCK_GROUP_RAID0; 115 116 flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK; 117 118 return extended_to_chunk(flags | allowed); 119 } 120 121 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) 122 { 123 unsigned seq; 124 u64 flags; 125 126 do { 127 flags = orig_flags; 128 seq = read_seqbegin(&fs_info->profiles_lock); 129 130 if (flags & BTRFS_BLOCK_GROUP_DATA) 131 flags |= fs_info->avail_data_alloc_bits; 132 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 133 flags |= fs_info->avail_system_alloc_bits; 134 else if (flags & BTRFS_BLOCK_GROUP_METADATA) 135 flags |= fs_info->avail_metadata_alloc_bits; 136 } while (read_seqretry(&fs_info->profiles_lock, seq)); 137 138 return btrfs_reduce_alloc_profile(fs_info, flags); 139 } 140 141 void btrfs_get_block_group(struct btrfs_block_group *cache) 142 { 143 refcount_inc(&cache->refs); 144 } 145 146 void btrfs_put_block_group(struct btrfs_block_group *cache) 147 { 148 if (refcount_dec_and_test(&cache->refs)) { 149 WARN_ON(cache->pinned > 0); 150 /* 151 * If there was a failure to cleanup a log tree, very likely due 152 * to an IO failure on a writeback attempt of one or more of its 153 * extent buffers, we could not do proper (and cheap) unaccounting 154 * of their reserved space, so don't warn on reserved > 0 in that 155 * case. 156 */ 157 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) || 158 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info)) 159 WARN_ON(cache->reserved > 0); 160 161 /* 162 * A block_group shouldn't be on the discard_list anymore. 163 * Remove the block_group from the discard_list to prevent us 164 * from causing a panic due to NULL pointer dereference. 165 */ 166 if (WARN_ON(!list_empty(&cache->discard_list))) 167 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl, 168 cache); 169 170 kfree(cache->free_space_ctl); 171 btrfs_free_chunk_map(cache->physical_map); 172 kfree(cache); 173 } 174 } 175 176 /* 177 * This adds the block group to the fs_info rb tree for the block group cache 178 */ 179 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, 180 struct btrfs_block_group *block_group) 181 { 182 struct rb_node **p; 183 struct rb_node *parent = NULL; 184 struct btrfs_block_group *cache; 185 bool leftmost = true; 186 187 ASSERT(block_group->length != 0); 188 189 write_lock(&info->block_group_cache_lock); 190 p = &info->block_group_cache_tree.rb_root.rb_node; 191 192 while (*p) { 193 parent = *p; 194 cache = rb_entry(parent, struct btrfs_block_group, cache_node); 195 if (block_group->start < cache->start) { 196 p = &(*p)->rb_left; 197 } else if (block_group->start > cache->start) { 198 p = &(*p)->rb_right; 199 leftmost = false; 200 } else { 201 write_unlock(&info->block_group_cache_lock); 202 return -EEXIST; 203 } 204 } 205 206 rb_link_node(&block_group->cache_node, parent, p); 207 rb_insert_color_cached(&block_group->cache_node, 208 &info->block_group_cache_tree, leftmost); 209 210 write_unlock(&info->block_group_cache_lock); 211 212 return 0; 213 } 214 215 /* 216 * This will return the block group at or after bytenr if contains is 0, else 217 * it will return the block group that contains the bytenr 218 */ 219 static struct btrfs_block_group *block_group_cache_tree_search( 220 struct btrfs_fs_info *info, u64 bytenr, int contains) 221 { 222 struct btrfs_block_group *cache, *ret = NULL; 223 struct rb_node *n; 224 u64 end, start; 225 226 read_lock(&info->block_group_cache_lock); 227 n = info->block_group_cache_tree.rb_root.rb_node; 228 229 while (n) { 230 cache = rb_entry(n, struct btrfs_block_group, cache_node); 231 end = cache->start + cache->length - 1; 232 start = cache->start; 233 234 if (bytenr < start) { 235 if (!contains && (!ret || start < ret->start)) 236 ret = cache; 237 n = n->rb_left; 238 } else if (bytenr > start) { 239 if (contains && bytenr <= end) { 240 ret = cache; 241 break; 242 } 243 n = n->rb_right; 244 } else { 245 ret = cache; 246 break; 247 } 248 } 249 if (ret) 250 btrfs_get_block_group(ret); 251 read_unlock(&info->block_group_cache_lock); 252 253 return ret; 254 } 255 256 /* 257 * Return the block group that starts at or after bytenr 258 */ 259 struct btrfs_block_group *btrfs_lookup_first_block_group( 260 struct btrfs_fs_info *info, u64 bytenr) 261 { 262 return block_group_cache_tree_search(info, bytenr, 0); 263 } 264 265 /* 266 * Return the block group that contains the given bytenr 267 */ 268 struct btrfs_block_group *btrfs_lookup_block_group( 269 struct btrfs_fs_info *info, u64 bytenr) 270 { 271 return block_group_cache_tree_search(info, bytenr, 1); 272 } 273 274 struct btrfs_block_group *btrfs_next_block_group( 275 struct btrfs_block_group *cache) 276 { 277 struct btrfs_fs_info *fs_info = cache->fs_info; 278 struct rb_node *node; 279 280 read_lock(&fs_info->block_group_cache_lock); 281 282 /* If our block group was removed, we need a full search. */ 283 if (RB_EMPTY_NODE(&cache->cache_node)) { 284 const u64 next_bytenr = cache->start + cache->length; 285 286 read_unlock(&fs_info->block_group_cache_lock); 287 btrfs_put_block_group(cache); 288 return btrfs_lookup_first_block_group(fs_info, next_bytenr); 289 } 290 node = rb_next(&cache->cache_node); 291 btrfs_put_block_group(cache); 292 if (node) { 293 cache = rb_entry(node, struct btrfs_block_group, cache_node); 294 btrfs_get_block_group(cache); 295 } else 296 cache = NULL; 297 read_unlock(&fs_info->block_group_cache_lock); 298 return cache; 299 } 300 301 /* 302 * Check if we can do a NOCOW write for a given extent. 303 * 304 * @fs_info: The filesystem information object. 305 * @bytenr: Logical start address of the extent. 306 * 307 * Check if we can do a NOCOW write for the given extent, and increments the 308 * number of NOCOW writers in the block group that contains the extent, as long 309 * as the block group exists and it's currently not in read-only mode. 310 * 311 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller 312 * is responsible for calling btrfs_dec_nocow_writers() later. 313 * 314 * Or NULL if we can not do a NOCOW write 315 */ 316 struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, 317 u64 bytenr) 318 { 319 struct btrfs_block_group *bg; 320 bool can_nocow = true; 321 322 bg = btrfs_lookup_block_group(fs_info, bytenr); 323 if (!bg) 324 return NULL; 325 326 spin_lock(&bg->lock); 327 if (bg->ro) 328 can_nocow = false; 329 else 330 atomic_inc(&bg->nocow_writers); 331 spin_unlock(&bg->lock); 332 333 if (!can_nocow) { 334 btrfs_put_block_group(bg); 335 return NULL; 336 } 337 338 /* No put on block group, done by btrfs_dec_nocow_writers(). */ 339 return bg; 340 } 341 342 /* 343 * Decrement the number of NOCOW writers in a block group. 344 * 345 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(), 346 * and on the block group returned by that call. Typically this is called after 347 * creating an ordered extent for a NOCOW write, to prevent races with scrub and 348 * relocation. 349 * 350 * After this call, the caller should not use the block group anymore. It it wants 351 * to use it, then it should get a reference on it before calling this function. 352 */ 353 void btrfs_dec_nocow_writers(struct btrfs_block_group *bg) 354 { 355 if (atomic_dec_and_test(&bg->nocow_writers)) 356 wake_up_var(&bg->nocow_writers); 357 358 /* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */ 359 btrfs_put_block_group(bg); 360 } 361 362 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg) 363 { 364 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); 365 } 366 367 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, 368 const u64 start) 369 { 370 struct btrfs_block_group *bg; 371 372 bg = btrfs_lookup_block_group(fs_info, start); 373 ASSERT(bg); 374 if (atomic_dec_and_test(&bg->reservations)) 375 wake_up_var(&bg->reservations); 376 btrfs_put_block_group(bg); 377 } 378 379 void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg) 380 { 381 struct btrfs_space_info *space_info = bg->space_info; 382 383 ASSERT(bg->ro); 384 385 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) 386 return; 387 388 /* 389 * Our block group is read only but before we set it to read only, 390 * some task might have had allocated an extent from it already, but it 391 * has not yet created a respective ordered extent (and added it to a 392 * root's list of ordered extents). 393 * Therefore wait for any task currently allocating extents, since the 394 * block group's reservations counter is incremented while a read lock 395 * on the groups' semaphore is held and decremented after releasing 396 * the read access on that semaphore and creating the ordered extent. 397 */ 398 down_write(&space_info->groups_sem); 399 up_write(&space_info->groups_sem); 400 401 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); 402 } 403 404 struct btrfs_caching_control *btrfs_get_caching_control( 405 struct btrfs_block_group *cache) 406 { 407 struct btrfs_caching_control *ctl; 408 409 spin_lock(&cache->lock); 410 if (!cache->caching_ctl) { 411 spin_unlock(&cache->lock); 412 return NULL; 413 } 414 415 ctl = cache->caching_ctl; 416 refcount_inc(&ctl->count); 417 spin_unlock(&cache->lock); 418 return ctl; 419 } 420 421 void btrfs_put_caching_control(struct btrfs_caching_control *ctl) 422 { 423 if (refcount_dec_and_test(&ctl->count)) 424 kfree(ctl); 425 } 426 427 /* 428 * When we wait for progress in the block group caching, its because our 429 * allocation attempt failed at least once. So, we must sleep and let some 430 * progress happen before we try again. 431 * 432 * This function will sleep at least once waiting for new free space to show 433 * up, and then it will check the block group free space numbers for our min 434 * num_bytes. Another option is to have it go ahead and look in the rbtree for 435 * a free extent of a given size, but this is a good start. 436 * 437 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using 438 * any of the information in this block group. 439 */ 440 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, 441 u64 num_bytes) 442 { 443 struct btrfs_caching_control *caching_ctl; 444 int progress; 445 446 caching_ctl = btrfs_get_caching_control(cache); 447 if (!caching_ctl) 448 return; 449 450 /* 451 * We've already failed to allocate from this block group, so even if 452 * there's enough space in the block group it isn't contiguous enough to 453 * allow for an allocation, so wait for at least the next wakeup tick, 454 * or for the thing to be done. 455 */ 456 progress = atomic_read(&caching_ctl->progress); 457 458 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || 459 (progress != atomic_read(&caching_ctl->progress) && 460 (cache->free_space_ctl->free_space >= num_bytes))); 461 462 btrfs_put_caching_control(caching_ctl); 463 } 464 465 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache, 466 struct btrfs_caching_control *caching_ctl) 467 { 468 wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); 469 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0; 470 } 471 472 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) 473 { 474 struct btrfs_caching_control *caching_ctl; 475 int ret; 476 477 caching_ctl = btrfs_get_caching_control(cache); 478 if (!caching_ctl) 479 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; 480 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); 481 btrfs_put_caching_control(caching_ctl); 482 return ret; 483 } 484 485 #ifdef CONFIG_BTRFS_DEBUG 486 static void fragment_free_space(struct btrfs_block_group *block_group) 487 { 488 struct btrfs_fs_info *fs_info = block_group->fs_info; 489 u64 start = block_group->start; 490 u64 len = block_group->length; 491 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? 492 fs_info->nodesize : fs_info->sectorsize; 493 u64 step = chunk << 1; 494 495 while (len > chunk) { 496 btrfs_remove_free_space(block_group, start, chunk); 497 start += step; 498 if (len < step) 499 len = 0; 500 else 501 len -= step; 502 } 503 } 504 #endif 505 506 /* 507 * Add a free space range to the in memory free space cache of a block group. 508 * This checks if the range contains super block locations and any such 509 * locations are not added to the free space cache. 510 * 511 * @block_group: The target block group. 512 * @start: Start offset of the range. 513 * @end: End offset of the range (exclusive). 514 * @total_added_ret: Optional pointer to return the total amount of space 515 * added to the block group's free space cache. 516 * 517 * Returns 0 on success or < 0 on error. 518 */ 519 int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start, 520 u64 end, u64 *total_added_ret) 521 { 522 struct btrfs_fs_info *info = block_group->fs_info; 523 u64 extent_start, extent_end, size; 524 int ret; 525 526 if (total_added_ret) 527 *total_added_ret = 0; 528 529 while (start < end) { 530 if (!find_first_extent_bit(&info->excluded_extents, start, 531 &extent_start, &extent_end, 532 EXTENT_DIRTY | EXTENT_UPTODATE, 533 NULL)) 534 break; 535 536 if (extent_start <= start) { 537 start = extent_end + 1; 538 } else if (extent_start > start && extent_start < end) { 539 size = extent_start - start; 540 ret = btrfs_add_free_space_async_trimmed(block_group, 541 start, size); 542 if (ret) 543 return ret; 544 if (total_added_ret) 545 *total_added_ret += size; 546 start = extent_end + 1; 547 } else { 548 break; 549 } 550 } 551 552 if (start < end) { 553 size = end - start; 554 ret = btrfs_add_free_space_async_trimmed(block_group, start, 555 size); 556 if (ret) 557 return ret; 558 if (total_added_ret) 559 *total_added_ret += size; 560 } 561 562 return 0; 563 } 564 565 /* 566 * Get an arbitrary extent item index / max_index through the block group 567 * 568 * @block_group the block group to sample from 569 * @index: the integral step through the block group to grab from 570 * @max_index: the granularity of the sampling 571 * @key: return value parameter for the item we find 572 * 573 * Pre-conditions on indices: 574 * 0 <= index <= max_index 575 * 0 < max_index 576 * 577 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative 578 * error code on error. 579 */ 580 static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl, 581 struct btrfs_block_group *block_group, 582 int index, int max_index, 583 struct btrfs_key *found_key) 584 { 585 struct btrfs_fs_info *fs_info = block_group->fs_info; 586 struct btrfs_root *extent_root; 587 u64 search_offset; 588 u64 search_end = block_group->start + block_group->length; 589 struct btrfs_path *path; 590 struct btrfs_key search_key; 591 int ret = 0; 592 593 ASSERT(index >= 0); 594 ASSERT(index <= max_index); 595 ASSERT(max_index > 0); 596 lockdep_assert_held(&caching_ctl->mutex); 597 lockdep_assert_held_read(&fs_info->commit_root_sem); 598 599 path = btrfs_alloc_path(); 600 if (!path) 601 return -ENOMEM; 602 603 extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start, 604 BTRFS_SUPER_INFO_OFFSET)); 605 606 path->skip_locking = 1; 607 path->search_commit_root = 1; 608 path->reada = READA_FORWARD; 609 610 search_offset = index * div_u64(block_group->length, max_index); 611 search_key.objectid = block_group->start + search_offset; 612 search_key.type = BTRFS_EXTENT_ITEM_KEY; 613 search_key.offset = 0; 614 615 btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) { 616 /* Success; sampled an extent item in the block group */ 617 if (found_key->type == BTRFS_EXTENT_ITEM_KEY && 618 found_key->objectid >= block_group->start && 619 found_key->objectid + found_key->offset <= search_end) 620 break; 621 622 /* We can't possibly find a valid extent item anymore */ 623 if (found_key->objectid >= search_end) { 624 ret = 1; 625 break; 626 } 627 } 628 629 lockdep_assert_held(&caching_ctl->mutex); 630 lockdep_assert_held_read(&fs_info->commit_root_sem); 631 btrfs_free_path(path); 632 return ret; 633 } 634 635 /* 636 * Best effort attempt to compute a block group's size class while caching it. 637 * 638 * @block_group: the block group we are caching 639 * 640 * We cannot infer the size class while adding free space extents, because that 641 * logic doesn't care about contiguous file extents (it doesn't differentiate 642 * between a 100M extent and 100 contiguous 1M extents). So we need to read the 643 * file extent items. Reading all of them is quite wasteful, because usually 644 * only a handful are enough to give a good answer. Therefore, we just grab 5 of 645 * them at even steps through the block group and pick the smallest size class 646 * we see. Since size class is best effort, and not guaranteed in general, 647 * inaccuracy is acceptable. 648 * 649 * To be more explicit about why this algorithm makes sense: 650 * 651 * If we are caching in a block group from disk, then there are three major cases 652 * to consider: 653 * 1. the block group is well behaved and all extents in it are the same size 654 * class. 655 * 2. the block group is mostly one size class with rare exceptions for last 656 * ditch allocations 657 * 3. the block group was populated before size classes and can have a totally 658 * arbitrary mix of size classes. 659 * 660 * In case 1, looking at any extent in the block group will yield the correct 661 * result. For the mixed cases, taking the minimum size class seems like a good 662 * approximation, since gaps from frees will be usable to the size class. For 663 * 2., a small handful of file extents is likely to yield the right answer. For 664 * 3, we can either read every file extent, or admit that this is best effort 665 * anyway and try to stay fast. 666 * 667 * Returns: 0 on success, negative error code on error. 668 */ 669 static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl, 670 struct btrfs_block_group *block_group) 671 { 672 struct btrfs_fs_info *fs_info = block_group->fs_info; 673 struct btrfs_key key; 674 int i; 675 u64 min_size = block_group->length; 676 enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE; 677 int ret; 678 679 if (!btrfs_block_group_should_use_size_class(block_group)) 680 return 0; 681 682 lockdep_assert_held(&caching_ctl->mutex); 683 lockdep_assert_held_read(&fs_info->commit_root_sem); 684 for (i = 0; i < 5; ++i) { 685 ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key); 686 if (ret < 0) 687 goto out; 688 if (ret > 0) 689 continue; 690 min_size = min_t(u64, min_size, key.offset); 691 size_class = btrfs_calc_block_group_size_class(min_size); 692 } 693 if (size_class != BTRFS_BG_SZ_NONE) { 694 spin_lock(&block_group->lock); 695 block_group->size_class = size_class; 696 spin_unlock(&block_group->lock); 697 } 698 out: 699 return ret; 700 } 701 702 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) 703 { 704 struct btrfs_block_group *block_group = caching_ctl->block_group; 705 struct btrfs_fs_info *fs_info = block_group->fs_info; 706 struct btrfs_root *extent_root; 707 struct btrfs_path *path; 708 struct extent_buffer *leaf; 709 struct btrfs_key key; 710 u64 total_found = 0; 711 u64 last = 0; 712 u32 nritems; 713 int ret; 714 bool wakeup = true; 715 716 path = btrfs_alloc_path(); 717 if (!path) 718 return -ENOMEM; 719 720 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); 721 extent_root = btrfs_extent_root(fs_info, last); 722 723 #ifdef CONFIG_BTRFS_DEBUG 724 /* 725 * If we're fragmenting we don't want to make anybody think we can 726 * allocate from this block group until we've had a chance to fragment 727 * the free space. 728 */ 729 if (btrfs_should_fragment_free_space(block_group)) 730 wakeup = false; 731 #endif 732 /* 733 * We don't want to deadlock with somebody trying to allocate a new 734 * extent for the extent root while also trying to search the extent 735 * root to add free space. So we skip locking and search the commit 736 * root, since its read-only 737 */ 738 path->skip_locking = 1; 739 path->search_commit_root = 1; 740 path->reada = READA_FORWARD; 741 742 key.objectid = last; 743 key.offset = 0; 744 key.type = BTRFS_EXTENT_ITEM_KEY; 745 746 next: 747 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 748 if (ret < 0) 749 goto out; 750 751 leaf = path->nodes[0]; 752 nritems = btrfs_header_nritems(leaf); 753 754 while (1) { 755 if (btrfs_fs_closing(fs_info) > 1) { 756 last = (u64)-1; 757 break; 758 } 759 760 if (path->slots[0] < nritems) { 761 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 762 } else { 763 ret = btrfs_find_next_key(extent_root, path, &key, 0, 0); 764 if (ret) 765 break; 766 767 if (need_resched() || 768 rwsem_is_contended(&fs_info->commit_root_sem)) { 769 btrfs_release_path(path); 770 up_read(&fs_info->commit_root_sem); 771 mutex_unlock(&caching_ctl->mutex); 772 cond_resched(); 773 mutex_lock(&caching_ctl->mutex); 774 down_read(&fs_info->commit_root_sem); 775 goto next; 776 } 777 778 ret = btrfs_next_leaf(extent_root, path); 779 if (ret < 0) 780 goto out; 781 if (ret) 782 break; 783 leaf = path->nodes[0]; 784 nritems = btrfs_header_nritems(leaf); 785 continue; 786 } 787 788 if (key.objectid < last) { 789 key.objectid = last; 790 key.offset = 0; 791 key.type = BTRFS_EXTENT_ITEM_KEY; 792 btrfs_release_path(path); 793 goto next; 794 } 795 796 if (key.objectid < block_group->start) { 797 path->slots[0]++; 798 continue; 799 } 800 801 if (key.objectid >= block_group->start + block_group->length) 802 break; 803 804 if (key.type == BTRFS_EXTENT_ITEM_KEY || 805 key.type == BTRFS_METADATA_ITEM_KEY) { 806 u64 space_added; 807 808 ret = btrfs_add_new_free_space(block_group, last, 809 key.objectid, &space_added); 810 if (ret) 811 goto out; 812 total_found += space_added; 813 if (key.type == BTRFS_METADATA_ITEM_KEY) 814 last = key.objectid + 815 fs_info->nodesize; 816 else 817 last = key.objectid + key.offset; 818 819 if (total_found > CACHING_CTL_WAKE_UP) { 820 total_found = 0; 821 if (wakeup) { 822 atomic_inc(&caching_ctl->progress); 823 wake_up(&caching_ctl->wait); 824 } 825 } 826 } 827 path->slots[0]++; 828 } 829 830 ret = btrfs_add_new_free_space(block_group, last, 831 block_group->start + block_group->length, 832 NULL); 833 out: 834 btrfs_free_path(path); 835 return ret; 836 } 837 838 static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg) 839 { 840 clear_extent_bits(&bg->fs_info->excluded_extents, bg->start, 841 bg->start + bg->length - 1, EXTENT_UPTODATE); 842 } 843 844 static noinline void caching_thread(struct btrfs_work *work) 845 { 846 struct btrfs_block_group *block_group; 847 struct btrfs_fs_info *fs_info; 848 struct btrfs_caching_control *caching_ctl; 849 int ret; 850 851 caching_ctl = container_of(work, struct btrfs_caching_control, work); 852 block_group = caching_ctl->block_group; 853 fs_info = block_group->fs_info; 854 855 mutex_lock(&caching_ctl->mutex); 856 down_read(&fs_info->commit_root_sem); 857 858 load_block_group_size_class(caching_ctl, block_group); 859 if (btrfs_test_opt(fs_info, SPACE_CACHE)) { 860 ret = load_free_space_cache(block_group); 861 if (ret == 1) { 862 ret = 0; 863 goto done; 864 } 865 866 /* 867 * We failed to load the space cache, set ourselves to 868 * CACHE_STARTED and carry on. 869 */ 870 spin_lock(&block_group->lock); 871 block_group->cached = BTRFS_CACHE_STARTED; 872 spin_unlock(&block_group->lock); 873 wake_up(&caching_ctl->wait); 874 } 875 876 /* 877 * If we are in the transaction that populated the free space tree we 878 * can't actually cache from the free space tree as our commit root and 879 * real root are the same, so we could change the contents of the blocks 880 * while caching. Instead do the slow caching in this case, and after 881 * the transaction has committed we will be safe. 882 */ 883 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && 884 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) 885 ret = load_free_space_tree(caching_ctl); 886 else 887 ret = load_extent_tree_free(caching_ctl); 888 done: 889 spin_lock(&block_group->lock); 890 block_group->caching_ctl = NULL; 891 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; 892 spin_unlock(&block_group->lock); 893 894 #ifdef CONFIG_BTRFS_DEBUG 895 if (btrfs_should_fragment_free_space(block_group)) { 896 u64 bytes_used; 897 898 spin_lock(&block_group->space_info->lock); 899 spin_lock(&block_group->lock); 900 bytes_used = block_group->length - block_group->used; 901 block_group->space_info->bytes_used += bytes_used >> 1; 902 spin_unlock(&block_group->lock); 903 spin_unlock(&block_group->space_info->lock); 904 fragment_free_space(block_group); 905 } 906 #endif 907 908 up_read(&fs_info->commit_root_sem); 909 btrfs_free_excluded_extents(block_group); 910 mutex_unlock(&caching_ctl->mutex); 911 912 wake_up(&caching_ctl->wait); 913 914 btrfs_put_caching_control(caching_ctl); 915 btrfs_put_block_group(block_group); 916 } 917 918 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) 919 { 920 struct btrfs_fs_info *fs_info = cache->fs_info; 921 struct btrfs_caching_control *caching_ctl = NULL; 922 int ret = 0; 923 924 /* Allocator for zoned filesystems does not use the cache at all */ 925 if (btrfs_is_zoned(fs_info)) 926 return 0; 927 928 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); 929 if (!caching_ctl) 930 return -ENOMEM; 931 932 INIT_LIST_HEAD(&caching_ctl->list); 933 mutex_init(&caching_ctl->mutex); 934 init_waitqueue_head(&caching_ctl->wait); 935 caching_ctl->block_group = cache; 936 refcount_set(&caching_ctl->count, 2); 937 atomic_set(&caching_ctl->progress, 0); 938 btrfs_init_work(&caching_ctl->work, caching_thread, NULL); 939 940 spin_lock(&cache->lock); 941 if (cache->cached != BTRFS_CACHE_NO) { 942 kfree(caching_ctl); 943 944 caching_ctl = cache->caching_ctl; 945 if (caching_ctl) 946 refcount_inc(&caching_ctl->count); 947 spin_unlock(&cache->lock); 948 goto out; 949 } 950 WARN_ON(cache->caching_ctl); 951 cache->caching_ctl = caching_ctl; 952 cache->cached = BTRFS_CACHE_STARTED; 953 spin_unlock(&cache->lock); 954 955 write_lock(&fs_info->block_group_cache_lock); 956 refcount_inc(&caching_ctl->count); 957 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); 958 write_unlock(&fs_info->block_group_cache_lock); 959 960 btrfs_get_block_group(cache); 961 962 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); 963 out: 964 if (wait && caching_ctl) 965 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); 966 if (caching_ctl) 967 btrfs_put_caching_control(caching_ctl); 968 969 return ret; 970 } 971 972 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 973 { 974 u64 extra_flags = chunk_to_extended(flags) & 975 BTRFS_EXTENDED_PROFILE_MASK; 976 977 write_seqlock(&fs_info->profiles_lock); 978 if (flags & BTRFS_BLOCK_GROUP_DATA) 979 fs_info->avail_data_alloc_bits &= ~extra_flags; 980 if (flags & BTRFS_BLOCK_GROUP_METADATA) 981 fs_info->avail_metadata_alloc_bits &= ~extra_flags; 982 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 983 fs_info->avail_system_alloc_bits &= ~extra_flags; 984 write_sequnlock(&fs_info->profiles_lock); 985 } 986 987 /* 988 * Clear incompat bits for the following feature(s): 989 * 990 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group 991 * in the whole filesystem 992 * 993 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups 994 */ 995 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) 996 { 997 bool found_raid56 = false; 998 bool found_raid1c34 = false; 999 1000 if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) || 1001 (flags & BTRFS_BLOCK_GROUP_RAID1C3) || 1002 (flags & BTRFS_BLOCK_GROUP_RAID1C4)) { 1003 struct list_head *head = &fs_info->space_info; 1004 struct btrfs_space_info *sinfo; 1005 1006 list_for_each_entry_rcu(sinfo, head, list) { 1007 down_read(&sinfo->groups_sem); 1008 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) 1009 found_raid56 = true; 1010 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) 1011 found_raid56 = true; 1012 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3])) 1013 found_raid1c34 = true; 1014 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4])) 1015 found_raid1c34 = true; 1016 up_read(&sinfo->groups_sem); 1017 } 1018 if (!found_raid56) 1019 btrfs_clear_fs_incompat(fs_info, RAID56); 1020 if (!found_raid1c34) 1021 btrfs_clear_fs_incompat(fs_info, RAID1C34); 1022 } 1023 } 1024 1025 static int remove_block_group_item(struct btrfs_trans_handle *trans, 1026 struct btrfs_path *path, 1027 struct btrfs_block_group *block_group) 1028 { 1029 struct btrfs_fs_info *fs_info = trans->fs_info; 1030 struct btrfs_root *root; 1031 struct btrfs_key key; 1032 int ret; 1033 1034 root = btrfs_block_group_root(fs_info); 1035 key.objectid = block_group->start; 1036 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 1037 key.offset = block_group->length; 1038 1039 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1040 if (ret > 0) 1041 ret = -ENOENT; 1042 if (ret < 0) 1043 return ret; 1044 1045 ret = btrfs_del_item(trans, root, path); 1046 return ret; 1047 } 1048 1049 int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 1050 struct btrfs_chunk_map *map) 1051 { 1052 struct btrfs_fs_info *fs_info = trans->fs_info; 1053 struct btrfs_path *path; 1054 struct btrfs_block_group *block_group; 1055 struct btrfs_free_cluster *cluster; 1056 struct inode *inode; 1057 struct kobject *kobj = NULL; 1058 int ret; 1059 int index; 1060 int factor; 1061 struct btrfs_caching_control *caching_ctl = NULL; 1062 bool remove_map; 1063 bool remove_rsv = false; 1064 1065 block_group = btrfs_lookup_block_group(fs_info, map->start); 1066 BUG_ON(!block_group); 1067 BUG_ON(!block_group->ro); 1068 1069 trace_btrfs_remove_block_group(block_group); 1070 /* 1071 * Free the reserved super bytes from this block group before 1072 * remove it. 1073 */ 1074 btrfs_free_excluded_extents(block_group); 1075 btrfs_free_ref_tree_range(fs_info, block_group->start, 1076 block_group->length); 1077 1078 index = btrfs_bg_flags_to_raid_index(block_group->flags); 1079 factor = btrfs_bg_type_to_factor(block_group->flags); 1080 1081 /* make sure this block group isn't part of an allocation cluster */ 1082 cluster = &fs_info->data_alloc_cluster; 1083 spin_lock(&cluster->refill_lock); 1084 btrfs_return_cluster_to_free_space(block_group, cluster); 1085 spin_unlock(&cluster->refill_lock); 1086 1087 /* 1088 * make sure this block group isn't part of a metadata 1089 * allocation cluster 1090 */ 1091 cluster = &fs_info->meta_alloc_cluster; 1092 spin_lock(&cluster->refill_lock); 1093 btrfs_return_cluster_to_free_space(block_group, cluster); 1094 spin_unlock(&cluster->refill_lock); 1095 1096 btrfs_clear_treelog_bg(block_group); 1097 btrfs_clear_data_reloc_bg(block_group); 1098 1099 path = btrfs_alloc_path(); 1100 if (!path) { 1101 ret = -ENOMEM; 1102 goto out; 1103 } 1104 1105 /* 1106 * get the inode first so any iput calls done for the io_list 1107 * aren't the final iput (no unlinks allowed now) 1108 */ 1109 inode = lookup_free_space_inode(block_group, path); 1110 1111 mutex_lock(&trans->transaction->cache_write_mutex); 1112 /* 1113 * Make sure our free space cache IO is done before removing the 1114 * free space inode 1115 */ 1116 spin_lock(&trans->transaction->dirty_bgs_lock); 1117 if (!list_empty(&block_group->io_list)) { 1118 list_del_init(&block_group->io_list); 1119 1120 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); 1121 1122 spin_unlock(&trans->transaction->dirty_bgs_lock); 1123 btrfs_wait_cache_io(trans, block_group, path); 1124 btrfs_put_block_group(block_group); 1125 spin_lock(&trans->transaction->dirty_bgs_lock); 1126 } 1127 1128 if (!list_empty(&block_group->dirty_list)) { 1129 list_del_init(&block_group->dirty_list); 1130 remove_rsv = true; 1131 btrfs_put_block_group(block_group); 1132 } 1133 spin_unlock(&trans->transaction->dirty_bgs_lock); 1134 mutex_unlock(&trans->transaction->cache_write_mutex); 1135 1136 ret = btrfs_remove_free_space_inode(trans, inode, block_group); 1137 if (ret) 1138 goto out; 1139 1140 write_lock(&fs_info->block_group_cache_lock); 1141 rb_erase_cached(&block_group->cache_node, 1142 &fs_info->block_group_cache_tree); 1143 RB_CLEAR_NODE(&block_group->cache_node); 1144 1145 /* Once for the block groups rbtree */ 1146 btrfs_put_block_group(block_group); 1147 1148 write_unlock(&fs_info->block_group_cache_lock); 1149 1150 down_write(&block_group->space_info->groups_sem); 1151 /* 1152 * we must use list_del_init so people can check to see if they 1153 * are still on the list after taking the semaphore 1154 */ 1155 list_del_init(&block_group->list); 1156 if (list_empty(&block_group->space_info->block_groups[index])) { 1157 kobj = block_group->space_info->block_group_kobjs[index]; 1158 block_group->space_info->block_group_kobjs[index] = NULL; 1159 clear_avail_alloc_bits(fs_info, block_group->flags); 1160 } 1161 up_write(&block_group->space_info->groups_sem); 1162 clear_incompat_bg_bits(fs_info, block_group->flags); 1163 if (kobj) { 1164 kobject_del(kobj); 1165 kobject_put(kobj); 1166 } 1167 1168 if (block_group->cached == BTRFS_CACHE_STARTED) 1169 btrfs_wait_block_group_cache_done(block_group); 1170 1171 write_lock(&fs_info->block_group_cache_lock); 1172 caching_ctl = btrfs_get_caching_control(block_group); 1173 if (!caching_ctl) { 1174 struct btrfs_caching_control *ctl; 1175 1176 list_for_each_entry(ctl, &fs_info->caching_block_groups, list) { 1177 if (ctl->block_group == block_group) { 1178 caching_ctl = ctl; 1179 refcount_inc(&caching_ctl->count); 1180 break; 1181 } 1182 } 1183 } 1184 if (caching_ctl) 1185 list_del_init(&caching_ctl->list); 1186 write_unlock(&fs_info->block_group_cache_lock); 1187 1188 if (caching_ctl) { 1189 /* Once for the caching bgs list and once for us. */ 1190 btrfs_put_caching_control(caching_ctl); 1191 btrfs_put_caching_control(caching_ctl); 1192 } 1193 1194 spin_lock(&trans->transaction->dirty_bgs_lock); 1195 WARN_ON(!list_empty(&block_group->dirty_list)); 1196 WARN_ON(!list_empty(&block_group->io_list)); 1197 spin_unlock(&trans->transaction->dirty_bgs_lock); 1198 1199 btrfs_remove_free_space_cache(block_group); 1200 1201 spin_lock(&block_group->space_info->lock); 1202 list_del_init(&block_group->ro_list); 1203 1204 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 1205 WARN_ON(block_group->space_info->total_bytes 1206 < block_group->length); 1207 WARN_ON(block_group->space_info->bytes_readonly 1208 < block_group->length - block_group->zone_unusable); 1209 WARN_ON(block_group->space_info->bytes_zone_unusable 1210 < block_group->zone_unusable); 1211 WARN_ON(block_group->space_info->disk_total 1212 < block_group->length * factor); 1213 } 1214 block_group->space_info->total_bytes -= block_group->length; 1215 block_group->space_info->bytes_readonly -= 1216 (block_group->length - block_group->zone_unusable); 1217 block_group->space_info->bytes_zone_unusable -= 1218 block_group->zone_unusable; 1219 block_group->space_info->disk_total -= block_group->length * factor; 1220 1221 spin_unlock(&block_group->space_info->lock); 1222 1223 /* 1224 * Remove the free space for the block group from the free space tree 1225 * and the block group's item from the extent tree before marking the 1226 * block group as removed. This is to prevent races with tasks that 1227 * freeze and unfreeze a block group, this task and another task 1228 * allocating a new block group - the unfreeze task ends up removing 1229 * the block group's extent map before the task calling this function 1230 * deletes the block group item from the extent tree, allowing for 1231 * another task to attempt to create another block group with the same 1232 * item key (and failing with -EEXIST and a transaction abort). 1233 */ 1234 ret = remove_block_group_free_space(trans, block_group); 1235 if (ret) 1236 goto out; 1237 1238 ret = remove_block_group_item(trans, path, block_group); 1239 if (ret < 0) 1240 goto out; 1241 1242 spin_lock(&block_group->lock); 1243 set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags); 1244 1245 /* 1246 * At this point trimming or scrub can't start on this block group, 1247 * because we removed the block group from the rbtree 1248 * fs_info->block_group_cache_tree so no one can't find it anymore and 1249 * even if someone already got this block group before we removed it 1250 * from the rbtree, they have already incremented block_group->frozen - 1251 * if they didn't, for the trimming case they won't find any free space 1252 * entries because we already removed them all when we called 1253 * btrfs_remove_free_space_cache(). 1254 * 1255 * And we must not remove the chunk map from the fs_info->mapping_tree 1256 * to prevent the same logical address range and physical device space 1257 * ranges from being reused for a new block group. This is needed to 1258 * avoid races with trimming and scrub. 1259 * 1260 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is 1261 * completely transactionless, so while it is trimming a range the 1262 * currently running transaction might finish and a new one start, 1263 * allowing for new block groups to be created that can reuse the same 1264 * physical device locations unless we take this special care. 1265 * 1266 * There may also be an implicit trim operation if the file system 1267 * is mounted with -odiscard. The same protections must remain 1268 * in place until the extents have been discarded completely when 1269 * the transaction commit has completed. 1270 */ 1271 remove_map = (atomic_read(&block_group->frozen) == 0); 1272 spin_unlock(&block_group->lock); 1273 1274 if (remove_map) 1275 btrfs_remove_chunk_map(fs_info, map); 1276 1277 out: 1278 /* Once for the lookup reference */ 1279 btrfs_put_block_group(block_group); 1280 if (remove_rsv) 1281 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 1282 btrfs_free_path(path); 1283 return ret; 1284 } 1285 1286 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 1287 struct btrfs_fs_info *fs_info, const u64 chunk_offset) 1288 { 1289 struct btrfs_root *root = btrfs_block_group_root(fs_info); 1290 struct btrfs_chunk_map *map; 1291 unsigned int num_items; 1292 1293 map = btrfs_find_chunk_map(fs_info, chunk_offset, 1); 1294 ASSERT(map != NULL); 1295 ASSERT(map->start == chunk_offset); 1296 1297 /* 1298 * We need to reserve 3 + N units from the metadata space info in order 1299 * to remove a block group (done at btrfs_remove_chunk() and at 1300 * btrfs_remove_block_group()), which are used for: 1301 * 1302 * 1 unit for adding the free space inode's orphan (located in the tree 1303 * of tree roots). 1304 * 1 unit for deleting the block group item (located in the extent 1305 * tree). 1306 * 1 unit for deleting the free space item (located in tree of tree 1307 * roots). 1308 * N units for deleting N device extent items corresponding to each 1309 * stripe (located in the device tree). 1310 * 1311 * In order to remove a block group we also need to reserve units in the 1312 * system space info in order to update the chunk tree (update one or 1313 * more device items and remove one chunk item), but this is done at 1314 * btrfs_remove_chunk() through a call to check_system_chunk(). 1315 */ 1316 num_items = 3 + map->num_stripes; 1317 btrfs_free_chunk_map(map); 1318 1319 return btrfs_start_transaction_fallback_global_rsv(root, num_items); 1320 } 1321 1322 /* 1323 * Mark block group @cache read-only, so later write won't happen to block 1324 * group @cache. 1325 * 1326 * If @force is not set, this function will only mark the block group readonly 1327 * if we have enough free space (1M) in other metadata/system block groups. 1328 * If @force is not set, this function will mark the block group readonly 1329 * without checking free space. 1330 * 1331 * NOTE: This function doesn't care if other block groups can contain all the 1332 * data in this block group. That check should be done by relocation routine, 1333 * not this function. 1334 */ 1335 static int inc_block_group_ro(struct btrfs_block_group *cache, int force) 1336 { 1337 struct btrfs_space_info *sinfo = cache->space_info; 1338 u64 num_bytes; 1339 int ret = -ENOSPC; 1340 1341 spin_lock(&sinfo->lock); 1342 spin_lock(&cache->lock); 1343 1344 if (cache->swap_extents) { 1345 ret = -ETXTBSY; 1346 goto out; 1347 } 1348 1349 if (cache->ro) { 1350 cache->ro++; 1351 ret = 0; 1352 goto out; 1353 } 1354 1355 num_bytes = cache->length - cache->reserved - cache->pinned - 1356 cache->bytes_super - cache->zone_unusable - cache->used; 1357 1358 /* 1359 * Data never overcommits, even in mixed mode, so do just the straight 1360 * check of left over space in how much we have allocated. 1361 */ 1362 if (force) { 1363 ret = 0; 1364 } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { 1365 u64 sinfo_used = btrfs_space_info_used(sinfo, true); 1366 1367 /* 1368 * Here we make sure if we mark this bg RO, we still have enough 1369 * free space as buffer. 1370 */ 1371 if (sinfo_used + num_bytes <= sinfo->total_bytes) 1372 ret = 0; 1373 } else { 1374 /* 1375 * We overcommit metadata, so we need to do the 1376 * btrfs_can_overcommit check here, and we need to pass in 1377 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of 1378 * leeway to allow us to mark this block group as read only. 1379 */ 1380 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, 1381 BTRFS_RESERVE_NO_FLUSH)) 1382 ret = 0; 1383 } 1384 1385 if (!ret) { 1386 sinfo->bytes_readonly += num_bytes; 1387 if (btrfs_is_zoned(cache->fs_info)) { 1388 /* Migrate zone_unusable bytes to readonly */ 1389 sinfo->bytes_readonly += cache->zone_unusable; 1390 sinfo->bytes_zone_unusable -= cache->zone_unusable; 1391 cache->zone_unusable = 0; 1392 } 1393 cache->ro++; 1394 list_add_tail(&cache->ro_list, &sinfo->ro_bgs); 1395 } 1396 out: 1397 spin_unlock(&cache->lock); 1398 spin_unlock(&sinfo->lock); 1399 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { 1400 btrfs_info(cache->fs_info, 1401 "unable to make block group %llu ro", cache->start); 1402 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); 1403 } 1404 return ret; 1405 } 1406 1407 static bool clean_pinned_extents(struct btrfs_trans_handle *trans, 1408 struct btrfs_block_group *bg) 1409 { 1410 struct btrfs_fs_info *fs_info = bg->fs_info; 1411 struct btrfs_transaction *prev_trans = NULL; 1412 const u64 start = bg->start; 1413 const u64 end = start + bg->length - 1; 1414 int ret; 1415 1416 spin_lock(&fs_info->trans_lock); 1417 if (trans->transaction->list.prev != &fs_info->trans_list) { 1418 prev_trans = list_last_entry(&trans->transaction->list, 1419 struct btrfs_transaction, list); 1420 refcount_inc(&prev_trans->use_count); 1421 } 1422 spin_unlock(&fs_info->trans_lock); 1423 1424 /* 1425 * Hold the unused_bg_unpin_mutex lock to avoid racing with 1426 * btrfs_finish_extent_commit(). If we are at transaction N, another 1427 * task might be running finish_extent_commit() for the previous 1428 * transaction N - 1, and have seen a range belonging to the block 1429 * group in pinned_extents before we were able to clear the whole block 1430 * group range from pinned_extents. This means that task can lookup for 1431 * the block group after we unpinned it from pinned_extents and removed 1432 * it, leading to a BUG_ON() at unpin_extent_range(). 1433 */ 1434 mutex_lock(&fs_info->unused_bg_unpin_mutex); 1435 if (prev_trans) { 1436 ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, 1437 EXTENT_DIRTY); 1438 if (ret) 1439 goto out; 1440 } 1441 1442 ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, 1443 EXTENT_DIRTY); 1444 out: 1445 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1446 if (prev_trans) 1447 btrfs_put_transaction(prev_trans); 1448 1449 return ret == 0; 1450 } 1451 1452 /* 1453 * Process the unused_bgs list and remove any that don't have any allocated 1454 * space inside of them. 1455 */ 1456 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1457 { 1458 struct btrfs_block_group *block_group; 1459 struct btrfs_space_info *space_info; 1460 struct btrfs_trans_handle *trans; 1461 const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); 1462 int ret = 0; 1463 1464 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1465 return; 1466 1467 if (btrfs_fs_closing(fs_info)) 1468 return; 1469 1470 /* 1471 * Long running balances can keep us blocked here for eternity, so 1472 * simply skip deletion if we're unable to get the mutex. 1473 */ 1474 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1475 return; 1476 1477 spin_lock(&fs_info->unused_bgs_lock); 1478 while (!list_empty(&fs_info->unused_bgs)) { 1479 int trimming; 1480 1481 block_group = list_first_entry(&fs_info->unused_bgs, 1482 struct btrfs_block_group, 1483 bg_list); 1484 list_del_init(&block_group->bg_list); 1485 1486 space_info = block_group->space_info; 1487 1488 if (ret || btrfs_mixed_space_info(space_info)) { 1489 btrfs_put_block_group(block_group); 1490 continue; 1491 } 1492 spin_unlock(&fs_info->unused_bgs_lock); 1493 1494 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 1495 1496 /* Don't want to race with allocators so take the groups_sem */ 1497 down_write(&space_info->groups_sem); 1498 1499 /* 1500 * Async discard moves the final block group discard to be prior 1501 * to the unused_bgs code path. Therefore, if it's not fully 1502 * trimmed, punt it back to the async discard lists. 1503 */ 1504 if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && 1505 !btrfs_is_free_space_trimmed(block_group)) { 1506 trace_btrfs_skip_unused_block_group(block_group); 1507 up_write(&space_info->groups_sem); 1508 /* Requeue if we failed because of async discard */ 1509 btrfs_discard_queue_work(&fs_info->discard_ctl, 1510 block_group); 1511 goto next; 1512 } 1513 1514 spin_lock(&block_group->lock); 1515 if (block_group->reserved || block_group->pinned || 1516 block_group->used || block_group->ro || 1517 list_is_singular(&block_group->list)) { 1518 /* 1519 * We want to bail if we made new allocations or have 1520 * outstanding allocations in this block group. We do 1521 * the ro check in case balance is currently acting on 1522 * this block group. 1523 */ 1524 trace_btrfs_skip_unused_block_group(block_group); 1525 spin_unlock(&block_group->lock); 1526 up_write(&space_info->groups_sem); 1527 goto next; 1528 } 1529 spin_unlock(&block_group->lock); 1530 1531 /* We don't want to force the issue, only flip if it's ok. */ 1532 ret = inc_block_group_ro(block_group, 0); 1533 up_write(&space_info->groups_sem); 1534 if (ret < 0) { 1535 ret = 0; 1536 goto next; 1537 } 1538 1539 ret = btrfs_zone_finish(block_group); 1540 if (ret < 0) { 1541 btrfs_dec_block_group_ro(block_group); 1542 if (ret == -EAGAIN) 1543 ret = 0; 1544 goto next; 1545 } 1546 1547 /* 1548 * Want to do this before we do anything else so we can recover 1549 * properly if we fail to join the transaction. 1550 */ 1551 trans = btrfs_start_trans_remove_block_group(fs_info, 1552 block_group->start); 1553 if (IS_ERR(trans)) { 1554 btrfs_dec_block_group_ro(block_group); 1555 ret = PTR_ERR(trans); 1556 goto next; 1557 } 1558 1559 /* 1560 * We could have pending pinned extents for this block group, 1561 * just delete them, we don't care about them anymore. 1562 */ 1563 if (!clean_pinned_extents(trans, block_group)) { 1564 btrfs_dec_block_group_ro(block_group); 1565 goto end_trans; 1566 } 1567 1568 /* 1569 * At this point, the block_group is read only and should fail 1570 * new allocations. However, btrfs_finish_extent_commit() can 1571 * cause this block_group to be placed back on the discard 1572 * lists because now the block_group isn't fully discarded. 1573 * Bail here and try again later after discarding everything. 1574 */ 1575 spin_lock(&fs_info->discard_ctl.lock); 1576 if (!list_empty(&block_group->discard_list)) { 1577 spin_unlock(&fs_info->discard_ctl.lock); 1578 btrfs_dec_block_group_ro(block_group); 1579 btrfs_discard_queue_work(&fs_info->discard_ctl, 1580 block_group); 1581 goto end_trans; 1582 } 1583 spin_unlock(&fs_info->discard_ctl.lock); 1584 1585 /* Reset pinned so btrfs_put_block_group doesn't complain */ 1586 spin_lock(&space_info->lock); 1587 spin_lock(&block_group->lock); 1588 1589 btrfs_space_info_update_bytes_pinned(fs_info, space_info, 1590 -block_group->pinned); 1591 space_info->bytes_readonly += block_group->pinned; 1592 block_group->pinned = 0; 1593 1594 spin_unlock(&block_group->lock); 1595 spin_unlock(&space_info->lock); 1596 1597 /* 1598 * The normal path here is an unused block group is passed here, 1599 * then trimming is handled in the transaction commit path. 1600 * Async discard interposes before this to do the trimming 1601 * before coming down the unused block group path as trimming 1602 * will no longer be done later in the transaction commit path. 1603 */ 1604 if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1605 goto flip_async; 1606 1607 /* 1608 * DISCARD can flip during remount. On zoned filesystems, we 1609 * need to reset sequential-required zones. 1610 */ 1611 trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) || 1612 btrfs_is_zoned(fs_info); 1613 1614 /* Implicit trim during transaction commit. */ 1615 if (trimming) 1616 btrfs_freeze_block_group(block_group); 1617 1618 /* 1619 * Btrfs_remove_chunk will abort the transaction if things go 1620 * horribly wrong. 1621 */ 1622 ret = btrfs_remove_chunk(trans, block_group->start); 1623 1624 if (ret) { 1625 if (trimming) 1626 btrfs_unfreeze_block_group(block_group); 1627 goto end_trans; 1628 } 1629 1630 /* 1631 * If we're not mounted with -odiscard, we can just forget 1632 * about this block group. Otherwise we'll need to wait 1633 * until transaction commit to do the actual discard. 1634 */ 1635 if (trimming) { 1636 spin_lock(&fs_info->unused_bgs_lock); 1637 /* 1638 * A concurrent scrub might have added us to the list 1639 * fs_info->unused_bgs, so use a list_move operation 1640 * to add the block group to the deleted_bgs list. 1641 */ 1642 list_move(&block_group->bg_list, 1643 &trans->transaction->deleted_bgs); 1644 spin_unlock(&fs_info->unused_bgs_lock); 1645 btrfs_get_block_group(block_group); 1646 } 1647 end_trans: 1648 btrfs_end_transaction(trans); 1649 next: 1650 btrfs_put_block_group(block_group); 1651 spin_lock(&fs_info->unused_bgs_lock); 1652 } 1653 spin_unlock(&fs_info->unused_bgs_lock); 1654 mutex_unlock(&fs_info->reclaim_bgs_lock); 1655 return; 1656 1657 flip_async: 1658 btrfs_end_transaction(trans); 1659 mutex_unlock(&fs_info->reclaim_bgs_lock); 1660 btrfs_put_block_group(block_group); 1661 btrfs_discard_punt_unused_bgs_list(fs_info); 1662 } 1663 1664 void btrfs_mark_bg_unused(struct btrfs_block_group *bg) 1665 { 1666 struct btrfs_fs_info *fs_info = bg->fs_info; 1667 1668 spin_lock(&fs_info->unused_bgs_lock); 1669 if (list_empty(&bg->bg_list)) { 1670 btrfs_get_block_group(bg); 1671 trace_btrfs_add_unused_block_group(bg); 1672 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 1673 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { 1674 /* Pull out the block group from the reclaim_bgs list. */ 1675 trace_btrfs_add_unused_block_group(bg); 1676 list_move_tail(&bg->bg_list, &fs_info->unused_bgs); 1677 } 1678 spin_unlock(&fs_info->unused_bgs_lock); 1679 } 1680 1681 /* 1682 * We want block groups with a low number of used bytes to be in the beginning 1683 * of the list, so they will get reclaimed first. 1684 */ 1685 static int reclaim_bgs_cmp(void *unused, const struct list_head *a, 1686 const struct list_head *b) 1687 { 1688 const struct btrfs_block_group *bg1, *bg2; 1689 1690 bg1 = list_entry(a, struct btrfs_block_group, bg_list); 1691 bg2 = list_entry(b, struct btrfs_block_group, bg_list); 1692 1693 return bg1->used > bg2->used; 1694 } 1695 1696 static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info) 1697 { 1698 if (btrfs_is_zoned(fs_info)) 1699 return btrfs_zoned_should_reclaim(fs_info); 1700 return true; 1701 } 1702 1703 static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed) 1704 { 1705 const struct btrfs_space_info *space_info = bg->space_info; 1706 const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold); 1707 const u64 new_val = bg->used; 1708 const u64 old_val = new_val + bytes_freed; 1709 u64 thresh; 1710 1711 if (reclaim_thresh == 0) 1712 return false; 1713 1714 thresh = mult_perc(bg->length, reclaim_thresh); 1715 1716 /* 1717 * If we were below the threshold before don't reclaim, we are likely a 1718 * brand new block group and we don't want to relocate new block groups. 1719 */ 1720 if (old_val < thresh) 1721 return false; 1722 if (new_val >= thresh) 1723 return false; 1724 return true; 1725 } 1726 1727 void btrfs_reclaim_bgs_work(struct work_struct *work) 1728 { 1729 struct btrfs_fs_info *fs_info = 1730 container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 1731 struct btrfs_block_group *bg; 1732 struct btrfs_space_info *space_info; 1733 1734 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1735 return; 1736 1737 if (btrfs_fs_closing(fs_info)) 1738 return; 1739 1740 if (!btrfs_should_reclaim(fs_info)) 1741 return; 1742 1743 sb_start_write(fs_info->sb); 1744 1745 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 1746 sb_end_write(fs_info->sb); 1747 return; 1748 } 1749 1750 /* 1751 * Long running balances can keep us blocked here for eternity, so 1752 * simply skip reclaim if we're unable to get the mutex. 1753 */ 1754 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { 1755 btrfs_exclop_finish(fs_info); 1756 sb_end_write(fs_info->sb); 1757 return; 1758 } 1759 1760 spin_lock(&fs_info->unused_bgs_lock); 1761 /* 1762 * Sort happens under lock because we can't simply splice it and sort. 1763 * The block groups might still be in use and reachable via bg_list, 1764 * and their presence in the reclaim_bgs list must be preserved. 1765 */ 1766 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); 1767 while (!list_empty(&fs_info->reclaim_bgs)) { 1768 u64 zone_unusable; 1769 int ret = 0; 1770 1771 bg = list_first_entry(&fs_info->reclaim_bgs, 1772 struct btrfs_block_group, 1773 bg_list); 1774 list_del_init(&bg->bg_list); 1775 1776 space_info = bg->space_info; 1777 spin_unlock(&fs_info->unused_bgs_lock); 1778 1779 /* Don't race with allocators so take the groups_sem */ 1780 down_write(&space_info->groups_sem); 1781 1782 spin_lock(&bg->lock); 1783 if (bg->reserved || bg->pinned || bg->ro) { 1784 /* 1785 * We want to bail if we made new allocations or have 1786 * outstanding allocations in this block group. We do 1787 * the ro check in case balance is currently acting on 1788 * this block group. 1789 */ 1790 spin_unlock(&bg->lock); 1791 up_write(&space_info->groups_sem); 1792 goto next; 1793 } 1794 if (bg->used == 0) { 1795 /* 1796 * It is possible that we trigger relocation on a block 1797 * group as its extents are deleted and it first goes 1798 * below the threshold, then shortly after goes empty. 1799 * 1800 * In this case, relocating it does delete it, but has 1801 * some overhead in relocation specific metadata, looking 1802 * for the non-existent extents and running some extra 1803 * transactions, which we can avoid by using one of the 1804 * other mechanisms for dealing with empty block groups. 1805 */ 1806 if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1807 btrfs_mark_bg_unused(bg); 1808 spin_unlock(&bg->lock); 1809 up_write(&space_info->groups_sem); 1810 goto next; 1811 1812 } 1813 /* 1814 * The block group might no longer meet the reclaim condition by 1815 * the time we get around to reclaiming it, so to avoid 1816 * reclaiming overly full block_groups, skip reclaiming them. 1817 * 1818 * Since the decision making process also depends on the amount 1819 * being freed, pass in a fake giant value to skip that extra 1820 * check, which is more meaningful when adding to the list in 1821 * the first place. 1822 */ 1823 if (!should_reclaim_block_group(bg, bg->length)) { 1824 spin_unlock(&bg->lock); 1825 up_write(&space_info->groups_sem); 1826 goto next; 1827 } 1828 spin_unlock(&bg->lock); 1829 1830 /* 1831 * Get out fast, in case we're read-only or unmounting the 1832 * filesystem. It is OK to drop block groups from the list even 1833 * for the read-only case. As we did sb_start_write(), 1834 * "mount -o remount,ro" won't happen and read-only filesystem 1835 * means it is forced read-only due to a fatal error. So, it 1836 * never gets back to read-write to let us reclaim again. 1837 */ 1838 if (btrfs_need_cleaner_sleep(fs_info)) { 1839 up_write(&space_info->groups_sem); 1840 goto next; 1841 } 1842 1843 /* 1844 * Cache the zone_unusable value before turning the block group 1845 * to read only. As soon as the blog group is read only it's 1846 * zone_unusable value gets moved to the block group's read-only 1847 * bytes and isn't available for calculations anymore. 1848 */ 1849 zone_unusable = bg->zone_unusable; 1850 ret = inc_block_group_ro(bg, 0); 1851 up_write(&space_info->groups_sem); 1852 if (ret < 0) 1853 goto next; 1854 1855 btrfs_info(fs_info, 1856 "reclaiming chunk %llu with %llu%% used %llu%% unusable", 1857 bg->start, 1858 div64_u64(bg->used * 100, bg->length), 1859 div64_u64(zone_unusable * 100, bg->length)); 1860 trace_btrfs_reclaim_block_group(bg); 1861 ret = btrfs_relocate_chunk(fs_info, bg->start); 1862 if (ret) { 1863 btrfs_dec_block_group_ro(bg); 1864 btrfs_err(fs_info, "error relocating chunk %llu", 1865 bg->start); 1866 } 1867 1868 next: 1869 if (ret) 1870 btrfs_mark_bg_to_reclaim(bg); 1871 btrfs_put_block_group(bg); 1872 1873 mutex_unlock(&fs_info->reclaim_bgs_lock); 1874 /* 1875 * Reclaiming all the block groups in the list can take really 1876 * long. Prioritize cleaning up unused block groups. 1877 */ 1878 btrfs_delete_unused_bgs(fs_info); 1879 /* 1880 * If we are interrupted by a balance, we can just bail out. The 1881 * cleaner thread restart again if necessary. 1882 */ 1883 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1884 goto end; 1885 spin_lock(&fs_info->unused_bgs_lock); 1886 } 1887 spin_unlock(&fs_info->unused_bgs_lock); 1888 mutex_unlock(&fs_info->reclaim_bgs_lock); 1889 end: 1890 btrfs_exclop_finish(fs_info); 1891 sb_end_write(fs_info->sb); 1892 } 1893 1894 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 1895 { 1896 spin_lock(&fs_info->unused_bgs_lock); 1897 if (!list_empty(&fs_info->reclaim_bgs)) 1898 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); 1899 spin_unlock(&fs_info->unused_bgs_lock); 1900 } 1901 1902 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 1903 { 1904 struct btrfs_fs_info *fs_info = bg->fs_info; 1905 1906 spin_lock(&fs_info->unused_bgs_lock); 1907 if (list_empty(&bg->bg_list)) { 1908 btrfs_get_block_group(bg); 1909 trace_btrfs_add_reclaim_block_group(bg); 1910 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); 1911 } 1912 spin_unlock(&fs_info->unused_bgs_lock); 1913 } 1914 1915 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 1916 struct btrfs_path *path) 1917 { 1918 struct btrfs_chunk_map *map; 1919 struct btrfs_block_group_item bg; 1920 struct extent_buffer *leaf; 1921 int slot; 1922 u64 flags; 1923 int ret = 0; 1924 1925 slot = path->slots[0]; 1926 leaf = path->nodes[0]; 1927 1928 map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset); 1929 if (!map) { 1930 btrfs_err(fs_info, 1931 "logical %llu len %llu found bg but no related chunk", 1932 key->objectid, key->offset); 1933 return -ENOENT; 1934 } 1935 1936 if (map->start != key->objectid || map->chunk_len != key->offset) { 1937 btrfs_err(fs_info, 1938 "block group %llu len %llu mismatch with chunk %llu len %llu", 1939 key->objectid, key->offset, map->start, map->chunk_len); 1940 ret = -EUCLEAN; 1941 goto out_free_map; 1942 } 1943 1944 read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 1945 sizeof(bg)); 1946 flags = btrfs_stack_block_group_flags(&bg) & 1947 BTRFS_BLOCK_GROUP_TYPE_MASK; 1948 1949 if (flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 1950 btrfs_err(fs_info, 1951 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 1952 key->objectid, key->offset, flags, 1953 (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type)); 1954 ret = -EUCLEAN; 1955 } 1956 1957 out_free_map: 1958 btrfs_free_chunk_map(map); 1959 return ret; 1960 } 1961 1962 static int find_first_block_group(struct btrfs_fs_info *fs_info, 1963 struct btrfs_path *path, 1964 struct btrfs_key *key) 1965 { 1966 struct btrfs_root *root = btrfs_block_group_root(fs_info); 1967 int ret; 1968 struct btrfs_key found_key; 1969 1970 btrfs_for_each_slot(root, key, &found_key, path, ret) { 1971 if (found_key.objectid >= key->objectid && 1972 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 1973 return read_bg_from_eb(fs_info, &found_key, path); 1974 } 1975 } 1976 return ret; 1977 } 1978 1979 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 1980 { 1981 u64 extra_flags = chunk_to_extended(flags) & 1982 BTRFS_EXTENDED_PROFILE_MASK; 1983 1984 write_seqlock(&fs_info->profiles_lock); 1985 if (flags & BTRFS_BLOCK_GROUP_DATA) 1986 fs_info->avail_data_alloc_bits |= extra_flags; 1987 if (flags & BTRFS_BLOCK_GROUP_METADATA) 1988 fs_info->avail_metadata_alloc_bits |= extra_flags; 1989 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 1990 fs_info->avail_system_alloc_bits |= extra_flags; 1991 write_sequnlock(&fs_info->profiles_lock); 1992 } 1993 1994 /* 1995 * Map a physical disk address to a list of logical addresses. 1996 * 1997 * @fs_info: the filesystem 1998 * @chunk_start: logical address of block group 1999 * @physical: physical address to map to logical addresses 2000 * @logical: return array of logical addresses which map to @physical 2001 * @naddrs: length of @logical 2002 * @stripe_len: size of IO stripe for the given block group 2003 * 2004 * Maps a particular @physical disk address to a list of @logical addresses. 2005 * Used primarily to exclude those portions of a block group that contain super 2006 * block copies. 2007 */ 2008 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 2009 u64 physical, u64 **logical, int *naddrs, int *stripe_len) 2010 { 2011 struct btrfs_chunk_map *map; 2012 u64 *buf; 2013 u64 bytenr; 2014 u64 data_stripe_length; 2015 u64 io_stripe_size; 2016 int i, nr = 0; 2017 int ret = 0; 2018 2019 map = btrfs_get_chunk_map(fs_info, chunk_start, 1); 2020 if (IS_ERR(map)) 2021 return -EIO; 2022 2023 data_stripe_length = map->stripe_size; 2024 io_stripe_size = BTRFS_STRIPE_LEN; 2025 chunk_start = map->start; 2026 2027 /* For RAID5/6 adjust to a full IO stripe length */ 2028 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2029 io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 2030 2031 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 2032 if (!buf) { 2033 ret = -ENOMEM; 2034 goto out; 2035 } 2036 2037 for (i = 0; i < map->num_stripes; i++) { 2038 bool already_inserted = false; 2039 u32 stripe_nr; 2040 u32 offset; 2041 int j; 2042 2043 if (!in_range(physical, map->stripes[i].physical, 2044 data_stripe_length)) 2045 continue; 2046 2047 stripe_nr = (physical - map->stripes[i].physical) >> 2048 BTRFS_STRIPE_LEN_SHIFT; 2049 offset = (physical - map->stripes[i].physical) & 2050 BTRFS_STRIPE_LEN_MASK; 2051 2052 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2053 BTRFS_BLOCK_GROUP_RAID10)) 2054 stripe_nr = div_u64(stripe_nr * map->num_stripes + i, 2055 map->sub_stripes); 2056 /* 2057 * The remaining case would be for RAID56, multiply by 2058 * nr_data_stripes(). Alternatively, just use rmap_len below 2059 * instead of map->stripe_len 2060 */ 2061 bytenr = chunk_start + stripe_nr * io_stripe_size + offset; 2062 2063 /* Ensure we don't add duplicate addresses */ 2064 for (j = 0; j < nr; j++) { 2065 if (buf[j] == bytenr) { 2066 already_inserted = true; 2067 break; 2068 } 2069 } 2070 2071 if (!already_inserted) 2072 buf[nr++] = bytenr; 2073 } 2074 2075 *logical = buf; 2076 *naddrs = nr; 2077 *stripe_len = io_stripe_size; 2078 out: 2079 btrfs_free_chunk_map(map); 2080 return ret; 2081 } 2082 2083 static int exclude_super_stripes(struct btrfs_block_group *cache) 2084 { 2085 struct btrfs_fs_info *fs_info = cache->fs_info; 2086 const bool zoned = btrfs_is_zoned(fs_info); 2087 u64 bytenr; 2088 u64 *logical; 2089 int stripe_len; 2090 int i, nr, ret; 2091 2092 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 2093 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 2094 cache->bytes_super += stripe_len; 2095 ret = set_extent_bit(&fs_info->excluded_extents, cache->start, 2096 cache->start + stripe_len - 1, 2097 EXTENT_UPTODATE, NULL); 2098 if (ret) 2099 return ret; 2100 } 2101 2102 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2103 bytenr = btrfs_sb_offset(i); 2104 ret = btrfs_rmap_block(fs_info, cache->start, 2105 bytenr, &logical, &nr, &stripe_len); 2106 if (ret) 2107 return ret; 2108 2109 /* Shouldn't have super stripes in sequential zones */ 2110 if (zoned && nr) { 2111 kfree(logical); 2112 btrfs_err(fs_info, 2113 "zoned: block group %llu must not contain super block", 2114 cache->start); 2115 return -EUCLEAN; 2116 } 2117 2118 while (nr--) { 2119 u64 len = min_t(u64, stripe_len, 2120 cache->start + cache->length - logical[nr]); 2121 2122 cache->bytes_super += len; 2123 ret = set_extent_bit(&fs_info->excluded_extents, logical[nr], 2124 logical[nr] + len - 1, 2125 EXTENT_UPTODATE, NULL); 2126 if (ret) { 2127 kfree(logical); 2128 return ret; 2129 } 2130 } 2131 2132 kfree(logical); 2133 } 2134 return 0; 2135 } 2136 2137 static struct btrfs_block_group *btrfs_create_block_group_cache( 2138 struct btrfs_fs_info *fs_info, u64 start) 2139 { 2140 struct btrfs_block_group *cache; 2141 2142 cache = kzalloc(sizeof(*cache), GFP_NOFS); 2143 if (!cache) 2144 return NULL; 2145 2146 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 2147 GFP_NOFS); 2148 if (!cache->free_space_ctl) { 2149 kfree(cache); 2150 return NULL; 2151 } 2152 2153 cache->start = start; 2154 2155 cache->fs_info = fs_info; 2156 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 2157 2158 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 2159 2160 refcount_set(&cache->refs, 1); 2161 spin_lock_init(&cache->lock); 2162 init_rwsem(&cache->data_rwsem); 2163 INIT_LIST_HEAD(&cache->list); 2164 INIT_LIST_HEAD(&cache->cluster_list); 2165 INIT_LIST_HEAD(&cache->bg_list); 2166 INIT_LIST_HEAD(&cache->ro_list); 2167 INIT_LIST_HEAD(&cache->discard_list); 2168 INIT_LIST_HEAD(&cache->dirty_list); 2169 INIT_LIST_HEAD(&cache->io_list); 2170 INIT_LIST_HEAD(&cache->active_bg_list); 2171 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 2172 atomic_set(&cache->frozen, 0); 2173 mutex_init(&cache->free_space_lock); 2174 2175 return cache; 2176 } 2177 2178 /* 2179 * Iterate all chunks and verify that each of them has the corresponding block 2180 * group 2181 */ 2182 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 2183 { 2184 u64 start = 0; 2185 int ret = 0; 2186 2187 while (1) { 2188 struct btrfs_chunk_map *map; 2189 struct btrfs_block_group *bg; 2190 2191 /* 2192 * btrfs_find_chunk_map() will return the first chunk map 2193 * intersecting the range, so setting @length to 1 is enough to 2194 * get the first chunk. 2195 */ 2196 map = btrfs_find_chunk_map(fs_info, start, 1); 2197 if (!map) 2198 break; 2199 2200 bg = btrfs_lookup_block_group(fs_info, map->start); 2201 if (!bg) { 2202 btrfs_err(fs_info, 2203 "chunk start=%llu len=%llu doesn't have corresponding block group", 2204 map->start, map->chunk_len); 2205 ret = -EUCLEAN; 2206 btrfs_free_chunk_map(map); 2207 break; 2208 } 2209 if (bg->start != map->start || bg->length != map->chunk_len || 2210 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 2211 (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 2212 btrfs_err(fs_info, 2213 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 2214 map->start, map->chunk_len, 2215 map->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 2216 bg->start, bg->length, 2217 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 2218 ret = -EUCLEAN; 2219 btrfs_free_chunk_map(map); 2220 btrfs_put_block_group(bg); 2221 break; 2222 } 2223 start = map->start + map->chunk_len; 2224 btrfs_free_chunk_map(map); 2225 btrfs_put_block_group(bg); 2226 } 2227 return ret; 2228 } 2229 2230 static int read_one_block_group(struct btrfs_fs_info *info, 2231 struct btrfs_block_group_item *bgi, 2232 const struct btrfs_key *key, 2233 int need_clear) 2234 { 2235 struct btrfs_block_group *cache; 2236 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 2237 int ret; 2238 2239 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 2240 2241 cache = btrfs_create_block_group_cache(info, key->objectid); 2242 if (!cache) 2243 return -ENOMEM; 2244 2245 cache->length = key->offset; 2246 cache->used = btrfs_stack_block_group_used(bgi); 2247 cache->commit_used = cache->used; 2248 cache->flags = btrfs_stack_block_group_flags(bgi); 2249 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); 2250 2251 set_free_space_tree_thresholds(cache); 2252 2253 if (need_clear) { 2254 /* 2255 * When we mount with old space cache, we need to 2256 * set BTRFS_DC_CLEAR and set dirty flag. 2257 * 2258 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 2259 * truncate the old free space cache inode and 2260 * setup a new one. 2261 * b) Setting 'dirty flag' makes sure that we flush 2262 * the new space cache info onto disk. 2263 */ 2264 if (btrfs_test_opt(info, SPACE_CACHE)) 2265 cache->disk_cache_state = BTRFS_DC_CLEAR; 2266 } 2267 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 2268 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 2269 btrfs_err(info, 2270 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 2271 cache->start); 2272 ret = -EINVAL; 2273 goto error; 2274 } 2275 2276 ret = btrfs_load_block_group_zone_info(cache, false); 2277 if (ret) { 2278 btrfs_err(info, "zoned: failed to load zone info of bg %llu", 2279 cache->start); 2280 goto error; 2281 } 2282 2283 /* 2284 * We need to exclude the super stripes now so that the space info has 2285 * super bytes accounted for, otherwise we'll think we have more space 2286 * than we actually do. 2287 */ 2288 ret = exclude_super_stripes(cache); 2289 if (ret) { 2290 /* We may have excluded something, so call this just in case. */ 2291 btrfs_free_excluded_extents(cache); 2292 goto error; 2293 } 2294 2295 /* 2296 * For zoned filesystem, space after the allocation offset is the only 2297 * free space for a block group. So, we don't need any caching work. 2298 * btrfs_calc_zone_unusable() will set the amount of free space and 2299 * zone_unusable space. 2300 * 2301 * For regular filesystem, check for two cases, either we are full, and 2302 * therefore don't need to bother with the caching work since we won't 2303 * find any space, or we are empty, and we can just add all the space 2304 * in and be done with it. This saves us _a_lot_ of time, particularly 2305 * in the full case. 2306 */ 2307 if (btrfs_is_zoned(info)) { 2308 btrfs_calc_zone_unusable(cache); 2309 /* Should not have any excluded extents. Just in case, though. */ 2310 btrfs_free_excluded_extents(cache); 2311 } else if (cache->length == cache->used) { 2312 cache->cached = BTRFS_CACHE_FINISHED; 2313 btrfs_free_excluded_extents(cache); 2314 } else if (cache->used == 0) { 2315 cache->cached = BTRFS_CACHE_FINISHED; 2316 ret = btrfs_add_new_free_space(cache, cache->start, 2317 cache->start + cache->length, NULL); 2318 btrfs_free_excluded_extents(cache); 2319 if (ret) 2320 goto error; 2321 } 2322 2323 ret = btrfs_add_block_group_cache(info, cache); 2324 if (ret) { 2325 btrfs_remove_free_space_cache(cache); 2326 goto error; 2327 } 2328 trace_btrfs_add_block_group(info, cache, 0); 2329 btrfs_add_bg_to_space_info(info, cache); 2330 2331 set_avail_alloc_bits(info, cache->flags); 2332 if (btrfs_chunk_writeable(info, cache->start)) { 2333 if (cache->used == 0) { 2334 ASSERT(list_empty(&cache->bg_list)); 2335 if (btrfs_test_opt(info, DISCARD_ASYNC)) 2336 btrfs_discard_queue_work(&info->discard_ctl, cache); 2337 else 2338 btrfs_mark_bg_unused(cache); 2339 } 2340 } else { 2341 inc_block_group_ro(cache, 1); 2342 } 2343 2344 return 0; 2345 error: 2346 btrfs_put_block_group(cache); 2347 return ret; 2348 } 2349 2350 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 2351 { 2352 struct rb_node *node; 2353 int ret = 0; 2354 2355 for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) { 2356 struct btrfs_chunk_map *map; 2357 struct btrfs_block_group *bg; 2358 2359 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 2360 bg = btrfs_create_block_group_cache(fs_info, map->start); 2361 if (!bg) { 2362 ret = -ENOMEM; 2363 break; 2364 } 2365 2366 /* Fill dummy cache as FULL */ 2367 bg->length = map->chunk_len; 2368 bg->flags = map->type; 2369 bg->cached = BTRFS_CACHE_FINISHED; 2370 bg->used = map->chunk_len; 2371 bg->flags = map->type; 2372 ret = btrfs_add_block_group_cache(fs_info, bg); 2373 /* 2374 * We may have some valid block group cache added already, in 2375 * that case we skip to the next one. 2376 */ 2377 if (ret == -EEXIST) { 2378 ret = 0; 2379 btrfs_put_block_group(bg); 2380 continue; 2381 } 2382 2383 if (ret) { 2384 btrfs_remove_free_space_cache(bg); 2385 btrfs_put_block_group(bg); 2386 break; 2387 } 2388 2389 btrfs_add_bg_to_space_info(fs_info, bg); 2390 2391 set_avail_alloc_bits(fs_info, bg->flags); 2392 } 2393 if (!ret) 2394 btrfs_init_global_block_rsv(fs_info); 2395 return ret; 2396 } 2397 2398 int btrfs_read_block_groups(struct btrfs_fs_info *info) 2399 { 2400 struct btrfs_root *root = btrfs_block_group_root(info); 2401 struct btrfs_path *path; 2402 int ret; 2403 struct btrfs_block_group *cache; 2404 struct btrfs_space_info *space_info; 2405 struct btrfs_key key; 2406 int need_clear = 0; 2407 u64 cache_gen; 2408 2409 /* 2410 * Either no extent root (with ibadroots rescue option) or we have 2411 * unsupported RO options. The fs can never be mounted read-write, so no 2412 * need to waste time searching block group items. 2413 * 2414 * This also allows new extent tree related changes to be RO compat, 2415 * no need for a full incompat flag. 2416 */ 2417 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & 2418 ~BTRFS_FEATURE_COMPAT_RO_SUPP)) 2419 return fill_dummy_bgs(info); 2420 2421 key.objectid = 0; 2422 key.offset = 0; 2423 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2424 path = btrfs_alloc_path(); 2425 if (!path) 2426 return -ENOMEM; 2427 2428 cache_gen = btrfs_super_cache_generation(info->super_copy); 2429 if (btrfs_test_opt(info, SPACE_CACHE) && 2430 btrfs_super_generation(info->super_copy) != cache_gen) 2431 need_clear = 1; 2432 if (btrfs_test_opt(info, CLEAR_CACHE)) 2433 need_clear = 1; 2434 2435 while (1) { 2436 struct btrfs_block_group_item bgi; 2437 struct extent_buffer *leaf; 2438 int slot; 2439 2440 ret = find_first_block_group(info, path, &key); 2441 if (ret > 0) 2442 break; 2443 if (ret != 0) 2444 goto error; 2445 2446 leaf = path->nodes[0]; 2447 slot = path->slots[0]; 2448 2449 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 2450 sizeof(bgi)); 2451 2452 btrfs_item_key_to_cpu(leaf, &key, slot); 2453 btrfs_release_path(path); 2454 ret = read_one_block_group(info, &bgi, &key, need_clear); 2455 if (ret < 0) 2456 goto error; 2457 key.objectid += key.offset; 2458 key.offset = 0; 2459 } 2460 btrfs_release_path(path); 2461 2462 list_for_each_entry(space_info, &info->space_info, list) { 2463 int i; 2464 2465 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2466 if (list_empty(&space_info->block_groups[i])) 2467 continue; 2468 cache = list_first_entry(&space_info->block_groups[i], 2469 struct btrfs_block_group, 2470 list); 2471 btrfs_sysfs_add_block_group_type(cache); 2472 } 2473 2474 if (!(btrfs_get_alloc_profile(info, space_info->flags) & 2475 (BTRFS_BLOCK_GROUP_RAID10 | 2476 BTRFS_BLOCK_GROUP_RAID1_MASK | 2477 BTRFS_BLOCK_GROUP_RAID56_MASK | 2478 BTRFS_BLOCK_GROUP_DUP))) 2479 continue; 2480 /* 2481 * Avoid allocating from un-mirrored block group if there are 2482 * mirrored block groups. 2483 */ 2484 list_for_each_entry(cache, 2485 &space_info->block_groups[BTRFS_RAID_RAID0], 2486 list) 2487 inc_block_group_ro(cache, 1); 2488 list_for_each_entry(cache, 2489 &space_info->block_groups[BTRFS_RAID_SINGLE], 2490 list) 2491 inc_block_group_ro(cache, 1); 2492 } 2493 2494 btrfs_init_global_block_rsv(info); 2495 ret = check_chunk_block_group_mappings(info); 2496 error: 2497 btrfs_free_path(path); 2498 /* 2499 * We've hit some error while reading the extent tree, and have 2500 * rescue=ibadroots mount option. 2501 * Try to fill the tree using dummy block groups so that the user can 2502 * continue to mount and grab their data. 2503 */ 2504 if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) 2505 ret = fill_dummy_bgs(info); 2506 return ret; 2507 } 2508 2509 /* 2510 * This function, insert_block_group_item(), belongs to the phase 2 of chunk 2511 * allocation. 2512 * 2513 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2514 * phases. 2515 */ 2516 static int insert_block_group_item(struct btrfs_trans_handle *trans, 2517 struct btrfs_block_group *block_group) 2518 { 2519 struct btrfs_fs_info *fs_info = trans->fs_info; 2520 struct btrfs_block_group_item bgi; 2521 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2522 struct btrfs_key key; 2523 u64 old_commit_used; 2524 int ret; 2525 2526 spin_lock(&block_group->lock); 2527 btrfs_set_stack_block_group_used(&bgi, block_group->used); 2528 btrfs_set_stack_block_group_chunk_objectid(&bgi, 2529 block_group->global_root_id); 2530 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 2531 old_commit_used = block_group->commit_used; 2532 block_group->commit_used = block_group->used; 2533 key.objectid = block_group->start; 2534 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2535 key.offset = block_group->length; 2536 spin_unlock(&block_group->lock); 2537 2538 ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 2539 if (ret < 0) { 2540 spin_lock(&block_group->lock); 2541 block_group->commit_used = old_commit_used; 2542 spin_unlock(&block_group->lock); 2543 } 2544 2545 return ret; 2546 } 2547 2548 static int insert_dev_extent(struct btrfs_trans_handle *trans, 2549 struct btrfs_device *device, u64 chunk_offset, 2550 u64 start, u64 num_bytes) 2551 { 2552 struct btrfs_fs_info *fs_info = device->fs_info; 2553 struct btrfs_root *root = fs_info->dev_root; 2554 struct btrfs_path *path; 2555 struct btrfs_dev_extent *extent; 2556 struct extent_buffer *leaf; 2557 struct btrfs_key key; 2558 int ret; 2559 2560 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 2561 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 2562 path = btrfs_alloc_path(); 2563 if (!path) 2564 return -ENOMEM; 2565 2566 key.objectid = device->devid; 2567 key.type = BTRFS_DEV_EXTENT_KEY; 2568 key.offset = start; 2569 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); 2570 if (ret) 2571 goto out; 2572 2573 leaf = path->nodes[0]; 2574 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); 2575 btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); 2576 btrfs_set_dev_extent_chunk_objectid(leaf, extent, 2577 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2578 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 2579 2580 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 2581 btrfs_mark_buffer_dirty(trans, leaf); 2582 out: 2583 btrfs_free_path(path); 2584 return ret; 2585 } 2586 2587 /* 2588 * This function belongs to phase 2. 2589 * 2590 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2591 * phases. 2592 */ 2593 static int insert_dev_extents(struct btrfs_trans_handle *trans, 2594 u64 chunk_offset, u64 chunk_size) 2595 { 2596 struct btrfs_fs_info *fs_info = trans->fs_info; 2597 struct btrfs_device *device; 2598 struct btrfs_chunk_map *map; 2599 u64 dev_offset; 2600 int i; 2601 int ret = 0; 2602 2603 map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 2604 if (IS_ERR(map)) 2605 return PTR_ERR(map); 2606 2607 /* 2608 * Take the device list mutex to prevent races with the final phase of 2609 * a device replace operation that replaces the device object associated 2610 * with the map's stripes, because the device object's id can change 2611 * at any time during that final phase of the device replace operation 2612 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 2613 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 2614 * resulting in persisting a device extent item with such ID. 2615 */ 2616 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2617 for (i = 0; i < map->num_stripes; i++) { 2618 device = map->stripes[i].dev; 2619 dev_offset = map->stripes[i].physical; 2620 2621 ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, 2622 map->stripe_size); 2623 if (ret) 2624 break; 2625 } 2626 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2627 2628 btrfs_free_chunk_map(map); 2629 return ret; 2630 } 2631 2632 /* 2633 * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of 2634 * chunk allocation. 2635 * 2636 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2637 * phases. 2638 */ 2639 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 2640 { 2641 struct btrfs_fs_info *fs_info = trans->fs_info; 2642 struct btrfs_block_group *block_group; 2643 int ret = 0; 2644 2645 while (!list_empty(&trans->new_bgs)) { 2646 int index; 2647 2648 block_group = list_first_entry(&trans->new_bgs, 2649 struct btrfs_block_group, 2650 bg_list); 2651 if (ret) 2652 goto next; 2653 2654 index = btrfs_bg_flags_to_raid_index(block_group->flags); 2655 2656 ret = insert_block_group_item(trans, block_group); 2657 if (ret) 2658 btrfs_abort_transaction(trans, ret); 2659 if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, 2660 &block_group->runtime_flags)) { 2661 mutex_lock(&fs_info->chunk_mutex); 2662 ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); 2663 mutex_unlock(&fs_info->chunk_mutex); 2664 if (ret) 2665 btrfs_abort_transaction(trans, ret); 2666 } 2667 ret = insert_dev_extents(trans, block_group->start, 2668 block_group->length); 2669 if (ret) 2670 btrfs_abort_transaction(trans, ret); 2671 add_block_group_free_space(trans, block_group); 2672 2673 /* 2674 * If we restriped during balance, we may have added a new raid 2675 * type, so now add the sysfs entries when it is safe to do so. 2676 * We don't have to worry about locking here as it's handled in 2677 * btrfs_sysfs_add_block_group_type. 2678 */ 2679 if (block_group->space_info->block_group_kobjs[index] == NULL) 2680 btrfs_sysfs_add_block_group_type(block_group); 2681 2682 /* Already aborted the transaction if it failed. */ 2683 next: 2684 btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); 2685 list_del_init(&block_group->bg_list); 2686 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); 2687 } 2688 btrfs_trans_release_chunk_metadata(trans); 2689 } 2690 2691 /* 2692 * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2693 * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 2694 */ 2695 static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) 2696 { 2697 u64 div = SZ_1G; 2698 u64 index; 2699 2700 if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) 2701 return BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2702 2703 /* If we have a smaller fs index based on 128MiB. */ 2704 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) 2705 div = SZ_128M; 2706 2707 offset = div64_u64(offset, div); 2708 div64_u64_rem(offset, fs_info->nr_global_roots, &index); 2709 return index; 2710 } 2711 2712 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, 2713 u64 type, 2714 u64 chunk_offset, u64 size) 2715 { 2716 struct btrfs_fs_info *fs_info = trans->fs_info; 2717 struct btrfs_block_group *cache; 2718 int ret; 2719 2720 btrfs_set_log_full_commit(trans); 2721 2722 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 2723 if (!cache) 2724 return ERR_PTR(-ENOMEM); 2725 2726 /* 2727 * Mark it as new before adding it to the rbtree of block groups or any 2728 * list, so that no other task finds it and calls btrfs_mark_bg_unused() 2729 * before the new flag is set. 2730 */ 2731 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); 2732 2733 cache->length = size; 2734 set_free_space_tree_thresholds(cache); 2735 cache->flags = type; 2736 cache->cached = BTRFS_CACHE_FINISHED; 2737 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2738 2739 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 2740 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); 2741 2742 ret = btrfs_load_block_group_zone_info(cache, true); 2743 if (ret) { 2744 btrfs_put_block_group(cache); 2745 return ERR_PTR(ret); 2746 } 2747 2748 ret = exclude_super_stripes(cache); 2749 if (ret) { 2750 /* We may have excluded something, so call this just in case */ 2751 btrfs_free_excluded_extents(cache); 2752 btrfs_put_block_group(cache); 2753 return ERR_PTR(ret); 2754 } 2755 2756 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); 2757 btrfs_free_excluded_extents(cache); 2758 if (ret) { 2759 btrfs_put_block_group(cache); 2760 return ERR_PTR(ret); 2761 } 2762 2763 /* 2764 * Ensure the corresponding space_info object is created and 2765 * assigned to our block group. We want our bg to be added to the rbtree 2766 * with its ->space_info set. 2767 */ 2768 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 2769 ASSERT(cache->space_info); 2770 2771 ret = btrfs_add_block_group_cache(fs_info, cache); 2772 if (ret) { 2773 btrfs_remove_free_space_cache(cache); 2774 btrfs_put_block_group(cache); 2775 return ERR_PTR(ret); 2776 } 2777 2778 /* 2779 * Now that our block group has its ->space_info set and is inserted in 2780 * the rbtree, update the space info's counters. 2781 */ 2782 trace_btrfs_add_block_group(fs_info, cache, 1); 2783 btrfs_add_bg_to_space_info(fs_info, cache); 2784 btrfs_update_global_block_rsv(fs_info); 2785 2786 #ifdef CONFIG_BTRFS_DEBUG 2787 if (btrfs_should_fragment_free_space(cache)) { 2788 cache->space_info->bytes_used += size >> 1; 2789 fragment_free_space(cache); 2790 } 2791 #endif 2792 2793 list_add_tail(&cache->bg_list, &trans->new_bgs); 2794 btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info); 2795 2796 set_avail_alloc_bits(fs_info, type); 2797 return cache; 2798 } 2799 2800 /* 2801 * Mark one block group RO, can be called several times for the same block 2802 * group. 2803 * 2804 * @cache: the destination block group 2805 * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to 2806 * ensure we still have some free space after marking this 2807 * block group RO. 2808 */ 2809 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2810 bool do_chunk_alloc) 2811 { 2812 struct btrfs_fs_info *fs_info = cache->fs_info; 2813 struct btrfs_trans_handle *trans; 2814 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2815 u64 alloc_flags; 2816 int ret; 2817 bool dirty_bg_running; 2818 2819 /* 2820 * This can only happen when we are doing read-only scrub on read-only 2821 * mount. 2822 * In that case we should not start a new transaction on read-only fs. 2823 * Thus here we skip all chunk allocations. 2824 */ 2825 if (sb_rdonly(fs_info->sb)) { 2826 mutex_lock(&fs_info->ro_block_group_mutex); 2827 ret = inc_block_group_ro(cache, 0); 2828 mutex_unlock(&fs_info->ro_block_group_mutex); 2829 return ret; 2830 } 2831 2832 do { 2833 trans = btrfs_join_transaction(root); 2834 if (IS_ERR(trans)) 2835 return PTR_ERR(trans); 2836 2837 dirty_bg_running = false; 2838 2839 /* 2840 * We're not allowed to set block groups readonly after the dirty 2841 * block group cache has started writing. If it already started, 2842 * back off and let this transaction commit. 2843 */ 2844 mutex_lock(&fs_info->ro_block_group_mutex); 2845 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 2846 u64 transid = trans->transid; 2847 2848 mutex_unlock(&fs_info->ro_block_group_mutex); 2849 btrfs_end_transaction(trans); 2850 2851 ret = btrfs_wait_for_commit(fs_info, transid); 2852 if (ret) 2853 return ret; 2854 dirty_bg_running = true; 2855 } 2856 } while (dirty_bg_running); 2857 2858 if (do_chunk_alloc) { 2859 /* 2860 * If we are changing raid levels, try to allocate a 2861 * corresponding block group with the new raid level. 2862 */ 2863 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 2864 if (alloc_flags != cache->flags) { 2865 ret = btrfs_chunk_alloc(trans, alloc_flags, 2866 CHUNK_ALLOC_FORCE); 2867 /* 2868 * ENOSPC is allowed here, we may have enough space 2869 * already allocated at the new raid level to carry on 2870 */ 2871 if (ret == -ENOSPC) 2872 ret = 0; 2873 if (ret < 0) 2874 goto out; 2875 } 2876 } 2877 2878 ret = inc_block_group_ro(cache, 0); 2879 if (!ret) 2880 goto out; 2881 if (ret == -ETXTBSY) 2882 goto unlock_out; 2883 2884 /* 2885 * Skip chunk allocation if the bg is SYSTEM, this is to avoid system 2886 * chunk allocation storm to exhaust the system chunk array. Otherwise 2887 * we still want to try our best to mark the block group read-only. 2888 */ 2889 if (!do_chunk_alloc && ret == -ENOSPC && 2890 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) 2891 goto unlock_out; 2892 2893 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 2894 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 2895 if (ret < 0) 2896 goto out; 2897 /* 2898 * We have allocated a new chunk. We also need to activate that chunk to 2899 * grant metadata tickets for zoned filesystem. 2900 */ 2901 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); 2902 if (ret < 0) 2903 goto out; 2904 2905 ret = inc_block_group_ro(cache, 0); 2906 if (ret == -ETXTBSY) 2907 goto unlock_out; 2908 out: 2909 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 2910 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 2911 mutex_lock(&fs_info->chunk_mutex); 2912 check_system_chunk(trans, alloc_flags); 2913 mutex_unlock(&fs_info->chunk_mutex); 2914 } 2915 unlock_out: 2916 mutex_unlock(&fs_info->ro_block_group_mutex); 2917 2918 btrfs_end_transaction(trans); 2919 return ret; 2920 } 2921 2922 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 2923 { 2924 struct btrfs_space_info *sinfo = cache->space_info; 2925 u64 num_bytes; 2926 2927 BUG_ON(!cache->ro); 2928 2929 spin_lock(&sinfo->lock); 2930 spin_lock(&cache->lock); 2931 if (!--cache->ro) { 2932 if (btrfs_is_zoned(cache->fs_info)) { 2933 /* Migrate zone_unusable bytes back */ 2934 cache->zone_unusable = 2935 (cache->alloc_offset - cache->used) + 2936 (cache->length - cache->zone_capacity); 2937 sinfo->bytes_zone_unusable += cache->zone_unusable; 2938 sinfo->bytes_readonly -= cache->zone_unusable; 2939 } 2940 num_bytes = cache->length - cache->reserved - 2941 cache->pinned - cache->bytes_super - 2942 cache->zone_unusable - cache->used; 2943 sinfo->bytes_readonly -= num_bytes; 2944 list_del_init(&cache->ro_list); 2945 } 2946 spin_unlock(&cache->lock); 2947 spin_unlock(&sinfo->lock); 2948 } 2949 2950 static int update_block_group_item(struct btrfs_trans_handle *trans, 2951 struct btrfs_path *path, 2952 struct btrfs_block_group *cache) 2953 { 2954 struct btrfs_fs_info *fs_info = trans->fs_info; 2955 int ret; 2956 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2957 unsigned long bi; 2958 struct extent_buffer *leaf; 2959 struct btrfs_block_group_item bgi; 2960 struct btrfs_key key; 2961 u64 old_commit_used; 2962 u64 used; 2963 2964 /* 2965 * Block group items update can be triggered out of commit transaction 2966 * critical section, thus we need a consistent view of used bytes. 2967 * We cannot use cache->used directly outside of the spin lock, as it 2968 * may be changed. 2969 */ 2970 spin_lock(&cache->lock); 2971 old_commit_used = cache->commit_used; 2972 used = cache->used; 2973 /* No change in used bytes, can safely skip it. */ 2974 if (cache->commit_used == used) { 2975 spin_unlock(&cache->lock); 2976 return 0; 2977 } 2978 cache->commit_used = used; 2979 spin_unlock(&cache->lock); 2980 2981 key.objectid = cache->start; 2982 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2983 key.offset = cache->length; 2984 2985 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2986 if (ret) { 2987 if (ret > 0) 2988 ret = -ENOENT; 2989 goto fail; 2990 } 2991 2992 leaf = path->nodes[0]; 2993 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 2994 btrfs_set_stack_block_group_used(&bgi, used); 2995 btrfs_set_stack_block_group_chunk_objectid(&bgi, 2996 cache->global_root_id); 2997 btrfs_set_stack_block_group_flags(&bgi, cache->flags); 2998 write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 2999 btrfs_mark_buffer_dirty(trans, leaf); 3000 fail: 3001 btrfs_release_path(path); 3002 /* 3003 * We didn't update the block group item, need to revert commit_used 3004 * unless the block group item didn't exist yet - this is to prevent a 3005 * race with a concurrent insertion of the block group item, with 3006 * insert_block_group_item(), that happened just after we attempted to 3007 * update. In that case we would reset commit_used to 0 just after the 3008 * insertion set it to a value greater than 0 - if the block group later 3009 * becomes with 0 used bytes, we would incorrectly skip its update. 3010 */ 3011 if (ret < 0 && ret != -ENOENT) { 3012 spin_lock(&cache->lock); 3013 cache->commit_used = old_commit_used; 3014 spin_unlock(&cache->lock); 3015 } 3016 return ret; 3017 3018 } 3019 3020 static int cache_save_setup(struct btrfs_block_group *block_group, 3021 struct btrfs_trans_handle *trans, 3022 struct btrfs_path *path) 3023 { 3024 struct btrfs_fs_info *fs_info = block_group->fs_info; 3025 struct inode *inode = NULL; 3026 struct extent_changeset *data_reserved = NULL; 3027 u64 alloc_hint = 0; 3028 int dcs = BTRFS_DC_ERROR; 3029 u64 cache_size = 0; 3030 int retries = 0; 3031 int ret = 0; 3032 3033 if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 3034 return 0; 3035 3036 /* 3037 * If this block group is smaller than 100 megs don't bother caching the 3038 * block group. 3039 */ 3040 if (block_group->length < (100 * SZ_1M)) { 3041 spin_lock(&block_group->lock); 3042 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 3043 spin_unlock(&block_group->lock); 3044 return 0; 3045 } 3046 3047 if (TRANS_ABORTED(trans)) 3048 return 0; 3049 again: 3050 inode = lookup_free_space_inode(block_group, path); 3051 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 3052 ret = PTR_ERR(inode); 3053 btrfs_release_path(path); 3054 goto out; 3055 } 3056 3057 if (IS_ERR(inode)) { 3058 BUG_ON(retries); 3059 retries++; 3060 3061 if (block_group->ro) 3062 goto out_free; 3063 3064 ret = create_free_space_inode(trans, block_group, path); 3065 if (ret) 3066 goto out_free; 3067 goto again; 3068 } 3069 3070 /* 3071 * We want to set the generation to 0, that way if anything goes wrong 3072 * from here on out we know not to trust this cache when we load up next 3073 * time. 3074 */ 3075 BTRFS_I(inode)->generation = 0; 3076 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 3077 if (ret) { 3078 /* 3079 * So theoretically we could recover from this, simply set the 3080 * super cache generation to 0 so we know to invalidate the 3081 * cache, but then we'd have to keep track of the block groups 3082 * that fail this way so we know we _have_ to reset this cache 3083 * before the next commit or risk reading stale cache. So to 3084 * limit our exposure to horrible edge cases lets just abort the 3085 * transaction, this only happens in really bad situations 3086 * anyway. 3087 */ 3088 btrfs_abort_transaction(trans, ret); 3089 goto out_put; 3090 } 3091 WARN_ON(ret); 3092 3093 /* We've already setup this transaction, go ahead and exit */ 3094 if (block_group->cache_generation == trans->transid && 3095 i_size_read(inode)) { 3096 dcs = BTRFS_DC_SETUP; 3097 goto out_put; 3098 } 3099 3100 if (i_size_read(inode) > 0) { 3101 ret = btrfs_check_trunc_cache_free_space(fs_info, 3102 &fs_info->global_block_rsv); 3103 if (ret) 3104 goto out_put; 3105 3106 ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 3107 if (ret) 3108 goto out_put; 3109 } 3110 3111 spin_lock(&block_group->lock); 3112 if (block_group->cached != BTRFS_CACHE_FINISHED || 3113 !btrfs_test_opt(fs_info, SPACE_CACHE)) { 3114 /* 3115 * don't bother trying to write stuff out _if_ 3116 * a) we're not cached, 3117 * b) we're with nospace_cache mount option, 3118 * c) we're with v2 space_cache (FREE_SPACE_TREE). 3119 */ 3120 dcs = BTRFS_DC_WRITTEN; 3121 spin_unlock(&block_group->lock); 3122 goto out_put; 3123 } 3124 spin_unlock(&block_group->lock); 3125 3126 /* 3127 * We hit an ENOSPC when setting up the cache in this transaction, just 3128 * skip doing the setup, we've already cleared the cache so we're safe. 3129 */ 3130 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 3131 ret = -ENOSPC; 3132 goto out_put; 3133 } 3134 3135 /* 3136 * Try to preallocate enough space based on how big the block group is. 3137 * Keep in mind this has to include any pinned space which could end up 3138 * taking up quite a bit since it's not folded into the other space 3139 * cache. 3140 */ 3141 cache_size = div_u64(block_group->length, SZ_256M); 3142 if (!cache_size) 3143 cache_size = 1; 3144 3145 cache_size *= 16; 3146 cache_size *= fs_info->sectorsize; 3147 3148 ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 3149 cache_size, false); 3150 if (ret) 3151 goto out_put; 3152 3153 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, 3154 cache_size, cache_size, 3155 &alloc_hint); 3156 /* 3157 * Our cache requires contiguous chunks so that we don't modify a bunch 3158 * of metadata or split extents when writing the cache out, which means 3159 * we can enospc if we are heavily fragmented in addition to just normal 3160 * out of space conditions. So if we hit this just skip setting up any 3161 * other block groups for this transaction, maybe we'll unpin enough 3162 * space the next time around. 3163 */ 3164 if (!ret) 3165 dcs = BTRFS_DC_SETUP; 3166 else if (ret == -ENOSPC) 3167 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 3168 3169 out_put: 3170 iput(inode); 3171 out_free: 3172 btrfs_release_path(path); 3173 out: 3174 spin_lock(&block_group->lock); 3175 if (!ret && dcs == BTRFS_DC_SETUP) 3176 block_group->cache_generation = trans->transid; 3177 block_group->disk_cache_state = dcs; 3178 spin_unlock(&block_group->lock); 3179 3180 extent_changeset_free(data_reserved); 3181 return ret; 3182 } 3183 3184 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 3185 { 3186 struct btrfs_fs_info *fs_info = trans->fs_info; 3187 struct btrfs_block_group *cache, *tmp; 3188 struct btrfs_transaction *cur_trans = trans->transaction; 3189 struct btrfs_path *path; 3190 3191 if (list_empty(&cur_trans->dirty_bgs) || 3192 !btrfs_test_opt(fs_info, SPACE_CACHE)) 3193 return 0; 3194 3195 path = btrfs_alloc_path(); 3196 if (!path) 3197 return -ENOMEM; 3198 3199 /* Could add new block groups, use _safe just in case */ 3200 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 3201 dirty_list) { 3202 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 3203 cache_save_setup(cache, trans, path); 3204 } 3205 3206 btrfs_free_path(path); 3207 return 0; 3208 } 3209 3210 /* 3211 * Transaction commit does final block group cache writeback during a critical 3212 * section where nothing is allowed to change the FS. This is required in 3213 * order for the cache to actually match the block group, but can introduce a 3214 * lot of latency into the commit. 3215 * 3216 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 3217 * There's a chance we'll have to redo some of it if the block group changes 3218 * again during the commit, but it greatly reduces the commit latency by 3219 * getting rid of the easy block groups while we're still allowing others to 3220 * join the commit. 3221 */ 3222 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 3223 { 3224 struct btrfs_fs_info *fs_info = trans->fs_info; 3225 struct btrfs_block_group *cache; 3226 struct btrfs_transaction *cur_trans = trans->transaction; 3227 int ret = 0; 3228 int should_put; 3229 struct btrfs_path *path = NULL; 3230 LIST_HEAD(dirty); 3231 struct list_head *io = &cur_trans->io_bgs; 3232 int loops = 0; 3233 3234 spin_lock(&cur_trans->dirty_bgs_lock); 3235 if (list_empty(&cur_trans->dirty_bgs)) { 3236 spin_unlock(&cur_trans->dirty_bgs_lock); 3237 return 0; 3238 } 3239 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3240 spin_unlock(&cur_trans->dirty_bgs_lock); 3241 3242 again: 3243 /* Make sure all the block groups on our dirty list actually exist */ 3244 btrfs_create_pending_block_groups(trans); 3245 3246 if (!path) { 3247 path = btrfs_alloc_path(); 3248 if (!path) { 3249 ret = -ENOMEM; 3250 goto out; 3251 } 3252 } 3253 3254 /* 3255 * cache_write_mutex is here only to save us from balance or automatic 3256 * removal of empty block groups deleting this block group while we are 3257 * writing out the cache 3258 */ 3259 mutex_lock(&trans->transaction->cache_write_mutex); 3260 while (!list_empty(&dirty)) { 3261 bool drop_reserve = true; 3262 3263 cache = list_first_entry(&dirty, struct btrfs_block_group, 3264 dirty_list); 3265 /* 3266 * This can happen if something re-dirties a block group that 3267 * is already under IO. Just wait for it to finish and then do 3268 * it all again 3269 */ 3270 if (!list_empty(&cache->io_list)) { 3271 list_del_init(&cache->io_list); 3272 btrfs_wait_cache_io(trans, cache, path); 3273 btrfs_put_block_group(cache); 3274 } 3275 3276 3277 /* 3278 * btrfs_wait_cache_io uses the cache->dirty_list to decide if 3279 * it should update the cache_state. Don't delete until after 3280 * we wait. 3281 * 3282 * Since we're not running in the commit critical section 3283 * we need the dirty_bgs_lock to protect from update_block_group 3284 */ 3285 spin_lock(&cur_trans->dirty_bgs_lock); 3286 list_del_init(&cache->dirty_list); 3287 spin_unlock(&cur_trans->dirty_bgs_lock); 3288 3289 should_put = 1; 3290 3291 cache_save_setup(cache, trans, path); 3292 3293 if (cache->disk_cache_state == BTRFS_DC_SETUP) { 3294 cache->io_ctl.inode = NULL; 3295 ret = btrfs_write_out_cache(trans, cache, path); 3296 if (ret == 0 && cache->io_ctl.inode) { 3297 should_put = 0; 3298 3299 /* 3300 * The cache_write_mutex is protecting the 3301 * io_list, also refer to the definition of 3302 * btrfs_transaction::io_bgs for more details 3303 */ 3304 list_add_tail(&cache->io_list, io); 3305 } else { 3306 /* 3307 * If we failed to write the cache, the 3308 * generation will be bad and life goes on 3309 */ 3310 ret = 0; 3311 } 3312 } 3313 if (!ret) { 3314 ret = update_block_group_item(trans, path, cache); 3315 /* 3316 * Our block group might still be attached to the list 3317 * of new block groups in the transaction handle of some 3318 * other task (struct btrfs_trans_handle->new_bgs). This 3319 * means its block group item isn't yet in the extent 3320 * tree. If this happens ignore the error, as we will 3321 * try again later in the critical section of the 3322 * transaction commit. 3323 */ 3324 if (ret == -ENOENT) { 3325 ret = 0; 3326 spin_lock(&cur_trans->dirty_bgs_lock); 3327 if (list_empty(&cache->dirty_list)) { 3328 list_add_tail(&cache->dirty_list, 3329 &cur_trans->dirty_bgs); 3330 btrfs_get_block_group(cache); 3331 drop_reserve = false; 3332 } 3333 spin_unlock(&cur_trans->dirty_bgs_lock); 3334 } else if (ret) { 3335 btrfs_abort_transaction(trans, ret); 3336 } 3337 } 3338 3339 /* If it's not on the io list, we need to put the block group */ 3340 if (should_put) 3341 btrfs_put_block_group(cache); 3342 if (drop_reserve) 3343 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 3344 /* 3345 * Avoid blocking other tasks for too long. It might even save 3346 * us from writing caches for block groups that are going to be 3347 * removed. 3348 */ 3349 mutex_unlock(&trans->transaction->cache_write_mutex); 3350 if (ret) 3351 goto out; 3352 mutex_lock(&trans->transaction->cache_write_mutex); 3353 } 3354 mutex_unlock(&trans->transaction->cache_write_mutex); 3355 3356 /* 3357 * Go through delayed refs for all the stuff we've just kicked off 3358 * and then loop back (just once) 3359 */ 3360 if (!ret) 3361 ret = btrfs_run_delayed_refs(trans, 0); 3362 if (!ret && loops == 0) { 3363 loops++; 3364 spin_lock(&cur_trans->dirty_bgs_lock); 3365 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3366 /* 3367 * dirty_bgs_lock protects us from concurrent block group 3368 * deletes too (not just cache_write_mutex). 3369 */ 3370 if (!list_empty(&dirty)) { 3371 spin_unlock(&cur_trans->dirty_bgs_lock); 3372 goto again; 3373 } 3374 spin_unlock(&cur_trans->dirty_bgs_lock); 3375 } 3376 out: 3377 if (ret < 0) { 3378 spin_lock(&cur_trans->dirty_bgs_lock); 3379 list_splice_init(&dirty, &cur_trans->dirty_bgs); 3380 spin_unlock(&cur_trans->dirty_bgs_lock); 3381 btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 3382 } 3383 3384 btrfs_free_path(path); 3385 return ret; 3386 } 3387 3388 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 3389 { 3390 struct btrfs_fs_info *fs_info = trans->fs_info; 3391 struct btrfs_block_group *cache; 3392 struct btrfs_transaction *cur_trans = trans->transaction; 3393 int ret = 0; 3394 int should_put; 3395 struct btrfs_path *path; 3396 struct list_head *io = &cur_trans->io_bgs; 3397 3398 path = btrfs_alloc_path(); 3399 if (!path) 3400 return -ENOMEM; 3401 3402 /* 3403 * Even though we are in the critical section of the transaction commit, 3404 * we can still have concurrent tasks adding elements to this 3405 * transaction's list of dirty block groups. These tasks correspond to 3406 * endio free space workers started when writeback finishes for a 3407 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 3408 * allocate new block groups as a result of COWing nodes of the root 3409 * tree when updating the free space inode. The writeback for the space 3410 * caches is triggered by an earlier call to 3411 * btrfs_start_dirty_block_groups() and iterations of the following 3412 * loop. 3413 * Also we want to do the cache_save_setup first and then run the 3414 * delayed refs to make sure we have the best chance at doing this all 3415 * in one shot. 3416 */ 3417 spin_lock(&cur_trans->dirty_bgs_lock); 3418 while (!list_empty(&cur_trans->dirty_bgs)) { 3419 cache = list_first_entry(&cur_trans->dirty_bgs, 3420 struct btrfs_block_group, 3421 dirty_list); 3422 3423 /* 3424 * This can happen if cache_save_setup re-dirties a block group 3425 * that is already under IO. Just wait for it to finish and 3426 * then do it all again 3427 */ 3428 if (!list_empty(&cache->io_list)) { 3429 spin_unlock(&cur_trans->dirty_bgs_lock); 3430 list_del_init(&cache->io_list); 3431 btrfs_wait_cache_io(trans, cache, path); 3432 btrfs_put_block_group(cache); 3433 spin_lock(&cur_trans->dirty_bgs_lock); 3434 } 3435 3436 /* 3437 * Don't remove from the dirty list until after we've waited on 3438 * any pending IO 3439 */ 3440 list_del_init(&cache->dirty_list); 3441 spin_unlock(&cur_trans->dirty_bgs_lock); 3442 should_put = 1; 3443 3444 cache_save_setup(cache, trans, path); 3445 3446 if (!ret) 3447 ret = btrfs_run_delayed_refs(trans, U64_MAX); 3448 3449 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 3450 cache->io_ctl.inode = NULL; 3451 ret = btrfs_write_out_cache(trans, cache, path); 3452 if (ret == 0 && cache->io_ctl.inode) { 3453 should_put = 0; 3454 list_add_tail(&cache->io_list, io); 3455 } else { 3456 /* 3457 * If we failed to write the cache, the 3458 * generation will be bad and life goes on 3459 */ 3460 ret = 0; 3461 } 3462 } 3463 if (!ret) { 3464 ret = update_block_group_item(trans, path, cache); 3465 /* 3466 * One of the free space endio workers might have 3467 * created a new block group while updating a free space 3468 * cache's inode (at inode.c:btrfs_finish_ordered_io()) 3469 * and hasn't released its transaction handle yet, in 3470 * which case the new block group is still attached to 3471 * its transaction handle and its creation has not 3472 * finished yet (no block group item in the extent tree 3473 * yet, etc). If this is the case, wait for all free 3474 * space endio workers to finish and retry. This is a 3475 * very rare case so no need for a more efficient and 3476 * complex approach. 3477 */ 3478 if (ret == -ENOENT) { 3479 wait_event(cur_trans->writer_wait, 3480 atomic_read(&cur_trans->num_writers) == 1); 3481 ret = update_block_group_item(trans, path, cache); 3482 } 3483 if (ret) 3484 btrfs_abort_transaction(trans, ret); 3485 } 3486 3487 /* If its not on the io list, we need to put the block group */ 3488 if (should_put) 3489 btrfs_put_block_group(cache); 3490 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 3491 spin_lock(&cur_trans->dirty_bgs_lock); 3492 } 3493 spin_unlock(&cur_trans->dirty_bgs_lock); 3494 3495 /* 3496 * Refer to the definition of io_bgs member for details why it's safe 3497 * to use it without any locking 3498 */ 3499 while (!list_empty(io)) { 3500 cache = list_first_entry(io, struct btrfs_block_group, 3501 io_list); 3502 list_del_init(&cache->io_list); 3503 btrfs_wait_cache_io(trans, cache, path); 3504 btrfs_put_block_group(cache); 3505 } 3506 3507 btrfs_free_path(path); 3508 return ret; 3509 } 3510 3511 int btrfs_update_block_group(struct btrfs_trans_handle *trans, 3512 u64 bytenr, u64 num_bytes, bool alloc) 3513 { 3514 struct btrfs_fs_info *info = trans->fs_info; 3515 struct btrfs_space_info *space_info; 3516 struct btrfs_block_group *cache; 3517 u64 old_val; 3518 bool reclaim = false; 3519 bool bg_already_dirty = true; 3520 int factor; 3521 3522 /* Block accounting for super block */ 3523 spin_lock(&info->delalloc_root_lock); 3524 old_val = btrfs_super_bytes_used(info->super_copy); 3525 if (alloc) 3526 old_val += num_bytes; 3527 else 3528 old_val -= num_bytes; 3529 btrfs_set_super_bytes_used(info->super_copy, old_val); 3530 spin_unlock(&info->delalloc_root_lock); 3531 3532 cache = btrfs_lookup_block_group(info, bytenr); 3533 if (!cache) 3534 return -ENOENT; 3535 3536 /* An extent can not span multiple block groups. */ 3537 ASSERT(bytenr + num_bytes <= cache->start + cache->length); 3538 3539 space_info = cache->space_info; 3540 factor = btrfs_bg_type_to_factor(cache->flags); 3541 3542 /* 3543 * If this block group has free space cache written out, we need to make 3544 * sure to load it if we are removing space. This is because we need 3545 * the unpinning stage to actually add the space back to the block group, 3546 * otherwise we will leak space. 3547 */ 3548 if (!alloc && !btrfs_block_group_done(cache)) 3549 btrfs_cache_block_group(cache, true); 3550 3551 spin_lock(&space_info->lock); 3552 spin_lock(&cache->lock); 3553 3554 if (btrfs_test_opt(info, SPACE_CACHE) && 3555 cache->disk_cache_state < BTRFS_DC_CLEAR) 3556 cache->disk_cache_state = BTRFS_DC_CLEAR; 3557 3558 old_val = cache->used; 3559 if (alloc) { 3560 old_val += num_bytes; 3561 cache->used = old_val; 3562 cache->reserved -= num_bytes; 3563 space_info->bytes_reserved -= num_bytes; 3564 space_info->bytes_used += num_bytes; 3565 space_info->disk_used += num_bytes * factor; 3566 spin_unlock(&cache->lock); 3567 spin_unlock(&space_info->lock); 3568 } else { 3569 old_val -= num_bytes; 3570 cache->used = old_val; 3571 cache->pinned += num_bytes; 3572 btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes); 3573 space_info->bytes_used -= num_bytes; 3574 space_info->disk_used -= num_bytes * factor; 3575 3576 reclaim = should_reclaim_block_group(cache, num_bytes); 3577 3578 spin_unlock(&cache->lock); 3579 spin_unlock(&space_info->lock); 3580 3581 set_extent_bit(&trans->transaction->pinned_extents, bytenr, 3582 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); 3583 } 3584 3585 spin_lock(&trans->transaction->dirty_bgs_lock); 3586 if (list_empty(&cache->dirty_list)) { 3587 list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs); 3588 bg_already_dirty = false; 3589 btrfs_get_block_group(cache); 3590 } 3591 spin_unlock(&trans->transaction->dirty_bgs_lock); 3592 3593 /* 3594 * No longer have used bytes in this block group, queue it for deletion. 3595 * We do this after adding the block group to the dirty list to avoid 3596 * races between cleaner kthread and space cache writeout. 3597 */ 3598 if (!alloc && old_val == 0) { 3599 if (!btrfs_test_opt(info, DISCARD_ASYNC)) 3600 btrfs_mark_bg_unused(cache); 3601 } else if (!alloc && reclaim) { 3602 btrfs_mark_bg_to_reclaim(cache); 3603 } 3604 3605 btrfs_put_block_group(cache); 3606 3607 /* Modified block groups are accounted for in the delayed_refs_rsv. */ 3608 if (!bg_already_dirty) 3609 btrfs_inc_delayed_refs_rsv_bg_updates(info); 3610 3611 return 0; 3612 } 3613 3614 /* 3615 * Update the block_group and space info counters. 3616 * 3617 * @cache: The cache we are manipulating 3618 * @ram_bytes: The number of bytes of file content, and will be same to 3619 * @num_bytes except for the compress path. 3620 * @num_bytes: The number of bytes in question 3621 * @delalloc: The blocks are allocated for the delalloc write 3622 * 3623 * This is called by the allocator when it reserves space. If this is a 3624 * reservation and the block group has become read only we cannot make the 3625 * reservation and return -EAGAIN, otherwise this function always succeeds. 3626 */ 3627 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, 3628 u64 ram_bytes, u64 num_bytes, int delalloc, 3629 bool force_wrong_size_class) 3630 { 3631 struct btrfs_space_info *space_info = cache->space_info; 3632 enum btrfs_block_group_size_class size_class; 3633 int ret = 0; 3634 3635 spin_lock(&space_info->lock); 3636 spin_lock(&cache->lock); 3637 if (cache->ro) { 3638 ret = -EAGAIN; 3639 goto out; 3640 } 3641 3642 if (btrfs_block_group_should_use_size_class(cache)) { 3643 size_class = btrfs_calc_block_group_size_class(num_bytes); 3644 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class); 3645 if (ret) 3646 goto out; 3647 } 3648 cache->reserved += num_bytes; 3649 space_info->bytes_reserved += num_bytes; 3650 trace_btrfs_space_reservation(cache->fs_info, "space_info", 3651 space_info->flags, num_bytes, 1); 3652 btrfs_space_info_update_bytes_may_use(cache->fs_info, 3653 space_info, -ram_bytes); 3654 if (delalloc) 3655 cache->delalloc_bytes += num_bytes; 3656 3657 /* 3658 * Compression can use less space than we reserved, so wake tickets if 3659 * that happens. 3660 */ 3661 if (num_bytes < ram_bytes) 3662 btrfs_try_granting_tickets(cache->fs_info, space_info); 3663 out: 3664 spin_unlock(&cache->lock); 3665 spin_unlock(&space_info->lock); 3666 return ret; 3667 } 3668 3669 /* 3670 * Update the block_group and space info counters. 3671 * 3672 * @cache: The cache we are manipulating 3673 * @num_bytes: The number of bytes in question 3674 * @delalloc: The blocks are allocated for the delalloc write 3675 * 3676 * This is called by somebody who is freeing space that was never actually used 3677 * on disk. For example if you reserve some space for a new leaf in transaction 3678 * A and before transaction A commits you free that leaf, you call this with 3679 * reserve set to 0 in order to clear the reservation. 3680 */ 3681 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, 3682 u64 num_bytes, int delalloc) 3683 { 3684 struct btrfs_space_info *space_info = cache->space_info; 3685 3686 spin_lock(&space_info->lock); 3687 spin_lock(&cache->lock); 3688 if (cache->ro) 3689 space_info->bytes_readonly += num_bytes; 3690 cache->reserved -= num_bytes; 3691 space_info->bytes_reserved -= num_bytes; 3692 space_info->max_extent_size = 0; 3693 3694 if (delalloc) 3695 cache->delalloc_bytes -= num_bytes; 3696 spin_unlock(&cache->lock); 3697 3698 btrfs_try_granting_tickets(cache->fs_info, space_info); 3699 spin_unlock(&space_info->lock); 3700 } 3701 3702 static void force_metadata_allocation(struct btrfs_fs_info *info) 3703 { 3704 struct list_head *head = &info->space_info; 3705 struct btrfs_space_info *found; 3706 3707 list_for_each_entry(found, head, list) { 3708 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3709 found->force_alloc = CHUNK_ALLOC_FORCE; 3710 } 3711 } 3712 3713 static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 3714 struct btrfs_space_info *sinfo, int force) 3715 { 3716 u64 bytes_used = btrfs_space_info_used(sinfo, false); 3717 u64 thresh; 3718 3719 if (force == CHUNK_ALLOC_FORCE) 3720 return 1; 3721 3722 /* 3723 * in limited mode, we want to have some free space up to 3724 * about 1% of the FS size. 3725 */ 3726 if (force == CHUNK_ALLOC_LIMITED) { 3727 thresh = btrfs_super_total_bytes(fs_info->super_copy); 3728 thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1)); 3729 3730 if (sinfo->total_bytes - bytes_used < thresh) 3731 return 1; 3732 } 3733 3734 if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) 3735 return 0; 3736 return 1; 3737 } 3738 3739 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) 3740 { 3741 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); 3742 3743 return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 3744 } 3745 3746 static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags) 3747 { 3748 struct btrfs_block_group *bg; 3749 int ret; 3750 3751 /* 3752 * Check if we have enough space in the system space info because we 3753 * will need to update device items in the chunk btree and insert a new 3754 * chunk item in the chunk btree as well. This will allocate a new 3755 * system block group if needed. 3756 */ 3757 check_system_chunk(trans, flags); 3758 3759 bg = btrfs_create_chunk(trans, flags); 3760 if (IS_ERR(bg)) { 3761 ret = PTR_ERR(bg); 3762 goto out; 3763 } 3764 3765 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 3766 /* 3767 * Normally we are not expected to fail with -ENOSPC here, since we have 3768 * previously reserved space in the system space_info and allocated one 3769 * new system chunk if necessary. However there are three exceptions: 3770 * 3771 * 1) We may have enough free space in the system space_info but all the 3772 * existing system block groups have a profile which can not be used 3773 * for extent allocation. 3774 * 3775 * This happens when mounting in degraded mode. For example we have a 3776 * RAID1 filesystem with 2 devices, lose one device and mount the fs 3777 * using the other device in degraded mode. If we then allocate a chunk, 3778 * we may have enough free space in the existing system space_info, but 3779 * none of the block groups can be used for extent allocation since they 3780 * have a RAID1 profile, and because we are in degraded mode with a 3781 * single device, we are forced to allocate a new system chunk with a 3782 * SINGLE profile. Making check_system_chunk() iterate over all system 3783 * block groups and check if they have a usable profile and enough space 3784 * can be slow on very large filesystems, so we tolerate the -ENOSPC and 3785 * try again after forcing allocation of a new system chunk. Like this 3786 * we avoid paying the cost of that search in normal circumstances, when 3787 * we were not mounted in degraded mode; 3788 * 3789 * 2) We had enough free space info the system space_info, and one suitable 3790 * block group to allocate from when we called check_system_chunk() 3791 * above. However right after we called it, the only system block group 3792 * with enough free space got turned into RO mode by a running scrub, 3793 * and in this case we have to allocate a new one and retry. We only 3794 * need do this allocate and retry once, since we have a transaction 3795 * handle and scrub uses the commit root to search for block groups; 3796 * 3797 * 3) We had one system block group with enough free space when we called 3798 * check_system_chunk(), but after that, right before we tried to 3799 * allocate the last extent buffer we needed, a discard operation came 3800 * in and it temporarily removed the last free space entry from the 3801 * block group (discard removes a free space entry, discards it, and 3802 * then adds back the entry to the block group cache). 3803 */ 3804 if (ret == -ENOSPC) { 3805 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); 3806 struct btrfs_block_group *sys_bg; 3807 3808 sys_bg = btrfs_create_chunk(trans, sys_flags); 3809 if (IS_ERR(sys_bg)) { 3810 ret = PTR_ERR(sys_bg); 3811 btrfs_abort_transaction(trans, ret); 3812 goto out; 3813 } 3814 3815 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3816 if (ret) { 3817 btrfs_abort_transaction(trans, ret); 3818 goto out; 3819 } 3820 3821 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 3822 if (ret) { 3823 btrfs_abort_transaction(trans, ret); 3824 goto out; 3825 } 3826 } else if (ret) { 3827 btrfs_abort_transaction(trans, ret); 3828 goto out; 3829 } 3830 out: 3831 btrfs_trans_release_chunk_metadata(trans); 3832 3833 if (ret) 3834 return ERR_PTR(ret); 3835 3836 btrfs_get_block_group(bg); 3837 return bg; 3838 } 3839 3840 /* 3841 * Chunk allocation is done in 2 phases: 3842 * 3843 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for 3844 * the chunk, the chunk mapping, create its block group and add the items 3845 * that belong in the chunk btree to it - more specifically, we need to 3846 * update device items in the chunk btree and add a new chunk item to it. 3847 * 3848 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block 3849 * group item to the extent btree and the device extent items to the devices 3850 * btree. 3851 * 3852 * This is done to prevent deadlocks. For example when COWing a node from the 3853 * extent btree we are holding a write lock on the node's parent and if we 3854 * trigger chunk allocation and attempted to insert the new block group item 3855 * in the extent btree right way, we could deadlock because the path for the 3856 * insertion can include that parent node. At first glance it seems impossible 3857 * to trigger chunk allocation after starting a transaction since tasks should 3858 * reserve enough transaction units (metadata space), however while that is true 3859 * most of the time, chunk allocation may still be triggered for several reasons: 3860 * 3861 * 1) When reserving metadata, we check if there is enough free space in the 3862 * metadata space_info and therefore don't trigger allocation of a new chunk. 3863 * However later when the task actually tries to COW an extent buffer from 3864 * the extent btree or from the device btree for example, it is forced to 3865 * allocate a new block group (chunk) because the only one that had enough 3866 * free space was just turned to RO mode by a running scrub for example (or 3867 * device replace, block group reclaim thread, etc), so we can not use it 3868 * for allocating an extent and end up being forced to allocate a new one; 3869 * 3870 * 2) Because we only check that the metadata space_info has enough free bytes, 3871 * we end up not allocating a new metadata chunk in that case. However if 3872 * the filesystem was mounted in degraded mode, none of the existing block 3873 * groups might be suitable for extent allocation due to their incompatible 3874 * profile (for e.g. mounting a 2 devices filesystem, where all block groups 3875 * use a RAID1 profile, in degraded mode using a single device). In this case 3876 * when the task attempts to COW some extent buffer of the extent btree for 3877 * example, it will trigger allocation of a new metadata block group with a 3878 * suitable profile (SINGLE profile in the example of the degraded mount of 3879 * the RAID1 filesystem); 3880 * 3881 * 3) The task has reserved enough transaction units / metadata space, but when 3882 * it attempts to COW an extent buffer from the extent or device btree for 3883 * example, it does not find any free extent in any metadata block group, 3884 * therefore forced to try to allocate a new metadata block group. 3885 * This is because some other task allocated all available extents in the 3886 * meanwhile - this typically happens with tasks that don't reserve space 3887 * properly, either intentionally or as a bug. One example where this is 3888 * done intentionally is fsync, as it does not reserve any transaction units 3889 * and ends up allocating a variable number of metadata extents for log 3890 * tree extent buffers; 3891 * 3892 * 4) The task has reserved enough transaction units / metadata space, but right 3893 * before it tries to allocate the last extent buffer it needs, a discard 3894 * operation comes in and, temporarily, removes the last free space entry from 3895 * the only metadata block group that had free space (discard starts by 3896 * removing a free space entry from a block group, then does the discard 3897 * operation and, once it's done, it adds back the free space entry to the 3898 * block group). 3899 * 3900 * We also need this 2 phases setup when adding a device to a filesystem with 3901 * a seed device - we must create new metadata and system chunks without adding 3902 * any of the block group items to the chunk, extent and device btrees. If we 3903 * did not do it this way, we would get ENOSPC when attempting to update those 3904 * btrees, since all the chunks from the seed device are read-only. 3905 * 3906 * Phase 1 does the updates and insertions to the chunk btree because if we had 3907 * it done in phase 2 and have a thundering herd of tasks allocating chunks in 3908 * parallel, we risk having too many system chunks allocated by many tasks if 3909 * many tasks reach phase 1 without the previous ones completing phase 2. In the 3910 * extreme case this leads to exhaustion of the system chunk array in the 3911 * superblock. This is easier to trigger if using a btree node/leaf size of 64K 3912 * and with RAID filesystems (so we have more device items in the chunk btree). 3913 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of 3914 * the system chunk array due to concurrent allocations") provides more details. 3915 * 3916 * Allocation of system chunks does not happen through this function. A task that 3917 * needs to update the chunk btree (the only btree that uses system chunks), must 3918 * preallocate chunk space by calling either check_system_chunk() or 3919 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or 3920 * metadata chunk or when removing a chunk, while the later is used before doing 3921 * a modification to the chunk btree - use cases for the later are adding, 3922 * removing and resizing a device as well as relocation of a system chunk. 3923 * See the comment below for more details. 3924 * 3925 * The reservation of system space, done through check_system_chunk(), as well 3926 * as all the updates and insertions into the chunk btree must be done while 3927 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing 3928 * an extent buffer from the chunks btree we never trigger allocation of a new 3929 * system chunk, which would result in a deadlock (trying to lock twice an 3930 * extent buffer of the chunk btree, first time before triggering the chunk 3931 * allocation and the second time during chunk allocation while attempting to 3932 * update the chunks btree). The system chunk array is also updated while holding 3933 * that mutex. The same logic applies to removing chunks - we must reserve system 3934 * space, update the chunk btree and the system chunk array in the superblock 3935 * while holding fs_info->chunk_mutex. 3936 * 3937 * This function, btrfs_chunk_alloc(), belongs to phase 1. 3938 * 3939 * If @force is CHUNK_ALLOC_FORCE: 3940 * - return 1 if it successfully allocates a chunk, 3941 * - return errors including -ENOSPC otherwise. 3942 * If @force is NOT CHUNK_ALLOC_FORCE: 3943 * - return 0 if it doesn't need to allocate a new chunk, 3944 * - return 1 if it successfully allocates a chunk, 3945 * - return errors including -ENOSPC otherwise. 3946 */ 3947 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, 3948 enum btrfs_chunk_alloc_enum force) 3949 { 3950 struct btrfs_fs_info *fs_info = trans->fs_info; 3951 struct btrfs_space_info *space_info; 3952 struct btrfs_block_group *ret_bg; 3953 bool wait_for_alloc = false; 3954 bool should_alloc = false; 3955 bool from_extent_allocation = false; 3956 int ret = 0; 3957 3958 if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) { 3959 from_extent_allocation = true; 3960 force = CHUNK_ALLOC_FORCE; 3961 } 3962 3963 /* Don't re-enter if we're already allocating a chunk */ 3964 if (trans->allocating_chunk) 3965 return -ENOSPC; 3966 /* 3967 * Allocation of system chunks can not happen through this path, as we 3968 * could end up in a deadlock if we are allocating a data or metadata 3969 * chunk and there is another task modifying the chunk btree. 3970 * 3971 * This is because while we are holding the chunk mutex, we will attempt 3972 * to add the new chunk item to the chunk btree or update an existing 3973 * device item in the chunk btree, while the other task that is modifying 3974 * the chunk btree is attempting to COW an extent buffer while holding a 3975 * lock on it and on its parent - if the COW operation triggers a system 3976 * chunk allocation, then we can deadlock because we are holding the 3977 * chunk mutex and we may need to access that extent buffer or its parent 3978 * in order to add the chunk item or update a device item. 3979 * 3980 * Tasks that want to modify the chunk tree should reserve system space 3981 * before updating the chunk btree, by calling either 3982 * btrfs_reserve_chunk_metadata() or check_system_chunk(). 3983 * It's possible that after a task reserves the space, it still ends up 3984 * here - this happens in the cases described above at do_chunk_alloc(). 3985 * The task will have to either retry or fail. 3986 */ 3987 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 3988 return -ENOSPC; 3989 3990 space_info = btrfs_find_space_info(fs_info, flags); 3991 ASSERT(space_info); 3992 3993 do { 3994 spin_lock(&space_info->lock); 3995 if (force < space_info->force_alloc) 3996 force = space_info->force_alloc; 3997 should_alloc = should_alloc_chunk(fs_info, space_info, force); 3998 if (space_info->full) { 3999 /* No more free physical space */ 4000 if (should_alloc) 4001 ret = -ENOSPC; 4002 else 4003 ret = 0; 4004 spin_unlock(&space_info->lock); 4005 return ret; 4006 } else if (!should_alloc) { 4007 spin_unlock(&space_info->lock); 4008 return 0; 4009 } else if (space_info->chunk_alloc) { 4010 /* 4011 * Someone is already allocating, so we need to block 4012 * until this someone is finished and then loop to 4013 * recheck if we should continue with our allocation 4014 * attempt. 4015 */ 4016 wait_for_alloc = true; 4017 force = CHUNK_ALLOC_NO_FORCE; 4018 spin_unlock(&space_info->lock); 4019 mutex_lock(&fs_info->chunk_mutex); 4020 mutex_unlock(&fs_info->chunk_mutex); 4021 } else { 4022 /* Proceed with allocation */ 4023 space_info->chunk_alloc = 1; 4024 wait_for_alloc = false; 4025 spin_unlock(&space_info->lock); 4026 } 4027 4028 cond_resched(); 4029 } while (wait_for_alloc); 4030 4031 mutex_lock(&fs_info->chunk_mutex); 4032 trans->allocating_chunk = true; 4033 4034 /* 4035 * If we have mixed data/metadata chunks we want to make sure we keep 4036 * allocating mixed chunks instead of individual chunks. 4037 */ 4038 if (btrfs_mixed_space_info(space_info)) 4039 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 4040 4041 /* 4042 * if we're doing a data chunk, go ahead and make sure that 4043 * we keep a reasonable number of metadata chunks allocated in the 4044 * FS as well. 4045 */ 4046 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 4047 fs_info->data_chunk_allocations++; 4048 if (!(fs_info->data_chunk_allocations % 4049 fs_info->metadata_ratio)) 4050 force_metadata_allocation(fs_info); 4051 } 4052 4053 ret_bg = do_chunk_alloc(trans, flags); 4054 trans->allocating_chunk = false; 4055 4056 if (IS_ERR(ret_bg)) { 4057 ret = PTR_ERR(ret_bg); 4058 } else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) { 4059 /* 4060 * New block group is likely to be used soon. Try to activate 4061 * it now. Failure is OK for now. 4062 */ 4063 btrfs_zone_activate(ret_bg); 4064 } 4065 4066 if (!ret) 4067 btrfs_put_block_group(ret_bg); 4068 4069 spin_lock(&space_info->lock); 4070 if (ret < 0) { 4071 if (ret == -ENOSPC) 4072 space_info->full = 1; 4073 else 4074 goto out; 4075 } else { 4076 ret = 1; 4077 space_info->max_extent_size = 0; 4078 } 4079 4080 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 4081 out: 4082 space_info->chunk_alloc = 0; 4083 spin_unlock(&space_info->lock); 4084 mutex_unlock(&fs_info->chunk_mutex); 4085 4086 return ret; 4087 } 4088 4089 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) 4090 { 4091 u64 num_dev; 4092 4093 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; 4094 if (!num_dev) 4095 num_dev = fs_info->fs_devices->rw_devices; 4096 4097 return num_dev; 4098 } 4099 4100 static void reserve_chunk_space(struct btrfs_trans_handle *trans, 4101 u64 bytes, 4102 u64 type) 4103 { 4104 struct btrfs_fs_info *fs_info = trans->fs_info; 4105 struct btrfs_space_info *info; 4106 u64 left; 4107 int ret = 0; 4108 4109 /* 4110 * Needed because we can end up allocating a system chunk and for an 4111 * atomic and race free space reservation in the chunk block reserve. 4112 */ 4113 lockdep_assert_held(&fs_info->chunk_mutex); 4114 4115 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 4116 spin_lock(&info->lock); 4117 left = info->total_bytes - btrfs_space_info_used(info, true); 4118 spin_unlock(&info->lock); 4119 4120 if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4121 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", 4122 left, bytes, type); 4123 btrfs_dump_space_info(fs_info, info, 0, 0); 4124 } 4125 4126 if (left < bytes) { 4127 u64 flags = btrfs_system_alloc_profile(fs_info); 4128 struct btrfs_block_group *bg; 4129 4130 /* 4131 * Ignore failure to create system chunk. We might end up not 4132 * needing it, as we might not need to COW all nodes/leafs from 4133 * the paths we visit in the chunk tree (they were already COWed 4134 * or created in the current transaction for example). 4135 */ 4136 bg = btrfs_create_chunk(trans, flags); 4137 if (IS_ERR(bg)) { 4138 ret = PTR_ERR(bg); 4139 } else { 4140 /* 4141 * We have a new chunk. We also need to activate it for 4142 * zoned filesystem. 4143 */ 4144 ret = btrfs_zoned_activate_one_bg(fs_info, info, true); 4145 if (ret < 0) 4146 return; 4147 4148 /* 4149 * If we fail to add the chunk item here, we end up 4150 * trying again at phase 2 of chunk allocation, at 4151 * btrfs_create_pending_block_groups(). So ignore 4152 * any error here. An ENOSPC here could happen, due to 4153 * the cases described at do_chunk_alloc() - the system 4154 * block group we just created was just turned into RO 4155 * mode by a scrub for example, or a running discard 4156 * temporarily removed its free space entries, etc. 4157 */ 4158 btrfs_chunk_alloc_add_chunk_item(trans, bg); 4159 } 4160 } 4161 4162 if (!ret) { 4163 ret = btrfs_block_rsv_add(fs_info, 4164 &fs_info->chunk_block_rsv, 4165 bytes, BTRFS_RESERVE_NO_FLUSH); 4166 if (!ret) 4167 trans->chunk_bytes_reserved += bytes; 4168 } 4169 } 4170 4171 /* 4172 * Reserve space in the system space for allocating or removing a chunk. 4173 * The caller must be holding fs_info->chunk_mutex. 4174 */ 4175 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) 4176 { 4177 struct btrfs_fs_info *fs_info = trans->fs_info; 4178 const u64 num_devs = get_profile_num_devs(fs_info, type); 4179 u64 bytes; 4180 4181 /* num_devs device items to update and 1 chunk item to add or remove. */ 4182 bytes = btrfs_calc_metadata_size(fs_info, num_devs) + 4183 btrfs_calc_insert_metadata_size(fs_info, 1); 4184 4185 reserve_chunk_space(trans, bytes, type); 4186 } 4187 4188 /* 4189 * Reserve space in the system space, if needed, for doing a modification to the 4190 * chunk btree. 4191 * 4192 * @trans: A transaction handle. 4193 * @is_item_insertion: Indicate if the modification is for inserting a new item 4194 * in the chunk btree or if it's for the deletion or update 4195 * of an existing item. 4196 * 4197 * This is used in a context where we need to update the chunk btree outside 4198 * block group allocation and removal, to avoid a deadlock with a concurrent 4199 * task that is allocating a metadata or data block group and therefore needs to 4200 * update the chunk btree while holding the chunk mutex. After the update to the 4201 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called. 4202 * 4203 */ 4204 void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans, 4205 bool is_item_insertion) 4206 { 4207 struct btrfs_fs_info *fs_info = trans->fs_info; 4208 u64 bytes; 4209 4210 if (is_item_insertion) 4211 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 4212 else 4213 bytes = btrfs_calc_metadata_size(fs_info, 1); 4214 4215 mutex_lock(&fs_info->chunk_mutex); 4216 reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM); 4217 mutex_unlock(&fs_info->chunk_mutex); 4218 } 4219 4220 void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 4221 { 4222 struct btrfs_block_group *block_group; 4223 4224 block_group = btrfs_lookup_first_block_group(info, 0); 4225 while (block_group) { 4226 btrfs_wait_block_group_cache_done(block_group); 4227 spin_lock(&block_group->lock); 4228 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, 4229 &block_group->runtime_flags)) { 4230 struct inode *inode = block_group->inode; 4231 4232 block_group->inode = NULL; 4233 spin_unlock(&block_group->lock); 4234 4235 ASSERT(block_group->io_ctl.inode == NULL); 4236 iput(inode); 4237 } else { 4238 spin_unlock(&block_group->lock); 4239 } 4240 block_group = btrfs_next_block_group(block_group); 4241 } 4242 } 4243 4244 /* 4245 * Must be called only after stopping all workers, since we could have block 4246 * group caching kthreads running, and therefore they could race with us if we 4247 * freed the block groups before stopping them. 4248 */ 4249 int btrfs_free_block_groups(struct btrfs_fs_info *info) 4250 { 4251 struct btrfs_block_group *block_group; 4252 struct btrfs_space_info *space_info; 4253 struct btrfs_caching_control *caching_ctl; 4254 struct rb_node *n; 4255 4256 if (btrfs_is_zoned(info)) { 4257 if (info->active_meta_bg) { 4258 btrfs_put_block_group(info->active_meta_bg); 4259 info->active_meta_bg = NULL; 4260 } 4261 if (info->active_system_bg) { 4262 btrfs_put_block_group(info->active_system_bg); 4263 info->active_system_bg = NULL; 4264 } 4265 } 4266 4267 write_lock(&info->block_group_cache_lock); 4268 while (!list_empty(&info->caching_block_groups)) { 4269 caching_ctl = list_entry(info->caching_block_groups.next, 4270 struct btrfs_caching_control, list); 4271 list_del(&caching_ctl->list); 4272 btrfs_put_caching_control(caching_ctl); 4273 } 4274 write_unlock(&info->block_group_cache_lock); 4275 4276 spin_lock(&info->unused_bgs_lock); 4277 while (!list_empty(&info->unused_bgs)) { 4278 block_group = list_first_entry(&info->unused_bgs, 4279 struct btrfs_block_group, 4280 bg_list); 4281 list_del_init(&block_group->bg_list); 4282 btrfs_put_block_group(block_group); 4283 } 4284 4285 while (!list_empty(&info->reclaim_bgs)) { 4286 block_group = list_first_entry(&info->reclaim_bgs, 4287 struct btrfs_block_group, 4288 bg_list); 4289 list_del_init(&block_group->bg_list); 4290 btrfs_put_block_group(block_group); 4291 } 4292 spin_unlock(&info->unused_bgs_lock); 4293 4294 spin_lock(&info->zone_active_bgs_lock); 4295 while (!list_empty(&info->zone_active_bgs)) { 4296 block_group = list_first_entry(&info->zone_active_bgs, 4297 struct btrfs_block_group, 4298 active_bg_list); 4299 list_del_init(&block_group->active_bg_list); 4300 btrfs_put_block_group(block_group); 4301 } 4302 spin_unlock(&info->zone_active_bgs_lock); 4303 4304 write_lock(&info->block_group_cache_lock); 4305 while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { 4306 block_group = rb_entry(n, struct btrfs_block_group, 4307 cache_node); 4308 rb_erase_cached(&block_group->cache_node, 4309 &info->block_group_cache_tree); 4310 RB_CLEAR_NODE(&block_group->cache_node); 4311 write_unlock(&info->block_group_cache_lock); 4312 4313 down_write(&block_group->space_info->groups_sem); 4314 list_del(&block_group->list); 4315 up_write(&block_group->space_info->groups_sem); 4316 4317 /* 4318 * We haven't cached this block group, which means we could 4319 * possibly have excluded extents on this block group. 4320 */ 4321 if (block_group->cached == BTRFS_CACHE_NO || 4322 block_group->cached == BTRFS_CACHE_ERROR) 4323 btrfs_free_excluded_extents(block_group); 4324 4325 btrfs_remove_free_space_cache(block_group); 4326 ASSERT(block_group->cached != BTRFS_CACHE_STARTED); 4327 ASSERT(list_empty(&block_group->dirty_list)); 4328 ASSERT(list_empty(&block_group->io_list)); 4329 ASSERT(list_empty(&block_group->bg_list)); 4330 ASSERT(refcount_read(&block_group->refs) == 1); 4331 ASSERT(block_group->swap_extents == 0); 4332 btrfs_put_block_group(block_group); 4333 4334 write_lock(&info->block_group_cache_lock); 4335 } 4336 write_unlock(&info->block_group_cache_lock); 4337 4338 btrfs_release_global_block_rsv(info); 4339 4340 while (!list_empty(&info->space_info)) { 4341 space_info = list_entry(info->space_info.next, 4342 struct btrfs_space_info, 4343 list); 4344 4345 /* 4346 * Do not hide this behind enospc_debug, this is actually 4347 * important and indicates a real bug if this happens. 4348 */ 4349 if (WARN_ON(space_info->bytes_pinned > 0 || 4350 space_info->bytes_may_use > 0)) 4351 btrfs_dump_space_info(info, space_info, 0, 0); 4352 4353 /* 4354 * If there was a failure to cleanup a log tree, very likely due 4355 * to an IO failure on a writeback attempt of one or more of its 4356 * extent buffers, we could not do proper (and cheap) unaccounting 4357 * of their reserved space, so don't warn on bytes_reserved > 0 in 4358 * that case. 4359 */ 4360 if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || 4361 !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { 4362 if (WARN_ON(space_info->bytes_reserved > 0)) 4363 btrfs_dump_space_info(info, space_info, 0, 0); 4364 } 4365 4366 WARN_ON(space_info->reclaim_size > 0); 4367 list_del(&space_info->list); 4368 btrfs_sysfs_remove_space_info(space_info); 4369 } 4370 return 0; 4371 } 4372 4373 void btrfs_freeze_block_group(struct btrfs_block_group *cache) 4374 { 4375 atomic_inc(&cache->frozen); 4376 } 4377 4378 void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) 4379 { 4380 struct btrfs_fs_info *fs_info = block_group->fs_info; 4381 bool cleanup; 4382 4383 spin_lock(&block_group->lock); 4384 cleanup = (atomic_dec_and_test(&block_group->frozen) && 4385 test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)); 4386 spin_unlock(&block_group->lock); 4387 4388 if (cleanup) { 4389 struct btrfs_chunk_map *map; 4390 4391 map = btrfs_find_chunk_map(fs_info, block_group->start, 1); 4392 /* Logic error, can't happen. */ 4393 ASSERT(map); 4394 4395 btrfs_remove_chunk_map(fs_info, map); 4396 4397 /* Once for our lookup reference. */ 4398 btrfs_free_chunk_map(map); 4399 4400 /* 4401 * We may have left one free space entry and other possible 4402 * tasks trimming this block group have left 1 entry each one. 4403 * Free them if any. 4404 */ 4405 btrfs_remove_free_space_cache(block_group); 4406 } 4407 } 4408 4409 bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg) 4410 { 4411 bool ret = true; 4412 4413 spin_lock(&bg->lock); 4414 if (bg->ro) 4415 ret = false; 4416 else 4417 bg->swap_extents++; 4418 spin_unlock(&bg->lock); 4419 4420 return ret; 4421 } 4422 4423 void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount) 4424 { 4425 spin_lock(&bg->lock); 4426 ASSERT(!bg->ro); 4427 ASSERT(bg->swap_extents >= amount); 4428 bg->swap_extents -= amount; 4429 spin_unlock(&bg->lock); 4430 } 4431 4432 enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size) 4433 { 4434 if (size <= SZ_128K) 4435 return BTRFS_BG_SZ_SMALL; 4436 if (size <= SZ_8M) 4437 return BTRFS_BG_SZ_MEDIUM; 4438 return BTRFS_BG_SZ_LARGE; 4439 } 4440 4441 /* 4442 * Handle a block group allocating an extent in a size class 4443 * 4444 * @bg: The block group we allocated in. 4445 * @size_class: The size class of the allocation. 4446 * @force_wrong_size_class: Whether we are desperate enough to allow 4447 * mismatched size classes. 4448 * 4449 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the 4450 * case of a race that leads to the wrong size class without 4451 * force_wrong_size_class set. 4452 * 4453 * find_free_extent will skip block groups with a mismatched size class until 4454 * it really needs to avoid ENOSPC. In that case it will set 4455 * force_wrong_size_class. However, if a block group is newly allocated and 4456 * doesn't yet have a size class, then it is possible for two allocations of 4457 * different sizes to race and both try to use it. The loser is caught here and 4458 * has to retry. 4459 */ 4460 int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, 4461 enum btrfs_block_group_size_class size_class, 4462 bool force_wrong_size_class) 4463 { 4464 ASSERT(size_class != BTRFS_BG_SZ_NONE); 4465 4466 /* The new allocation is in the right size class, do nothing */ 4467 if (bg->size_class == size_class) 4468 return 0; 4469 /* 4470 * The new allocation is in a mismatched size class. 4471 * This means one of two things: 4472 * 4473 * 1. Two tasks in find_free_extent for different size_classes raced 4474 * and hit the same empty block_group. Make the loser try again. 4475 * 2. A call to find_free_extent got desperate enough to set 4476 * 'force_wrong_slab'. Don't change the size_class, but allow the 4477 * allocation. 4478 */ 4479 if (bg->size_class != BTRFS_BG_SZ_NONE) { 4480 if (force_wrong_size_class) 4481 return 0; 4482 return -EAGAIN; 4483 } 4484 /* 4485 * The happy new block group case: the new allocation is the first 4486 * one in the block_group so we set size_class. 4487 */ 4488 bg->size_class = size_class; 4489 4490 return 0; 4491 } 4492 4493 bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg) 4494 { 4495 if (btrfs_is_zoned(bg->fs_info)) 4496 return false; 4497 if (!btrfs_is_block_group_data_only(bg)) 4498 return false; 4499 return true; 4500 } 4501