// SPDX-License-Identifier: GPL-2.0

#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	/* Select the highest-redundancy RAID level. */
	if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
		allowed = BTRFS_BLOCK_GROUP_RAID1C4;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
		allowed = BTRFS_BLOCK_GROUP_RAID1C3;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_DUP)
		allowed = BTRFS_BLOCK_GROUP_DUP;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
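/*
 * Illustrative worked example (not part of the original file): suppose the
 * extended @flags passed to btrfs_reduce_alloc_profile() carry the DATA type
 * bit plus the RAID1 and RAID0 profile bits, and the filesystem has two or
 * more writable devices so both profiles survive the devs_min mask above.
 * The if/else ladder then keeps only the highest-redundancy bit, RAID1, so
 * the function returns the chunk-format flags with BTRFS_BLOCK_GROUP_DATA
 * and BTRFS_BLOCK_GROUP_RAID1 set.
 */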
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		/*
		 * If there was a failure to cleanup a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on reserved > 0 in that
		 * case.
		 */
		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
			WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		kfree(cache->free_space_ctl);
		btrfs_free_chunk_map(cache->physical_map);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;
	bool leftmost = true;

	ASSERT(block_group->length != 0);

	write_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_root.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node,
			       &info->block_group_cache_tree, leftmost);

	write_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	read_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_root.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	read_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
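/*
 * Illustrative usage sketch (not part of the original file): the lookup
 * helpers above return a block group with an extra reference held, and
 * btrfs_next_block_group() below drops the reference on the group passed to
 * it while taking one on the next group, so a caller can walk every block
 * group roughly like this:
 *
 *	struct btrfs_block_group *bg;
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... use bg ...
 *	}
 *
 * Breaking out of such a loop early would leak the reference unless the
 * caller also calls btrfs_put_block_group(bg).
 */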
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	read_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info: The filesystem information object.
 * @bytenr:  Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increment the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write; the
 * caller is responsible for calling btrfs_dec_nocow_writers() later.
 *
 * Or NULL if we cannot do a NOCOW write.
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it
 * wants to use it, then it should get a reference on it before calling this
 * function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
	btrfs_put_block_group(bg);
}
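/*
 * Illustrative sketch of how the two helpers above pair up (not part of the
 * original file); the surrounding write path is elided and "extent_bytenr"
 * is only a placeholder name:
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, extent_bytenr);
 *	if (!bg)
 *		... fall back to a regular COW write ...
 *	... create the ordered extent for the NOCOW write ...
 *	btrfs_dec_nocow_writers(bg);
 *
 * btrfs_dec_nocow_writers() also drops the block group reference taken by
 * btrfs_inc_nocow_writers(), so the caller must not touch bg afterwards, as
 * the comment above describes.
 */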
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	int progress;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	/*
	 * We've already failed to allocate from this block group, so even if
	 * there's enough space in the block group it isn't contiguous enough to
	 * allow for an allocation, so wait for at least the next wakeup tick,
	 * or for the thing to be done.
455 */ 456 progress = atomic_read(&caching_ctl->progress); 457 458 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || 459 (progress != atomic_read(&caching_ctl->progress) && 460 (cache->free_space_ctl->free_space >= num_bytes))); 461 462 btrfs_put_caching_control(caching_ctl); 463 } 464 465 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache, 466 struct btrfs_caching_control *caching_ctl) 467 { 468 wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); 469 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0; 470 } 471 472 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) 473 { 474 struct btrfs_caching_control *caching_ctl; 475 int ret; 476 477 caching_ctl = btrfs_get_caching_control(cache); 478 if (!caching_ctl) 479 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; 480 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); 481 btrfs_put_caching_control(caching_ctl); 482 return ret; 483 } 484 485 #ifdef CONFIG_BTRFS_DEBUG 486 static void fragment_free_space(struct btrfs_block_group *block_group) 487 { 488 struct btrfs_fs_info *fs_info = block_group->fs_info; 489 u64 start = block_group->start; 490 u64 len = block_group->length; 491 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? 492 fs_info->nodesize : fs_info->sectorsize; 493 u64 step = chunk << 1; 494 495 while (len > chunk) { 496 btrfs_remove_free_space(block_group, start, chunk); 497 start += step; 498 if (len < step) 499 len = 0; 500 else 501 len -= step; 502 } 503 } 504 #endif 505 506 /* 507 * Add a free space range to the in memory free space cache of a block group. 508 * This checks if the range contains super block locations and any such 509 * locations are not added to the free space cache. 510 * 511 * @block_group: The target block group. 512 * @start: Start offset of the range. 513 * @end: End offset of the range (exclusive). 514 * @total_added_ret: Optional pointer to return the total amount of space 515 * added to the block group's free space cache. 516 * 517 * Returns 0 on success or < 0 on error. 
518 */ 519 int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start, 520 u64 end, u64 *total_added_ret) 521 { 522 struct btrfs_fs_info *info = block_group->fs_info; 523 u64 extent_start, extent_end, size; 524 int ret; 525 526 if (total_added_ret) 527 *total_added_ret = 0; 528 529 while (start < end) { 530 if (!find_first_extent_bit(&info->excluded_extents, start, 531 &extent_start, &extent_end, 532 EXTENT_DIRTY | EXTENT_UPTODATE, 533 NULL)) 534 break; 535 536 if (extent_start <= start) { 537 start = extent_end + 1; 538 } else if (extent_start > start && extent_start < end) { 539 size = extent_start - start; 540 ret = btrfs_add_free_space_async_trimmed(block_group, 541 start, size); 542 if (ret) 543 return ret; 544 if (total_added_ret) 545 *total_added_ret += size; 546 start = extent_end + 1; 547 } else { 548 break; 549 } 550 } 551 552 if (start < end) { 553 size = end - start; 554 ret = btrfs_add_free_space_async_trimmed(block_group, start, 555 size); 556 if (ret) 557 return ret; 558 if (total_added_ret) 559 *total_added_ret += size; 560 } 561 562 return 0; 563 } 564 565 /* 566 * Get an arbitrary extent item index / max_index through the block group 567 * 568 * @block_group the block group to sample from 569 * @index: the integral step through the block group to grab from 570 * @max_index: the granularity of the sampling 571 * @key: return value parameter for the item we find 572 * 573 * Pre-conditions on indices: 574 * 0 <= index <= max_index 575 * 0 < max_index 576 * 577 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative 578 * error code on error. 579 */ 580 static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl, 581 struct btrfs_block_group *block_group, 582 int index, int max_index, 583 struct btrfs_key *found_key) 584 { 585 struct btrfs_fs_info *fs_info = block_group->fs_info; 586 struct btrfs_root *extent_root; 587 u64 search_offset; 588 u64 search_end = block_group->start + block_group->length; 589 struct btrfs_path *path; 590 struct btrfs_key search_key; 591 int ret = 0; 592 593 ASSERT(index >= 0); 594 ASSERT(index <= max_index); 595 ASSERT(max_index > 0); 596 lockdep_assert_held(&caching_ctl->mutex); 597 lockdep_assert_held_read(&fs_info->commit_root_sem); 598 599 path = btrfs_alloc_path(); 600 if (!path) 601 return -ENOMEM; 602 603 extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start, 604 BTRFS_SUPER_INFO_OFFSET)); 605 606 path->skip_locking = 1; 607 path->search_commit_root = 1; 608 path->reada = READA_FORWARD; 609 610 search_offset = index * div_u64(block_group->length, max_index); 611 search_key.objectid = block_group->start + search_offset; 612 search_key.type = BTRFS_EXTENT_ITEM_KEY; 613 search_key.offset = 0; 614 615 btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) { 616 /* Success; sampled an extent item in the block group */ 617 if (found_key->type == BTRFS_EXTENT_ITEM_KEY && 618 found_key->objectid >= block_group->start && 619 found_key->objectid + found_key->offset <= search_end) 620 break; 621 622 /* We can't possibly find a valid extent item anymore */ 623 if (found_key->objectid >= search_end) { 624 ret = 1; 625 break; 626 } 627 } 628 629 lockdep_assert_held(&caching_ctl->mutex); 630 lockdep_assert_held_read(&fs_info->commit_root_sem); 631 btrfs_free_path(path); 632 return ret; 633 } 634 635 /* 636 * Best effort attempt to compute a block group's size class while caching it. 
637 * 638 * @block_group: the block group we are caching 639 * 640 * We cannot infer the size class while adding free space extents, because that 641 * logic doesn't care about contiguous file extents (it doesn't differentiate 642 * between a 100M extent and 100 contiguous 1M extents). So we need to read the 643 * file extent items. Reading all of them is quite wasteful, because usually 644 * only a handful are enough to give a good answer. Therefore, we just grab 5 of 645 * them at even steps through the block group and pick the smallest size class 646 * we see. Since size class is best effort, and not guaranteed in general, 647 * inaccuracy is acceptable. 648 * 649 * To be more explicit about why this algorithm makes sense: 650 * 651 * If we are caching in a block group from disk, then there are three major cases 652 * to consider: 653 * 1. the block group is well behaved and all extents in it are the same size 654 * class. 655 * 2. the block group is mostly one size class with rare exceptions for last 656 * ditch allocations 657 * 3. the block group was populated before size classes and can have a totally 658 * arbitrary mix of size classes. 659 * 660 * In case 1, looking at any extent in the block group will yield the correct 661 * result. For the mixed cases, taking the minimum size class seems like a good 662 * approximation, since gaps from frees will be usable to the size class. For 663 * 2., a small handful of file extents is likely to yield the right answer. For 664 * 3, we can either read every file extent, or admit that this is best effort 665 * anyway and try to stay fast. 666 * 667 * Returns: 0 on success, negative error code on error. 668 */ 669 static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl, 670 struct btrfs_block_group *block_group) 671 { 672 struct btrfs_fs_info *fs_info = block_group->fs_info; 673 struct btrfs_key key; 674 int i; 675 u64 min_size = block_group->length; 676 enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE; 677 int ret; 678 679 if (!btrfs_block_group_should_use_size_class(block_group)) 680 return 0; 681 682 lockdep_assert_held(&caching_ctl->mutex); 683 lockdep_assert_held_read(&fs_info->commit_root_sem); 684 for (i = 0; i < 5; ++i) { 685 ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key); 686 if (ret < 0) 687 goto out; 688 if (ret > 0) 689 continue; 690 min_size = min_t(u64, min_size, key.offset); 691 size_class = btrfs_calc_block_group_size_class(min_size); 692 } 693 if (size_class != BTRFS_BG_SZ_NONE) { 694 spin_lock(&block_group->lock); 695 block_group->size_class = size_class; 696 spin_unlock(&block_group->lock); 697 } 698 out: 699 return ret; 700 } 701 702 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) 703 { 704 struct btrfs_block_group *block_group = caching_ctl->block_group; 705 struct btrfs_fs_info *fs_info = block_group->fs_info; 706 struct btrfs_root *extent_root; 707 struct btrfs_path *path; 708 struct extent_buffer *leaf; 709 struct btrfs_key key; 710 u64 total_found = 0; 711 u64 last = 0; 712 u32 nritems; 713 int ret; 714 bool wakeup = true; 715 716 path = btrfs_alloc_path(); 717 if (!path) 718 return -ENOMEM; 719 720 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); 721 extent_root = btrfs_extent_root(fs_info, last); 722 723 #ifdef CONFIG_BTRFS_DEBUG 724 /* 725 * If we're fragmenting we don't want to make anybody think we can 726 * allocate from this block group until we've had a chance to fragment 727 * the free space. 
728 */ 729 if (btrfs_should_fragment_free_space(block_group)) 730 wakeup = false; 731 #endif 732 /* 733 * We don't want to deadlock with somebody trying to allocate a new 734 * extent for the extent root while also trying to search the extent 735 * root to add free space. So we skip locking and search the commit 736 * root, since its read-only 737 */ 738 path->skip_locking = 1; 739 path->search_commit_root = 1; 740 path->reada = READA_FORWARD; 741 742 key.objectid = last; 743 key.offset = 0; 744 key.type = BTRFS_EXTENT_ITEM_KEY; 745 746 next: 747 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 748 if (ret < 0) 749 goto out; 750 751 leaf = path->nodes[0]; 752 nritems = btrfs_header_nritems(leaf); 753 754 while (1) { 755 if (btrfs_fs_closing(fs_info) > 1) { 756 last = (u64)-1; 757 break; 758 } 759 760 if (path->slots[0] < nritems) { 761 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 762 } else { 763 ret = btrfs_find_next_key(extent_root, path, &key, 0, 0); 764 if (ret) 765 break; 766 767 if (need_resched() || 768 rwsem_is_contended(&fs_info->commit_root_sem)) { 769 btrfs_release_path(path); 770 up_read(&fs_info->commit_root_sem); 771 mutex_unlock(&caching_ctl->mutex); 772 cond_resched(); 773 mutex_lock(&caching_ctl->mutex); 774 down_read(&fs_info->commit_root_sem); 775 goto next; 776 } 777 778 ret = btrfs_next_leaf(extent_root, path); 779 if (ret < 0) 780 goto out; 781 if (ret) 782 break; 783 leaf = path->nodes[0]; 784 nritems = btrfs_header_nritems(leaf); 785 continue; 786 } 787 788 if (key.objectid < last) { 789 key.objectid = last; 790 key.offset = 0; 791 key.type = BTRFS_EXTENT_ITEM_KEY; 792 btrfs_release_path(path); 793 goto next; 794 } 795 796 if (key.objectid < block_group->start) { 797 path->slots[0]++; 798 continue; 799 } 800 801 if (key.objectid >= block_group->start + block_group->length) 802 break; 803 804 if (key.type == BTRFS_EXTENT_ITEM_KEY || 805 key.type == BTRFS_METADATA_ITEM_KEY) { 806 u64 space_added; 807 808 ret = btrfs_add_new_free_space(block_group, last, 809 key.objectid, &space_added); 810 if (ret) 811 goto out; 812 total_found += space_added; 813 if (key.type == BTRFS_METADATA_ITEM_KEY) 814 last = key.objectid + 815 fs_info->nodesize; 816 else 817 last = key.objectid + key.offset; 818 819 if (total_found > CACHING_CTL_WAKE_UP) { 820 total_found = 0; 821 if (wakeup) { 822 atomic_inc(&caching_ctl->progress); 823 wake_up(&caching_ctl->wait); 824 } 825 } 826 } 827 path->slots[0]++; 828 } 829 830 ret = btrfs_add_new_free_space(block_group, last, 831 block_group->start + block_group->length, 832 NULL); 833 out: 834 btrfs_free_path(path); 835 return ret; 836 } 837 838 static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg) 839 { 840 clear_extent_bits(&bg->fs_info->excluded_extents, bg->start, 841 bg->start + bg->length - 1, EXTENT_UPTODATE); 842 } 843 844 static noinline void caching_thread(struct btrfs_work *work) 845 { 846 struct btrfs_block_group *block_group; 847 struct btrfs_fs_info *fs_info; 848 struct btrfs_caching_control *caching_ctl; 849 int ret; 850 851 caching_ctl = container_of(work, struct btrfs_caching_control, work); 852 block_group = caching_ctl->block_group; 853 fs_info = block_group->fs_info; 854 855 mutex_lock(&caching_ctl->mutex); 856 down_read(&fs_info->commit_root_sem); 857 858 load_block_group_size_class(caching_ctl, block_group); 859 if (btrfs_test_opt(fs_info, SPACE_CACHE)) { 860 ret = load_free_space_cache(block_group); 861 if (ret == 1) { 862 ret = 0; 863 goto done; 864 } 865 866 /* 867 * We failed to 
load the space cache, set ourselves to 868 * CACHE_STARTED and carry on. 869 */ 870 spin_lock(&block_group->lock); 871 block_group->cached = BTRFS_CACHE_STARTED; 872 spin_unlock(&block_group->lock); 873 wake_up(&caching_ctl->wait); 874 } 875 876 /* 877 * If we are in the transaction that populated the free space tree we 878 * can't actually cache from the free space tree as our commit root and 879 * real root are the same, so we could change the contents of the blocks 880 * while caching. Instead do the slow caching in this case, and after 881 * the transaction has committed we will be safe. 882 */ 883 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && 884 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) 885 ret = load_free_space_tree(caching_ctl); 886 else 887 ret = load_extent_tree_free(caching_ctl); 888 done: 889 spin_lock(&block_group->lock); 890 block_group->caching_ctl = NULL; 891 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; 892 spin_unlock(&block_group->lock); 893 894 #ifdef CONFIG_BTRFS_DEBUG 895 if (btrfs_should_fragment_free_space(block_group)) { 896 u64 bytes_used; 897 898 spin_lock(&block_group->space_info->lock); 899 spin_lock(&block_group->lock); 900 bytes_used = block_group->length - block_group->used; 901 block_group->space_info->bytes_used += bytes_used >> 1; 902 spin_unlock(&block_group->lock); 903 spin_unlock(&block_group->space_info->lock); 904 fragment_free_space(block_group); 905 } 906 #endif 907 908 up_read(&fs_info->commit_root_sem); 909 btrfs_free_excluded_extents(block_group); 910 mutex_unlock(&caching_ctl->mutex); 911 912 wake_up(&caching_ctl->wait); 913 914 btrfs_put_caching_control(caching_ctl); 915 btrfs_put_block_group(block_group); 916 } 917 918 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) 919 { 920 struct btrfs_fs_info *fs_info = cache->fs_info; 921 struct btrfs_caching_control *caching_ctl = NULL; 922 int ret = 0; 923 924 /* Allocator for zoned filesystems does not use the cache at all */ 925 if (btrfs_is_zoned(fs_info)) 926 return 0; 927 928 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); 929 if (!caching_ctl) 930 return -ENOMEM; 931 932 INIT_LIST_HEAD(&caching_ctl->list); 933 mutex_init(&caching_ctl->mutex); 934 init_waitqueue_head(&caching_ctl->wait); 935 caching_ctl->block_group = cache; 936 refcount_set(&caching_ctl->count, 2); 937 atomic_set(&caching_ctl->progress, 0); 938 btrfs_init_work(&caching_ctl->work, caching_thread, NULL); 939 940 spin_lock(&cache->lock); 941 if (cache->cached != BTRFS_CACHE_NO) { 942 kfree(caching_ctl); 943 944 caching_ctl = cache->caching_ctl; 945 if (caching_ctl) 946 refcount_inc(&caching_ctl->count); 947 spin_unlock(&cache->lock); 948 goto out; 949 } 950 WARN_ON(cache->caching_ctl); 951 cache->caching_ctl = caching_ctl; 952 cache->cached = BTRFS_CACHE_STARTED; 953 spin_unlock(&cache->lock); 954 955 write_lock(&fs_info->block_group_cache_lock); 956 refcount_inc(&caching_ctl->count); 957 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); 958 write_unlock(&fs_info->block_group_cache_lock); 959 960 btrfs_get_block_group(cache); 961 962 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); 963 out: 964 if (wait && caching_ctl) 965 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); 966 if (caching_ctl) 967 btrfs_put_caching_control(caching_ctl); 968 969 return ret; 970 } 971 972 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 973 { 974 u64 extra_flags = chunk_to_extended(flags) & 975 
					BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = btrfs_block_group_root(fs_info);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_map;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, map->start);
	if (!block_group)
		return -ENOENT;

	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;

	write_lock(&fs_info->block_group_cache_lock);
	rb_erase_cached(&block_group->cache_node,
			&fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	write_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);

	write_lock(&fs_info->block_group_cache_lock);
	caching_ctl = btrfs_get_caching_control(block_group);
	if (!caching_ctl) {
		struct btrfs_caching_control *ctl;

		list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list) {
			if (ctl->block_group == block_group) {
				caching_ctl = ctl;
				refcount_inc(&caching_ctl->count);
				break;
			}
		}
	}
	if (caching_ctl)
		list_del_init(&caching_ctl->list);
	write_unlock(&fs_info->block_group_cache_lock);

	if (caching_ctl) {
		/* Once for the caching bgs list and once for us. */
		btrfs_put_caching_control(caching_ctl);
		btrfs_put_caching_control(caching_ctl);
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length - block_group->zone_unusable);
		WARN_ON(block_group->space_info->bytes_zone_unusable
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
	block_group->space_info->bytes_zone_unusable -=
		block_group->zone_unusable;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the chunk map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_map = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_map)
		btrfs_remove_chunk_map(fs_info, map);

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
	btrfs_free_path(path);
	return ret;
}
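/*
 * Illustrative note (not part of the original file): a caller that wants to
 * delete a chunk reserves the transaction units first and only then does the
 * removal, roughly the way btrfs_delete_unused_bgs() below does it:
 *
 *	trans = btrfs_start_trans_remove_block_group(fs_info, bg->start);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_remove_chunk(trans, bg->start);
 *	btrfs_end_transaction(trans);
 *
 * The chunk removal ends up in btrfs_remove_block_group() above, which is
 * why the reservation described below accounts for both steps.
 */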
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	struct btrfs_chunk_map *map;
	unsigned int num_items;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(map != NULL);
	ASSERT(map->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	num_items = 3 + map->num_stripes;
	btrfs_free_chunk_map(map);

	return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}

/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
1336 */ 1337 static int inc_block_group_ro(struct btrfs_block_group *cache, int force) 1338 { 1339 struct btrfs_space_info *sinfo = cache->space_info; 1340 u64 num_bytes; 1341 int ret = -ENOSPC; 1342 1343 spin_lock(&sinfo->lock); 1344 spin_lock(&cache->lock); 1345 1346 if (cache->swap_extents) { 1347 ret = -ETXTBSY; 1348 goto out; 1349 } 1350 1351 if (cache->ro) { 1352 cache->ro++; 1353 ret = 0; 1354 goto out; 1355 } 1356 1357 num_bytes = cache->length - cache->reserved - cache->pinned - 1358 cache->bytes_super - cache->zone_unusable - cache->used; 1359 1360 /* 1361 * Data never overcommits, even in mixed mode, so do just the straight 1362 * check of left over space in how much we have allocated. 1363 */ 1364 if (force) { 1365 ret = 0; 1366 } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { 1367 u64 sinfo_used = btrfs_space_info_used(sinfo, true); 1368 1369 /* 1370 * Here we make sure if we mark this bg RO, we still have enough 1371 * free space as buffer. 1372 */ 1373 if (sinfo_used + num_bytes <= sinfo->total_bytes) 1374 ret = 0; 1375 } else { 1376 /* 1377 * We overcommit metadata, so we need to do the 1378 * btrfs_can_overcommit check here, and we need to pass in 1379 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of 1380 * leeway to allow us to mark this block group as read only. 1381 */ 1382 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, 1383 BTRFS_RESERVE_NO_FLUSH)) 1384 ret = 0; 1385 } 1386 1387 if (!ret) { 1388 sinfo->bytes_readonly += num_bytes; 1389 if (btrfs_is_zoned(cache->fs_info)) { 1390 /* Migrate zone_unusable bytes to readonly */ 1391 sinfo->bytes_readonly += cache->zone_unusable; 1392 sinfo->bytes_zone_unusable -= cache->zone_unusable; 1393 cache->zone_unusable = 0; 1394 } 1395 cache->ro++; 1396 list_add_tail(&cache->ro_list, &sinfo->ro_bgs); 1397 } 1398 out: 1399 spin_unlock(&cache->lock); 1400 spin_unlock(&sinfo->lock); 1401 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { 1402 btrfs_info(cache->fs_info, 1403 "unable to make block group %llu ro", cache->start); 1404 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); 1405 } 1406 return ret; 1407 } 1408 1409 static bool clean_pinned_extents(struct btrfs_trans_handle *trans, 1410 struct btrfs_block_group *bg) 1411 { 1412 struct btrfs_fs_info *fs_info = bg->fs_info; 1413 struct btrfs_transaction *prev_trans = NULL; 1414 const u64 start = bg->start; 1415 const u64 end = start + bg->length - 1; 1416 int ret; 1417 1418 spin_lock(&fs_info->trans_lock); 1419 if (trans->transaction->list.prev != &fs_info->trans_list) { 1420 prev_trans = list_last_entry(&trans->transaction->list, 1421 struct btrfs_transaction, list); 1422 refcount_inc(&prev_trans->use_count); 1423 } 1424 spin_unlock(&fs_info->trans_lock); 1425 1426 /* 1427 * Hold the unused_bg_unpin_mutex lock to avoid racing with 1428 * btrfs_finish_extent_commit(). If we are at transaction N, another 1429 * task might be running finish_extent_commit() for the previous 1430 * transaction N - 1, and have seen a range belonging to the block 1431 * group in pinned_extents before we were able to clear the whole block 1432 * group range from pinned_extents. This means that task can lookup for 1433 * the block group after we unpinned it from pinned_extents and removed 1434 * it, leading to an error at unpin_extent_range(). 
1435 */ 1436 mutex_lock(&fs_info->unused_bg_unpin_mutex); 1437 if (prev_trans) { 1438 ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, 1439 EXTENT_DIRTY); 1440 if (ret) 1441 goto out; 1442 } 1443 1444 ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, 1445 EXTENT_DIRTY); 1446 out: 1447 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1448 if (prev_trans) 1449 btrfs_put_transaction(prev_trans); 1450 1451 return ret == 0; 1452 } 1453 1454 /* 1455 * Process the unused_bgs list and remove any that don't have any allocated 1456 * space inside of them. 1457 */ 1458 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 1459 { 1460 LIST_HEAD(retry_list); 1461 struct btrfs_block_group *block_group; 1462 struct btrfs_space_info *space_info; 1463 struct btrfs_trans_handle *trans; 1464 const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); 1465 int ret = 0; 1466 1467 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1468 return; 1469 1470 if (btrfs_fs_closing(fs_info)) 1471 return; 1472 1473 /* 1474 * Long running balances can keep us blocked here for eternity, so 1475 * simply skip deletion if we're unable to get the mutex. 1476 */ 1477 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1478 return; 1479 1480 spin_lock(&fs_info->unused_bgs_lock); 1481 while (!list_empty(&fs_info->unused_bgs)) { 1482 u64 used; 1483 int trimming; 1484 1485 block_group = list_first_entry(&fs_info->unused_bgs, 1486 struct btrfs_block_group, 1487 bg_list); 1488 list_del_init(&block_group->bg_list); 1489 1490 space_info = block_group->space_info; 1491 1492 if (ret || btrfs_mixed_space_info(space_info)) { 1493 btrfs_put_block_group(block_group); 1494 continue; 1495 } 1496 spin_unlock(&fs_info->unused_bgs_lock); 1497 1498 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); 1499 1500 /* Don't want to race with allocators so take the groups_sem */ 1501 down_write(&space_info->groups_sem); 1502 1503 /* 1504 * Async discard moves the final block group discard to be prior 1505 * to the unused_bgs code path. Therefore, if it's not fully 1506 * trimmed, punt it back to the async discard lists. 1507 */ 1508 if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && 1509 !btrfs_is_free_space_trimmed(block_group)) { 1510 trace_btrfs_skip_unused_block_group(block_group); 1511 up_write(&space_info->groups_sem); 1512 /* Requeue if we failed because of async discard */ 1513 btrfs_discard_queue_work(&fs_info->discard_ctl, 1514 block_group); 1515 goto next; 1516 } 1517 1518 spin_lock(&space_info->lock); 1519 spin_lock(&block_group->lock); 1520 if (btrfs_is_block_group_used(block_group) || block_group->ro || 1521 list_is_singular(&block_group->list)) { 1522 /* 1523 * We want to bail if we made new allocations or have 1524 * outstanding allocations in this block group. We do 1525 * the ro check in case balance is currently acting on 1526 * this block group. 1527 * 1528 * Also bail out if this is the only block group for its 1529 * type, because otherwise we would lose profile 1530 * information from fs_info->avail_*_alloc_bits and the 1531 * next block group of this type would be created with a 1532 * "single" profile (even if we're in a raid fs) because 1533 * fs_info->avail_*_alloc_bits would be 0. 
1534 */ 1535 trace_btrfs_skip_unused_block_group(block_group); 1536 spin_unlock(&block_group->lock); 1537 spin_unlock(&space_info->lock); 1538 up_write(&space_info->groups_sem); 1539 goto next; 1540 } 1541 1542 /* 1543 * The block group may be unused but there may be space reserved 1544 * accounting with the existence of that block group, that is, 1545 * space_info->bytes_may_use was incremented by a task but no 1546 * space was yet allocated from the block group by the task. 1547 * That space may or may not be allocated, as we are generally 1548 * pessimistic about space reservation for metadata as well as 1549 * for data when using compression (as we reserve space based on 1550 * the worst case, when data can't be compressed, and before 1551 * actually attempting compression, before starting writeback). 1552 * 1553 * So check if the total space of the space_info minus the size 1554 * of this block group is less than the used space of the 1555 * space_info - if that's the case, then it means we have tasks 1556 * that might be relying on the block group in order to allocate 1557 * extents, and add back the block group to the unused list when 1558 * we finish, so that we retry later in case no tasks ended up 1559 * needing to allocate extents from the block group. 1560 */ 1561 used = btrfs_space_info_used(space_info, true); 1562 if (space_info->total_bytes - block_group->length < used) { 1563 /* 1564 * Add a reference for the list, compensate for the ref 1565 * drop under the "next" label for the 1566 * fs_info->unused_bgs list. 1567 */ 1568 btrfs_get_block_group(block_group); 1569 list_add_tail(&block_group->bg_list, &retry_list); 1570 1571 trace_btrfs_skip_unused_block_group(block_group); 1572 spin_unlock(&block_group->lock); 1573 spin_unlock(&space_info->lock); 1574 up_write(&space_info->groups_sem); 1575 goto next; 1576 } 1577 1578 spin_unlock(&block_group->lock); 1579 spin_unlock(&space_info->lock); 1580 1581 /* We don't want to force the issue, only flip if it's ok. */ 1582 ret = inc_block_group_ro(block_group, 0); 1583 up_write(&space_info->groups_sem); 1584 if (ret < 0) { 1585 ret = 0; 1586 goto next; 1587 } 1588 1589 ret = btrfs_zone_finish(block_group); 1590 if (ret < 0) { 1591 btrfs_dec_block_group_ro(block_group); 1592 if (ret == -EAGAIN) 1593 ret = 0; 1594 goto next; 1595 } 1596 1597 /* 1598 * Want to do this before we do anything else so we can recover 1599 * properly if we fail to join the transaction. 1600 */ 1601 trans = btrfs_start_trans_remove_block_group(fs_info, 1602 block_group->start); 1603 if (IS_ERR(trans)) { 1604 btrfs_dec_block_group_ro(block_group); 1605 ret = PTR_ERR(trans); 1606 goto next; 1607 } 1608 1609 /* 1610 * We could have pending pinned extents for this block group, 1611 * just delete them, we don't care about them anymore. 1612 */ 1613 if (!clean_pinned_extents(trans, block_group)) { 1614 btrfs_dec_block_group_ro(block_group); 1615 goto end_trans; 1616 } 1617 1618 /* 1619 * At this point, the block_group is read only and should fail 1620 * new allocations. However, btrfs_finish_extent_commit() can 1621 * cause this block_group to be placed back on the discard 1622 * lists because now the block_group isn't fully discarded. 1623 * Bail here and try again later after discarding everything. 
1624 */ 1625 spin_lock(&fs_info->discard_ctl.lock); 1626 if (!list_empty(&block_group->discard_list)) { 1627 spin_unlock(&fs_info->discard_ctl.lock); 1628 btrfs_dec_block_group_ro(block_group); 1629 btrfs_discard_queue_work(&fs_info->discard_ctl, 1630 block_group); 1631 goto end_trans; 1632 } 1633 spin_unlock(&fs_info->discard_ctl.lock); 1634 1635 /* Reset pinned so btrfs_put_block_group doesn't complain */ 1636 spin_lock(&space_info->lock); 1637 spin_lock(&block_group->lock); 1638 1639 btrfs_space_info_update_bytes_pinned(fs_info, space_info, 1640 -block_group->pinned); 1641 space_info->bytes_readonly += block_group->pinned; 1642 block_group->pinned = 0; 1643 1644 spin_unlock(&block_group->lock); 1645 spin_unlock(&space_info->lock); 1646 1647 /* 1648 * The normal path here is an unused block group is passed here, 1649 * then trimming is handled in the transaction commit path. 1650 * Async discard interposes before this to do the trimming 1651 * before coming down the unused block group path as trimming 1652 * will no longer be done later in the transaction commit path. 1653 */ 1654 if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1655 goto flip_async; 1656 1657 /* 1658 * DISCARD can flip during remount. On zoned filesystems, we 1659 * need to reset sequential-required zones. 1660 */ 1661 trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) || 1662 btrfs_is_zoned(fs_info); 1663 1664 /* Implicit trim during transaction commit. */ 1665 if (trimming) 1666 btrfs_freeze_block_group(block_group); 1667 1668 /* 1669 * Btrfs_remove_chunk will abort the transaction if things go 1670 * horribly wrong. 1671 */ 1672 ret = btrfs_remove_chunk(trans, block_group->start); 1673 1674 if (ret) { 1675 if (trimming) 1676 btrfs_unfreeze_block_group(block_group); 1677 goto end_trans; 1678 } 1679 1680 /* 1681 * If we're not mounted with -odiscard, we can just forget 1682 * about this block group. Otherwise we'll need to wait 1683 * until transaction commit to do the actual discard. 1684 */ 1685 if (trimming) { 1686 spin_lock(&fs_info->unused_bgs_lock); 1687 /* 1688 * A concurrent scrub might have added us to the list 1689 * fs_info->unused_bgs, so use a list_move operation 1690 * to add the block group to the deleted_bgs list. 
1691 */ 1692 list_move(&block_group->bg_list, 1693 &trans->transaction->deleted_bgs); 1694 spin_unlock(&fs_info->unused_bgs_lock); 1695 btrfs_get_block_group(block_group); 1696 } 1697 end_trans: 1698 btrfs_end_transaction(trans); 1699 next: 1700 btrfs_put_block_group(block_group); 1701 spin_lock(&fs_info->unused_bgs_lock); 1702 } 1703 list_splice_tail(&retry_list, &fs_info->unused_bgs); 1704 spin_unlock(&fs_info->unused_bgs_lock); 1705 mutex_unlock(&fs_info->reclaim_bgs_lock); 1706 return; 1707 1708 flip_async: 1709 btrfs_end_transaction(trans); 1710 spin_lock(&fs_info->unused_bgs_lock); 1711 list_splice_tail(&retry_list, &fs_info->unused_bgs); 1712 spin_unlock(&fs_info->unused_bgs_lock); 1713 mutex_unlock(&fs_info->reclaim_bgs_lock); 1714 btrfs_put_block_group(block_group); 1715 btrfs_discard_punt_unused_bgs_list(fs_info); 1716 } 1717 1718 void btrfs_mark_bg_unused(struct btrfs_block_group *bg) 1719 { 1720 struct btrfs_fs_info *fs_info = bg->fs_info; 1721 1722 spin_lock(&fs_info->unused_bgs_lock); 1723 if (list_empty(&bg->bg_list)) { 1724 btrfs_get_block_group(bg); 1725 trace_btrfs_add_unused_block_group(bg); 1726 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); 1727 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { 1728 /* Pull out the block group from the reclaim_bgs list. */ 1729 trace_btrfs_add_unused_block_group(bg); 1730 list_move_tail(&bg->bg_list, &fs_info->unused_bgs); 1731 } 1732 spin_unlock(&fs_info->unused_bgs_lock); 1733 } 1734 1735 /* 1736 * We want block groups with a low number of used bytes to be in the beginning 1737 * of the list, so they will get reclaimed first. 1738 */ 1739 static int reclaim_bgs_cmp(void *unused, const struct list_head *a, 1740 const struct list_head *b) 1741 { 1742 const struct btrfs_block_group *bg1, *bg2; 1743 1744 bg1 = list_entry(a, struct btrfs_block_group, bg_list); 1745 bg2 = list_entry(b, struct btrfs_block_group, bg_list); 1746 1747 return bg1->used > bg2->used; 1748 } 1749 1750 static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info) 1751 { 1752 if (btrfs_is_zoned(fs_info)) 1753 return btrfs_zoned_should_reclaim(fs_info); 1754 return true; 1755 } 1756 1757 static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed) 1758 { 1759 const struct btrfs_space_info *space_info = bg->space_info; 1760 const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold); 1761 const u64 new_val = bg->used; 1762 const u64 old_val = new_val + bytes_freed; 1763 u64 thresh; 1764 1765 if (reclaim_thresh == 0) 1766 return false; 1767 1768 thresh = mult_perc(bg->length, reclaim_thresh); 1769 1770 /* 1771 * If we were below the threshold before don't reclaim, we are likely a 1772 * brand new block group and we don't want to relocate new block groups. 
1773 */ 1774 if (old_val < thresh) 1775 return false; 1776 if (new_val >= thresh) 1777 return false; 1778 return true; 1779 } 1780 1781 void btrfs_reclaim_bgs_work(struct work_struct *work) 1782 { 1783 struct btrfs_fs_info *fs_info = 1784 container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 1785 struct btrfs_block_group *bg; 1786 struct btrfs_space_info *space_info; 1787 1788 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1789 return; 1790 1791 if (btrfs_fs_closing(fs_info)) 1792 return; 1793 1794 if (!btrfs_should_reclaim(fs_info)) 1795 return; 1796 1797 sb_start_write(fs_info->sb); 1798 1799 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 1800 sb_end_write(fs_info->sb); 1801 return; 1802 } 1803 1804 /* 1805 * Long running balances can keep us blocked here for eternity, so 1806 * simply skip reclaim if we're unable to get the mutex. 1807 */ 1808 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { 1809 btrfs_exclop_finish(fs_info); 1810 sb_end_write(fs_info->sb); 1811 return; 1812 } 1813 1814 spin_lock(&fs_info->unused_bgs_lock); 1815 /* 1816 * Sort happens under lock because we can't simply splice it and sort. 1817 * The block groups might still be in use and reachable via bg_list, 1818 * and their presence in the reclaim_bgs list must be preserved. 1819 */ 1820 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); 1821 while (!list_empty(&fs_info->reclaim_bgs)) { 1822 u64 zone_unusable; 1823 int ret = 0; 1824 1825 bg = list_first_entry(&fs_info->reclaim_bgs, 1826 struct btrfs_block_group, 1827 bg_list); 1828 list_del_init(&bg->bg_list); 1829 1830 space_info = bg->space_info; 1831 spin_unlock(&fs_info->unused_bgs_lock); 1832 1833 /* Don't race with allocators so take the groups_sem */ 1834 down_write(&space_info->groups_sem); 1835 1836 spin_lock(&bg->lock); 1837 if (bg->reserved || bg->pinned || bg->ro) { 1838 /* 1839 * We want to bail if we made new allocations or have 1840 * outstanding allocations in this block group. We do 1841 * the ro check in case balance is currently acting on 1842 * this block group. 1843 */ 1844 spin_unlock(&bg->lock); 1845 up_write(&space_info->groups_sem); 1846 goto next; 1847 } 1848 if (bg->used == 0) { 1849 /* 1850 * It is possible that we trigger relocation on a block 1851 * group as its extents are deleted and it first goes 1852 * below the threshold, then shortly after goes empty. 1853 * 1854 * In this case, relocating it does delete it, but has 1855 * some overhead in relocation specific metadata, looking 1856 * for the non-existent extents and running some extra 1857 * transactions, which we can avoid by using one of the 1858 * other mechanisms for dealing with empty block groups. 1859 */ 1860 if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1861 btrfs_mark_bg_unused(bg); 1862 spin_unlock(&bg->lock); 1863 up_write(&space_info->groups_sem); 1864 goto next; 1865 1866 } 1867 /* 1868 * The block group might no longer meet the reclaim condition by 1869 * the time we get around to reclaiming it, so to avoid 1870 * reclaiming overly full block_groups, skip reclaiming them. 1871 * 1872 * Since the decision making process also depends on the amount 1873 * being freed, pass in a fake giant value to skip that extra 1874 * check, which is more meaningful when adding to the list in 1875 * the first place. 
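 * (Passing bg->length as the amount freed makes old_val at least
 * bg->length, which can never be below the threshold for any threshold up
 * to 100%, so only the current-usage check in should_reclaim_block_group()
 * applies here.)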
1876 */ 1877 if (!should_reclaim_block_group(bg, bg->length)) { 1878 spin_unlock(&bg->lock); 1879 up_write(&space_info->groups_sem); 1880 goto next; 1881 } 1882 spin_unlock(&bg->lock); 1883 1884 /* 1885 * Get out fast, in case we're read-only or unmounting the 1886 * filesystem. It is OK to drop block groups from the list even 1887 * for the read-only case. As we did sb_start_write(), 1888 * "mount -o remount,ro" won't happen and read-only filesystem 1889 * means it is forced read-only due to a fatal error. So, it 1890 * never gets back to read-write to let us reclaim again. 1891 */ 1892 if (btrfs_need_cleaner_sleep(fs_info)) { 1893 up_write(&space_info->groups_sem); 1894 goto next; 1895 } 1896 1897 /* 1898 * Cache the zone_unusable value before turning the block group 1899 * to read only. As soon as the block group is read only its 1900 * zone_unusable value gets moved to the block group's read-only 1901 * bytes and isn't available for calculations anymore. 1902 */ 1903 zone_unusable = bg->zone_unusable; 1904 ret = inc_block_group_ro(bg, 0); 1905 up_write(&space_info->groups_sem); 1906 if (ret < 0) 1907 goto next; 1908 1909 btrfs_info(fs_info, 1910 "reclaiming chunk %llu with %llu%% used %llu%% unusable", 1911 bg->start, 1912 div64_u64(bg->used * 100, bg->length), 1913 div64_u64(zone_unusable * 100, bg->length)); 1914 trace_btrfs_reclaim_block_group(bg); 1915 ret = btrfs_relocate_chunk(fs_info, bg->start); 1916 if (ret) { 1917 btrfs_dec_block_group_ro(bg); 1918 btrfs_err(fs_info, "error relocating chunk %llu", 1919 bg->start); 1920 } 1921 1922 next: 1923 if (ret) 1924 btrfs_mark_bg_to_reclaim(bg); 1925 btrfs_put_block_group(bg); 1926 1927 mutex_unlock(&fs_info->reclaim_bgs_lock); 1928 /* 1929 * Reclaiming all the block groups in the list can take really 1930 * long. Prioritize cleaning up unused block groups. 1931 */ 1932 btrfs_delete_unused_bgs(fs_info); 1933 /* 1934 * If we are interrupted by a balance, we can just bail out. The 1935 * cleaner thread will restart it again if necessary.
1936 */ 1937 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1938 goto end; 1939 spin_lock(&fs_info->unused_bgs_lock); 1940 } 1941 spin_unlock(&fs_info->unused_bgs_lock); 1942 mutex_unlock(&fs_info->reclaim_bgs_lock); 1943 end: 1944 btrfs_exclop_finish(fs_info); 1945 sb_end_write(fs_info->sb); 1946 } 1947 1948 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 1949 { 1950 spin_lock(&fs_info->unused_bgs_lock); 1951 if (!list_empty(&fs_info->reclaim_bgs)) 1952 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); 1953 spin_unlock(&fs_info->unused_bgs_lock); 1954 } 1955 1956 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 1957 { 1958 struct btrfs_fs_info *fs_info = bg->fs_info; 1959 1960 spin_lock(&fs_info->unused_bgs_lock); 1961 if (list_empty(&bg->bg_list)) { 1962 btrfs_get_block_group(bg); 1963 trace_btrfs_add_reclaim_block_group(bg); 1964 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); 1965 } 1966 spin_unlock(&fs_info->unused_bgs_lock); 1967 } 1968 1969 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 1970 struct btrfs_path *path) 1971 { 1972 struct btrfs_chunk_map *map; 1973 struct btrfs_block_group_item bg; 1974 struct extent_buffer *leaf; 1975 int slot; 1976 u64 flags; 1977 int ret = 0; 1978 1979 slot = path->slots[0]; 1980 leaf = path->nodes[0]; 1981 1982 map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset); 1983 if (!map) { 1984 btrfs_err(fs_info, 1985 "logical %llu len %llu found bg but no related chunk", 1986 key->objectid, key->offset); 1987 return -ENOENT; 1988 } 1989 1990 if (map->start != key->objectid || map->chunk_len != key->offset) { 1991 btrfs_err(fs_info, 1992 "block group %llu len %llu mismatch with chunk %llu len %llu", 1993 key->objectid, key->offset, map->start, map->chunk_len); 1994 ret = -EUCLEAN; 1995 goto out_free_map; 1996 } 1997 1998 read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 1999 sizeof(bg)); 2000 flags = btrfs_stack_block_group_flags(&bg) & 2001 BTRFS_BLOCK_GROUP_TYPE_MASK; 2002 2003 if (flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 2004 btrfs_err(fs_info, 2005 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 2006 key->objectid, key->offset, flags, 2007 (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type)); 2008 ret = -EUCLEAN; 2009 } 2010 2011 out_free_map: 2012 btrfs_free_chunk_map(map); 2013 return ret; 2014 } 2015 2016 static int find_first_block_group(struct btrfs_fs_info *fs_info, 2017 struct btrfs_path *path, 2018 struct btrfs_key *key) 2019 { 2020 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2021 int ret; 2022 struct btrfs_key found_key; 2023 2024 btrfs_for_each_slot(root, key, &found_key, path, ret) { 2025 if (found_key.objectid >= key->objectid && 2026 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 2027 return read_bg_from_eb(fs_info, &found_key, path); 2028 } 2029 } 2030 return ret; 2031 } 2032 2033 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 2034 { 2035 u64 extra_flags = chunk_to_extended(flags) & 2036 BTRFS_EXTENDED_PROFILE_MASK; 2037 2038 write_seqlock(&fs_info->profiles_lock); 2039 if (flags & BTRFS_BLOCK_GROUP_DATA) 2040 fs_info->avail_data_alloc_bits |= extra_flags; 2041 if (flags & BTRFS_BLOCK_GROUP_METADATA) 2042 fs_info->avail_metadata_alloc_bits |= extra_flags; 2043 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 2044 fs_info->avail_system_alloc_bits |= extra_flags; 2045 write_sequnlock(&fs_info->profiles_lock); 2046 } 2047 2048 /* 2049 * Map a physical disk address to a list of 
logical addresses. 2050 * 2051 * @fs_info: the filesystem 2052 * @chunk_start: logical address of block group 2053 * @physical: physical address to map to logical addresses 2054 * @logical: return array of logical addresses which map to @physical 2055 * @naddrs: length of @logical 2056 * @stripe_len: size of IO stripe for the given block group 2057 * 2058 * Maps a particular @physical disk address to a list of @logical addresses. 2059 * Used primarily to exclude those portions of a block group that contain super 2060 * block copies. 2061 */ 2062 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 2063 u64 physical, u64 **logical, int *naddrs, int *stripe_len) 2064 { 2065 struct btrfs_chunk_map *map; 2066 u64 *buf; 2067 u64 bytenr; 2068 u64 data_stripe_length; 2069 u64 io_stripe_size; 2070 int i, nr = 0; 2071 int ret = 0; 2072 2073 map = btrfs_get_chunk_map(fs_info, chunk_start, 1); 2074 if (IS_ERR(map)) 2075 return -EIO; 2076 2077 data_stripe_length = map->stripe_size; 2078 io_stripe_size = BTRFS_STRIPE_LEN; 2079 chunk_start = map->start; 2080 2081 /* For RAID5/6 adjust to a full IO stripe length */ 2082 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2083 io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 2084 2085 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 2086 if (!buf) { 2087 ret = -ENOMEM; 2088 goto out; 2089 } 2090 2091 for (i = 0; i < map->num_stripes; i++) { 2092 bool already_inserted = false; 2093 u32 stripe_nr; 2094 u32 offset; 2095 int j; 2096 2097 if (!in_range(physical, map->stripes[i].physical, 2098 data_stripe_length)) 2099 continue; 2100 2101 stripe_nr = (physical - map->stripes[i].physical) >> 2102 BTRFS_STRIPE_LEN_SHIFT; 2103 offset = (physical - map->stripes[i].physical) & 2104 BTRFS_STRIPE_LEN_MASK; 2105 2106 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2107 BTRFS_BLOCK_GROUP_RAID10)) 2108 stripe_nr = div_u64(stripe_nr * map->num_stripes + i, 2109 map->sub_stripes); 2110 /* 2111 * The remaining case would be for RAID56, multiply by 2112 * nr_data_stripes(). 
Alternatively, just use rmap_len below 2113 * instead of map->stripe_len 2114 */ 2115 bytenr = chunk_start + stripe_nr * io_stripe_size + offset; 2116 2117 /* Ensure we don't add duplicate addresses */ 2118 for (j = 0; j < nr; j++) { 2119 if (buf[j] == bytenr) { 2120 already_inserted = true; 2121 break; 2122 } 2123 } 2124 2125 if (!already_inserted) 2126 buf[nr++] = bytenr; 2127 } 2128 2129 *logical = buf; 2130 *naddrs = nr; 2131 *stripe_len = io_stripe_size; 2132 out: 2133 btrfs_free_chunk_map(map); 2134 return ret; 2135 } 2136 2137 static int exclude_super_stripes(struct btrfs_block_group *cache) 2138 { 2139 struct btrfs_fs_info *fs_info = cache->fs_info; 2140 const bool zoned = btrfs_is_zoned(fs_info); 2141 u64 bytenr; 2142 u64 *logical; 2143 int stripe_len; 2144 int i, nr, ret; 2145 2146 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 2147 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 2148 cache->bytes_super += stripe_len; 2149 ret = set_extent_bit(&fs_info->excluded_extents, cache->start, 2150 cache->start + stripe_len - 1, 2151 EXTENT_UPTODATE, NULL); 2152 if (ret) 2153 return ret; 2154 } 2155 2156 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2157 bytenr = btrfs_sb_offset(i); 2158 ret = btrfs_rmap_block(fs_info, cache->start, 2159 bytenr, &logical, &nr, &stripe_len); 2160 if (ret) 2161 return ret; 2162 2163 /* Shouldn't have super stripes in sequential zones */ 2164 if (zoned && nr) { 2165 kfree(logical); 2166 btrfs_err(fs_info, 2167 "zoned: block group %llu must not contain super block", 2168 cache->start); 2169 return -EUCLEAN; 2170 } 2171 2172 while (nr--) { 2173 u64 len = min_t(u64, stripe_len, 2174 cache->start + cache->length - logical[nr]); 2175 2176 cache->bytes_super += len; 2177 ret = set_extent_bit(&fs_info->excluded_extents, logical[nr], 2178 logical[nr] + len - 1, 2179 EXTENT_UPTODATE, NULL); 2180 if (ret) { 2181 kfree(logical); 2182 return ret; 2183 } 2184 } 2185 2186 kfree(logical); 2187 } 2188 return 0; 2189 } 2190 2191 static struct btrfs_block_group *btrfs_create_block_group_cache( 2192 struct btrfs_fs_info *fs_info, u64 start) 2193 { 2194 struct btrfs_block_group *cache; 2195 2196 cache = kzalloc(sizeof(*cache), GFP_NOFS); 2197 if (!cache) 2198 return NULL; 2199 2200 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 2201 GFP_NOFS); 2202 if (!cache->free_space_ctl) { 2203 kfree(cache); 2204 return NULL; 2205 } 2206 2207 cache->start = start; 2208 2209 cache->fs_info = fs_info; 2210 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 2211 2212 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 2213 2214 refcount_set(&cache->refs, 1); 2215 spin_lock_init(&cache->lock); 2216 init_rwsem(&cache->data_rwsem); 2217 INIT_LIST_HEAD(&cache->list); 2218 INIT_LIST_HEAD(&cache->cluster_list); 2219 INIT_LIST_HEAD(&cache->bg_list); 2220 INIT_LIST_HEAD(&cache->ro_list); 2221 INIT_LIST_HEAD(&cache->discard_list); 2222 INIT_LIST_HEAD(&cache->dirty_list); 2223 INIT_LIST_HEAD(&cache->io_list); 2224 INIT_LIST_HEAD(&cache->active_bg_list); 2225 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 2226 atomic_set(&cache->frozen, 0); 2227 mutex_init(&cache->free_space_lock); 2228 2229 return cache; 2230 } 2231 2232 /* 2233 * Iterate all chunks and verify that each of them has the corresponding block 2234 * group 2235 */ 2236 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 2237 { 2238 u64 start = 0; 2239 int ret = 0; 2240 2241 while (1) { 2242 struct btrfs_chunk_map *map; 2243 struct btrfs_block_group *bg; 2244 2245 /* 2246 * 
btrfs_find_chunk_map() will return the first chunk map 2247 * intersecting the range, so setting @length to 1 is enough to 2248 * get the first chunk. 2249 */ 2250 map = btrfs_find_chunk_map(fs_info, start, 1); 2251 if (!map) 2252 break; 2253 2254 bg = btrfs_lookup_block_group(fs_info, map->start); 2255 if (!bg) { 2256 btrfs_err(fs_info, 2257 "chunk start=%llu len=%llu doesn't have corresponding block group", 2258 map->start, map->chunk_len); 2259 ret = -EUCLEAN; 2260 btrfs_free_chunk_map(map); 2261 break; 2262 } 2263 if (bg->start != map->start || bg->length != map->chunk_len || 2264 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 2265 (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 2266 btrfs_err(fs_info, 2267 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 2268 map->start, map->chunk_len, 2269 map->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 2270 bg->start, bg->length, 2271 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 2272 ret = -EUCLEAN; 2273 btrfs_free_chunk_map(map); 2274 btrfs_put_block_group(bg); 2275 break; 2276 } 2277 start = map->start + map->chunk_len; 2278 btrfs_free_chunk_map(map); 2279 btrfs_put_block_group(bg); 2280 } 2281 return ret; 2282 } 2283 2284 static int read_one_block_group(struct btrfs_fs_info *info, 2285 struct btrfs_block_group_item *bgi, 2286 const struct btrfs_key *key, 2287 int need_clear) 2288 { 2289 struct btrfs_block_group *cache; 2290 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 2291 int ret; 2292 2293 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 2294 2295 cache = btrfs_create_block_group_cache(info, key->objectid); 2296 if (!cache) 2297 return -ENOMEM; 2298 2299 cache->length = key->offset; 2300 cache->used = btrfs_stack_block_group_used(bgi); 2301 cache->commit_used = cache->used; 2302 cache->flags = btrfs_stack_block_group_flags(bgi); 2303 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); 2304 2305 set_free_space_tree_thresholds(cache); 2306 2307 if (need_clear) { 2308 /* 2309 * When we mount with old space cache, we need to 2310 * set BTRFS_DC_CLEAR and set dirty flag. 2311 * 2312 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 2313 * truncate the old free space cache inode and 2314 * setup a new one. 2315 * b) Setting 'dirty flag' makes sure that we flush 2316 * the new space cache info onto disk. 2317 */ 2318 if (btrfs_test_opt(info, SPACE_CACHE)) 2319 cache->disk_cache_state = BTRFS_DC_CLEAR; 2320 } 2321 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 2322 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 2323 btrfs_err(info, 2324 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 2325 cache->start); 2326 ret = -EINVAL; 2327 goto error; 2328 } 2329 2330 ret = btrfs_load_block_group_zone_info(cache, false); 2331 if (ret) { 2332 btrfs_err(info, "zoned: failed to load zone info of bg %llu", 2333 cache->start); 2334 goto error; 2335 } 2336 2337 /* 2338 * We need to exclude the super stripes now so that the space info has 2339 * super bytes accounted for, otherwise we'll think we have more space 2340 * than we actually do. 2341 */ 2342 ret = exclude_super_stripes(cache); 2343 if (ret) { 2344 /* We may have excluded something, so call this just in case. */ 2345 btrfs_free_excluded_extents(cache); 2346 goto error; 2347 } 2348 2349 /* 2350 * For zoned filesystem, space after the allocation offset is the only 2351 * free space for a block group. So, we don't need any caching work. 
2352 * btrfs_calc_zone_unusable() will set the amount of free space and 2353 * zone_unusable space. 2354 * 2355 * For regular filesystem, check for two cases, either we are full, and 2356 * therefore don't need to bother with the caching work since we won't 2357 * find any space, or we are empty, and we can just add all the space 2358 * in and be done with it. This saves us _a_lot_ of time, particularly 2359 * in the full case. 2360 */ 2361 if (btrfs_is_zoned(info)) { 2362 btrfs_calc_zone_unusable(cache); 2363 /* Should not have any excluded extents. Just in case, though. */ 2364 btrfs_free_excluded_extents(cache); 2365 } else if (cache->length == cache->used) { 2366 cache->cached = BTRFS_CACHE_FINISHED; 2367 btrfs_free_excluded_extents(cache); 2368 } else if (cache->used == 0) { 2369 cache->cached = BTRFS_CACHE_FINISHED; 2370 ret = btrfs_add_new_free_space(cache, cache->start, 2371 cache->start + cache->length, NULL); 2372 btrfs_free_excluded_extents(cache); 2373 if (ret) 2374 goto error; 2375 } 2376 2377 ret = btrfs_add_block_group_cache(info, cache); 2378 if (ret) { 2379 btrfs_remove_free_space_cache(cache); 2380 goto error; 2381 } 2382 trace_btrfs_add_block_group(info, cache, 0); 2383 btrfs_add_bg_to_space_info(info, cache); 2384 2385 set_avail_alloc_bits(info, cache->flags); 2386 if (btrfs_chunk_writeable(info, cache->start)) { 2387 if (cache->used == 0) { 2388 ASSERT(list_empty(&cache->bg_list)); 2389 if (btrfs_test_opt(info, DISCARD_ASYNC)) 2390 btrfs_discard_queue_work(&info->discard_ctl, cache); 2391 else 2392 btrfs_mark_bg_unused(cache); 2393 } 2394 } else { 2395 inc_block_group_ro(cache, 1); 2396 } 2397 2398 return 0; 2399 error: 2400 btrfs_put_block_group(cache); 2401 return ret; 2402 } 2403 2404 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 2405 { 2406 struct rb_node *node; 2407 int ret = 0; 2408 2409 for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) { 2410 struct btrfs_chunk_map *map; 2411 struct btrfs_block_group *bg; 2412 2413 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 2414 bg = btrfs_create_block_group_cache(fs_info, map->start); 2415 if (!bg) { 2416 ret = -ENOMEM; 2417 break; 2418 } 2419 2420 /* Fill dummy cache as FULL */ 2421 bg->length = map->chunk_len; 2422 bg->flags = map->type; 2423 bg->cached = BTRFS_CACHE_FINISHED; 2424 bg->used = map->chunk_len; 2425 bg->flags = map->type; 2426 ret = btrfs_add_block_group_cache(fs_info, bg); 2427 /* 2428 * We may have some valid block group cache added already, in 2429 * that case we skip to the next one. 2430 */ 2431 if (ret == -EEXIST) { 2432 ret = 0; 2433 btrfs_put_block_group(bg); 2434 continue; 2435 } 2436 2437 if (ret) { 2438 btrfs_remove_free_space_cache(bg); 2439 btrfs_put_block_group(bg); 2440 break; 2441 } 2442 2443 btrfs_add_bg_to_space_info(fs_info, bg); 2444 2445 set_avail_alloc_bits(fs_info, bg->flags); 2446 } 2447 if (!ret) 2448 btrfs_init_global_block_rsv(fs_info); 2449 return ret; 2450 } 2451 2452 int btrfs_read_block_groups(struct btrfs_fs_info *info) 2453 { 2454 struct btrfs_root *root = btrfs_block_group_root(info); 2455 struct btrfs_path *path; 2456 int ret; 2457 struct btrfs_block_group *cache; 2458 struct btrfs_space_info *space_info; 2459 struct btrfs_key key; 2460 int need_clear = 0; 2461 u64 cache_gen; 2462 2463 /* 2464 * Either no extent root (with ibadroots rescue option) or we have 2465 * unsupported RO options. The fs can never be mounted read-write, so no 2466 * need to waste time searching block group items. 
2467 * 2468 * This also allows new extent tree related changes to be RO compat, 2469 * no need for a full incompat flag. 2470 */ 2471 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & 2472 ~BTRFS_FEATURE_COMPAT_RO_SUPP)) 2473 return fill_dummy_bgs(info); 2474 2475 key.objectid = 0; 2476 key.offset = 0; 2477 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2478 path = btrfs_alloc_path(); 2479 if (!path) 2480 return -ENOMEM; 2481 2482 cache_gen = btrfs_super_cache_generation(info->super_copy); 2483 if (btrfs_test_opt(info, SPACE_CACHE) && 2484 btrfs_super_generation(info->super_copy) != cache_gen) 2485 need_clear = 1; 2486 if (btrfs_test_opt(info, CLEAR_CACHE)) 2487 need_clear = 1; 2488 2489 while (1) { 2490 struct btrfs_block_group_item bgi; 2491 struct extent_buffer *leaf; 2492 int slot; 2493 2494 ret = find_first_block_group(info, path, &key); 2495 if (ret > 0) 2496 break; 2497 if (ret != 0) 2498 goto error; 2499 2500 leaf = path->nodes[0]; 2501 slot = path->slots[0]; 2502 2503 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 2504 sizeof(bgi)); 2505 2506 btrfs_item_key_to_cpu(leaf, &key, slot); 2507 btrfs_release_path(path); 2508 ret = read_one_block_group(info, &bgi, &key, need_clear); 2509 if (ret < 0) 2510 goto error; 2511 key.objectid += key.offset; 2512 key.offset = 0; 2513 } 2514 btrfs_release_path(path); 2515 2516 list_for_each_entry(space_info, &info->space_info, list) { 2517 int i; 2518 2519 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2520 if (list_empty(&space_info->block_groups[i])) 2521 continue; 2522 cache = list_first_entry(&space_info->block_groups[i], 2523 struct btrfs_block_group, 2524 list); 2525 btrfs_sysfs_add_block_group_type(cache); 2526 } 2527 2528 if (!(btrfs_get_alloc_profile(info, space_info->flags) & 2529 (BTRFS_BLOCK_GROUP_RAID10 | 2530 BTRFS_BLOCK_GROUP_RAID1_MASK | 2531 BTRFS_BLOCK_GROUP_RAID56_MASK | 2532 BTRFS_BLOCK_GROUP_DUP))) 2533 continue; 2534 /* 2535 * Avoid allocating from un-mirrored block group if there are 2536 * mirrored block groups. 2537 */ 2538 list_for_each_entry(cache, 2539 &space_info->block_groups[BTRFS_RAID_RAID0], 2540 list) 2541 inc_block_group_ro(cache, 1); 2542 list_for_each_entry(cache, 2543 &space_info->block_groups[BTRFS_RAID_SINGLE], 2544 list) 2545 inc_block_group_ro(cache, 1); 2546 } 2547 2548 btrfs_init_global_block_rsv(info); 2549 ret = check_chunk_block_group_mappings(info); 2550 error: 2551 btrfs_free_path(path); 2552 /* 2553 * We've hit some error while reading the extent tree, and have 2554 * rescue=ibadroots mount option. 2555 * Try to fill the tree using dummy block groups so that the user can 2556 * continue to mount and grab their data. 2557 */ 2558 if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) 2559 ret = fill_dummy_bgs(info); 2560 return ret; 2561 } 2562 2563 /* 2564 * This function, insert_block_group_item(), belongs to the phase 2 of chunk 2565 * allocation. 2566 * 2567 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2568 * phases. 
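 *
 * It serializes the in-memory counters into a struct btrfs_block_group_item
 * and inserts it keyed by (block group start, BLOCK_GROUP_ITEM, length),
 * remembering the old commit_used value so it can be reverted on failure.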
2569 */ 2570 static int insert_block_group_item(struct btrfs_trans_handle *trans, 2571 struct btrfs_block_group *block_group) 2572 { 2573 struct btrfs_fs_info *fs_info = trans->fs_info; 2574 struct btrfs_block_group_item bgi; 2575 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2576 struct btrfs_key key; 2577 u64 old_commit_used; 2578 int ret; 2579 2580 spin_lock(&block_group->lock); 2581 btrfs_set_stack_block_group_used(&bgi, block_group->used); 2582 btrfs_set_stack_block_group_chunk_objectid(&bgi, 2583 block_group->global_root_id); 2584 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 2585 old_commit_used = block_group->commit_used; 2586 block_group->commit_used = block_group->used; 2587 key.objectid = block_group->start; 2588 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2589 key.offset = block_group->length; 2590 spin_unlock(&block_group->lock); 2591 2592 ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 2593 if (ret < 0) { 2594 spin_lock(&block_group->lock); 2595 block_group->commit_used = old_commit_used; 2596 spin_unlock(&block_group->lock); 2597 } 2598 2599 return ret; 2600 } 2601 2602 static int insert_dev_extent(struct btrfs_trans_handle *trans, 2603 struct btrfs_device *device, u64 chunk_offset, 2604 u64 start, u64 num_bytes) 2605 { 2606 struct btrfs_fs_info *fs_info = device->fs_info; 2607 struct btrfs_root *root = fs_info->dev_root; 2608 struct btrfs_path *path; 2609 struct btrfs_dev_extent *extent; 2610 struct extent_buffer *leaf; 2611 struct btrfs_key key; 2612 int ret; 2613 2614 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 2615 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 2616 path = btrfs_alloc_path(); 2617 if (!path) 2618 return -ENOMEM; 2619 2620 key.objectid = device->devid; 2621 key.type = BTRFS_DEV_EXTENT_KEY; 2622 key.offset = start; 2623 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); 2624 if (ret) 2625 goto out; 2626 2627 leaf = path->nodes[0]; 2628 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); 2629 btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); 2630 btrfs_set_dev_extent_chunk_objectid(leaf, extent, 2631 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2632 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 2633 2634 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 2635 btrfs_mark_buffer_dirty(trans, leaf); 2636 out: 2637 btrfs_free_path(path); 2638 return ret; 2639 } 2640 2641 /* 2642 * This function belongs to phase 2. 2643 * 2644 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2645 * phases. 
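 *
 * For each stripe of the chunk map it records a dev extent item on the
 * corresponding device, tying that device's physical range back to the
 * chunk at @chunk_offset.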
2646 */ 2647 static int insert_dev_extents(struct btrfs_trans_handle *trans, 2648 u64 chunk_offset, u64 chunk_size) 2649 { 2650 struct btrfs_fs_info *fs_info = trans->fs_info; 2651 struct btrfs_device *device; 2652 struct btrfs_chunk_map *map; 2653 u64 dev_offset; 2654 int i; 2655 int ret = 0; 2656 2657 map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 2658 if (IS_ERR(map)) 2659 return PTR_ERR(map); 2660 2661 /* 2662 * Take the device list mutex to prevent races with the final phase of 2663 * a device replace operation that replaces the device object associated 2664 * with the map's stripes, because the device object's id can change 2665 * at any time during that final phase of the device replace operation 2666 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 2667 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 2668 * resulting in persisting a device extent item with such ID. 2669 */ 2670 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2671 for (i = 0; i < map->num_stripes; i++) { 2672 device = map->stripes[i].dev; 2673 dev_offset = map->stripes[i].physical; 2674 2675 ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, 2676 map->stripe_size); 2677 if (ret) 2678 break; 2679 } 2680 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2681 2682 btrfs_free_chunk_map(map); 2683 return ret; 2684 } 2685 2686 /* 2687 * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of 2688 * chunk allocation. 2689 * 2690 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2691 * phases. 2692 */ 2693 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 2694 { 2695 struct btrfs_fs_info *fs_info = trans->fs_info; 2696 struct btrfs_block_group *block_group; 2697 int ret = 0; 2698 2699 while (!list_empty(&trans->new_bgs)) { 2700 int index; 2701 2702 block_group = list_first_entry(&trans->new_bgs, 2703 struct btrfs_block_group, 2704 bg_list); 2705 if (ret) 2706 goto next; 2707 2708 index = btrfs_bg_flags_to_raid_index(block_group->flags); 2709 2710 ret = insert_block_group_item(trans, block_group); 2711 if (ret) 2712 btrfs_abort_transaction(trans, ret); 2713 if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, 2714 &block_group->runtime_flags)) { 2715 mutex_lock(&fs_info->chunk_mutex); 2716 ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); 2717 mutex_unlock(&fs_info->chunk_mutex); 2718 if (ret) 2719 btrfs_abort_transaction(trans, ret); 2720 } 2721 ret = insert_dev_extents(trans, block_group->start, 2722 block_group->length); 2723 if (ret) 2724 btrfs_abort_transaction(trans, ret); 2725 add_block_group_free_space(trans, block_group); 2726 2727 /* 2728 * If we restriped during balance, we may have added a new raid 2729 * type, so now add the sysfs entries when it is safe to do so. 2730 * We don't have to worry about locking here as it's handled in 2731 * btrfs_sysfs_add_block_group_type. 2732 */ 2733 if (block_group->space_info->block_group_kobjs[index] == NULL) 2734 btrfs_sysfs_add_block_group_type(block_group); 2735 2736 /* Already aborted the transaction if it failed. */ 2737 next: 2738 btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); 2739 list_del_init(&block_group->bg_list); 2740 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); 2741 2742 /* 2743 * If the block group is still unused, add it to the list of 2744 * unused block groups. 
The block group may have been created in 2745 * order to satisfy a space reservation, in which case the 2746 * extent allocation only happens later. But often we don't 2747 * actually need to allocate space that we previously reserved, 2748 * so the block group may become unused for a long time. For 2749 * example for metadata we generally reserve space for a worst 2750 * possible scenario, but then often end up allocating only part 2751 * of that space or none at all (due to no need to COW, extent buffers 2752 * were already COWed in the current transaction and still 2753 * unwritten, tree heights lower than the maximum possible 2754 * height, etc). For data we generally reserve the exact amount 2755 * of space we are going to allocate later, the exception is 2756 * when using compression, as we must reserve space based on the 2757 * uncompressed data size, because the compression is only done 2758 * when writeback is triggered and we don't know how much space we 2759 * are actually going to need, so we reserve the uncompressed 2760 * size because the data may be uncompressible in the worst case. 2761 */ 2762 if (ret == 0) { 2763 bool used; 2764 2765 spin_lock(&block_group->lock); 2766 used = btrfs_is_block_group_used(block_group); 2767 spin_unlock(&block_group->lock); 2768 2769 if (!used) 2770 btrfs_mark_bg_unused(block_group); 2771 } 2772 } 2773 btrfs_trans_release_chunk_metadata(trans); 2774 } 2775 2776 /* 2777 * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2778 * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 2779 */ 2780 static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) 2781 { 2782 u64 div = SZ_1G; 2783 u64 index; 2784 2785 if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) 2786 return BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2787 2788 /* If we have a smaller fs, index based on 128MiB. */ 2789 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) 2790 div = SZ_128M; 2791 2792 offset = div64_u64(offset, div); 2793 div64_u64_rem(offset, fs_info->nr_global_roots, &index); 2794 return index; 2795 } 2796 2797 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, 2798 u64 type, 2799 u64 chunk_offset, u64 size) 2800 { 2801 struct btrfs_fs_info *fs_info = trans->fs_info; 2802 struct btrfs_block_group *cache; 2803 int ret; 2804 2805 btrfs_set_log_full_commit(trans); 2806 2807 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 2808 if (!cache) 2809 return ERR_PTR(-ENOMEM); 2810 2811 /* 2812 * Mark it as new before adding it to the rbtree of block groups or any 2813 * list, so that no other task finds it and calls btrfs_mark_bg_unused() 2814 * before the new flag is set.
2815 */ 2816 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); 2817 2818 cache->length = size; 2819 set_free_space_tree_thresholds(cache); 2820 cache->flags = type; 2821 cache->cached = BTRFS_CACHE_FINISHED; 2822 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2823 2824 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 2825 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); 2826 2827 ret = btrfs_load_block_group_zone_info(cache, true); 2828 if (ret) { 2829 btrfs_put_block_group(cache); 2830 return ERR_PTR(ret); 2831 } 2832 2833 ret = exclude_super_stripes(cache); 2834 if (ret) { 2835 /* We may have excluded something, so call this just in case */ 2836 btrfs_free_excluded_extents(cache); 2837 btrfs_put_block_group(cache); 2838 return ERR_PTR(ret); 2839 } 2840 2841 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); 2842 btrfs_free_excluded_extents(cache); 2843 if (ret) { 2844 btrfs_put_block_group(cache); 2845 return ERR_PTR(ret); 2846 } 2847 2848 /* 2849 * Ensure the corresponding space_info object is created and 2850 * assigned to our block group. We want our bg to be added to the rbtree 2851 * with its ->space_info set. 2852 */ 2853 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 2854 ASSERT(cache->space_info); 2855 2856 ret = btrfs_add_block_group_cache(fs_info, cache); 2857 if (ret) { 2858 btrfs_remove_free_space_cache(cache); 2859 btrfs_put_block_group(cache); 2860 return ERR_PTR(ret); 2861 } 2862 2863 /* 2864 * Now that our block group has its ->space_info set and is inserted in 2865 * the rbtree, update the space info's counters. 2866 */ 2867 trace_btrfs_add_block_group(fs_info, cache, 1); 2868 btrfs_add_bg_to_space_info(fs_info, cache); 2869 btrfs_update_global_block_rsv(fs_info); 2870 2871 #ifdef CONFIG_BTRFS_DEBUG 2872 if (btrfs_should_fragment_free_space(cache)) { 2873 cache->space_info->bytes_used += size >> 1; 2874 fragment_free_space(cache); 2875 } 2876 #endif 2877 2878 list_add_tail(&cache->bg_list, &trans->new_bgs); 2879 btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info); 2880 2881 set_avail_alloc_bits(fs_info, type); 2882 return cache; 2883 } 2884 2885 /* 2886 * Mark one block group RO, can be called several times for the same block 2887 * group. 2888 * 2889 * @cache: the destination block group 2890 * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to 2891 * ensure we still have some free space after marking this 2892 * block group RO. 2893 */ 2894 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2895 bool do_chunk_alloc) 2896 { 2897 struct btrfs_fs_info *fs_info = cache->fs_info; 2898 struct btrfs_trans_handle *trans; 2899 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2900 u64 alloc_flags; 2901 int ret; 2902 bool dirty_bg_running; 2903 2904 /* 2905 * This can only happen when we are doing read-only scrub on read-only 2906 * mount. 2907 * In that case we should not start a new transaction on read-only fs. 2908 * Thus here we skip all chunk allocations. 2909 */ 2910 if (sb_rdonly(fs_info->sb)) { 2911 mutex_lock(&fs_info->ro_block_group_mutex); 2912 ret = inc_block_group_ro(cache, 0); 2913 mutex_unlock(&fs_info->ro_block_group_mutex); 2914 return ret; 2915 } 2916 2917 do { 2918 trans = btrfs_join_transaction(root); 2919 if (IS_ERR(trans)) 2920 return PTR_ERR(trans); 2921 2922 dirty_bg_running = false; 2923 2924 /* 2925 * We're not allowed to set block groups readonly after the dirty 2926 * block group cache has started writing. 
If it already started, 2927 * back off and let this transaction commit. 2928 */ 2929 mutex_lock(&fs_info->ro_block_group_mutex); 2930 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 2931 u64 transid = trans->transid; 2932 2933 mutex_unlock(&fs_info->ro_block_group_mutex); 2934 btrfs_end_transaction(trans); 2935 2936 ret = btrfs_wait_for_commit(fs_info, transid); 2937 if (ret) 2938 return ret; 2939 dirty_bg_running = true; 2940 } 2941 } while (dirty_bg_running); 2942 2943 if (do_chunk_alloc) { 2944 /* 2945 * If we are changing raid levels, try to allocate a 2946 * corresponding block group with the new raid level. 2947 */ 2948 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 2949 if (alloc_flags != cache->flags) { 2950 ret = btrfs_chunk_alloc(trans, alloc_flags, 2951 CHUNK_ALLOC_FORCE); 2952 /* 2953 * ENOSPC is allowed here, we may have enough space 2954 * already allocated at the new raid level to carry on 2955 */ 2956 if (ret == -ENOSPC) 2957 ret = 0; 2958 if (ret < 0) 2959 goto out; 2960 } 2961 } 2962 2963 ret = inc_block_group_ro(cache, 0); 2964 if (!ret) 2965 goto out; 2966 if (ret == -ETXTBSY) 2967 goto unlock_out; 2968 2969 /* 2970 * Skip chunk allocation if the bg is SYSTEM, this is to avoid system 2971 * chunk allocation storm to exhaust the system chunk array. Otherwise 2972 * we still want to try our best to mark the block group read-only. 2973 */ 2974 if (!do_chunk_alloc && ret == -ENOSPC && 2975 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) 2976 goto unlock_out; 2977 2978 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 2979 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 2980 if (ret < 0) 2981 goto out; 2982 /* 2983 * We have allocated a new chunk. We also need to activate that chunk to 2984 * grant metadata tickets for zoned filesystem. 
2985 */ 2986 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); 2987 if (ret < 0) 2988 goto out; 2989 2990 ret = inc_block_group_ro(cache, 0); 2991 if (ret == -ETXTBSY) 2992 goto unlock_out; 2993 out: 2994 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 2995 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 2996 mutex_lock(&fs_info->chunk_mutex); 2997 check_system_chunk(trans, alloc_flags); 2998 mutex_unlock(&fs_info->chunk_mutex); 2999 } 3000 unlock_out: 3001 mutex_unlock(&fs_info->ro_block_group_mutex); 3002 3003 btrfs_end_transaction(trans); 3004 return ret; 3005 } 3006 3007 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 3008 { 3009 struct btrfs_space_info *sinfo = cache->space_info; 3010 u64 num_bytes; 3011 3012 BUG_ON(!cache->ro); 3013 3014 spin_lock(&sinfo->lock); 3015 spin_lock(&cache->lock); 3016 if (!--cache->ro) { 3017 if (btrfs_is_zoned(cache->fs_info)) { 3018 /* Migrate zone_unusable bytes back */ 3019 cache->zone_unusable = 3020 (cache->alloc_offset - cache->used) + 3021 (cache->length - cache->zone_capacity); 3022 sinfo->bytes_zone_unusable += cache->zone_unusable; 3023 sinfo->bytes_readonly -= cache->zone_unusable; 3024 } 3025 num_bytes = cache->length - cache->reserved - 3026 cache->pinned - cache->bytes_super - 3027 cache->zone_unusable - cache->used; 3028 sinfo->bytes_readonly -= num_bytes; 3029 list_del_init(&cache->ro_list); 3030 } 3031 spin_unlock(&cache->lock); 3032 spin_unlock(&sinfo->lock); 3033 } 3034 3035 static int update_block_group_item(struct btrfs_trans_handle *trans, 3036 struct btrfs_path *path, 3037 struct btrfs_block_group *cache) 3038 { 3039 struct btrfs_fs_info *fs_info = trans->fs_info; 3040 int ret; 3041 struct btrfs_root *root = btrfs_block_group_root(fs_info); 3042 unsigned long bi; 3043 struct extent_buffer *leaf; 3044 struct btrfs_block_group_item bgi; 3045 struct btrfs_key key; 3046 u64 old_commit_used; 3047 u64 used; 3048 3049 /* 3050 * Block group items update can be triggered out of commit transaction 3051 * critical section, thus we need a consistent view of used bytes. 3052 * We cannot use cache->used directly outside of the spin lock, as it 3053 * may be changed. 3054 */ 3055 spin_lock(&cache->lock); 3056 old_commit_used = cache->commit_used; 3057 used = cache->used; 3058 /* No change in used bytes, can safely skip it. */ 3059 if (cache->commit_used == used) { 3060 spin_unlock(&cache->lock); 3061 return 0; 3062 } 3063 cache->commit_used = used; 3064 spin_unlock(&cache->lock); 3065 3066 key.objectid = cache->start; 3067 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 3068 key.offset = cache->length; 3069 3070 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3071 if (ret) { 3072 if (ret > 0) 3073 ret = -ENOENT; 3074 goto fail; 3075 } 3076 3077 leaf = path->nodes[0]; 3078 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 3079 btrfs_set_stack_block_group_used(&bgi, used); 3080 btrfs_set_stack_block_group_chunk_objectid(&bgi, 3081 cache->global_root_id); 3082 btrfs_set_stack_block_group_flags(&bgi, cache->flags); 3083 write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 3084 btrfs_mark_buffer_dirty(trans, leaf); 3085 fail: 3086 btrfs_release_path(path); 3087 /* 3088 * We didn't update the block group item, need to revert commit_used 3089 * unless the block group item didn't exist yet - this is to prevent a 3090 * race with a concurrent insertion of the block group item, with 3091 * insert_block_group_item(), that happened just after we attempted to 3092 * update. 
In that case we would reset commit_used to 0 just after the 3093 * insertion set it to a value greater than 0 - if the block group later 3094 * becomes with 0 used bytes, we would incorrectly skip its update. 3095 */ 3096 if (ret < 0 && ret != -ENOENT) { 3097 spin_lock(&cache->lock); 3098 cache->commit_used = old_commit_used; 3099 spin_unlock(&cache->lock); 3100 } 3101 return ret; 3102 3103 } 3104 3105 static int cache_save_setup(struct btrfs_block_group *block_group, 3106 struct btrfs_trans_handle *trans, 3107 struct btrfs_path *path) 3108 { 3109 struct btrfs_fs_info *fs_info = block_group->fs_info; 3110 struct inode *inode = NULL; 3111 struct extent_changeset *data_reserved = NULL; 3112 u64 alloc_hint = 0; 3113 int dcs = BTRFS_DC_ERROR; 3114 u64 cache_size = 0; 3115 int retries = 0; 3116 int ret = 0; 3117 3118 if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 3119 return 0; 3120 3121 /* 3122 * If this block group is smaller than 100 megs don't bother caching the 3123 * block group. 3124 */ 3125 if (block_group->length < (100 * SZ_1M)) { 3126 spin_lock(&block_group->lock); 3127 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 3128 spin_unlock(&block_group->lock); 3129 return 0; 3130 } 3131 3132 if (TRANS_ABORTED(trans)) 3133 return 0; 3134 again: 3135 inode = lookup_free_space_inode(block_group, path); 3136 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 3137 ret = PTR_ERR(inode); 3138 btrfs_release_path(path); 3139 goto out; 3140 } 3141 3142 if (IS_ERR(inode)) { 3143 BUG_ON(retries); 3144 retries++; 3145 3146 if (block_group->ro) 3147 goto out_free; 3148 3149 ret = create_free_space_inode(trans, block_group, path); 3150 if (ret) 3151 goto out_free; 3152 goto again; 3153 } 3154 3155 /* 3156 * We want to set the generation to 0, that way if anything goes wrong 3157 * from here on out we know not to trust this cache when we load up next 3158 * time. 3159 */ 3160 BTRFS_I(inode)->generation = 0; 3161 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 3162 if (ret) { 3163 /* 3164 * So theoretically we could recover from this, simply set the 3165 * super cache generation to 0 so we know to invalidate the 3166 * cache, but then we'd have to keep track of the block groups 3167 * that fail this way so we know we _have_ to reset this cache 3168 * before the next commit or risk reading stale cache. So to 3169 * limit our exposure to horrible edge cases lets just abort the 3170 * transaction, this only happens in really bad situations 3171 * anyway. 3172 */ 3173 btrfs_abort_transaction(trans, ret); 3174 goto out_put; 3175 } 3176 WARN_ON(ret); 3177 3178 /* We've already setup this transaction, go ahead and exit */ 3179 if (block_group->cache_generation == trans->transid && 3180 i_size_read(inode)) { 3181 dcs = BTRFS_DC_SETUP; 3182 goto out_put; 3183 } 3184 3185 if (i_size_read(inode) > 0) { 3186 ret = btrfs_check_trunc_cache_free_space(fs_info, 3187 &fs_info->global_block_rsv); 3188 if (ret) 3189 goto out_put; 3190 3191 ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 3192 if (ret) 3193 goto out_put; 3194 } 3195 3196 spin_lock(&block_group->lock); 3197 if (block_group->cached != BTRFS_CACHE_FINISHED || 3198 !btrfs_test_opt(fs_info, SPACE_CACHE)) { 3199 /* 3200 * don't bother trying to write stuff out _if_ 3201 * a) we're not cached, 3202 * b) we're with nospace_cache mount option, 3203 * c) we're with v2 space_cache (FREE_SPACE_TREE). 
3204 */ 3205 dcs = BTRFS_DC_WRITTEN; 3206 spin_unlock(&block_group->lock); 3207 goto out_put; 3208 } 3209 spin_unlock(&block_group->lock); 3210 3211 /* 3212 * We hit an ENOSPC when setting up the cache in this transaction, just 3213 * skip doing the setup, we've already cleared the cache so we're safe. 3214 */ 3215 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 3216 ret = -ENOSPC; 3217 goto out_put; 3218 } 3219 3220 /* 3221 * Try to preallocate enough space based on how big the block group is. 3222 * Keep in mind this has to include any pinned space which could end up 3223 * taking up quite a bit since it's not folded into the other space 3224 * cache. 3225 */ 3226 cache_size = div_u64(block_group->length, SZ_256M); 3227 if (!cache_size) 3228 cache_size = 1; 3229 3230 cache_size *= 16; 3231 cache_size *= fs_info->sectorsize; 3232 3233 ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 3234 cache_size, false); 3235 if (ret) 3236 goto out_put; 3237 3238 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, 3239 cache_size, cache_size, 3240 &alloc_hint); 3241 /* 3242 * Our cache requires contiguous chunks so that we don't modify a bunch 3243 * of metadata or split extents when writing the cache out, which means 3244 * we can enospc if we are heavily fragmented in addition to just normal 3245 * out of space conditions. So if we hit this just skip setting up any 3246 * other block groups for this transaction, maybe we'll unpin enough 3247 * space the next time around. 3248 */ 3249 if (!ret) 3250 dcs = BTRFS_DC_SETUP; 3251 else if (ret == -ENOSPC) 3252 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 3253 3254 out_put: 3255 iput(inode); 3256 out_free: 3257 btrfs_release_path(path); 3258 out: 3259 spin_lock(&block_group->lock); 3260 if (!ret && dcs == BTRFS_DC_SETUP) 3261 block_group->cache_generation = trans->transid; 3262 block_group->disk_cache_state = dcs; 3263 spin_unlock(&block_group->lock); 3264 3265 extent_changeset_free(data_reserved); 3266 return ret; 3267 } 3268 3269 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 3270 { 3271 struct btrfs_fs_info *fs_info = trans->fs_info; 3272 struct btrfs_block_group *cache, *tmp; 3273 struct btrfs_transaction *cur_trans = trans->transaction; 3274 struct btrfs_path *path; 3275 3276 if (list_empty(&cur_trans->dirty_bgs) || 3277 !btrfs_test_opt(fs_info, SPACE_CACHE)) 3278 return 0; 3279 3280 path = btrfs_alloc_path(); 3281 if (!path) 3282 return -ENOMEM; 3283 3284 /* Could add new block groups, use _safe just in case */ 3285 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 3286 dirty_list) { 3287 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 3288 cache_save_setup(cache, trans, path); 3289 } 3290 3291 btrfs_free_path(path); 3292 return 0; 3293 } 3294 3295 /* 3296 * Transaction commit does final block group cache writeback during a critical 3297 * section where nothing is allowed to change the FS. This is required in 3298 * order for the cache to actually match the block group, but can introduce a 3299 * lot of latency into the commit. 3300 * 3301 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 3302 * There's a chance we'll have to redo some of it if the block group changes 3303 * again during the commit, but it greatly reduces the commit latency by 3304 * getting rid of the easy block groups while we're still allowing others to 3305 * join the commit. 
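 *
 * Whatever is still dirty after this pass is written again by
 * btrfs_write_dirty_block_groups() during the commit critical section.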
3306 */ 3307 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 3308 { 3309 struct btrfs_fs_info *fs_info = trans->fs_info; 3310 struct btrfs_block_group *cache; 3311 struct btrfs_transaction *cur_trans = trans->transaction; 3312 int ret = 0; 3313 int should_put; 3314 struct btrfs_path *path = NULL; 3315 LIST_HEAD(dirty); 3316 struct list_head *io = &cur_trans->io_bgs; 3317 int loops = 0; 3318 3319 spin_lock(&cur_trans->dirty_bgs_lock); 3320 if (list_empty(&cur_trans->dirty_bgs)) { 3321 spin_unlock(&cur_trans->dirty_bgs_lock); 3322 return 0; 3323 } 3324 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3325 spin_unlock(&cur_trans->dirty_bgs_lock); 3326 3327 again: 3328 /* Make sure all the block groups on our dirty list actually exist */ 3329 btrfs_create_pending_block_groups(trans); 3330 3331 if (!path) { 3332 path = btrfs_alloc_path(); 3333 if (!path) { 3334 ret = -ENOMEM; 3335 goto out; 3336 } 3337 } 3338 3339 /* 3340 * cache_write_mutex is here only to save us from balance or automatic 3341 * removal of empty block groups deleting this block group while we are 3342 * writing out the cache 3343 */ 3344 mutex_lock(&trans->transaction->cache_write_mutex); 3345 while (!list_empty(&dirty)) { 3346 bool drop_reserve = true; 3347 3348 cache = list_first_entry(&dirty, struct btrfs_block_group, 3349 dirty_list); 3350 /* 3351 * This can happen if something re-dirties a block group that 3352 * is already under IO. Just wait for it to finish and then do 3353 * it all again 3354 */ 3355 if (!list_empty(&cache->io_list)) { 3356 list_del_init(&cache->io_list); 3357 btrfs_wait_cache_io(trans, cache, path); 3358 btrfs_put_block_group(cache); 3359 } 3360 3361 3362 /* 3363 * btrfs_wait_cache_io uses the cache->dirty_list to decide if 3364 * it should update the cache_state. Don't delete until after 3365 * we wait. 3366 * 3367 * Since we're not running in the commit critical section 3368 * we need the dirty_bgs_lock to protect from update_block_group 3369 */ 3370 spin_lock(&cur_trans->dirty_bgs_lock); 3371 list_del_init(&cache->dirty_list); 3372 spin_unlock(&cur_trans->dirty_bgs_lock); 3373 3374 should_put = 1; 3375 3376 cache_save_setup(cache, trans, path); 3377 3378 if (cache->disk_cache_state == BTRFS_DC_SETUP) { 3379 cache->io_ctl.inode = NULL; 3380 ret = btrfs_write_out_cache(trans, cache, path); 3381 if (ret == 0 && cache->io_ctl.inode) { 3382 should_put = 0; 3383 3384 /* 3385 * The cache_write_mutex is protecting the 3386 * io_list, also refer to the definition of 3387 * btrfs_transaction::io_bgs for more details 3388 */ 3389 list_add_tail(&cache->io_list, io); 3390 } else { 3391 /* 3392 * If we failed to write the cache, the 3393 * generation will be bad and life goes on 3394 */ 3395 ret = 0; 3396 } 3397 } 3398 if (!ret) { 3399 ret = update_block_group_item(trans, path, cache); 3400 /* 3401 * Our block group might still be attached to the list 3402 * of new block groups in the transaction handle of some 3403 * other task (struct btrfs_trans_handle->new_bgs). This 3404 * means its block group item isn't yet in the extent 3405 * tree. If this happens ignore the error, as we will 3406 * try again later in the critical section of the 3407 * transaction commit. 
3408 */ 3409 if (ret == -ENOENT) { 3410 ret = 0; 3411 spin_lock(&cur_trans->dirty_bgs_lock); 3412 if (list_empty(&cache->dirty_list)) { 3413 list_add_tail(&cache->dirty_list, 3414 &cur_trans->dirty_bgs); 3415 btrfs_get_block_group(cache); 3416 drop_reserve = false; 3417 } 3418 spin_unlock(&cur_trans->dirty_bgs_lock); 3419 } else if (ret) { 3420 btrfs_abort_transaction(trans, ret); 3421 } 3422 } 3423 3424 /* If it's not on the io list, we need to put the block group */ 3425 if (should_put) 3426 btrfs_put_block_group(cache); 3427 if (drop_reserve) 3428 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 3429 /* 3430 * Avoid blocking other tasks for too long. It might even save 3431 * us from writing caches for block groups that are going to be 3432 * removed. 3433 */ 3434 mutex_unlock(&trans->transaction->cache_write_mutex); 3435 if (ret) 3436 goto out; 3437 mutex_lock(&trans->transaction->cache_write_mutex); 3438 } 3439 mutex_unlock(&trans->transaction->cache_write_mutex); 3440 3441 /* 3442 * Go through delayed refs for all the stuff we've just kicked off 3443 * and then loop back (just once) 3444 */ 3445 if (!ret) 3446 ret = btrfs_run_delayed_refs(trans, 0); 3447 if (!ret && loops == 0) { 3448 loops++; 3449 spin_lock(&cur_trans->dirty_bgs_lock); 3450 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3451 /* 3452 * dirty_bgs_lock protects us from concurrent block group 3453 * deletes too (not just cache_write_mutex). 3454 */ 3455 if (!list_empty(&dirty)) { 3456 spin_unlock(&cur_trans->dirty_bgs_lock); 3457 goto again; 3458 } 3459 spin_unlock(&cur_trans->dirty_bgs_lock); 3460 } 3461 out: 3462 if (ret < 0) { 3463 spin_lock(&cur_trans->dirty_bgs_lock); 3464 list_splice_init(&dirty, &cur_trans->dirty_bgs); 3465 spin_unlock(&cur_trans->dirty_bgs_lock); 3466 btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 3467 } 3468 3469 btrfs_free_path(path); 3470 return ret; 3471 } 3472 3473 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 3474 { 3475 struct btrfs_fs_info *fs_info = trans->fs_info; 3476 struct btrfs_block_group *cache; 3477 struct btrfs_transaction *cur_trans = trans->transaction; 3478 int ret = 0; 3479 int should_put; 3480 struct btrfs_path *path; 3481 struct list_head *io = &cur_trans->io_bgs; 3482 3483 path = btrfs_alloc_path(); 3484 if (!path) 3485 return -ENOMEM; 3486 3487 /* 3488 * Even though we are in the critical section of the transaction commit, 3489 * we can still have concurrent tasks adding elements to this 3490 * transaction's list of dirty block groups. These tasks correspond to 3491 * endio free space workers started when writeback finishes for a 3492 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 3493 * allocate new block groups as a result of COWing nodes of the root 3494 * tree when updating the free space inode. The writeback for the space 3495 * caches is triggered by an earlier call to 3496 * btrfs_start_dirty_block_groups() and iterations of the following 3497 * loop. 3498 * Also we want to do the cache_save_setup first and then run the 3499 * delayed refs to make sure we have the best chance at doing this all 3500 * in one shot. 3501 */ 3502 spin_lock(&cur_trans->dirty_bgs_lock); 3503 while (!list_empty(&cur_trans->dirty_bgs)) { 3504 cache = list_first_entry(&cur_trans->dirty_bgs, 3505 struct btrfs_block_group, 3506 dirty_list); 3507 3508 /* 3509 * This can happen if cache_save_setup re-dirties a block group 3510 * that is already under IO. 
Just wait for it to finish and 3511 * then do it all again 3512 */ 3513 if (!list_empty(&cache->io_list)) { 3514 spin_unlock(&cur_trans->dirty_bgs_lock); 3515 list_del_init(&cache->io_list); 3516 btrfs_wait_cache_io(trans, cache, path); 3517 btrfs_put_block_group(cache); 3518 spin_lock(&cur_trans->dirty_bgs_lock); 3519 } 3520 3521 /* 3522 * Don't remove from the dirty list until after we've waited on 3523 * any pending IO 3524 */ 3525 list_del_init(&cache->dirty_list); 3526 spin_unlock(&cur_trans->dirty_bgs_lock); 3527 should_put = 1; 3528 3529 cache_save_setup(cache, trans, path); 3530 3531 if (!ret) 3532 ret = btrfs_run_delayed_refs(trans, U64_MAX); 3533 3534 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 3535 cache->io_ctl.inode = NULL; 3536 ret = btrfs_write_out_cache(trans, cache, path); 3537 if (ret == 0 && cache->io_ctl.inode) { 3538 should_put = 0; 3539 list_add_tail(&cache->io_list, io); 3540 } else { 3541 /* 3542 * If we failed to write the cache, the 3543 * generation will be bad and life goes on 3544 */ 3545 ret = 0; 3546 } 3547 } 3548 if (!ret) { 3549 ret = update_block_group_item(trans, path, cache); 3550 /* 3551 * One of the free space endio workers might have 3552 * created a new block group while updating a free space 3553 * cache's inode (at inode.c:btrfs_finish_ordered_io()) 3554 * and hasn't released its transaction handle yet, in 3555 * which case the new block group is still attached to 3556 * its transaction handle and its creation has not 3557 * finished yet (no block group item in the extent tree 3558 * yet, etc). If this is the case, wait for all free 3559 * space endio workers to finish and retry. This is a 3560 * very rare case so no need for a more efficient and 3561 * complex approach. 3562 */ 3563 if (ret == -ENOENT) { 3564 wait_event(cur_trans->writer_wait, 3565 atomic_read(&cur_trans->num_writers) == 1); 3566 ret = update_block_group_item(trans, path, cache); 3567 } 3568 if (ret) 3569 btrfs_abort_transaction(trans, ret); 3570 } 3571 3572 /* If its not on the io list, we need to put the block group */ 3573 if (should_put) 3574 btrfs_put_block_group(cache); 3575 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 3576 spin_lock(&cur_trans->dirty_bgs_lock); 3577 } 3578 spin_unlock(&cur_trans->dirty_bgs_lock); 3579 3580 /* 3581 * Refer to the definition of io_bgs member for details why it's safe 3582 * to use it without any locking 3583 */ 3584 while (!list_empty(io)) { 3585 cache = list_first_entry(io, struct btrfs_block_group, 3586 io_list); 3587 list_del_init(&cache->io_list); 3588 btrfs_wait_cache_io(trans, cache, path); 3589 btrfs_put_block_group(cache); 3590 } 3591 3592 btrfs_free_path(path); 3593 return ret; 3594 } 3595 3596 int btrfs_update_block_group(struct btrfs_trans_handle *trans, 3597 u64 bytenr, u64 num_bytes, bool alloc) 3598 { 3599 struct btrfs_fs_info *info = trans->fs_info; 3600 struct btrfs_space_info *space_info; 3601 struct btrfs_block_group *cache; 3602 u64 old_val; 3603 bool reclaim = false; 3604 bool bg_already_dirty = true; 3605 int factor; 3606 3607 /* Block accounting for super block */ 3608 spin_lock(&info->delalloc_root_lock); 3609 old_val = btrfs_super_bytes_used(info->super_copy); 3610 if (alloc) 3611 old_val += num_bytes; 3612 else 3613 old_val -= num_bytes; 3614 btrfs_set_super_bytes_used(info->super_copy, old_val); 3615 spin_unlock(&info->delalloc_root_lock); 3616 3617 cache = btrfs_lookup_block_group(info, bytenr); 3618 if (!cache) 3619 return -ENOENT; 3620 3621 /* An extent can not span multiple block groups. 
*/ 3622 ASSERT(bytenr + num_bytes <= cache->start + cache->length); 3623 3624 space_info = cache->space_info; 3625 factor = btrfs_bg_type_to_factor(cache->flags); 3626 3627 /* 3628 * If this block group has free space cache written out, we need to make 3629 * sure to load it if we are removing space. This is because we need 3630 * the unpinning stage to actually add the space back to the block group, 3631 * otherwise we will leak space. 3632 */ 3633 if (!alloc && !btrfs_block_group_done(cache)) 3634 btrfs_cache_block_group(cache, true); 3635 3636 spin_lock(&space_info->lock); 3637 spin_lock(&cache->lock); 3638 3639 if (btrfs_test_opt(info, SPACE_CACHE) && 3640 cache->disk_cache_state < BTRFS_DC_CLEAR) 3641 cache->disk_cache_state = BTRFS_DC_CLEAR; 3642 3643 old_val = cache->used; 3644 if (alloc) { 3645 old_val += num_bytes; 3646 cache->used = old_val; 3647 cache->reserved -= num_bytes; 3648 space_info->bytes_reserved -= num_bytes; 3649 space_info->bytes_used += num_bytes; 3650 space_info->disk_used += num_bytes * factor; 3651 spin_unlock(&cache->lock); 3652 spin_unlock(&space_info->lock); 3653 } else { 3654 old_val -= num_bytes; 3655 cache->used = old_val; 3656 cache->pinned += num_bytes; 3657 btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes); 3658 space_info->bytes_used -= num_bytes; 3659 space_info->disk_used -= num_bytes * factor; 3660 3661 reclaim = should_reclaim_block_group(cache, num_bytes); 3662 3663 spin_unlock(&cache->lock); 3664 spin_unlock(&space_info->lock); 3665 3666 set_extent_bit(&trans->transaction->pinned_extents, bytenr, 3667 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); 3668 } 3669 3670 spin_lock(&trans->transaction->dirty_bgs_lock); 3671 if (list_empty(&cache->dirty_list)) { 3672 list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs); 3673 bg_already_dirty = false; 3674 btrfs_get_block_group(cache); 3675 } 3676 spin_unlock(&trans->transaction->dirty_bgs_lock); 3677 3678 /* 3679 * No longer have used bytes in this block group, queue it for deletion. 3680 * We do this after adding the block group to the dirty list to avoid 3681 * races between cleaner kthread and space cache writeout. 3682 */ 3683 if (!alloc && old_val == 0) { 3684 if (!btrfs_test_opt(info, DISCARD_ASYNC)) 3685 btrfs_mark_bg_unused(cache); 3686 } else if (!alloc && reclaim) { 3687 btrfs_mark_bg_to_reclaim(cache); 3688 } 3689 3690 btrfs_put_block_group(cache); 3691 3692 /* Modified block groups are accounted for in the delayed_refs_rsv. */ 3693 if (!bg_already_dirty) 3694 btrfs_inc_delayed_refs_rsv_bg_updates(info); 3695 3696 return 0; 3697 } 3698 3699 /* 3700 * Update the block_group and space info counters. 3701 * 3702 * @cache: The cache we are manipulating 3703 * @ram_bytes: The number of bytes of file content, which will be the same as 3704 * @num_bytes except for the compression path. 3705 * @num_bytes: The number of bytes in question 3706 * @delalloc: The blocks are allocated for the delalloc write 3707 * 3708 * This is called by the allocator when it reserves space. If this is a 3709 * reservation and the block group has become read-only we cannot make the 3710 * reservation and return -EAGAIN, otherwise this function always succeeds.
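 *
 * For example (an illustrative case, not an exact trace): a compressed write
 * may have reserved @ram_bytes worth of uncompressed file data but only needs
 * a smaller @num_bytes extent on disk; the difference goes back to the
 * space_info, which is why tickets are re-checked below when
 * @num_bytes < @ram_bytes.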
3711 */ 3712 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, 3713 u64 ram_bytes, u64 num_bytes, int delalloc, 3714 bool force_wrong_size_class) 3715 { 3716 struct btrfs_space_info *space_info = cache->space_info; 3717 enum btrfs_block_group_size_class size_class; 3718 int ret = 0; 3719 3720 spin_lock(&space_info->lock); 3721 spin_lock(&cache->lock); 3722 if (cache->ro) { 3723 ret = -EAGAIN; 3724 goto out; 3725 } 3726 3727 if (btrfs_block_group_should_use_size_class(cache)) { 3728 size_class = btrfs_calc_block_group_size_class(num_bytes); 3729 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class); 3730 if (ret) 3731 goto out; 3732 } 3733 cache->reserved += num_bytes; 3734 space_info->bytes_reserved += num_bytes; 3735 trace_btrfs_space_reservation(cache->fs_info, "space_info", 3736 space_info->flags, num_bytes, 1); 3737 btrfs_space_info_update_bytes_may_use(cache->fs_info, 3738 space_info, -ram_bytes); 3739 if (delalloc) 3740 cache->delalloc_bytes += num_bytes; 3741 3742 /* 3743 * Compression can use less space than we reserved, so wake tickets if 3744 * that happens. 3745 */ 3746 if (num_bytes < ram_bytes) 3747 btrfs_try_granting_tickets(cache->fs_info, space_info); 3748 out: 3749 spin_unlock(&cache->lock); 3750 spin_unlock(&space_info->lock); 3751 return ret; 3752 } 3753 3754 /* 3755 * Update the block_group and space info counters. 3756 * 3757 * @cache: The cache we are manipulating 3758 * @num_bytes: The number of bytes in question 3759 * @delalloc: The blocks are allocated for the delalloc write 3760 * 3761 * This is called by somebody who is freeing space that was never actually used 3762 * on disk. For example if you reserve some space for a new leaf in transaction 3763 * A and before transaction A commits you free that leaf, you call this with 3764 * reserve set to 0 in order to clear the reservation. 3765 */ 3766 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, 3767 u64 num_bytes, int delalloc) 3768 { 3769 struct btrfs_space_info *space_info = cache->space_info; 3770 3771 spin_lock(&space_info->lock); 3772 spin_lock(&cache->lock); 3773 if (cache->ro) 3774 space_info->bytes_readonly += num_bytes; 3775 cache->reserved -= num_bytes; 3776 space_info->bytes_reserved -= num_bytes; 3777 space_info->max_extent_size = 0; 3778 3779 if (delalloc) 3780 cache->delalloc_bytes -= num_bytes; 3781 spin_unlock(&cache->lock); 3782 3783 btrfs_try_granting_tickets(cache->fs_info, space_info); 3784 spin_unlock(&space_info->lock); 3785 } 3786 3787 static void force_metadata_allocation(struct btrfs_fs_info *info) 3788 { 3789 struct list_head *head = &info->space_info; 3790 struct btrfs_space_info *found; 3791 3792 list_for_each_entry(found, head, list) { 3793 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3794 found->force_alloc = CHUNK_ALLOC_FORCE; 3795 } 3796 } 3797 3798 static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 3799 struct btrfs_space_info *sinfo, int force) 3800 { 3801 u64 bytes_used = btrfs_space_info_used(sinfo, false); 3802 u64 thresh; 3803 3804 if (force == CHUNK_ALLOC_FORCE) 3805 return 1; 3806 3807 /* 3808 * in limited mode, we want to have some free space up to 3809 * about 1% of the FS size. 
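 * For example, on a 1 TiB filesystem this works out to roughly 10 GiB
 * (1% of the total capacity), while the SZ_64M term below only acts as a
 * floor for very small filesystems (an illustrative calculation based on
 * the code below, not a tuned value).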
3810 */ 3811 if (force == CHUNK_ALLOC_LIMITED) { 3812 thresh = btrfs_super_total_bytes(fs_info->super_copy); 3813 thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1)); 3814 3815 if (sinfo->total_bytes - bytes_used < thresh) 3816 return 1; 3817 } 3818 3819 if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) 3820 return 0; 3821 return 1; 3822 } 3823 3824 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) 3825 { 3826 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); 3827 3828 return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 3829 } 3830 3831 static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags) 3832 { 3833 struct btrfs_block_group *bg; 3834 int ret; 3835 3836 /* 3837 * Check if we have enough space in the system space info because we 3838 * will need to update device items in the chunk btree and insert a new 3839 * chunk item in the chunk btree as well. This will allocate a new 3840 * system block group if needed. 3841 */ 3842 check_system_chunk(trans, flags); 3843 3844 bg = btrfs_create_chunk(trans, flags); 3845 if (IS_ERR(bg)) { 3846 ret = PTR_ERR(bg); 3847 goto out; 3848 } 3849 3850 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 3851 /* 3852 * Normally we are not expected to fail with -ENOSPC here, since we have 3853 * previously reserved space in the system space_info and allocated one 3854 * new system chunk if necessary. However there are three exceptions: 3855 * 3856 * 1) We may have enough free space in the system space_info but all the 3857 * existing system block groups have a profile which can not be used 3858 * for extent allocation. 3859 * 3860 * This happens when mounting in degraded mode. For example we have a 3861 * RAID1 filesystem with 2 devices, lose one device and mount the fs 3862 * using the other device in degraded mode. If we then allocate a chunk, 3863 * we may have enough free space in the existing system space_info, but 3864 * none of the block groups can be used for extent allocation since they 3865 * have a RAID1 profile, and because we are in degraded mode with a 3866 * single device, we are forced to allocate a new system chunk with a 3867 * SINGLE profile. Making check_system_chunk() iterate over all system 3868 * block groups and check if they have a usable profile and enough space 3869 * can be slow on very large filesystems, so we tolerate the -ENOSPC and 3870 * try again after forcing allocation of a new system chunk. Like this 3871 * we avoid paying the cost of that search in normal circumstances, when 3872 * we were not mounted in degraded mode; 3873 * 3874 * 2) We had enough free space in the system space_info, and one suitable 3875 * block group to allocate from when we called check_system_chunk() 3876 * above. However right after we called it, the only system block group 3877 * with enough free space got turned into RO mode by a running scrub, 3878 * and in this case we have to allocate a new one and retry.
We only 3879 * need to do this allocation and retry once, since we have a transaction 3880 * handle and scrub uses the commit root to search for block groups; 3881 * 3882 * 3) We had one system block group with enough free space when we called 3883 * check_system_chunk(), but after that, right before we tried to 3884 * allocate the last extent buffer we needed, a discard operation came 3885 * in and it temporarily removed the last free space entry from the 3886 * block group (discard removes a free space entry, discards it, and 3887 * then adds back the entry to the block group cache). 3888 */ 3889 if (ret == -ENOSPC) { 3890 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); 3891 struct btrfs_block_group *sys_bg; 3892 3893 sys_bg = btrfs_create_chunk(trans, sys_flags); 3894 if (IS_ERR(sys_bg)) { 3895 ret = PTR_ERR(sys_bg); 3896 btrfs_abort_transaction(trans, ret); 3897 goto out; 3898 } 3899 3900 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3901 if (ret) { 3902 btrfs_abort_transaction(trans, ret); 3903 goto out; 3904 } 3905 3906 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 3907 if (ret) { 3908 btrfs_abort_transaction(trans, ret); 3909 goto out; 3910 } 3911 } else if (ret) { 3912 btrfs_abort_transaction(trans, ret); 3913 goto out; 3914 } 3915 out: 3916 btrfs_trans_release_chunk_metadata(trans); 3917 3918 if (ret) 3919 return ERR_PTR(ret); 3920 3921 btrfs_get_block_group(bg); 3922 return bg; 3923 } 3924 3925 /* 3926 * Chunk allocation is done in 2 phases: 3927 * 3928 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for 3929 * the chunk, the chunk mapping, create its block group and add the items 3930 * that belong in the chunk btree to it - more specifically, we need to 3931 * update device items in the chunk btree and add a new chunk item to it. 3932 * 3933 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block 3934 * group item to the extent btree and the device extent items to the devices 3935 * btree. 3936 * 3937 * This is done to prevent deadlocks. For example when COWing a node from the 3938 * extent btree we are holding a write lock on the node's parent and if we 3939 * trigger chunk allocation and attempt to insert the new block group item 3940 * in the extent btree right away, we could deadlock because the path for the 3941 * insertion can include that parent node. At first glance it seems impossible 3942 * to trigger chunk allocation after starting a transaction since tasks should 3943 * reserve enough transaction units (metadata space), however while that is true 3944 * most of the time, chunk allocation may still be triggered for several reasons: 3945 * 3946 * 1) When reserving metadata, we check if there is enough free space in the 3947 * metadata space_info and therefore don't trigger allocation of a new chunk. 3948 * However later when the task actually tries to COW an extent buffer from 3949 * the extent btree or from the device btree for example, it is forced to 3950 * allocate a new block group (chunk) because the only one that had enough 3951 * free space was just turned to RO mode by a running scrub for example (or 3952 * device replace, block group reclaim thread, etc), so we can not use it 3953 * for allocating an extent and end up being forced to allocate a new one; 3954 * 3955 * 2) Because we only check that the metadata space_info has enough free bytes, 3956 * we end up not allocating a new metadata chunk in that case.
However if 3957 * the filesystem was mounted in degraded mode, none of the existing block 3958 * groups might be suitable for extent allocation due to their incompatible 3959 * profile (e.g. mounting a 2-device filesystem, where all block groups 3960 * use a RAID1 profile, in degraded mode using a single device). In this case 3961 * when the task attempts to COW some extent buffer of the extent btree for 3962 * example, it will trigger allocation of a new metadata block group with a 3963 * suitable profile (SINGLE profile in the example of the degraded mount of 3964 * the RAID1 filesystem); 3965 * 3966 * 3) The task has reserved enough transaction units / metadata space, but when 3967 * it attempts to COW an extent buffer from the extent or device btree for 3968 * example, it does not find any free extent in any metadata block group, 3969 * and is therefore forced to try to allocate a new metadata block group. 3970 * This is because some other task allocated all available extents in the 3971 * meantime - this typically happens with tasks that don't reserve space 3972 * properly, either intentionally or as a bug. One example where this is 3973 * done intentionally is fsync, as it does not reserve any transaction units 3974 * and ends up allocating a variable number of metadata extents for log 3975 * tree extent buffers; 3976 * 3977 * 4) The task has reserved enough transaction units / metadata space, but right 3978 * before it tries to allocate the last extent buffer it needs, a discard 3979 * operation comes in and, temporarily, removes the last free space entry from 3980 * the only metadata block group that had free space (discard starts by 3981 * removing a free space entry from a block group, then does the discard 3982 * operation and, once it's done, it adds back the free space entry to the 3983 * block group). 3984 * 3985 * We also need this 2-phase setup when adding a device to a filesystem with 3986 * a seed device - we must create new metadata and system chunks without adding 3987 * any of the block group items to the chunk, extent and device btrees. If we 3988 * did not do it this way, we would get ENOSPC when attempting to update those 3989 * btrees, since all the chunks from the seed device are read-only. 3990 * 3991 * Phase 1 does the updates and insertions to the chunk btree because if we had 3992 * it done in phase 2 and have a thundering herd of tasks allocating chunks in 3993 * parallel, we risk having too many system chunks allocated by many tasks if 3994 * many tasks reach phase 1 without the previous ones completing phase 2. In the 3995 * extreme case this leads to exhaustion of the system chunk array in the 3996 * superblock. This is easier to trigger if using a btree node/leaf size of 64K 3997 * and with RAID filesystems (so we have more device items in the chunk btree). 3998 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of 3999 * the system chunk array due to concurrent allocations") provides more details. 4000 * 4001 * Allocation of system chunks does not happen through this function.
A task that 4002 * needs to update the chunk btree (the only btree that uses system chunks) must 4003 * preallocate chunk space by calling either check_system_chunk() or 4004 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or 4005 * metadata chunk or when removing a chunk, while the latter is used before doing 4006 * a modification to the chunk btree - use cases for the latter are adding, 4007 * removing and resizing a device as well as relocation of a system chunk. 4008 * See the comment below for more details. 4009 * 4010 * The reservation of system space, done through check_system_chunk(), as well 4011 * as all the updates and insertions into the chunk btree must be done while 4012 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing 4013 * an extent buffer from the chunk btree we never trigger allocation of a new 4014 * system chunk, which would result in a deadlock (trying to lock an extent 4015 * buffer of the chunk btree twice, the first time before triggering the chunk 4016 * allocation and the second time during chunk allocation while attempting to 4017 * update the chunk btree). The system chunk array is also updated while holding 4018 * that mutex. The same logic applies to removing chunks - we must reserve system 4019 * space, update the chunk btree and the system chunk array in the superblock 4020 * while holding fs_info->chunk_mutex. 4021 * 4022 * This function, btrfs_chunk_alloc(), belongs to phase 1. 4023 * 4024 * If @force is CHUNK_ALLOC_FORCE: 4025 * - return 1 if it successfully allocates a chunk, 4026 * - return errors including -ENOSPC otherwise. 4027 * If @force is NOT CHUNK_ALLOC_FORCE: 4028 * - return 0 if it doesn't need to allocate a new chunk, 4029 * - return 1 if it successfully allocates a chunk, 4030 * - return errors including -ENOSPC otherwise. 4031 */ 4032 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, 4033 enum btrfs_chunk_alloc_enum force) 4034 { 4035 struct btrfs_fs_info *fs_info = trans->fs_info; 4036 struct btrfs_space_info *space_info; 4037 struct btrfs_block_group *ret_bg; 4038 bool wait_for_alloc = false; 4039 bool should_alloc = false; 4040 bool from_extent_allocation = false; 4041 int ret = 0; 4042 4043 if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) { 4044 from_extent_allocation = true; 4045 force = CHUNK_ALLOC_FORCE; 4046 } 4047 4048 /* Don't re-enter if we're already allocating a chunk */ 4049 if (trans->allocating_chunk) 4050 return -ENOSPC; 4051 /* 4052 * Allocation of system chunks can not happen through this path, as we 4053 * could end up in a deadlock if we are allocating a data or metadata 4054 * chunk and there is another task modifying the chunk btree. 4055 * 4056 * This is because while we are holding the chunk mutex, we will attempt 4057 * to add the new chunk item to the chunk btree or update an existing 4058 * device item in the chunk btree, while the other task that is modifying 4059 * the chunk btree is attempting to COW an extent buffer while holding a 4060 * lock on it and on its parent - if the COW operation triggers a system 4061 * chunk allocation, then we can deadlock because we are holding the 4062 * chunk mutex and we may need to access that extent buffer or its parent 4063 * in order to add the chunk item or update a device item. 4064 * 4065 * Tasks that want to modify the chunk tree should reserve system space 4066 * before updating the chunk btree, by calling either 4067 * btrfs_reserve_chunk_metadata() or check_system_chunk().
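 * A minimal sketch of that pattern, assuming a caller that only needs to
 * touch the chunk btree (illustrative, not copied from a specific call site):
 *
 *	btrfs_reserve_chunk_metadata(trans, true);
 *	... insert or update items in the chunk btree ...
 *	btrfs_trans_release_chunk_metadata(trans);
 *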
4068 * It's possible that after a task reserves the space, it still ends up 4069 * here - this happens in the cases described above at do_chunk_alloc(). 4070 * The task will have to either retry or fail. 4071 */ 4072 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 4073 return -ENOSPC; 4074 4075 space_info = btrfs_find_space_info(fs_info, flags); 4076 ASSERT(space_info); 4077 4078 do { 4079 spin_lock(&space_info->lock); 4080 if (force < space_info->force_alloc) 4081 force = space_info->force_alloc; 4082 should_alloc = should_alloc_chunk(fs_info, space_info, force); 4083 if (space_info->full) { 4084 /* No more free physical space */ 4085 if (should_alloc) 4086 ret = -ENOSPC; 4087 else 4088 ret = 0; 4089 spin_unlock(&space_info->lock); 4090 return ret; 4091 } else if (!should_alloc) { 4092 spin_unlock(&space_info->lock); 4093 return 0; 4094 } else if (space_info->chunk_alloc) { 4095 /* 4096 * Someone is already allocating, so we need to block 4097 * until this someone is finished and then loop to 4098 * recheck if we should continue with our allocation 4099 * attempt. 4100 */ 4101 wait_for_alloc = true; 4102 force = CHUNK_ALLOC_NO_FORCE; 4103 spin_unlock(&space_info->lock); 4104 mutex_lock(&fs_info->chunk_mutex); 4105 mutex_unlock(&fs_info->chunk_mutex); 4106 } else { 4107 /* Proceed with allocation */ 4108 space_info->chunk_alloc = 1; 4109 wait_for_alloc = false; 4110 spin_unlock(&space_info->lock); 4111 } 4112 4113 cond_resched(); 4114 } while (wait_for_alloc); 4115 4116 mutex_lock(&fs_info->chunk_mutex); 4117 trans->allocating_chunk = true; 4118 4119 /* 4120 * If we have mixed data/metadata chunks we want to make sure we keep 4121 * allocating mixed chunks instead of individual chunks. 4122 */ 4123 if (btrfs_mixed_space_info(space_info)) 4124 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 4125 4126 /* 4127 * if we're doing a data chunk, go ahead and make sure that 4128 * we keep a reasonable number of metadata chunks allocated in the 4129 * FS as well. 4130 */ 4131 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 4132 fs_info->data_chunk_allocations++; 4133 if (!(fs_info->data_chunk_allocations % 4134 fs_info->metadata_ratio)) 4135 force_metadata_allocation(fs_info); 4136 } 4137 4138 ret_bg = do_chunk_alloc(trans, flags); 4139 trans->allocating_chunk = false; 4140 4141 if (IS_ERR(ret_bg)) { 4142 ret = PTR_ERR(ret_bg); 4143 } else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) { 4144 /* 4145 * New block group is likely to be used soon. Try to activate 4146 * it now. Failure is OK for now. 
4147 */ 4148 btrfs_zone_activate(ret_bg); 4149 } 4150 4151 if (!ret) 4152 btrfs_put_block_group(ret_bg); 4153 4154 spin_lock(&space_info->lock); 4155 if (ret < 0) { 4156 if (ret == -ENOSPC) 4157 space_info->full = 1; 4158 else 4159 goto out; 4160 } else { 4161 ret = 1; 4162 space_info->max_extent_size = 0; 4163 } 4164 4165 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 4166 out: 4167 space_info->chunk_alloc = 0; 4168 spin_unlock(&space_info->lock); 4169 mutex_unlock(&fs_info->chunk_mutex); 4170 4171 return ret; 4172 } 4173 4174 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) 4175 { 4176 u64 num_dev; 4177 4178 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; 4179 if (!num_dev) 4180 num_dev = fs_info->fs_devices->rw_devices; 4181 4182 return num_dev; 4183 } 4184 4185 static void reserve_chunk_space(struct btrfs_trans_handle *trans, 4186 u64 bytes, 4187 u64 type) 4188 { 4189 struct btrfs_fs_info *fs_info = trans->fs_info; 4190 struct btrfs_space_info *info; 4191 u64 left; 4192 int ret = 0; 4193 4194 /* 4195 * Needed because we can end up allocating a system chunk and for an 4196 * atomic and race free space reservation in the chunk block reserve. 4197 */ 4198 lockdep_assert_held(&fs_info->chunk_mutex); 4199 4200 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 4201 spin_lock(&info->lock); 4202 left = info->total_bytes - btrfs_space_info_used(info, true); 4203 spin_unlock(&info->lock); 4204 4205 if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4206 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", 4207 left, bytes, type); 4208 btrfs_dump_space_info(fs_info, info, 0, 0); 4209 } 4210 4211 if (left < bytes) { 4212 u64 flags = btrfs_system_alloc_profile(fs_info); 4213 struct btrfs_block_group *bg; 4214 4215 /* 4216 * Ignore failure to create system chunk. We might end up not 4217 * needing it, as we might not need to COW all nodes/leafs from 4218 * the paths we visit in the chunk tree (they were already COWed 4219 * or created in the current transaction for example). 4220 */ 4221 bg = btrfs_create_chunk(trans, flags); 4222 if (IS_ERR(bg)) { 4223 ret = PTR_ERR(bg); 4224 } else { 4225 /* 4226 * We have a new chunk. We also need to activate it for 4227 * zoned filesystem. 4228 */ 4229 ret = btrfs_zoned_activate_one_bg(fs_info, info, true); 4230 if (ret < 0) 4231 return; 4232 4233 /* 4234 * If we fail to add the chunk item here, we end up 4235 * trying again at phase 2 of chunk allocation, at 4236 * btrfs_create_pending_block_groups(). So ignore 4237 * any error here. An ENOSPC here could happen, due to 4238 * the cases described at do_chunk_alloc() - the system 4239 * block group we just created was just turned into RO 4240 * mode by a scrub for example, or a running discard 4241 * temporarily removed its free space entries, etc. 4242 */ 4243 btrfs_chunk_alloc_add_chunk_item(trans, bg); 4244 } 4245 } 4246 4247 if (!ret) { 4248 ret = btrfs_block_rsv_add(fs_info, 4249 &fs_info->chunk_block_rsv, 4250 bytes, BTRFS_RESERVE_NO_FLUSH); 4251 if (!ret) 4252 trans->chunk_bytes_reserved += bytes; 4253 } 4254 } 4255 4256 /* 4257 * Reserve space in the system space for allocating or removing a chunk. 4258 * The caller must be holding fs_info->chunk_mutex. 
4259 */ 4260 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) 4261 { 4262 struct btrfs_fs_info *fs_info = trans->fs_info; 4263 const u64 num_devs = get_profile_num_devs(fs_info, type); 4264 u64 bytes; 4265 4266 /* num_devs device items to update and 1 chunk item to add or remove. */ 4267 bytes = btrfs_calc_metadata_size(fs_info, num_devs) + 4268 btrfs_calc_insert_metadata_size(fs_info, 1); 4269 4270 reserve_chunk_space(trans, bytes, type); 4271 } 4272 4273 /* 4274 * Reserve space in the system space, if needed, for doing a modification to the 4275 * chunk btree. 4276 * 4277 * @trans: A transaction handle. 4278 * @is_item_insertion: Indicate if the modification is for inserting a new item 4279 * in the chunk btree or if it's for the deletion or update 4280 * of an existing item. 4281 * 4282 * This is used in a context where we need to update the chunk btree outside 4283 * block group allocation and removal, to avoid a deadlock with a concurrent 4284 * task that is allocating a metadata or data block group and therefore needs to 4285 * update the chunk btree while holding the chunk mutex. After the update to the 4286 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called. 4287 * 4288 */ 4289 void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans, 4290 bool is_item_insertion) 4291 { 4292 struct btrfs_fs_info *fs_info = trans->fs_info; 4293 u64 bytes; 4294 4295 if (is_item_insertion) 4296 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 4297 else 4298 bytes = btrfs_calc_metadata_size(fs_info, 1); 4299 4300 mutex_lock(&fs_info->chunk_mutex); 4301 reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM); 4302 mutex_unlock(&fs_info->chunk_mutex); 4303 } 4304 4305 void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 4306 { 4307 struct btrfs_block_group *block_group; 4308 4309 block_group = btrfs_lookup_first_block_group(info, 0); 4310 while (block_group) { 4311 btrfs_wait_block_group_cache_done(block_group); 4312 spin_lock(&block_group->lock); 4313 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, 4314 &block_group->runtime_flags)) { 4315 struct inode *inode = block_group->inode; 4316 4317 block_group->inode = NULL; 4318 spin_unlock(&block_group->lock); 4319 4320 ASSERT(block_group->io_ctl.inode == NULL); 4321 iput(inode); 4322 } else { 4323 spin_unlock(&block_group->lock); 4324 } 4325 block_group = btrfs_next_block_group(block_group); 4326 } 4327 } 4328 4329 /* 4330 * Must be called only after stopping all workers, since we could have block 4331 * group caching kthreads running, and therefore they could race with us if we 4332 * freed the block groups before stopping them. 
4333 */ 4334 int btrfs_free_block_groups(struct btrfs_fs_info *info) 4335 { 4336 struct btrfs_block_group *block_group; 4337 struct btrfs_space_info *space_info; 4338 struct btrfs_caching_control *caching_ctl; 4339 struct rb_node *n; 4340 4341 if (btrfs_is_zoned(info)) { 4342 if (info->active_meta_bg) { 4343 btrfs_put_block_group(info->active_meta_bg); 4344 info->active_meta_bg = NULL; 4345 } 4346 if (info->active_system_bg) { 4347 btrfs_put_block_group(info->active_system_bg); 4348 info->active_system_bg = NULL; 4349 } 4350 } 4351 4352 write_lock(&info->block_group_cache_lock); 4353 while (!list_empty(&info->caching_block_groups)) { 4354 caching_ctl = list_entry(info->caching_block_groups.next, 4355 struct btrfs_caching_control, list); 4356 list_del(&caching_ctl->list); 4357 btrfs_put_caching_control(caching_ctl); 4358 } 4359 write_unlock(&info->block_group_cache_lock); 4360 4361 spin_lock(&info->unused_bgs_lock); 4362 while (!list_empty(&info->unused_bgs)) { 4363 block_group = list_first_entry(&info->unused_bgs, 4364 struct btrfs_block_group, 4365 bg_list); 4366 list_del_init(&block_group->bg_list); 4367 btrfs_put_block_group(block_group); 4368 } 4369 4370 while (!list_empty(&info->reclaim_bgs)) { 4371 block_group = list_first_entry(&info->reclaim_bgs, 4372 struct btrfs_block_group, 4373 bg_list); 4374 list_del_init(&block_group->bg_list); 4375 btrfs_put_block_group(block_group); 4376 } 4377 spin_unlock(&info->unused_bgs_lock); 4378 4379 spin_lock(&info->zone_active_bgs_lock); 4380 while (!list_empty(&info->zone_active_bgs)) { 4381 block_group = list_first_entry(&info->zone_active_bgs, 4382 struct btrfs_block_group, 4383 active_bg_list); 4384 list_del_init(&block_group->active_bg_list); 4385 btrfs_put_block_group(block_group); 4386 } 4387 spin_unlock(&info->zone_active_bgs_lock); 4388 4389 write_lock(&info->block_group_cache_lock); 4390 while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { 4391 block_group = rb_entry(n, struct btrfs_block_group, 4392 cache_node); 4393 rb_erase_cached(&block_group->cache_node, 4394 &info->block_group_cache_tree); 4395 RB_CLEAR_NODE(&block_group->cache_node); 4396 write_unlock(&info->block_group_cache_lock); 4397 4398 down_write(&block_group->space_info->groups_sem); 4399 list_del(&block_group->list); 4400 up_write(&block_group->space_info->groups_sem); 4401 4402 /* 4403 * We haven't cached this block group, which means we could 4404 * possibly have excluded extents on this block group. 4405 */ 4406 if (block_group->cached == BTRFS_CACHE_NO || 4407 block_group->cached == BTRFS_CACHE_ERROR) 4408 btrfs_free_excluded_extents(block_group); 4409 4410 btrfs_remove_free_space_cache(block_group); 4411 ASSERT(block_group->cached != BTRFS_CACHE_STARTED); 4412 ASSERT(list_empty(&block_group->dirty_list)); 4413 ASSERT(list_empty(&block_group->io_list)); 4414 ASSERT(list_empty(&block_group->bg_list)); 4415 ASSERT(refcount_read(&block_group->refs) == 1); 4416 ASSERT(block_group->swap_extents == 0); 4417 btrfs_put_block_group(block_group); 4418 4419 write_lock(&info->block_group_cache_lock); 4420 } 4421 write_unlock(&info->block_group_cache_lock); 4422 4423 btrfs_release_global_block_rsv(info); 4424 4425 while (!list_empty(&info->space_info)) { 4426 space_info = list_entry(info->space_info.next, 4427 struct btrfs_space_info, 4428 list); 4429 4430 /* 4431 * Do not hide this behind enospc_debug, this is actually 4432 * important and indicates a real bug if this happens. 
4433 */ 4434 if (WARN_ON(space_info->bytes_pinned > 0 || 4435 space_info->bytes_may_use > 0)) 4436 btrfs_dump_space_info(info, space_info, 0, 0); 4437 4438 /* 4439 * If there was a failure to cleanup a log tree, very likely due 4440 * to an IO failure on a writeback attempt of one or more of its 4441 * extent buffers, we could not do proper (and cheap) unaccounting 4442 * of their reserved space, so don't warn on bytes_reserved > 0 in 4443 * that case. 4444 */ 4445 if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || 4446 !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { 4447 if (WARN_ON(space_info->bytes_reserved > 0)) 4448 btrfs_dump_space_info(info, space_info, 0, 0); 4449 } 4450 4451 WARN_ON(space_info->reclaim_size > 0); 4452 list_del(&space_info->list); 4453 btrfs_sysfs_remove_space_info(space_info); 4454 } 4455 return 0; 4456 } 4457 4458 void btrfs_freeze_block_group(struct btrfs_block_group *cache) 4459 { 4460 atomic_inc(&cache->frozen); 4461 } 4462 4463 void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) 4464 { 4465 struct btrfs_fs_info *fs_info = block_group->fs_info; 4466 bool cleanup; 4467 4468 spin_lock(&block_group->lock); 4469 cleanup = (atomic_dec_and_test(&block_group->frozen) && 4470 test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)); 4471 spin_unlock(&block_group->lock); 4472 4473 if (cleanup) { 4474 struct btrfs_chunk_map *map; 4475 4476 map = btrfs_find_chunk_map(fs_info, block_group->start, 1); 4477 /* Logic error, can't happen. */ 4478 ASSERT(map); 4479 4480 btrfs_remove_chunk_map(fs_info, map); 4481 4482 /* Once for our lookup reference. */ 4483 btrfs_free_chunk_map(map); 4484 4485 /* 4486 * We may have left one free space entry and other possible 4487 * tasks trimming this block group have left 1 entry each one. 4488 * Free them if any. 4489 */ 4490 btrfs_remove_free_space_cache(block_group); 4491 } 4492 } 4493 4494 bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg) 4495 { 4496 bool ret = true; 4497 4498 spin_lock(&bg->lock); 4499 if (bg->ro) 4500 ret = false; 4501 else 4502 bg->swap_extents++; 4503 spin_unlock(&bg->lock); 4504 4505 return ret; 4506 } 4507 4508 void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount) 4509 { 4510 spin_lock(&bg->lock); 4511 ASSERT(!bg->ro); 4512 ASSERT(bg->swap_extents >= amount); 4513 bg->swap_extents -= amount; 4514 spin_unlock(&bg->lock); 4515 } 4516 4517 enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size) 4518 { 4519 if (size <= SZ_128K) 4520 return BTRFS_BG_SZ_SMALL; 4521 if (size <= SZ_8M) 4522 return BTRFS_BG_SZ_MEDIUM; 4523 return BTRFS_BG_SZ_LARGE; 4524 } 4525 4526 /* 4527 * Handle a block group allocating an extent in a size class 4528 * 4529 * @bg: The block group we allocated in. 4530 * @size_class: The size class of the allocation. 4531 * @force_wrong_size_class: Whether we are desperate enough to allow 4532 * mismatched size classes. 4533 * 4534 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the 4535 * case of a race that leads to the wrong size class without 4536 * force_wrong_size_class set. 4537 * 4538 * find_free_extent will skip block groups with a mismatched size class until 4539 * it really needs to avoid ENOSPC. In that case it will set 4540 * force_wrong_size_class. However, if a block group is newly allocated and 4541 * doesn't yet have a size class, then it is possible for two allocations of 4542 * different sizes to race and both try to use it. 
The loser is caught here and 4543 * has to retry. 4544 */ 4545 int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, 4546 enum btrfs_block_group_size_class size_class, 4547 bool force_wrong_size_class) 4548 { 4549 ASSERT(size_class != BTRFS_BG_SZ_NONE); 4550 4551 /* The new allocation is in the right size class, do nothing */ 4552 if (bg->size_class == size_class) 4553 return 0; 4554 /* 4555 * The new allocation is in a mismatched size class. 4556 * This means one of two things: 4557 * 4558 * 1. Two tasks in find_free_extent for different size_classes raced 4559 * and hit the same empty block_group. Make the loser try again. 4560 * 2. A call to find_free_extent got desperate enough to set 4561 * 'force_wrong_size_class'. Don't change the size_class, but allow the 4562 * allocation. 4563 */ 4564 if (bg->size_class != BTRFS_BG_SZ_NONE) { 4565 if (force_wrong_size_class) 4566 return 0; 4567 return -EAGAIN; 4568 } 4569 /* 4570 * The happy new block group case: the new allocation is the first 4571 * one in the block_group so we set size_class. 4572 */ 4573 bg->size_class = size_class; 4574 4575 return 0; 4576 } 4577 4578 bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg) 4579 { 4580 if (btrfs_is_zoned(bg->fs_info)) 4581 return false; 4582 if (!btrfs_is_block_group_data_only(bg)) 4583 return false; 4584 return true; 4585 } 4586
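/*
 * A minimal usage sketch of the size class helpers, mirroring the pattern
 * already used by btrfs_add_reserved_bytes() above (illustrative only):
 *
 *	if (btrfs_block_group_should_use_size_class(bg)) {
 *		size_class = btrfs_calc_block_group_size_class(num_bytes);
 *		ret = btrfs_use_block_group_size_class(bg, size_class,
 *						       force_wrong_size_class);
 *		if (ret)
 *			goto out;
 *	}
 */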