// SPDX-License-Identifier: GPL-2.0

#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	/* Select the highest-redundancy RAID level. */
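	/*
	 * The checks below are ordered by how many device failures each
	 * profile can tolerate: RAID1C4 (3), RAID6 (2), RAID1C3 (2),
	 * RAID5 (1), RAID10 (1), RAID1 (1), and finally DUP and RAID0,
	 * which survive no device loss at all.
	 */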
	if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
		allowed = BTRFS_BLOCK_GROUP_RAID1C4;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
		allowed = BTRFS_BLOCK_GROUP_RAID1C3;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_DUP)
		allowed = BTRFS_BLOCK_GROUP_DUP;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		/*
		 * If there was a failure to clean up a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on reserved > 0 in that
		 * case.
		 */
		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
			WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		kfree(cache->free_space_ctl);
		btrfs_free_chunk_map(cache->physical_map);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;
	bool leftmost = true;

	ASSERT(block_group->length != 0);

	write_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_root.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node,
			       &info->block_group_cache_tree, leftmost);

	write_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	read_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_root.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	read_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
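
/*
 * Illustrative iteration pattern (a sketch, not copied from a specific
 * caller):
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... use bg ...
 *	}
 *
 * btrfs_next_block_group() drops the caller's reference on @cache and returns
 * the next block group with a new reference held, so the loop owns at most one
 * block group reference at any time.
 */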
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	read_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:	The filesystem information object.
 * @bytenr:	Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increment the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
 *          is responsible for calling btrfs_dec_nocow_writers() later.
 *
 *          Or NULL if we cannot do a NOCOW write
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it wants
 * to use it, then it should get a reference on it before calling this function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
	btrfs_put_block_group(bg);
}
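
/*
 * Typical pairing, as an illustrative sketch (not copied from a specific
 * caller): a NOCOW write path does
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, disk_bytenr);
 *	if (!bg)
 *		... fall back to a COW write ...
 *	... create the ordered extent for the NOCOW write ...
 *	btrfs_dec_nocow_writers(bg);
 *
 * while scrub and relocation mark the block group read-only and then call
 * btrfs_wait_nocow_writers() below to let in-flight NOCOW writes drain.
 */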

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have already allocated an extent from it, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	int progress;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	/*
	 * We've already failed to allocate from this block group, so even if
	 * there's enough space in the block group it isn't contiguous enough to
	 * allow for an allocation, so wait for at least the next wakeup tick,
	 * or for the thing to be done.
	 */
	progress = atomic_read(&caching_ctl->progress);

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (progress != atomic_read(&caching_ctl->progress) &&
		    (cache->free_space_ctl->free_space >= num_bytes)));

	btrfs_put_caching_control(caching_ctl);
}

static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
				       struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * Add a free space range to the in memory free space cache of a block group.
 * This checks if the range contains super block locations and any such
 * locations are not added to the free space cache.
 *
 * @block_group:      The target block group.
 * @start:            Start offset of the range.
 * @end:              End offset of the range (exclusive).
 * @total_added_ret:  Optional pointer to return the total amount of space
 *                    added to the block group's free space cache.
 *
 * Returns 0 on success or < 0 on error.
 */
int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
			     u64 end, u64 *total_added_ret)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size;
	int ret;

	if (total_added_ret)
		*total_added_ret = 0;

	while (start < end) {
		if (!find_first_extent_bit(&info->excluded_extents, start,
					   &extent_start, &extent_end,
					   EXTENT_DIRTY | EXTENT_UPTODATE,
					   NULL))
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			if (ret)
				return ret;
			if (total_added_ret)
				*total_added_ret += size;
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		if (ret)
			return ret;
		if (total_added_ret)
			*total_added_ret += size;
	}

	return 0;
}

/*
 * Get an arbitrary extent item index / max_index through the block group
 *
 * @block_group: the block group to sample from
 * @index:       the integral step through the block group to grab from
 * @max_index:   the granularity of the sampling
 * @key:         return value parameter for the item we find
 *
 * Pre-conditions on indices:
 * 0 <= index <= max_index
 * 0 < max_index
 *
 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative
 * error code on error.
 */
static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
					  struct btrfs_block_group *block_group,
					  int index, int max_index,
					  struct btrfs_key *found_key)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	u64 search_offset;
	u64 search_end = block_group->start + block_group->length;
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret = 0;

	ASSERT(index >= 0);
	ASSERT(index <= max_index);
	ASSERT(max_index > 0);
	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
						       BTRFS_SUPER_INFO_OFFSET));

	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	search_offset = index * div_u64(block_group->length, max_index);
	search_key.objectid = block_group->start + search_offset;
	search_key.type = BTRFS_EXTENT_ITEM_KEY;
	search_key.offset = 0;

	btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
		/* Success; sampled an extent item in the block group */
		if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key->objectid >= block_group->start &&
		    found_key->objectid + found_key->offset <= search_end)
			break;

		/* We can't possibly find a valid extent item anymore */
		if (found_key->objectid >= search_end) {
			ret = 1;
			break;
		}
	}

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	btrfs_free_path(path);
	return ret;
}
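
/*
 * Note on the sampling positions (derived from the code above): with
 * search_offset = index * (length / max_index) and the max_index == 5 used by
 * load_block_group_size_class() below, the samples start at 0%, 20%, 40%, 60%
 * and 80% of the block group.
 */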

/*
 * Best effort attempt to compute a block group's size class while caching it.
 *
 * @block_group: the block group we are caching
 *
 * We cannot infer the size class while adding free space extents, because that
 * logic doesn't care about contiguous file extents (it doesn't differentiate
 * between a 100M extent and 100 contiguous 1M extents). So we need to read the
 * file extent items. Reading all of them is quite wasteful, because usually
 * only a handful are enough to give a good answer. Therefore, we just grab 5 of
 * them at even steps through the block group and pick the smallest size class
 * we see. Since size class is best effort, and not guaranteed in general,
 * inaccuracy is acceptable.
 *
 * To be more explicit about why this algorithm makes sense:
 *
 * If we are caching in a block group from disk, then there are three major cases
 * to consider:
 * 1. the block group is well behaved and all extents in it are the same size
 *    class.
 * 2. the block group is mostly one size class with rare exceptions for last
 *    ditch allocations
 * 3. the block group was populated before size classes and can have a totally
 *    arbitrary mix of size classes.
 *
 * In case 1, looking at any extent in the block group will yield the correct
 * result. For the mixed cases, taking the minimum size class seems like a good
 * approximation, since gaps from frees will be usable to the size class. For
 * 2., a small handful of file extents is likely to yield the right answer. For
 * 3, we can either read every file extent, or admit that this is best effort
 * anyway and try to stay fast.
 *
 * Returns: 0 on success, negative error code on error.
 */
static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
				       struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_key key;
	int i;
	u64 min_size = block_group->length;
	enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
	int ret;

	if (!btrfs_block_group_should_use_size_class(block_group))
		return 0;

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	for (i = 0; i < 5; ++i) {
		ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
		if (ret < 0)
			goto out;
		if (ret > 0)
			continue;
		min_size = min_t(u64, min_size, key.offset);
		size_class = btrfs_calc_block_group_size_class(min_size);
	}
	if (size_class != BTRFS_BG_SZ_NONE) {
		spin_lock(&block_group->lock);
		block_group->size_class = size_class;
		spin_unlock(&block_group->lock);
	}
out:
	return ret;
}

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
	extent_root = btrfs_extent_root(fs_info, last);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			u64 space_added;

			ret = btrfs_add_new_free_space(block_group, last,
						       key.objectid, &space_added);
			if (ret)
				goto out;
			total_found += space_added;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup) {
					atomic_inc(&caching_ctl->progress);
					wake_up(&caching_ctl->wait);
				}
			}
		}
		path->slots[0]++;
	}

	ret = btrfs_add_new_free_space(block_group, last,
				       block_group->start + block_group->length,
				       NULL);
out:
	btrfs_free_path(path);
	return ret;
}

static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
{
	clear_extent_bits(&bg->fs_info->excluded_extents, bg->start,
			  bg->start + bg->length - 1, EXTENT_UPTODATE);
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	load_block_group_size_class(caching_ctl, block_group);
	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching. Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	refcount_set(&caching_ctl->count, 2);
	atomic_set(&caching_ctl->progress, 0);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	write_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	write_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (wait && caching_ctl)
		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}
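
/*
 * Illustrative use (a sketch, not copied from a specific caller): a caller
 * that needs the free space information right away passes wait == true, e.g.
 *
 *	ret = btrfs_cache_block_group(cache, true);
 *	if (ret)
 *		return ret;
 *
 * while the extent allocator may kick caching off with wait == false and check
 * btrfs_block_group_done() or wait for caching progress later.
 */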

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = btrfs_block_group_root(fs_info);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_map;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, map->start);
	if (!block_group)
		return -ENOENT;

	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;

	write_lock(&fs_info->block_group_cache_lock);
	rb_erase_cached(&block_group->cache_node,
			&fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	write_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);

	write_lock(&fs_info->block_group_cache_lock);
	caching_ctl = btrfs_get_caching_control(block_group);
	if (!caching_ctl) {
		struct btrfs_caching_control *ctl;

		list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
			if (ctl->block_group == block_group) {
				caching_ctl = ctl;
				refcount_inc(&caching_ctl->count);
				break;
			}
		}
	}
	if (caching_ctl)
		list_del_init(&caching_ctl->list);
	write_unlock(&fs_info->block_group_cache_lock);

	if (caching_ctl) {
		/* Once for the caching bgs list and once for us. */
		btrfs_put_caching_control(caching_ctl);
		btrfs_put_caching_control(caching_ctl);
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length - block_group->zone_unusable);
		WARN_ON(block_group->space_info->bytes_zone_unusable
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
	block_group->space_info->bytes_zone_unusable -=
		block_group->zone_unusable;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the chunk map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_map = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_map)
		btrfs_remove_chunk_map(fs_info, map);

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	struct btrfs_chunk_map *map;
	unsigned int num_items;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(map != NULL);
	ASSERT(map->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
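	/*
	 * For example (illustrative): a RAID1 chunk striped over two devices
	 * has two device extent items, so num_items = 3 + 2 = 5.
	 */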
	num_items = 3 + map->num_stripes;
	btrfs_free_chunk_map(map);

	return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}

/*
 * Mark block group @cache read-only, so later writes won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->swap_extents) {
		ret = -ETXTBSY;
		goto out;
	}

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->zone_unusable - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		if (btrfs_is_zoned(cache->fs_info)) {
			/* Migrate zone_unusable bytes to readonly */
			sinfo->bytes_readonly += cache->zone_unusable;
			sinfo->bytes_zone_unusable -= cache->zone_unusable;
			cache->zone_unusable = 0;
		}
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
	}
	return ret;
}

static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_transaction *prev_trans = NULL;
	const u64 start = bg->start;
	const u64 end = start + bg->length - 1;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->list.prev != &fs_info->trans_list) {
		prev_trans = list_last_entry(&trans->transaction->list,
					     struct btrfs_transaction, list);
		refcount_inc(&prev_trans->use_count);
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
	 * btrfs_finish_extent_commit(). If we are at transaction N, another
	 * task might be running finish_extent_commit() for the previous
	 * transaction N - 1, and have seen a range belonging to the block
	 * group in pinned_extents before we were able to clear the whole block
	 * group range from pinned_extents. This means that task can look up the
	 * block group after we unpinned it from pinned_extents and removed
	 * it, leading to an error at unpin_extent_range().
	 */
	mutex_lock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans) {
		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
					EXTENT_DIRTY);
		if (ret)
			goto out;
	}

	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
				EXTENT_DIRTY);
out:
	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans)
		btrfs_put_transaction(prev_trans);

	return ret == 0;
}

/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	LIST_HEAD(retry_list);
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
	int ret = 0;

	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;

	if (btrfs_fs_closing(fs_info))
		return;

	/*
	 * Long running balances can keep us blocked here for eternity, so
	 * simply skip deletion if we're unable to get the mutex.
	 */
	if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		u64 used;
		int trimming;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);

		space_info = block_group->space_info;

		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);

		/*
		 * Async discard moves the final block group discard to be prior
		 * to the unused_bgs code path. Therefore, if it's not fully
		 * trimmed, punt it back to the async discard lists.
		 */
		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
		    !btrfs_is_free_space_trimmed(block_group)) {
			trace_btrfs_skip_unused_block_group(block_group);
			up_write(&space_info->groups_sem);
			/* Requeue if we failed because of async discard */
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto next;
		}

		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (btrfs_is_block_group_used(block_group) || block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group. We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 *
			 * Also bail out if this is the only block group for its
			 * type, because otherwise we would lose profile
			 * information from fs_info->avail_*_alloc_bits and the
			 * next block group of this type would be created with a
			 * "single" profile (even if we're in a raid fs) because
			 * fs_info->avail_*_alloc_bits would be 0.
			 */
			trace_btrfs_skip_unused_block_group(block_group);
			spin_unlock(&block_group->lock);
			spin_unlock(&space_info->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}

		/*
		 * The block group may be unused but there may be space reserved
		 * accounting with the existence of that block group, that is,
		 * space_info->bytes_may_use was incremented by a task but no
		 * space was yet allocated from the block group by the task.
		 * That space may or may not be allocated, as we are generally
		 * pessimistic about space reservation for metadata as well as
		 * for data when using compression (as we reserve space based on
		 * the worst case, when data can't be compressed, and before
		 * actually attempting compression, before starting writeback).
		 *
		 * So check if the total space of the space_info minus the size
		 * of this block group is less than the used space of the
		 * space_info - if that's the case, then it means we have tasks
		 * that might be relying on the block group in order to allocate
		 * extents, and add back the block group to the unused list when
		 * we finish, so that we retry later in case no tasks ended up
		 * needing to allocate extents from the block group.
		 */
		used = btrfs_space_info_used(space_info, true);
		if (space_info->total_bytes - block_group->length < used &&
		    block_group->zone_unusable < block_group->length) {
			/*
			 * Add a reference for the list, compensate for the ref
			 * drop under the "next" label for the
			 * fs_info->unused_bgs list.
			 */
			btrfs_get_block_group(block_group);
			list_add_tail(&block_group->bg_list, &retry_list);

			trace_btrfs_skip_unused_block_group(block_group);
			spin_unlock(&block_group->lock);
			spin_unlock(&space_info->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		ret = btrfs_zone_finish(block_group);
		if (ret < 0) {
			btrfs_dec_block_group_ro(block_group);
			if (ret == -EAGAIN)
				ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
							     block_group->start);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		if (!clean_pinned_extents(trans, block_group)) {
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}

		/*
		 * At this point, the block_group is read only and should fail
		 * new allocations. However, btrfs_finish_extent_commit() can
		 * cause this block_group to be placed back on the discard
		 * lists because now the block_group isn't fully discarded.
		 * Bail here and try again later after discarding everything.
		 */
		spin_lock(&fs_info->discard_ctl.lock);
		if (!list_empty(&block_group->discard_list)) {
			spin_unlock(&fs_info->discard_ctl.lock);
			btrfs_dec_block_group_ro(block_group);
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto end_trans;
		}
		spin_unlock(&fs_info->discard_ctl.lock);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
						     -block_group->pinned);
		space_info->bytes_readonly += block_group->pinned;
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/*
		 * The normal path here is an unused block group is passed here,
		 * then trimming is handled in the transaction commit path.
		 * Async discard interposes before this to do the trimming
		 * before coming down the unused block group path as trimming
		 * will no longer be done later in the transaction commit path.
		 */
		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
			goto flip_async;

		/*
		 * DISCARD can flip during remount. On zoned filesystems, we
		 * need to reset sequential-required zones.
		 */
		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
				btrfs_is_zoned(fs_info);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_freeze_block_group(block_group);

		/*
		 * Btrfs_remove_chunk will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, block_group->start);

		if (ret) {
			if (trimming)
				btrfs_unfreeze_block_group(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans);
next:
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	list_splice_tail(&retry_list, &fs_info->unused_bgs);
	spin_unlock(&fs_info->unused_bgs_lock);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	return;

flip_async:
	btrfs_end_transaction(trans);
	spin_lock(&fs_info->unused_bgs_lock);
	list_splice_tail(&retry_list, &fs_info->unused_bgs);
	spin_unlock(&fs_info->unused_bgs_lock);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_put_block_group(block_group);
	btrfs_discard_punt_unused_bgs_list(fs_info);
}

void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&bg->bg_list)) {
		btrfs_get_block_group(bg);
		trace_btrfs_add_unused_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	} else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
		/* Pull out the block group from the reclaim_bgs list. */
		trace_btrfs_add_unused_block_group(bg);
		list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

/*
 * We want block groups with a low number of used bytes to be in the beginning
 * of the list, so they will get reclaimed first.
 */
static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
			   const struct list_head *b)
{
	const struct btrfs_block_group *bg1, *bg2;

	bg1 = list_entry(a, struct btrfs_block_group, bg_list);
	bg2 = list_entry(b, struct btrfs_block_group, bg_list);

	return bg1->used > bg2->used;
}

static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
{
	if (btrfs_is_zoned(fs_info))
		return btrfs_zoned_should_reclaim(fs_info);
	return true;
}
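
/*
 * Worked example (illustrative): with bg_reclaim_threshold == 75 and a 1 GiB
 * block group, thresh below is 768 MiB. The group is reclaimed only when a
 * free drops its usage from at least 768 MiB (old_val) to below 768 MiB
 * (new_val), so block groups that never filled past the threshold are left
 * alone.
 */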
1774 */ 1775 if (old_val < thresh) 1776 return false; 1777 if (new_val >= thresh) 1778 return false; 1779 return true; 1780 } 1781 1782 void btrfs_reclaim_bgs_work(struct work_struct *work) 1783 { 1784 struct btrfs_fs_info *fs_info = 1785 container_of(work, struct btrfs_fs_info, reclaim_bgs_work); 1786 struct btrfs_block_group *bg; 1787 struct btrfs_space_info *space_info; 1788 LIST_HEAD(retry_list); 1789 1790 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) 1791 return; 1792 1793 if (btrfs_fs_closing(fs_info)) 1794 return; 1795 1796 if (!btrfs_should_reclaim(fs_info)) 1797 return; 1798 1799 sb_start_write(fs_info->sb); 1800 1801 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { 1802 sb_end_write(fs_info->sb); 1803 return; 1804 } 1805 1806 /* 1807 * Long running balances can keep us blocked here for eternity, so 1808 * simply skip reclaim if we're unable to get the mutex. 1809 */ 1810 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { 1811 btrfs_exclop_finish(fs_info); 1812 sb_end_write(fs_info->sb); 1813 return; 1814 } 1815 1816 spin_lock(&fs_info->unused_bgs_lock); 1817 /* 1818 * Sort happens under lock because we can't simply splice it and sort. 1819 * The block groups might still be in use and reachable via bg_list, 1820 * and their presence in the reclaim_bgs list must be preserved. 1821 */ 1822 list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); 1823 while (!list_empty(&fs_info->reclaim_bgs)) { 1824 u64 zone_unusable; 1825 int ret = 0; 1826 1827 bg = list_first_entry(&fs_info->reclaim_bgs, 1828 struct btrfs_block_group, 1829 bg_list); 1830 list_del_init(&bg->bg_list); 1831 1832 space_info = bg->space_info; 1833 spin_unlock(&fs_info->unused_bgs_lock); 1834 1835 /* Don't race with allocators so take the groups_sem */ 1836 down_write(&space_info->groups_sem); 1837 1838 spin_lock(&bg->lock); 1839 if (bg->reserved || bg->pinned || bg->ro) { 1840 /* 1841 * We want to bail if we made new allocations or have 1842 * outstanding allocations in this block group. We do 1843 * the ro check in case balance is currently acting on 1844 * this block group. 1845 */ 1846 spin_unlock(&bg->lock); 1847 up_write(&space_info->groups_sem); 1848 goto next; 1849 } 1850 if (bg->used == 0) { 1851 /* 1852 * It is possible that we trigger relocation on a block 1853 * group as its extents are deleted and it first goes 1854 * below the threshold, then shortly after goes empty. 1855 * 1856 * In this case, relocating it does delete it, but has 1857 * some overhead in relocation specific metadata, looking 1858 * for the non-existent extents and running some extra 1859 * transactions, which we can avoid by using one of the 1860 * other mechanisms for dealing with empty block groups. 1861 */ 1862 if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) 1863 btrfs_mark_bg_unused(bg); 1864 spin_unlock(&bg->lock); 1865 up_write(&space_info->groups_sem); 1866 goto next; 1867 1868 } 1869 /* 1870 * The block group might no longer meet the reclaim condition by 1871 * the time we get around to reclaiming it, so to avoid 1872 * reclaiming overly full block_groups, skip reclaiming them. 1873 * 1874 * Since the decision making process also depends on the amount 1875 * being freed, pass in a fake giant value to skip that extra 1876 * check, which is more meaningful when adding to the list in 1877 * the first place. 
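 *
 * Concretely, passing bg->length as the amount freed turns the "were we
 * above the threshold before" test into a no-op: old_val becomes
 * used + length, which is at least thresh for any threshold percentage up
 * to 100, so only the "are we below the threshold now" check still matters
 * at this call site.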
1878 */ 1879 if (!should_reclaim_block_group(bg, bg->length)) { 1880 spin_unlock(&bg->lock); 1881 up_write(&space_info->groups_sem); 1882 goto next; 1883 } 1884 spin_unlock(&bg->lock); 1885 1886 /* 1887 * Get out fast, in case we're read-only or unmounting the 1888 * filesystem. It is OK to drop block groups from the list even 1889 * for the read-only case. As we did sb_start_write(), 1890 * "mount -o remount,ro" won't happen and read-only filesystem 1891 * means it is forced read-only due to a fatal error. So, it 1892 * never gets back to read-write to let us reclaim again. 1893 */ 1894 if (btrfs_need_cleaner_sleep(fs_info)) { 1895 up_write(&space_info->groups_sem); 1896 goto next; 1897 } 1898 1899 /* 1900 * Cache the zone_unusable value before turning the block group 1901 * to read only. As soon as the block group is read only its 1902 * zone_unusable value gets moved to the block group's read-only 1903 * bytes and isn't available for calculations anymore. 1904 */ 1905 zone_unusable = bg->zone_unusable; 1906 ret = inc_block_group_ro(bg, 0); 1907 up_write(&space_info->groups_sem); 1908 if (ret < 0) 1909 goto next; 1910 1911 btrfs_info(fs_info, 1912 "reclaiming chunk %llu with %llu%% used %llu%% unusable", 1913 bg->start, 1914 div64_u64(bg->used * 100, bg->length), 1915 div64_u64(zone_unusable * 100, bg->length)); 1916 trace_btrfs_reclaim_block_group(bg); 1917 ret = btrfs_relocate_chunk(fs_info, bg->start); 1918 if (ret) { 1919 btrfs_dec_block_group_ro(bg); 1920 btrfs_err(fs_info, "error relocating chunk %llu", 1921 bg->start); 1922 } 1923 1924 next: 1925 if (ret) { 1926 /* Refcount held by the reclaim_bgs list after splice. */ 1927 spin_lock(&fs_info->unused_bgs_lock); 1928 /* 1929 * This block group might have been added to the unused list 1930 * during the above process. If it was not, move it back 1931 * to the reclaim list. 1932 */ 1933 if (list_empty(&bg->bg_list)) { 1934 btrfs_get_block_group(bg); 1935 list_add_tail(&bg->bg_list, &retry_list); 1936 } 1937 spin_unlock(&fs_info->unused_bgs_lock); 1938 } 1939 btrfs_put_block_group(bg); 1940 1941 mutex_unlock(&fs_info->reclaim_bgs_lock); 1942 /* 1943 * Reclaiming all the block groups in the list can take really 1944 * long. Prioritize cleaning up unused block groups. 1945 */ 1946 btrfs_delete_unused_bgs(fs_info); 1947 /* 1948 * If we are interrupted by a balance, we can just bail out. The 1949 * cleaner thread will restart it again if necessary.
1950 */ 1951 if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) 1952 goto end; 1953 spin_lock(&fs_info->unused_bgs_lock); 1954 } 1955 spin_unlock(&fs_info->unused_bgs_lock); 1956 mutex_unlock(&fs_info->reclaim_bgs_lock); 1957 end: 1958 spin_lock(&fs_info->unused_bgs_lock); 1959 list_splice_tail(&retry_list, &fs_info->reclaim_bgs); 1960 spin_unlock(&fs_info->unused_bgs_lock); 1961 btrfs_exclop_finish(fs_info); 1962 sb_end_write(fs_info->sb); 1963 } 1964 1965 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) 1966 { 1967 spin_lock(&fs_info->unused_bgs_lock); 1968 if (!list_empty(&fs_info->reclaim_bgs)) 1969 queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); 1970 spin_unlock(&fs_info->unused_bgs_lock); 1971 } 1972 1973 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) 1974 { 1975 struct btrfs_fs_info *fs_info = bg->fs_info; 1976 1977 spin_lock(&fs_info->unused_bgs_lock); 1978 if (list_empty(&bg->bg_list)) { 1979 btrfs_get_block_group(bg); 1980 trace_btrfs_add_reclaim_block_group(bg); 1981 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); 1982 } 1983 spin_unlock(&fs_info->unused_bgs_lock); 1984 } 1985 1986 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, 1987 struct btrfs_path *path) 1988 { 1989 struct btrfs_chunk_map *map; 1990 struct btrfs_block_group_item bg; 1991 struct extent_buffer *leaf; 1992 int slot; 1993 u64 flags; 1994 int ret = 0; 1995 1996 slot = path->slots[0]; 1997 leaf = path->nodes[0]; 1998 1999 map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset); 2000 if (!map) { 2001 btrfs_err(fs_info, 2002 "logical %llu len %llu found bg but no related chunk", 2003 key->objectid, key->offset); 2004 return -ENOENT; 2005 } 2006 2007 if (map->start != key->objectid || map->chunk_len != key->offset) { 2008 btrfs_err(fs_info, 2009 "block group %llu len %llu mismatch with chunk %llu len %llu", 2010 key->objectid, key->offset, map->start, map->chunk_len); 2011 ret = -EUCLEAN; 2012 goto out_free_map; 2013 } 2014 2015 read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), 2016 sizeof(bg)); 2017 flags = btrfs_stack_block_group_flags(&bg) & 2018 BTRFS_BLOCK_GROUP_TYPE_MASK; 2019 2020 if (flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 2021 btrfs_err(fs_info, 2022 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", 2023 key->objectid, key->offset, flags, 2024 (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type)); 2025 ret = -EUCLEAN; 2026 } 2027 2028 out_free_map: 2029 btrfs_free_chunk_map(map); 2030 return ret; 2031 } 2032 2033 static int find_first_block_group(struct btrfs_fs_info *fs_info, 2034 struct btrfs_path *path, 2035 struct btrfs_key *key) 2036 { 2037 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2038 int ret; 2039 struct btrfs_key found_key; 2040 2041 btrfs_for_each_slot(root, key, &found_key, path, ret) { 2042 if (found_key.objectid >= key->objectid && 2043 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 2044 return read_bg_from_eb(fs_info, &found_key, path); 2045 } 2046 } 2047 return ret; 2048 } 2049 2050 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 2051 { 2052 u64 extra_flags = chunk_to_extended(flags) & 2053 BTRFS_EXTENDED_PROFILE_MASK; 2054 2055 write_seqlock(&fs_info->profiles_lock); 2056 if (flags & BTRFS_BLOCK_GROUP_DATA) 2057 fs_info->avail_data_alloc_bits |= extra_flags; 2058 if (flags & BTRFS_BLOCK_GROUP_METADATA) 2059 fs_info->avail_metadata_alloc_bits |= extra_flags; 2060 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 2061 
fs_info->avail_system_alloc_bits |= extra_flags; 2062 write_sequnlock(&fs_info->profiles_lock); 2063 } 2064 2065 /* 2066 * Map a physical disk address to a list of logical addresses. 2067 * 2068 * @fs_info: the filesystem 2069 * @chunk_start: logical address of block group 2070 * @physical: physical address to map to logical addresses 2071 * @logical: return array of logical addresses which map to @physical 2072 * @naddrs: length of @logical 2073 * @stripe_len: size of IO stripe for the given block group 2074 * 2075 * Maps a particular @physical disk address to a list of @logical addresses. 2076 * Used primarily to exclude those portions of a block group that contain super 2077 * block copies. 2078 */ 2079 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, 2080 u64 physical, u64 **logical, int *naddrs, int *stripe_len) 2081 { 2082 struct btrfs_chunk_map *map; 2083 u64 *buf; 2084 u64 bytenr; 2085 u64 data_stripe_length; 2086 u64 io_stripe_size; 2087 int i, nr = 0; 2088 int ret = 0; 2089 2090 map = btrfs_get_chunk_map(fs_info, chunk_start, 1); 2091 if (IS_ERR(map)) 2092 return -EIO; 2093 2094 data_stripe_length = map->stripe_size; 2095 io_stripe_size = BTRFS_STRIPE_LEN; 2096 chunk_start = map->start; 2097 2098 /* For RAID5/6 adjust to a full IO stripe length */ 2099 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) 2100 io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); 2101 2102 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); 2103 if (!buf) { 2104 ret = -ENOMEM; 2105 goto out; 2106 } 2107 2108 for (i = 0; i < map->num_stripes; i++) { 2109 bool already_inserted = false; 2110 u32 stripe_nr; 2111 u32 offset; 2112 int j; 2113 2114 if (!in_range(physical, map->stripes[i].physical, 2115 data_stripe_length)) 2116 continue; 2117 2118 stripe_nr = (physical - map->stripes[i].physical) >> 2119 BTRFS_STRIPE_LEN_SHIFT; 2120 offset = (physical - map->stripes[i].physical) & 2121 BTRFS_STRIPE_LEN_MASK; 2122 2123 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | 2124 BTRFS_BLOCK_GROUP_RAID10)) 2125 stripe_nr = div_u64(stripe_nr * map->num_stripes + i, 2126 map->sub_stripes); 2127 /* 2128 * The remaining case would be for RAID56, multiply by 2129 * nr_data_stripes(). 
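 * (Stepping back to the simpler cases, a worked example with invented
 * numbers: take a RAID0 chunk with two stripes and the fixed 64KiB stripe
 * length, where @physical lands 192KiB into stripe i=1. Then stripe_nr = 3
 * and offset = 0, the RAID0/10 adjustment yields
 * stripe_nr = (3 * 2 + 1) / 1 = 7, and the logical address computed below
 * is chunk_start + 7 * 64KiB.)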
Alternatively, just use rmap_len below 2130 * instead of map->stripe_len 2131 */ 2132 bytenr = chunk_start + stripe_nr * io_stripe_size + offset; 2133 2134 /* Ensure we don't add duplicate addresses */ 2135 for (j = 0; j < nr; j++) { 2136 if (buf[j] == bytenr) { 2137 already_inserted = true; 2138 break; 2139 } 2140 } 2141 2142 if (!already_inserted) 2143 buf[nr++] = bytenr; 2144 } 2145 2146 *logical = buf; 2147 *naddrs = nr; 2148 *stripe_len = io_stripe_size; 2149 out: 2150 btrfs_free_chunk_map(map); 2151 return ret; 2152 } 2153 2154 static int exclude_super_stripes(struct btrfs_block_group *cache) 2155 { 2156 struct btrfs_fs_info *fs_info = cache->fs_info; 2157 const bool zoned = btrfs_is_zoned(fs_info); 2158 u64 bytenr; 2159 u64 *logical; 2160 int stripe_len; 2161 int i, nr, ret; 2162 2163 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { 2164 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; 2165 cache->bytes_super += stripe_len; 2166 ret = set_extent_bit(&fs_info->excluded_extents, cache->start, 2167 cache->start + stripe_len - 1, 2168 EXTENT_UPTODATE, NULL); 2169 if (ret) 2170 return ret; 2171 } 2172 2173 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2174 bytenr = btrfs_sb_offset(i); 2175 ret = btrfs_rmap_block(fs_info, cache->start, 2176 bytenr, &logical, &nr, &stripe_len); 2177 if (ret) 2178 return ret; 2179 2180 /* Shouldn't have super stripes in sequential zones */ 2181 if (zoned && nr) { 2182 kfree(logical); 2183 btrfs_err(fs_info, 2184 "zoned: block group %llu must not contain super block", 2185 cache->start); 2186 return -EUCLEAN; 2187 } 2188 2189 while (nr--) { 2190 u64 len = min_t(u64, stripe_len, 2191 cache->start + cache->length - logical[nr]); 2192 2193 cache->bytes_super += len; 2194 ret = set_extent_bit(&fs_info->excluded_extents, logical[nr], 2195 logical[nr] + len - 1, 2196 EXTENT_UPTODATE, NULL); 2197 if (ret) { 2198 kfree(logical); 2199 return ret; 2200 } 2201 } 2202 2203 kfree(logical); 2204 } 2205 return 0; 2206 } 2207 2208 static struct btrfs_block_group *btrfs_create_block_group_cache( 2209 struct btrfs_fs_info *fs_info, u64 start) 2210 { 2211 struct btrfs_block_group *cache; 2212 2213 cache = kzalloc(sizeof(*cache), GFP_NOFS); 2214 if (!cache) 2215 return NULL; 2216 2217 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 2218 GFP_NOFS); 2219 if (!cache->free_space_ctl) { 2220 kfree(cache); 2221 return NULL; 2222 } 2223 2224 cache->start = start; 2225 2226 cache->fs_info = fs_info; 2227 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); 2228 2229 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; 2230 2231 refcount_set(&cache->refs, 1); 2232 spin_lock_init(&cache->lock); 2233 init_rwsem(&cache->data_rwsem); 2234 INIT_LIST_HEAD(&cache->list); 2235 INIT_LIST_HEAD(&cache->cluster_list); 2236 INIT_LIST_HEAD(&cache->bg_list); 2237 INIT_LIST_HEAD(&cache->ro_list); 2238 INIT_LIST_HEAD(&cache->discard_list); 2239 INIT_LIST_HEAD(&cache->dirty_list); 2240 INIT_LIST_HEAD(&cache->io_list); 2241 INIT_LIST_HEAD(&cache->active_bg_list); 2242 btrfs_init_free_space_ctl(cache, cache->free_space_ctl); 2243 atomic_set(&cache->frozen, 0); 2244 mutex_init(&cache->free_space_lock); 2245 2246 return cache; 2247 } 2248 2249 /* 2250 * Iterate all chunks and verify that each of them has the corresponding block 2251 * group 2252 */ 2253 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) 2254 { 2255 u64 start = 0; 2256 int ret = 0; 2257 2258 while (1) { 2259 struct btrfs_chunk_map *map; 2260 struct btrfs_block_group *bg; 2261 2262 /* 2263 * 
btrfs_find_chunk_map() will return the first chunk map 2264 * intersecting the range, so setting @length to 1 is enough to 2265 * get the first chunk. 2266 */ 2267 map = btrfs_find_chunk_map(fs_info, start, 1); 2268 if (!map) 2269 break; 2270 2271 bg = btrfs_lookup_block_group(fs_info, map->start); 2272 if (!bg) { 2273 btrfs_err(fs_info, 2274 "chunk start=%llu len=%llu doesn't have corresponding block group", 2275 map->start, map->chunk_len); 2276 ret = -EUCLEAN; 2277 btrfs_free_chunk_map(map); 2278 break; 2279 } 2280 if (bg->start != map->start || bg->length != map->chunk_len || 2281 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != 2282 (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { 2283 btrfs_err(fs_info, 2284 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", 2285 map->start, map->chunk_len, 2286 map->type & BTRFS_BLOCK_GROUP_TYPE_MASK, 2287 bg->start, bg->length, 2288 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 2289 ret = -EUCLEAN; 2290 btrfs_free_chunk_map(map); 2291 btrfs_put_block_group(bg); 2292 break; 2293 } 2294 start = map->start + map->chunk_len; 2295 btrfs_free_chunk_map(map); 2296 btrfs_put_block_group(bg); 2297 } 2298 return ret; 2299 } 2300 2301 static int read_one_block_group(struct btrfs_fs_info *info, 2302 struct btrfs_block_group_item *bgi, 2303 const struct btrfs_key *key, 2304 int need_clear) 2305 { 2306 struct btrfs_block_group *cache; 2307 const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); 2308 int ret; 2309 2310 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); 2311 2312 cache = btrfs_create_block_group_cache(info, key->objectid); 2313 if (!cache) 2314 return -ENOMEM; 2315 2316 cache->length = key->offset; 2317 cache->used = btrfs_stack_block_group_used(bgi); 2318 cache->commit_used = cache->used; 2319 cache->flags = btrfs_stack_block_group_flags(bgi); 2320 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); 2321 2322 set_free_space_tree_thresholds(cache); 2323 2324 if (need_clear) { 2325 /* 2326 * When we mount with old space cache, we need to 2327 * set BTRFS_DC_CLEAR and set dirty flag. 2328 * 2329 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 2330 * truncate the old free space cache inode and 2331 * setup a new one. 2332 * b) Setting 'dirty flag' makes sure that we flush 2333 * the new space cache info onto disk. 2334 */ 2335 if (btrfs_test_opt(info, SPACE_CACHE)) 2336 cache->disk_cache_state = BTRFS_DC_CLEAR; 2337 } 2338 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && 2339 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { 2340 btrfs_err(info, 2341 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", 2342 cache->start); 2343 ret = -EINVAL; 2344 goto error; 2345 } 2346 2347 ret = btrfs_load_block_group_zone_info(cache, false); 2348 if (ret) { 2349 btrfs_err(info, "zoned: failed to load zone info of bg %llu", 2350 cache->start); 2351 goto error; 2352 } 2353 2354 /* 2355 * We need to exclude the super stripes now so that the space info has 2356 * super bytes accounted for, otherwise we'll think we have more space 2357 * than we actually do. 2358 */ 2359 ret = exclude_super_stripes(cache); 2360 if (ret) { 2361 /* We may have excluded something, so call this just in case. */ 2362 btrfs_free_excluded_extents(cache); 2363 goto error; 2364 } 2365 2366 /* 2367 * For zoned filesystem, space after the allocation offset is the only 2368 * free space for a block group. So, we don't need any caching work. 
2369 * btrfs_calc_zone_unusable() will set the amount of free space and 2370 * zone_unusable space. 2371 * 2372 * For regular filesystem, check for two cases, either we are full, and 2373 * therefore don't need to bother with the caching work since we won't 2374 * find any space, or we are empty, and we can just add all the space 2375 * in and be done with it. This saves us _a_lot_ of time, particularly 2376 * in the full case. 2377 */ 2378 if (btrfs_is_zoned(info)) { 2379 btrfs_calc_zone_unusable(cache); 2380 /* Should not have any excluded extents. Just in case, though. */ 2381 btrfs_free_excluded_extents(cache); 2382 } else if (cache->length == cache->used) { 2383 cache->cached = BTRFS_CACHE_FINISHED; 2384 btrfs_free_excluded_extents(cache); 2385 } else if (cache->used == 0) { 2386 cache->cached = BTRFS_CACHE_FINISHED; 2387 ret = btrfs_add_new_free_space(cache, cache->start, 2388 cache->start + cache->length, NULL); 2389 btrfs_free_excluded_extents(cache); 2390 if (ret) 2391 goto error; 2392 } 2393 2394 ret = btrfs_add_block_group_cache(info, cache); 2395 if (ret) { 2396 btrfs_remove_free_space_cache(cache); 2397 goto error; 2398 } 2399 trace_btrfs_add_block_group(info, cache, 0); 2400 btrfs_add_bg_to_space_info(info, cache); 2401 2402 set_avail_alloc_bits(info, cache->flags); 2403 if (btrfs_chunk_writeable(info, cache->start)) { 2404 if (cache->used == 0) { 2405 ASSERT(list_empty(&cache->bg_list)); 2406 if (btrfs_test_opt(info, DISCARD_ASYNC)) 2407 btrfs_discard_queue_work(&info->discard_ctl, cache); 2408 else 2409 btrfs_mark_bg_unused(cache); 2410 } 2411 } else { 2412 inc_block_group_ro(cache, 1); 2413 } 2414 2415 return 0; 2416 error: 2417 btrfs_put_block_group(cache); 2418 return ret; 2419 } 2420 2421 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) 2422 { 2423 struct rb_node *node; 2424 int ret = 0; 2425 2426 for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) { 2427 struct btrfs_chunk_map *map; 2428 struct btrfs_block_group *bg; 2429 2430 map = rb_entry(node, struct btrfs_chunk_map, rb_node); 2431 bg = btrfs_create_block_group_cache(fs_info, map->start); 2432 if (!bg) { 2433 ret = -ENOMEM; 2434 break; 2435 } 2436 2437 /* Fill dummy cache as FULL */ 2438 bg->length = map->chunk_len; 2439 bg->flags = map->type; 2440 bg->cached = BTRFS_CACHE_FINISHED; 2441 bg->used = map->chunk_len; 2442 bg->flags = map->type; 2443 ret = btrfs_add_block_group_cache(fs_info, bg); 2444 /* 2445 * We may have some valid block group cache added already, in 2446 * that case we skip to the next one. 2447 */ 2448 if (ret == -EEXIST) { 2449 ret = 0; 2450 btrfs_put_block_group(bg); 2451 continue; 2452 } 2453 2454 if (ret) { 2455 btrfs_remove_free_space_cache(bg); 2456 btrfs_put_block_group(bg); 2457 break; 2458 } 2459 2460 btrfs_add_bg_to_space_info(fs_info, bg); 2461 2462 set_avail_alloc_bits(fs_info, bg->flags); 2463 } 2464 if (!ret) 2465 btrfs_init_global_block_rsv(fs_info); 2466 return ret; 2467 } 2468 2469 int btrfs_read_block_groups(struct btrfs_fs_info *info) 2470 { 2471 struct btrfs_root *root = btrfs_block_group_root(info); 2472 struct btrfs_path *path; 2473 int ret; 2474 struct btrfs_block_group *cache; 2475 struct btrfs_space_info *space_info; 2476 struct btrfs_key key; 2477 int need_clear = 0; 2478 u64 cache_gen; 2479 2480 /* 2481 * Either no extent root (with ibadroots rescue option) or we have 2482 * unsupported RO options. The fs can never be mounted read-write, so no 2483 * need to waste time searching block group items. 
2484 * 2485 * This also allows new extent tree related changes to be RO compat, 2486 * no need for a full incompat flag. 2487 */ 2488 if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & 2489 ~BTRFS_FEATURE_COMPAT_RO_SUPP)) 2490 return fill_dummy_bgs(info); 2491 2492 key.objectid = 0; 2493 key.offset = 0; 2494 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2495 path = btrfs_alloc_path(); 2496 if (!path) 2497 return -ENOMEM; 2498 2499 cache_gen = btrfs_super_cache_generation(info->super_copy); 2500 if (btrfs_test_opt(info, SPACE_CACHE) && 2501 btrfs_super_generation(info->super_copy) != cache_gen) 2502 need_clear = 1; 2503 if (btrfs_test_opt(info, CLEAR_CACHE)) 2504 need_clear = 1; 2505 2506 while (1) { 2507 struct btrfs_block_group_item bgi; 2508 struct extent_buffer *leaf; 2509 int slot; 2510 2511 ret = find_first_block_group(info, path, &key); 2512 if (ret > 0) 2513 break; 2514 if (ret != 0) 2515 goto error; 2516 2517 leaf = path->nodes[0]; 2518 slot = path->slots[0]; 2519 2520 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), 2521 sizeof(bgi)); 2522 2523 btrfs_item_key_to_cpu(leaf, &key, slot); 2524 btrfs_release_path(path); 2525 ret = read_one_block_group(info, &bgi, &key, need_clear); 2526 if (ret < 0) 2527 goto error; 2528 key.objectid += key.offset; 2529 key.offset = 0; 2530 } 2531 btrfs_release_path(path); 2532 2533 list_for_each_entry(space_info, &info->space_info, list) { 2534 int i; 2535 2536 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 2537 if (list_empty(&space_info->block_groups[i])) 2538 continue; 2539 cache = list_first_entry(&space_info->block_groups[i], 2540 struct btrfs_block_group, 2541 list); 2542 btrfs_sysfs_add_block_group_type(cache); 2543 } 2544 2545 if (!(btrfs_get_alloc_profile(info, space_info->flags) & 2546 (BTRFS_BLOCK_GROUP_RAID10 | 2547 BTRFS_BLOCK_GROUP_RAID1_MASK | 2548 BTRFS_BLOCK_GROUP_RAID56_MASK | 2549 BTRFS_BLOCK_GROUP_DUP))) 2550 continue; 2551 /* 2552 * Avoid allocating from un-mirrored block group if there are 2553 * mirrored block groups. 2554 */ 2555 list_for_each_entry(cache, 2556 &space_info->block_groups[BTRFS_RAID_RAID0], 2557 list) 2558 inc_block_group_ro(cache, 1); 2559 list_for_each_entry(cache, 2560 &space_info->block_groups[BTRFS_RAID_SINGLE], 2561 list) 2562 inc_block_group_ro(cache, 1); 2563 } 2564 2565 btrfs_init_global_block_rsv(info); 2566 ret = check_chunk_block_group_mappings(info); 2567 error: 2568 btrfs_free_path(path); 2569 /* 2570 * We've hit some error while reading the extent tree, and have 2571 * rescue=ibadroots mount option. 2572 * Try to fill the tree using dummy block groups so that the user can 2573 * continue to mount and grab their data. 2574 */ 2575 if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) 2576 ret = fill_dummy_bgs(info); 2577 return ret; 2578 } 2579 2580 /* 2581 * This function, insert_block_group_item(), belongs to the phase 2 of chunk 2582 * allocation. 2583 * 2584 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2585 * phases. 
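 *
 * For reference, the item assembled by the function below is keyed and laid
 * out like this (just a summary of the code underneath, not a new format):
 *
 *   key.objectid = block_group->start    logical start of the block group
 *   key.type     = BTRFS_BLOCK_GROUP_ITEM_KEY
 *   key.offset   = block_group->length   size of the block group
 *
 *   struct btrfs_block_group_item:
 *     used           <- block_group->used
 *     chunk_objectid <- block_group->global_root_id
 *     flags          <- block_group->flags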
2586 */ 2587 static int insert_block_group_item(struct btrfs_trans_handle *trans, 2588 struct btrfs_block_group *block_group) 2589 { 2590 struct btrfs_fs_info *fs_info = trans->fs_info; 2591 struct btrfs_block_group_item bgi; 2592 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2593 struct btrfs_key key; 2594 u64 old_commit_used; 2595 int ret; 2596 2597 spin_lock(&block_group->lock); 2598 btrfs_set_stack_block_group_used(&bgi, block_group->used); 2599 btrfs_set_stack_block_group_chunk_objectid(&bgi, 2600 block_group->global_root_id); 2601 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); 2602 old_commit_used = block_group->commit_used; 2603 block_group->commit_used = block_group->used; 2604 key.objectid = block_group->start; 2605 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 2606 key.offset = block_group->length; 2607 spin_unlock(&block_group->lock); 2608 2609 ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); 2610 if (ret < 0) { 2611 spin_lock(&block_group->lock); 2612 block_group->commit_used = old_commit_used; 2613 spin_unlock(&block_group->lock); 2614 } 2615 2616 return ret; 2617 } 2618 2619 static int insert_dev_extent(struct btrfs_trans_handle *trans, 2620 struct btrfs_device *device, u64 chunk_offset, 2621 u64 start, u64 num_bytes) 2622 { 2623 struct btrfs_fs_info *fs_info = device->fs_info; 2624 struct btrfs_root *root = fs_info->dev_root; 2625 struct btrfs_path *path; 2626 struct btrfs_dev_extent *extent; 2627 struct extent_buffer *leaf; 2628 struct btrfs_key key; 2629 int ret; 2630 2631 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); 2632 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); 2633 path = btrfs_alloc_path(); 2634 if (!path) 2635 return -ENOMEM; 2636 2637 key.objectid = device->devid; 2638 key.type = BTRFS_DEV_EXTENT_KEY; 2639 key.offset = start; 2640 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); 2641 if (ret) 2642 goto out; 2643 2644 leaf = path->nodes[0]; 2645 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); 2646 btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); 2647 btrfs_set_dev_extent_chunk_objectid(leaf, extent, 2648 BTRFS_FIRST_CHUNK_TREE_OBJECTID); 2649 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); 2650 2651 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 2652 btrfs_mark_buffer_dirty(trans, leaf); 2653 out: 2654 btrfs_free_path(path); 2655 return ret; 2656 } 2657 2658 /* 2659 * This function belongs to phase 2. 2660 * 2661 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2662 * phases. 
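 *
 * For every stripe of the chunk, the loop below writes one item in the
 * device tree via insert_dev_extent() above. As a summary of that code
 * (nothing new here), each item looks like:
 *
 *   key.objectid = device->devid
 *   key.type     = BTRFS_DEV_EXTENT_KEY
 *   key.offset   = physical start of the stripe on that device
 *
 * with the dev extent item pointing back at the chunk tree
 * (BTRFS_CHUNK_TREE_OBJECTID, BTRFS_FIRST_CHUNK_TREE_OBJECTID, chunk_offset)
 * and recording the stripe length as the extent length.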
2663 */ 2664 static int insert_dev_extents(struct btrfs_trans_handle *trans, 2665 u64 chunk_offset, u64 chunk_size) 2666 { 2667 struct btrfs_fs_info *fs_info = trans->fs_info; 2668 struct btrfs_device *device; 2669 struct btrfs_chunk_map *map; 2670 u64 dev_offset; 2671 int i; 2672 int ret = 0; 2673 2674 map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); 2675 if (IS_ERR(map)) 2676 return PTR_ERR(map); 2677 2678 /* 2679 * Take the device list mutex to prevent races with the final phase of 2680 * a device replace operation that replaces the device object associated 2681 * with the map's stripes, because the device object's id can change 2682 * at any time during that final phase of the device replace operation 2683 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the 2684 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, 2685 * resulting in persisting a device extent item with such ID. 2686 */ 2687 mutex_lock(&fs_info->fs_devices->device_list_mutex); 2688 for (i = 0; i < map->num_stripes; i++) { 2689 device = map->stripes[i].dev; 2690 dev_offset = map->stripes[i].physical; 2691 2692 ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, 2693 map->stripe_size); 2694 if (ret) 2695 break; 2696 } 2697 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2698 2699 btrfs_free_chunk_map(map); 2700 return ret; 2701 } 2702 2703 /* 2704 * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of 2705 * chunk allocation. 2706 * 2707 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation 2708 * phases. 2709 */ 2710 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) 2711 { 2712 struct btrfs_fs_info *fs_info = trans->fs_info; 2713 struct btrfs_block_group *block_group; 2714 int ret = 0; 2715 2716 while (!list_empty(&trans->new_bgs)) { 2717 int index; 2718 2719 block_group = list_first_entry(&trans->new_bgs, 2720 struct btrfs_block_group, 2721 bg_list); 2722 if (ret) 2723 goto next; 2724 2725 index = btrfs_bg_flags_to_raid_index(block_group->flags); 2726 2727 ret = insert_block_group_item(trans, block_group); 2728 if (ret) 2729 btrfs_abort_transaction(trans, ret); 2730 if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, 2731 &block_group->runtime_flags)) { 2732 mutex_lock(&fs_info->chunk_mutex); 2733 ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); 2734 mutex_unlock(&fs_info->chunk_mutex); 2735 if (ret) 2736 btrfs_abort_transaction(trans, ret); 2737 } 2738 ret = insert_dev_extents(trans, block_group->start, 2739 block_group->length); 2740 if (ret) 2741 btrfs_abort_transaction(trans, ret); 2742 add_block_group_free_space(trans, block_group); 2743 2744 /* 2745 * If we restriped during balance, we may have added a new raid 2746 * type, so now add the sysfs entries when it is safe to do so. 2747 * We don't have to worry about locking here as it's handled in 2748 * btrfs_sysfs_add_block_group_type. 2749 */ 2750 if (block_group->space_info->block_group_kobjs[index] == NULL) 2751 btrfs_sysfs_add_block_group_type(block_group); 2752 2753 /* Already aborted the transaction if it failed. */ 2754 next: 2755 btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); 2756 list_del_init(&block_group->bg_list); 2757 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); 2758 2759 /* 2760 * If the block group is still unused, add it to the list of 2761 * unused block groups. 
The block group may have been created in 2762 * order to satisfy a space reservation, in which case the 2763 * extent allocation only happens later. But often we don't 2764 * actually need to allocate space that we previously reserved, 2765 * so the block group may become unused for a long time. For 2766 * example, for metadata we generally reserve space for a worst 2767 * possible scenario, but then often end up allocating only part of that 2768 * space, or none at all (due to no need to COW, extent buffers 2769 * were already COWed in the current transaction and still 2770 * unwritten, tree heights lower than the maximum possible 2771 * height, etc). For data we generally reserve the exact amount 2772 * of space we are going to allocate later; the exception is 2773 * when using compression, as we must reserve space based on the 2774 * uncompressed data size, because the compression is only done 2775 * when writeback is triggered and we don't know how much space we 2776 * are actually going to need, so we reserve the uncompressed 2777 * size because the data may be incompressible in the worst case. 2778 */ 2779 if (ret == 0) { 2780 bool used; 2781 2782 spin_lock(&block_group->lock); 2783 used = btrfs_is_block_group_used(block_group); 2784 spin_unlock(&block_group->lock); 2785 2786 if (!used) 2787 btrfs_mark_bg_unused(block_group); 2788 } 2789 } 2790 btrfs_trans_release_chunk_metadata(trans); 2791 } 2792 2793 /* 2794 * For extent tree v2 we use the block_group_item->chunk_offset to point at our 2795 * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. 2796 */ 2797 static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) 2798 { 2799 u64 div = SZ_1G; 2800 u64 index; 2801 2802 if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) 2803 return BTRFS_FIRST_CHUNK_TREE_OBJECTID; 2804 2805 /* If we have a smaller fs, index based on 128MiB. */ 2806 if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) 2807 div = SZ_128M; 2808 2809 offset = div64_u64(offset, div); 2810 div64_u64_rem(offset, fs_info->nr_global_roots, &index); 2811 return index; 2812 } 2813 2814 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, 2815 u64 type, 2816 u64 chunk_offset, u64 size) 2817 { 2818 struct btrfs_fs_info *fs_info = trans->fs_info; 2819 struct btrfs_block_group *cache; 2820 int ret; 2821 2822 btrfs_set_log_full_commit(trans); 2823 2824 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); 2825 if (!cache) 2826 return ERR_PTR(-ENOMEM); 2827 2828 /* 2829 * Mark it as new before adding it to the rbtree of block groups or any 2830 * list, so that no other task finds it and calls btrfs_mark_bg_unused() 2831 * before the new flag is set.
2832 */ 2833 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); 2834 2835 cache->length = size; 2836 set_free_space_tree_thresholds(cache); 2837 cache->flags = type; 2838 cache->cached = BTRFS_CACHE_FINISHED; 2839 cache->global_root_id = calculate_global_root_id(fs_info, cache->start); 2840 2841 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) 2842 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); 2843 2844 ret = btrfs_load_block_group_zone_info(cache, true); 2845 if (ret) { 2846 btrfs_put_block_group(cache); 2847 return ERR_PTR(ret); 2848 } 2849 2850 ret = exclude_super_stripes(cache); 2851 if (ret) { 2852 /* We may have excluded something, so call this just in case */ 2853 btrfs_free_excluded_extents(cache); 2854 btrfs_put_block_group(cache); 2855 return ERR_PTR(ret); 2856 } 2857 2858 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); 2859 btrfs_free_excluded_extents(cache); 2860 if (ret) { 2861 btrfs_put_block_group(cache); 2862 return ERR_PTR(ret); 2863 } 2864 2865 /* 2866 * Ensure the corresponding space_info object is created and 2867 * assigned to our block group. We want our bg to be added to the rbtree 2868 * with its ->space_info set. 2869 */ 2870 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); 2871 ASSERT(cache->space_info); 2872 2873 ret = btrfs_add_block_group_cache(fs_info, cache); 2874 if (ret) { 2875 btrfs_remove_free_space_cache(cache); 2876 btrfs_put_block_group(cache); 2877 return ERR_PTR(ret); 2878 } 2879 2880 /* 2881 * Now that our block group has its ->space_info set and is inserted in 2882 * the rbtree, update the space info's counters. 2883 */ 2884 trace_btrfs_add_block_group(fs_info, cache, 1); 2885 btrfs_add_bg_to_space_info(fs_info, cache); 2886 btrfs_update_global_block_rsv(fs_info); 2887 2888 #ifdef CONFIG_BTRFS_DEBUG 2889 if (btrfs_should_fragment_free_space(cache)) { 2890 cache->space_info->bytes_used += size >> 1; 2891 fragment_free_space(cache); 2892 } 2893 #endif 2894 2895 list_add_tail(&cache->bg_list, &trans->new_bgs); 2896 btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info); 2897 2898 set_avail_alloc_bits(fs_info, type); 2899 return cache; 2900 } 2901 2902 /* 2903 * Mark one block group RO, can be called several times for the same block 2904 * group. 2905 * 2906 * @cache: the destination block group 2907 * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to 2908 * ensure we still have some free space after marking this 2909 * block group RO. 2910 */ 2911 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, 2912 bool do_chunk_alloc) 2913 { 2914 struct btrfs_fs_info *fs_info = cache->fs_info; 2915 struct btrfs_trans_handle *trans; 2916 struct btrfs_root *root = btrfs_block_group_root(fs_info); 2917 u64 alloc_flags; 2918 int ret; 2919 bool dirty_bg_running; 2920 2921 /* 2922 * This can only happen when we are doing read-only scrub on read-only 2923 * mount. 2924 * In that case we should not start a new transaction on read-only fs. 2925 * Thus here we skip all chunk allocations. 2926 */ 2927 if (sb_rdonly(fs_info->sb)) { 2928 mutex_lock(&fs_info->ro_block_group_mutex); 2929 ret = inc_block_group_ro(cache, 0); 2930 mutex_unlock(&fs_info->ro_block_group_mutex); 2931 return ret; 2932 } 2933 2934 do { 2935 trans = btrfs_join_transaction(root); 2936 if (IS_ERR(trans)) 2937 return PTR_ERR(trans); 2938 2939 dirty_bg_running = false; 2940 2941 /* 2942 * We're not allowed to set block groups readonly after the dirty 2943 * block group cache has started writing. 
If it already started, 2944 * back off and let this transaction commit. 2945 */ 2946 mutex_lock(&fs_info->ro_block_group_mutex); 2947 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { 2948 u64 transid = trans->transid; 2949 2950 mutex_unlock(&fs_info->ro_block_group_mutex); 2951 btrfs_end_transaction(trans); 2952 2953 ret = btrfs_wait_for_commit(fs_info, transid); 2954 if (ret) 2955 return ret; 2956 dirty_bg_running = true; 2957 } 2958 } while (dirty_bg_running); 2959 2960 if (do_chunk_alloc) { 2961 /* 2962 * If we are changing raid levels, try to allocate a 2963 * corresponding block group with the new raid level. 2964 */ 2965 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 2966 if (alloc_flags != cache->flags) { 2967 ret = btrfs_chunk_alloc(trans, alloc_flags, 2968 CHUNK_ALLOC_FORCE); 2969 /* 2970 * ENOSPC is allowed here, we may have enough space 2971 * already allocated at the new raid level to carry on 2972 */ 2973 if (ret == -ENOSPC) 2974 ret = 0; 2975 if (ret < 0) 2976 goto out; 2977 } 2978 } 2979 2980 ret = inc_block_group_ro(cache, 0); 2981 if (!ret) 2982 goto out; 2983 if (ret == -ETXTBSY) 2984 goto unlock_out; 2985 2986 /* 2987 * Skip chunk allocation if the bg is SYSTEM, this is to avoid system 2988 * chunk allocation storm to exhaust the system chunk array. Otherwise 2989 * we still want to try our best to mark the block group read-only. 2990 */ 2991 if (!do_chunk_alloc && ret == -ENOSPC && 2992 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) 2993 goto unlock_out; 2994 2995 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); 2996 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 2997 if (ret < 0) 2998 goto out; 2999 /* 3000 * We have allocated a new chunk. We also need to activate that chunk to 3001 * grant metadata tickets for zoned filesystem. 
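 *
 * As a rough outline of the fallback sequence in this function (only
 * summarizing the surrounding code): try inc_block_group_ro(); if that
 * fails with -ENOSPC and a chunk allocation is allowed, force-allocate a
 * chunk with the current profile, activate it when the filesystem is
 * zoned, and then retry inc_block_group_ro() one more time before bailing
 * out.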
3002 */ 3003 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); 3004 if (ret < 0) 3005 goto out; 3006 3007 ret = inc_block_group_ro(cache, 0); 3008 if (ret == -ETXTBSY) 3009 goto unlock_out; 3010 out: 3011 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { 3012 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); 3013 mutex_lock(&fs_info->chunk_mutex); 3014 check_system_chunk(trans, alloc_flags); 3015 mutex_unlock(&fs_info->chunk_mutex); 3016 } 3017 unlock_out: 3018 mutex_unlock(&fs_info->ro_block_group_mutex); 3019 3020 btrfs_end_transaction(trans); 3021 return ret; 3022 } 3023 3024 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) 3025 { 3026 struct btrfs_space_info *sinfo = cache->space_info; 3027 u64 num_bytes; 3028 3029 BUG_ON(!cache->ro); 3030 3031 spin_lock(&sinfo->lock); 3032 spin_lock(&cache->lock); 3033 if (!--cache->ro) { 3034 if (btrfs_is_zoned(cache->fs_info)) { 3035 /* Migrate zone_unusable bytes back */ 3036 cache->zone_unusable = 3037 (cache->alloc_offset - cache->used) + 3038 (cache->length - cache->zone_capacity); 3039 sinfo->bytes_zone_unusable += cache->zone_unusable; 3040 sinfo->bytes_readonly -= cache->zone_unusable; 3041 } 3042 num_bytes = cache->length - cache->reserved - 3043 cache->pinned - cache->bytes_super - 3044 cache->zone_unusable - cache->used; 3045 sinfo->bytes_readonly -= num_bytes; 3046 list_del_init(&cache->ro_list); 3047 } 3048 spin_unlock(&cache->lock); 3049 spin_unlock(&sinfo->lock); 3050 } 3051 3052 static int update_block_group_item(struct btrfs_trans_handle *trans, 3053 struct btrfs_path *path, 3054 struct btrfs_block_group *cache) 3055 { 3056 struct btrfs_fs_info *fs_info = trans->fs_info; 3057 int ret; 3058 struct btrfs_root *root = btrfs_block_group_root(fs_info); 3059 unsigned long bi; 3060 struct extent_buffer *leaf; 3061 struct btrfs_block_group_item bgi; 3062 struct btrfs_key key; 3063 u64 old_commit_used; 3064 u64 used; 3065 3066 /* 3067 * Block group items update can be triggered out of commit transaction 3068 * critical section, thus we need a consistent view of used bytes. 3069 * We cannot use cache->used directly outside of the spin lock, as it 3070 * may be changed. 3071 */ 3072 spin_lock(&cache->lock); 3073 old_commit_used = cache->commit_used; 3074 used = cache->used; 3075 /* No change in used bytes, can safely skip it. */ 3076 if (cache->commit_used == used) { 3077 spin_unlock(&cache->lock); 3078 return 0; 3079 } 3080 cache->commit_used = used; 3081 spin_unlock(&cache->lock); 3082 3083 key.objectid = cache->start; 3084 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 3085 key.offset = cache->length; 3086 3087 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3088 if (ret) { 3089 if (ret > 0) 3090 ret = -ENOENT; 3091 goto fail; 3092 } 3093 3094 leaf = path->nodes[0]; 3095 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 3096 btrfs_set_stack_block_group_used(&bgi, used); 3097 btrfs_set_stack_block_group_chunk_objectid(&bgi, 3098 cache->global_root_id); 3099 btrfs_set_stack_block_group_flags(&bgi, cache->flags); 3100 write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); 3101 btrfs_mark_buffer_dirty(trans, leaf); 3102 fail: 3103 btrfs_release_path(path); 3104 /* 3105 * We didn't update the block group item, need to revert commit_used 3106 * unless the block group item didn't exist yet - this is to prevent a 3107 * race with a concurrent insertion of the block group item, with 3108 * insert_block_group_item(), that happened just after we attempted to 3109 * update. 
In that case we would reset commit_used to 0 just after the 3110 * insertion set it to a value greater than 0 - if the block group later 3111 * becomes with 0 used bytes, we would incorrectly skip its update. 3112 */ 3113 if (ret < 0 && ret != -ENOENT) { 3114 spin_lock(&cache->lock); 3115 cache->commit_used = old_commit_used; 3116 spin_unlock(&cache->lock); 3117 } 3118 return ret; 3119 3120 } 3121 3122 static int cache_save_setup(struct btrfs_block_group *block_group, 3123 struct btrfs_trans_handle *trans, 3124 struct btrfs_path *path) 3125 { 3126 struct btrfs_fs_info *fs_info = block_group->fs_info; 3127 struct inode *inode = NULL; 3128 struct extent_changeset *data_reserved = NULL; 3129 u64 alloc_hint = 0; 3130 int dcs = BTRFS_DC_ERROR; 3131 u64 cache_size = 0; 3132 int retries = 0; 3133 int ret = 0; 3134 3135 if (!btrfs_test_opt(fs_info, SPACE_CACHE)) 3136 return 0; 3137 3138 /* 3139 * If this block group is smaller than 100 megs don't bother caching the 3140 * block group. 3141 */ 3142 if (block_group->length < (100 * SZ_1M)) { 3143 spin_lock(&block_group->lock); 3144 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 3145 spin_unlock(&block_group->lock); 3146 return 0; 3147 } 3148 3149 if (TRANS_ABORTED(trans)) 3150 return 0; 3151 again: 3152 inode = lookup_free_space_inode(block_group, path); 3153 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 3154 ret = PTR_ERR(inode); 3155 btrfs_release_path(path); 3156 goto out; 3157 } 3158 3159 if (IS_ERR(inode)) { 3160 BUG_ON(retries); 3161 retries++; 3162 3163 if (block_group->ro) 3164 goto out_free; 3165 3166 ret = create_free_space_inode(trans, block_group, path); 3167 if (ret) 3168 goto out_free; 3169 goto again; 3170 } 3171 3172 /* 3173 * We want to set the generation to 0, that way if anything goes wrong 3174 * from here on out we know not to trust this cache when we load up next 3175 * time. 3176 */ 3177 BTRFS_I(inode)->generation = 0; 3178 ret = btrfs_update_inode(trans, BTRFS_I(inode)); 3179 if (ret) { 3180 /* 3181 * So theoretically we could recover from this, simply set the 3182 * super cache generation to 0 so we know to invalidate the 3183 * cache, but then we'd have to keep track of the block groups 3184 * that fail this way so we know we _have_ to reset this cache 3185 * before the next commit or risk reading stale cache. So to 3186 * limit our exposure to horrible edge cases lets just abort the 3187 * transaction, this only happens in really bad situations 3188 * anyway. 3189 */ 3190 btrfs_abort_transaction(trans, ret); 3191 goto out_put; 3192 } 3193 WARN_ON(ret); 3194 3195 /* We've already setup this transaction, go ahead and exit */ 3196 if (block_group->cache_generation == trans->transid && 3197 i_size_read(inode)) { 3198 dcs = BTRFS_DC_SETUP; 3199 goto out_put; 3200 } 3201 3202 if (i_size_read(inode) > 0) { 3203 ret = btrfs_check_trunc_cache_free_space(fs_info, 3204 &fs_info->global_block_rsv); 3205 if (ret) 3206 goto out_put; 3207 3208 ret = btrfs_truncate_free_space_cache(trans, NULL, inode); 3209 if (ret) 3210 goto out_put; 3211 } 3212 3213 spin_lock(&block_group->lock); 3214 if (block_group->cached != BTRFS_CACHE_FINISHED || 3215 !btrfs_test_opt(fs_info, SPACE_CACHE)) { 3216 /* 3217 * don't bother trying to write stuff out _if_ 3218 * a) we're not cached, 3219 * b) we're with nospace_cache mount option, 3220 * c) we're with v2 space_cache (FREE_SPACE_TREE). 
3221 */ 3222 dcs = BTRFS_DC_WRITTEN; 3223 spin_unlock(&block_group->lock); 3224 goto out_put; 3225 } 3226 spin_unlock(&block_group->lock); 3227 3228 /* 3229 * We hit an ENOSPC when setting up the cache in this transaction, just 3230 * skip doing the setup, we've already cleared the cache so we're safe. 3231 */ 3232 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { 3233 ret = -ENOSPC; 3234 goto out_put; 3235 } 3236 3237 /* 3238 * Try to preallocate enough space based on how big the block group is. 3239 * Keep in mind this has to include any pinned space which could end up 3240 * taking up quite a bit since it's not folded into the other space 3241 * cache. 3242 */ 3243 cache_size = div_u64(block_group->length, SZ_256M); 3244 if (!cache_size) 3245 cache_size = 1; 3246 3247 cache_size *= 16; 3248 cache_size *= fs_info->sectorsize; 3249 3250 ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, 3251 cache_size, false); 3252 if (ret) 3253 goto out_put; 3254 3255 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, 3256 cache_size, cache_size, 3257 &alloc_hint); 3258 /* 3259 * Our cache requires contiguous chunks so that we don't modify a bunch 3260 * of metadata or split extents when writing the cache out, which means 3261 * we can enospc if we are heavily fragmented in addition to just normal 3262 * out of space conditions. So if we hit this just skip setting up any 3263 * other block groups for this transaction, maybe we'll unpin enough 3264 * space the next time around. 3265 */ 3266 if (!ret) 3267 dcs = BTRFS_DC_SETUP; 3268 else if (ret == -ENOSPC) 3269 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 3270 3271 out_put: 3272 iput(inode); 3273 out_free: 3274 btrfs_release_path(path); 3275 out: 3276 spin_lock(&block_group->lock); 3277 if (!ret && dcs == BTRFS_DC_SETUP) 3278 block_group->cache_generation = trans->transid; 3279 block_group->disk_cache_state = dcs; 3280 spin_unlock(&block_group->lock); 3281 3282 extent_changeset_free(data_reserved); 3283 return ret; 3284 } 3285 3286 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) 3287 { 3288 struct btrfs_fs_info *fs_info = trans->fs_info; 3289 struct btrfs_block_group *cache, *tmp; 3290 struct btrfs_transaction *cur_trans = trans->transaction; 3291 struct btrfs_path *path; 3292 3293 if (list_empty(&cur_trans->dirty_bgs) || 3294 !btrfs_test_opt(fs_info, SPACE_CACHE)) 3295 return 0; 3296 3297 path = btrfs_alloc_path(); 3298 if (!path) 3299 return -ENOMEM; 3300 3301 /* Could add new block groups, use _safe just in case */ 3302 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, 3303 dirty_list) { 3304 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 3305 cache_save_setup(cache, trans, path); 3306 } 3307 3308 btrfs_free_path(path); 3309 return 0; 3310 } 3311 3312 /* 3313 * Transaction commit does final block group cache writeback during a critical 3314 * section where nothing is allowed to change the FS. This is required in 3315 * order for the cache to actually match the block group, but can introduce a 3316 * lot of latency into the commit. 3317 * 3318 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. 3319 * There's a chance we'll have to redo some of it if the block group changes 3320 * again during the commit, but it greatly reduces the commit latency by 3321 * getting rid of the easy block groups while we're still allowing others to 3322 * join the commit. 
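 *
 * A rough picture of the two passes (this only restates the above):
 *
 *   transaction commit
 *     btrfs_start_dirty_block_groups()    early pass, other tasks may still
 *                                         dirty block groups and join
 *     critical section
 *       btrfs_write_dirty_block_groups()  final pass, re-handles anything
 *                                         that was dirtied again meanwhile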
3323 */ 3324 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) 3325 { 3326 struct btrfs_fs_info *fs_info = trans->fs_info; 3327 struct btrfs_block_group *cache; 3328 struct btrfs_transaction *cur_trans = trans->transaction; 3329 int ret = 0; 3330 int should_put; 3331 struct btrfs_path *path = NULL; 3332 LIST_HEAD(dirty); 3333 struct list_head *io = &cur_trans->io_bgs; 3334 int loops = 0; 3335 3336 spin_lock(&cur_trans->dirty_bgs_lock); 3337 if (list_empty(&cur_trans->dirty_bgs)) { 3338 spin_unlock(&cur_trans->dirty_bgs_lock); 3339 return 0; 3340 } 3341 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3342 spin_unlock(&cur_trans->dirty_bgs_lock); 3343 3344 again: 3345 /* Make sure all the block groups on our dirty list actually exist */ 3346 btrfs_create_pending_block_groups(trans); 3347 3348 if (!path) { 3349 path = btrfs_alloc_path(); 3350 if (!path) { 3351 ret = -ENOMEM; 3352 goto out; 3353 } 3354 } 3355 3356 /* 3357 * cache_write_mutex is here only to save us from balance or automatic 3358 * removal of empty block groups deleting this block group while we are 3359 * writing out the cache 3360 */ 3361 mutex_lock(&trans->transaction->cache_write_mutex); 3362 while (!list_empty(&dirty)) { 3363 bool drop_reserve = true; 3364 3365 cache = list_first_entry(&dirty, struct btrfs_block_group, 3366 dirty_list); 3367 /* 3368 * This can happen if something re-dirties a block group that 3369 * is already under IO. Just wait for it to finish and then do 3370 * it all again 3371 */ 3372 if (!list_empty(&cache->io_list)) { 3373 list_del_init(&cache->io_list); 3374 btrfs_wait_cache_io(trans, cache, path); 3375 btrfs_put_block_group(cache); 3376 } 3377 3378 3379 /* 3380 * btrfs_wait_cache_io uses the cache->dirty_list to decide if 3381 * it should update the cache_state. Don't delete until after 3382 * we wait. 3383 * 3384 * Since we're not running in the commit critical section 3385 * we need the dirty_bgs_lock to protect from update_block_group 3386 */ 3387 spin_lock(&cur_trans->dirty_bgs_lock); 3388 list_del_init(&cache->dirty_list); 3389 spin_unlock(&cur_trans->dirty_bgs_lock); 3390 3391 should_put = 1; 3392 3393 cache_save_setup(cache, trans, path); 3394 3395 if (cache->disk_cache_state == BTRFS_DC_SETUP) { 3396 cache->io_ctl.inode = NULL; 3397 ret = btrfs_write_out_cache(trans, cache, path); 3398 if (ret == 0 && cache->io_ctl.inode) { 3399 should_put = 0; 3400 3401 /* 3402 * The cache_write_mutex is protecting the 3403 * io_list, also refer to the definition of 3404 * btrfs_transaction::io_bgs for more details 3405 */ 3406 list_add_tail(&cache->io_list, io); 3407 } else { 3408 /* 3409 * If we failed to write the cache, the 3410 * generation will be bad and life goes on 3411 */ 3412 ret = 0; 3413 } 3414 } 3415 if (!ret) { 3416 ret = update_block_group_item(trans, path, cache); 3417 /* 3418 * Our block group might still be attached to the list 3419 * of new block groups in the transaction handle of some 3420 * other task (struct btrfs_trans_handle->new_bgs). This 3421 * means its block group item isn't yet in the extent 3422 * tree. If this happens ignore the error, as we will 3423 * try again later in the critical section of the 3424 * transaction commit. 
3425 */ 3426 if (ret == -ENOENT) { 3427 ret = 0; 3428 spin_lock(&cur_trans->dirty_bgs_lock); 3429 if (list_empty(&cache->dirty_list)) { 3430 list_add_tail(&cache->dirty_list, 3431 &cur_trans->dirty_bgs); 3432 btrfs_get_block_group(cache); 3433 drop_reserve = false; 3434 } 3435 spin_unlock(&cur_trans->dirty_bgs_lock); 3436 } else if (ret) { 3437 btrfs_abort_transaction(trans, ret); 3438 } 3439 } 3440 3441 /* If it's not on the io list, we need to put the block group */ 3442 if (should_put) 3443 btrfs_put_block_group(cache); 3444 if (drop_reserve) 3445 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 3446 /* 3447 * Avoid blocking other tasks for too long. It might even save 3448 * us from writing caches for block groups that are going to be 3449 * removed. 3450 */ 3451 mutex_unlock(&trans->transaction->cache_write_mutex); 3452 if (ret) 3453 goto out; 3454 mutex_lock(&trans->transaction->cache_write_mutex); 3455 } 3456 mutex_unlock(&trans->transaction->cache_write_mutex); 3457 3458 /* 3459 * Go through delayed refs for all the stuff we've just kicked off 3460 * and then loop back (just once) 3461 */ 3462 if (!ret) 3463 ret = btrfs_run_delayed_refs(trans, 0); 3464 if (!ret && loops == 0) { 3465 loops++; 3466 spin_lock(&cur_trans->dirty_bgs_lock); 3467 list_splice_init(&cur_trans->dirty_bgs, &dirty); 3468 /* 3469 * dirty_bgs_lock protects us from concurrent block group 3470 * deletes too (not just cache_write_mutex). 3471 */ 3472 if (!list_empty(&dirty)) { 3473 spin_unlock(&cur_trans->dirty_bgs_lock); 3474 goto again; 3475 } 3476 spin_unlock(&cur_trans->dirty_bgs_lock); 3477 } 3478 out: 3479 if (ret < 0) { 3480 spin_lock(&cur_trans->dirty_bgs_lock); 3481 list_splice_init(&dirty, &cur_trans->dirty_bgs); 3482 spin_unlock(&cur_trans->dirty_bgs_lock); 3483 btrfs_cleanup_dirty_bgs(cur_trans, fs_info); 3484 } 3485 3486 btrfs_free_path(path); 3487 return ret; 3488 } 3489 3490 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) 3491 { 3492 struct btrfs_fs_info *fs_info = trans->fs_info; 3493 struct btrfs_block_group *cache; 3494 struct btrfs_transaction *cur_trans = trans->transaction; 3495 int ret = 0; 3496 int should_put; 3497 struct btrfs_path *path; 3498 struct list_head *io = &cur_trans->io_bgs; 3499 3500 path = btrfs_alloc_path(); 3501 if (!path) 3502 return -ENOMEM; 3503 3504 /* 3505 * Even though we are in the critical section of the transaction commit, 3506 * we can still have concurrent tasks adding elements to this 3507 * transaction's list of dirty block groups. These tasks correspond to 3508 * endio free space workers started when writeback finishes for a 3509 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can 3510 * allocate new block groups as a result of COWing nodes of the root 3511 * tree when updating the free space inode. The writeback for the space 3512 * caches is triggered by an earlier call to 3513 * btrfs_start_dirty_block_groups() and iterations of the following 3514 * loop. 3515 * Also we want to do the cache_save_setup first and then run the 3516 * delayed refs to make sure we have the best chance at doing this all 3517 * in one shot. 3518 */ 3519 spin_lock(&cur_trans->dirty_bgs_lock); 3520 while (!list_empty(&cur_trans->dirty_bgs)) { 3521 cache = list_first_entry(&cur_trans->dirty_bgs, 3522 struct btrfs_block_group, 3523 dirty_list); 3524 3525 /* 3526 * This can happen if cache_save_setup re-dirties a block group 3527 * that is already under IO. 
Just wait for it to finish and 3528 * then do it all again 3529 */ 3530 if (!list_empty(&cache->io_list)) { 3531 spin_unlock(&cur_trans->dirty_bgs_lock); 3532 list_del_init(&cache->io_list); 3533 btrfs_wait_cache_io(trans, cache, path); 3534 btrfs_put_block_group(cache); 3535 spin_lock(&cur_trans->dirty_bgs_lock); 3536 } 3537 3538 /* 3539 * Don't remove from the dirty list until after we've waited on 3540 * any pending IO 3541 */ 3542 list_del_init(&cache->dirty_list); 3543 spin_unlock(&cur_trans->dirty_bgs_lock); 3544 should_put = 1; 3545 3546 cache_save_setup(cache, trans, path); 3547 3548 if (!ret) 3549 ret = btrfs_run_delayed_refs(trans, U64_MAX); 3550 3551 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { 3552 cache->io_ctl.inode = NULL; 3553 ret = btrfs_write_out_cache(trans, cache, path); 3554 if (ret == 0 && cache->io_ctl.inode) { 3555 should_put = 0; 3556 list_add_tail(&cache->io_list, io); 3557 } else { 3558 /* 3559 * If we failed to write the cache, the 3560 * generation will be bad and life goes on 3561 */ 3562 ret = 0; 3563 } 3564 } 3565 if (!ret) { 3566 ret = update_block_group_item(trans, path, cache); 3567 /* 3568 * One of the free space endio workers might have 3569 * created a new block group while updating a free space 3570 * cache's inode (at inode.c:btrfs_finish_ordered_io()) 3571 * and hasn't released its transaction handle yet, in 3572 * which case the new block group is still attached to 3573 * its transaction handle and its creation has not 3574 * finished yet (no block group item in the extent tree 3575 * yet, etc). If this is the case, wait for all free 3576 * space endio workers to finish and retry. This is a 3577 * very rare case so no need for a more efficient and 3578 * complex approach. 3579 */ 3580 if (ret == -ENOENT) { 3581 wait_event(cur_trans->writer_wait, 3582 atomic_read(&cur_trans->num_writers) == 1); 3583 ret = update_block_group_item(trans, path, cache); 3584 } 3585 if (ret) 3586 btrfs_abort_transaction(trans, ret); 3587 } 3588 3589 /* If its not on the io list, we need to put the block group */ 3590 if (should_put) 3591 btrfs_put_block_group(cache); 3592 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); 3593 spin_lock(&cur_trans->dirty_bgs_lock); 3594 } 3595 spin_unlock(&cur_trans->dirty_bgs_lock); 3596 3597 /* 3598 * Refer to the definition of io_bgs member for details why it's safe 3599 * to use it without any locking 3600 */ 3601 while (!list_empty(io)) { 3602 cache = list_first_entry(io, struct btrfs_block_group, 3603 io_list); 3604 list_del_init(&cache->io_list); 3605 btrfs_wait_cache_io(trans, cache, path); 3606 btrfs_put_block_group(cache); 3607 } 3608 3609 btrfs_free_path(path); 3610 return ret; 3611 } 3612 3613 int btrfs_update_block_group(struct btrfs_trans_handle *trans, 3614 u64 bytenr, u64 num_bytes, bool alloc) 3615 { 3616 struct btrfs_fs_info *info = trans->fs_info; 3617 struct btrfs_space_info *space_info; 3618 struct btrfs_block_group *cache; 3619 u64 old_val; 3620 bool reclaim = false; 3621 bool bg_already_dirty = true; 3622 int factor; 3623 3624 /* Block accounting for super block */ 3625 spin_lock(&info->delalloc_root_lock); 3626 old_val = btrfs_super_bytes_used(info->super_copy); 3627 if (alloc) 3628 old_val += num_bytes; 3629 else 3630 old_val -= num_bytes; 3631 btrfs_set_super_bytes_used(info->super_copy, old_val); 3632 spin_unlock(&info->delalloc_root_lock); 3633 3634 cache = btrfs_lookup_block_group(info, bytenr); 3635 if (!cache) 3636 return -ENOENT; 3637 3638 /* An extent can not span multiple block groups. 
*/ 3639 ASSERT(bytenr + num_bytes <= cache->start + cache->length); 3640 3641 space_info = cache->space_info; 3642 factor = btrfs_bg_type_to_factor(cache->flags); 3643 3644 /* 3645 * If this block group has free space cache written out, we need to make 3646 * sure to load it if we are removing space. This is because we need 3647 * the unpinning stage to actually add the space back to the block group, 3648 * otherwise we will leak space. 3649 */ 3650 if (!alloc && !btrfs_block_group_done(cache)) 3651 btrfs_cache_block_group(cache, true); 3652 3653 spin_lock(&space_info->lock); 3654 spin_lock(&cache->lock); 3655 3656 if (btrfs_test_opt(info, SPACE_CACHE) && 3657 cache->disk_cache_state < BTRFS_DC_CLEAR) 3658 cache->disk_cache_state = BTRFS_DC_CLEAR; 3659 3660 old_val = cache->used; 3661 if (alloc) { 3662 old_val += num_bytes; 3663 cache->used = old_val; 3664 cache->reserved -= num_bytes; 3665 space_info->bytes_reserved -= num_bytes; 3666 space_info->bytes_used += num_bytes; 3667 space_info->disk_used += num_bytes * factor; 3668 spin_unlock(&cache->lock); 3669 spin_unlock(&space_info->lock); 3670 } else { 3671 old_val -= num_bytes; 3672 cache->used = old_val; 3673 cache->pinned += num_bytes; 3674 btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes); 3675 space_info->bytes_used -= num_bytes; 3676 space_info->disk_used -= num_bytes * factor; 3677 3678 reclaim = should_reclaim_block_group(cache, num_bytes); 3679 3680 spin_unlock(&cache->lock); 3681 spin_unlock(&space_info->lock); 3682 3683 set_extent_bit(&trans->transaction->pinned_extents, bytenr, 3684 bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); 3685 } 3686 3687 spin_lock(&trans->transaction->dirty_bgs_lock); 3688 if (list_empty(&cache->dirty_list)) { 3689 list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs); 3690 bg_already_dirty = false; 3691 btrfs_get_block_group(cache); 3692 } 3693 spin_unlock(&trans->transaction->dirty_bgs_lock); 3694 3695 /* 3696 * No longer have used bytes in this block group, queue it for deletion. 3697 * We do this after adding the block group to the dirty list to avoid 3698 * races between cleaner kthread and space cache writeout. 3699 */ 3700 if (!alloc && old_val == 0) { 3701 if (!btrfs_test_opt(info, DISCARD_ASYNC)) 3702 btrfs_mark_bg_unused(cache); 3703 } else if (!alloc && reclaim) { 3704 btrfs_mark_bg_to_reclaim(cache); 3705 } 3706 3707 btrfs_put_block_group(cache); 3708 3709 /* Modified block groups are accounted for in the delayed_refs_rsv. */ 3710 if (!bg_already_dirty) 3711 btrfs_inc_delayed_refs_rsv_bg_updates(info); 3712 3713 return 0; 3714 } 3715 3716 /* 3717 * Update the block_group and space info counters. 3718 * 3719 * @cache: The cache we are manipulating 3720 * @ram_bytes: The number of bytes of file content, and will be same to 3721 * @num_bytes except for the compress path. 3722 * @num_bytes: The number of bytes in question 3723 * @delalloc: The blocks are allocated for the delalloc write 3724 * 3725 * This is called by the allocator when it reserves space. If this is a 3726 * reservation and the block group has become read only we cannot make the 3727 * reservation and return -EAGAIN, otherwise this function always succeeds. 
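* For example (an illustrative case, not a specific call site): a compressed write may pass a @ram_bytes of 1 MiB for the file content while @num_bytes is only 128 KiB for the extent actually reserved on disk, which is why the code below wakes space tickets when num_bytes < ram_bytes; without compression the two values are expected to match.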
3728 */ 3729 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, 3730 u64 ram_bytes, u64 num_bytes, int delalloc, 3731 bool force_wrong_size_class) 3732 { 3733 struct btrfs_space_info *space_info = cache->space_info; 3734 enum btrfs_block_group_size_class size_class; 3735 int ret = 0; 3736 3737 spin_lock(&space_info->lock); 3738 spin_lock(&cache->lock); 3739 if (cache->ro) { 3740 ret = -EAGAIN; 3741 goto out; 3742 } 3743 3744 if (btrfs_block_group_should_use_size_class(cache)) { 3745 size_class = btrfs_calc_block_group_size_class(num_bytes); 3746 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class); 3747 if (ret) 3748 goto out; 3749 } 3750 cache->reserved += num_bytes; 3751 space_info->bytes_reserved += num_bytes; 3752 trace_btrfs_space_reservation(cache->fs_info, "space_info", 3753 space_info->flags, num_bytes, 1); 3754 btrfs_space_info_update_bytes_may_use(cache->fs_info, 3755 space_info, -ram_bytes); 3756 if (delalloc) 3757 cache->delalloc_bytes += num_bytes; 3758 3759 /* 3760 * Compression can use less space than we reserved, so wake tickets if 3761 * that happens. 3762 */ 3763 if (num_bytes < ram_bytes) 3764 btrfs_try_granting_tickets(cache->fs_info, space_info); 3765 out: 3766 spin_unlock(&cache->lock); 3767 spin_unlock(&space_info->lock); 3768 return ret; 3769 } 3770 3771 /* 3772 * Update the block_group and space info counters. 3773 * 3774 * @cache: The cache we are manipulating 3775 * @num_bytes: The number of bytes in question 3776 * @delalloc: The blocks are allocated for the delalloc write 3777 * 3778 * This is called by somebody who is freeing space that was never actually used 3779 * on disk. For example if you reserve some space for a new leaf in transaction 3780 * A and before transaction A commits you free that leaf, you call this with 3781 * reserve set to 0 in order to clear the reservation. 3782 */ 3783 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, 3784 u64 num_bytes, int delalloc) 3785 { 3786 struct btrfs_space_info *space_info = cache->space_info; 3787 3788 spin_lock(&space_info->lock); 3789 spin_lock(&cache->lock); 3790 if (cache->ro) 3791 space_info->bytes_readonly += num_bytes; 3792 cache->reserved -= num_bytes; 3793 space_info->bytes_reserved -= num_bytes; 3794 space_info->max_extent_size = 0; 3795 3796 if (delalloc) 3797 cache->delalloc_bytes -= num_bytes; 3798 spin_unlock(&cache->lock); 3799 3800 btrfs_try_granting_tickets(cache->fs_info, space_info); 3801 spin_unlock(&space_info->lock); 3802 } 3803 3804 static void force_metadata_allocation(struct btrfs_fs_info *info) 3805 { 3806 struct list_head *head = &info->space_info; 3807 struct btrfs_space_info *found; 3808 3809 list_for_each_entry(found, head, list) { 3810 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3811 found->force_alloc = CHUNK_ALLOC_FORCE; 3812 } 3813 } 3814 3815 static int should_alloc_chunk(struct btrfs_fs_info *fs_info, 3816 struct btrfs_space_info *sinfo, int force) 3817 { 3818 u64 bytes_used = btrfs_space_info_used(sinfo, false); 3819 u64 thresh; 3820 3821 if (force == CHUNK_ALLOC_FORCE) 3822 return 1; 3823 3824 /* 3825 * in limited mode, we want to have some free space up to 3826 * about 1% of the FS size. 
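* For example (illustrative numbers only): on a 4 TiB filesystem the limited-mode threshold computed below works out to max_t(u64, SZ_64M, 1% of 4 TiB), i.e. roughly 41 GiB of still unallocated space in the space_info.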
3827 */ 3828 if (force == CHUNK_ALLOC_LIMITED) { 3829 thresh = btrfs_super_total_bytes(fs_info->super_copy); 3830 thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1)); 3831 3832 if (sinfo->total_bytes - bytes_used < thresh) 3833 return 1; 3834 } 3835 3836 if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) 3837 return 0; 3838 return 1; 3839 } 3840 3841 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) 3842 { 3843 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); 3844 3845 return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); 3846 } 3847 3848 static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags) 3849 { 3850 struct btrfs_block_group *bg; 3851 int ret; 3852 3853 /* 3854 * Check if we have enough space in the system space info because we 3855 * will need to update device items in the chunk btree and insert a new 3856 * chunk item in the chunk btree as well. This will allocate a new 3857 * system block group if needed. 3858 */ 3859 check_system_chunk(trans, flags); 3860 3861 bg = btrfs_create_chunk(trans, flags); 3862 if (IS_ERR(bg)) { 3863 ret = PTR_ERR(bg); 3864 goto out; 3865 } 3866 3867 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 3868 /* 3869 * Normally we are not expected to fail with -ENOSPC here, since we have 3870 * previously reserved space in the system space_info and allocated one 3871 * new system chunk if necessary. However there are three exceptions: 3872 * 3873 * 1) We may have enough free space in the system space_info but all the 3874 * existing system block groups have a profile which cannot be used 3875 * for extent allocation. 3876 * 3877 * This happens when mounting in degraded mode. For example we have a 3878 * RAID1 filesystem with 2 devices, lose one device and mount the fs 3879 * using the other device in degraded mode. If we then allocate a chunk, 3880 * we may have enough free space in the existing system space_info, but 3881 * none of the block groups can be used for extent allocation since they 3882 * have a RAID1 profile, and because we are in degraded mode with a 3883 * single device, we are forced to allocate a new system chunk with a 3884 * SINGLE profile. Making check_system_chunk() iterate over all system 3885 * block groups and check if they have a usable profile and enough space 3886 * can be slow on very large filesystems, so we tolerate the -ENOSPC and 3887 * try again after forcing allocation of a new system chunk. Like this 3888 * we avoid paying the cost of that search in normal circumstances, when 3889 * we were not mounted in degraded mode; 3890 * 3891 * 2) We had enough free space in the system space_info, and one suitable 3892 * block group to allocate from when we called check_system_chunk() 3893 * above. However right after we called it, the only system block group 3894 * with enough free space got turned into RO mode by a running scrub, 3895 * and in this case we have to allocate a new one and retry.
We only 3896 * need to do this allocation and retry once, since we have a transaction 3897 * handle and scrub uses the commit root to search for block groups; 3898 * 3899 * 3) We had one system block group with enough free space when we called 3900 * check_system_chunk(), but after that, right before we tried to 3901 * allocate the last extent buffer we needed, a discard operation came 3902 * in and it temporarily removed the last free space entry from the 3903 * block group (discard removes a free space entry, discards it, and 3904 * then adds back the entry to the block group cache). 3905 */ 3906 if (ret == -ENOSPC) { 3907 const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); 3908 struct btrfs_block_group *sys_bg; 3909 3910 sys_bg = btrfs_create_chunk(trans, sys_flags); 3911 if (IS_ERR(sys_bg)) { 3912 ret = PTR_ERR(sys_bg); 3913 btrfs_abort_transaction(trans, ret); 3914 goto out; 3915 } 3916 3917 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); 3918 if (ret) { 3919 btrfs_abort_transaction(trans, ret); 3920 goto out; 3921 } 3922 3923 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); 3924 if (ret) { 3925 btrfs_abort_transaction(trans, ret); 3926 goto out; 3927 } 3928 } else if (ret) { 3929 btrfs_abort_transaction(trans, ret); 3930 goto out; 3931 } 3932 out: 3933 btrfs_trans_release_chunk_metadata(trans); 3934 3935 if (ret) 3936 return ERR_PTR(ret); 3937 3938 btrfs_get_block_group(bg); 3939 return bg; 3940 } 3941 3942 /* 3943 * Chunk allocation is done in 2 phases: 3944 * 3945 * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for 3946 * the chunk, the chunk mapping, create its block group and add the items 3947 * that belong in the chunk btree to it - more specifically, we need to 3948 * update device items in the chunk btree and add a new chunk item to it. 3949 * 3950 * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block 3951 * group item to the extent btree and the device extent items to the devices 3952 * btree. 3953 * 3954 * This is done to prevent deadlocks. For example when COWing a node from the 3955 * extent btree we are holding a write lock on the node's parent and if we 3956 * trigger chunk allocation and attempt to insert the new block group item 3957 * in the extent btree right away, we could deadlock because the path for the 3958 * insertion can include that parent node. At first glance it seems impossible 3959 * to trigger chunk allocation after starting a transaction since tasks should 3960 * reserve enough transaction units (metadata space), however while that is true 3961 * most of the time, chunk allocation may still be triggered for several reasons: 3962 * 3963 * 1) When reserving metadata, we check if there is enough free space in the 3964 * metadata space_info and therefore don't trigger allocation of a new chunk. 3965 * However later when the task actually tries to COW an extent buffer from 3966 * the extent btree or from the device btree for example, it is forced to 3967 * allocate a new block group (chunk) because the only one that had enough 3968 * free space was just turned to RO mode by a running scrub for example (or 3969 * device replace, block group reclaim thread, etc), so we cannot use it 3970 * for allocating an extent and end up being forced to allocate a new one; 3971 * 3972 * 2) Because we only check that the metadata space_info has enough free bytes, 3973 * we end up not allocating a new metadata chunk in that case. However if 3974 * the filesystem was mounted in degraded mode, none of the existing block 3975 * groups might be suitable for extent allocation due to their incompatible 3976 * profile (e.g. mounting a 2-device filesystem, where all block groups 3977 * use a RAID1 profile, in degraded mode using a single device). In this case 3978 * when the task attempts to COW some extent buffer of the extent btree for 3979 * example, it will trigger allocation of a new metadata block group with a 3980 * suitable profile (SINGLE profile in the example of the degraded mount of 3981 * the RAID1 filesystem); 3982 * 3983 * 3) The task has reserved enough transaction units / metadata space, but when 3984 * it attempts to COW an extent buffer from the extent or device btree for 3985 * example, it does not find any free extent in any metadata block group, 3986 * and is therefore forced to try to allocate a new metadata block group. 3987 * This is because some other task allocated all available extents in the 3988 * meantime - this typically happens with tasks that don't reserve space 3989 * properly, either intentionally or as a bug. One example where this is 3990 * done intentionally is fsync, as it does not reserve any transaction units 3991 * and ends up allocating a variable number of metadata extents for log 3992 * tree extent buffers; 3993 * 3994 * 4) The task has reserved enough transaction units / metadata space, but right 3995 * before it tries to allocate the last extent buffer it needs, a discard 3996 * operation comes in and, temporarily, removes the last free space entry from 3997 * the only metadata block group that had free space (discard starts by 3998 * removing a free space entry from a block group, then does the discard 3999 * operation and, once it's done, it adds back the free space entry to the 4000 * block group). 4001 * 4002 * We also need this 2-phase setup when adding a device to a filesystem with 4003 * a seed device - we must create new metadata and system chunks without adding 4004 * any of the block group items to the chunk, extent and device btrees. If we 4005 * did not do it this way, we would get ENOSPC when attempting to update those 4006 * btrees, since all the chunks from the seed device are read-only. 4007 * 4008 * Phase 1 does the updates and insertions to the chunk btree because if we had 4009 * it done in phase 2 and have a thundering herd of tasks allocating chunks in 4010 * parallel, we risk having too many system chunks allocated by many tasks if 4011 * many tasks reach phase 1 without the previous ones completing phase 2. In the 4012 * extreme case this leads to exhaustion of the system chunk array in the 4013 * superblock. This is easier to trigger if using a btree node/leaf size of 64K 4014 * and with RAID filesystems (so we have more device items in the chunk btree). 4015 * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of 4016 * the system chunk array due to concurrent allocations") provides more details. 4017 * 4018 * Allocation of system chunks does not happen through this function. A task that 4019 * needs to update the chunk btree (the only btree that uses system chunks), must 4020 * preallocate chunk space by calling either check_system_chunk() or 4021 * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or 4022 * metadata chunk or when removing a chunk, while the latter is used before doing 4023 * a modification to the chunk btree - use cases for the latter are adding, 4024 * removing and resizing a device as well as relocation of a system chunk. 4025 * See the comment below for more details. 4026 * 4027 * The reservation of system space, done through check_system_chunk(), as well 4028 * as all the updates and insertions into the chunk btree must be done while 4029 * holding fs_info->chunk_mutex. This is important to guarantee that while COWing 4030 * an extent buffer from the chunk btree we never trigger allocation of a new 4031 * system chunk, which would result in a deadlock (trying to lock twice an 4032 * extent buffer of the chunk btree, first time before triggering the chunk 4033 * allocation and the second time during chunk allocation while attempting to 4034 * update the chunk btree). The system chunk array is also updated while holding 4035 * that mutex. The same logic applies to removing chunks - we must reserve system 4036 * space, update the chunk btree and the system chunk array in the superblock 4037 * while holding fs_info->chunk_mutex. 4038 * 4039 * This function, btrfs_chunk_alloc(), belongs to phase 1. 4040 * 4041 * If @force is CHUNK_ALLOC_FORCE: 4042 * - return 1 if it successfully allocates a chunk, 4043 * - return errors including -ENOSPC otherwise. 4044 * If @force is NOT CHUNK_ALLOC_FORCE: 4045 * - return 0 if it doesn't need to allocate a new chunk, 4046 * - return 1 if it successfully allocates a chunk, 4047 * - return errors including -ENOSPC otherwise. 4048 */ 4049 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, 4050 enum btrfs_chunk_alloc_enum force) 4051 { 4052 struct btrfs_fs_info *fs_info = trans->fs_info; 4053 struct btrfs_space_info *space_info; 4054 struct btrfs_block_group *ret_bg; 4055 bool wait_for_alloc = false; 4056 bool should_alloc = false; 4057 bool from_extent_allocation = false; 4058 int ret = 0; 4059 4060 if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) { 4061 from_extent_allocation = true; 4062 force = CHUNK_ALLOC_FORCE; 4063 } 4064 4065 /* Don't re-enter if we're already allocating a chunk */ 4066 if (trans->allocating_chunk) 4067 return -ENOSPC; 4068 /* 4069 * Allocation of system chunks cannot happen through this path, as we 4070 * could end up in a deadlock if we are allocating a data or metadata 4071 * chunk and there is another task modifying the chunk btree. 4072 * 4073 * This is because while we are holding the chunk mutex, we will attempt 4074 * to add the new chunk item to the chunk btree or update an existing 4075 * device item in the chunk btree, while the other task that is modifying 4076 * the chunk btree is attempting to COW an extent buffer while holding a 4077 * lock on it and on its parent - if the COW operation triggers a system 4078 * chunk allocation, then we can deadlock because we are holding the 4079 * chunk mutex and we may need to access that extent buffer or its parent 4080 * in order to add the chunk item or update a device item. 4081 * 4082 * Tasks that want to modify the chunk tree should reserve system space 4083 * before updating the chunk btree, by calling either 4084 * btrfs_reserve_chunk_metadata() or check_system_chunk().
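* A rough sketch of that pattern (not a verbatim call site; the real users are the device add/remove/resize and system chunk relocation paths): btrfs_reserve_chunk_metadata(trans, true); ...modify the chunk btree...; btrfs_trans_release_chunk_metadata(trans);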
4085 * It's possible that after a task reserves the space, it still ends up 4086 * here - this happens in the cases described above at do_chunk_alloc(). 4087 * The task will have to either retry or fail. 4088 */ 4089 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 4090 return -ENOSPC; 4091 4092 space_info = btrfs_find_space_info(fs_info, flags); 4093 ASSERT(space_info); 4094 4095 do { 4096 spin_lock(&space_info->lock); 4097 if (force < space_info->force_alloc) 4098 force = space_info->force_alloc; 4099 should_alloc = should_alloc_chunk(fs_info, space_info, force); 4100 if (space_info->full) { 4101 /* No more free physical space */ 4102 if (should_alloc) 4103 ret = -ENOSPC; 4104 else 4105 ret = 0; 4106 spin_unlock(&space_info->lock); 4107 return ret; 4108 } else if (!should_alloc) { 4109 spin_unlock(&space_info->lock); 4110 return 0; 4111 } else if (space_info->chunk_alloc) { 4112 /* 4113 * Someone is already allocating, so we need to block 4114 * until this someone is finished and then loop to 4115 * recheck if we should continue with our allocation 4116 * attempt. 4117 */ 4118 wait_for_alloc = true; 4119 force = CHUNK_ALLOC_NO_FORCE; 4120 spin_unlock(&space_info->lock); 4121 mutex_lock(&fs_info->chunk_mutex); 4122 mutex_unlock(&fs_info->chunk_mutex); 4123 } else { 4124 /* Proceed with allocation */ 4125 space_info->chunk_alloc = 1; 4126 wait_for_alloc = false; 4127 spin_unlock(&space_info->lock); 4128 } 4129 4130 cond_resched(); 4131 } while (wait_for_alloc); 4132 4133 mutex_lock(&fs_info->chunk_mutex); 4134 trans->allocating_chunk = true; 4135 4136 /* 4137 * If we have mixed data/metadata chunks we want to make sure we keep 4138 * allocating mixed chunks instead of individual chunks. 4139 */ 4140 if (btrfs_mixed_space_info(space_info)) 4141 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 4142 4143 /* 4144 * if we're doing a data chunk, go ahead and make sure that 4145 * we keep a reasonable number of metadata chunks allocated in the 4146 * FS as well. 4147 */ 4148 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 4149 fs_info->data_chunk_allocations++; 4150 if (!(fs_info->data_chunk_allocations % 4151 fs_info->metadata_ratio)) 4152 force_metadata_allocation(fs_info); 4153 } 4154 4155 ret_bg = do_chunk_alloc(trans, flags); 4156 trans->allocating_chunk = false; 4157 4158 if (IS_ERR(ret_bg)) { 4159 ret = PTR_ERR(ret_bg); 4160 } else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) { 4161 /* 4162 * New block group is likely to be used soon. Try to activate 4163 * it now. Failure is OK for now. 
4164 */ 4165 btrfs_zone_activate(ret_bg); 4166 } 4167 4168 if (!ret) 4169 btrfs_put_block_group(ret_bg); 4170 4171 spin_lock(&space_info->lock); 4172 if (ret < 0) { 4173 if (ret == -ENOSPC) 4174 space_info->full = 1; 4175 else 4176 goto out; 4177 } else { 4178 ret = 1; 4179 space_info->max_extent_size = 0; 4180 } 4181 4182 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 4183 out: 4184 space_info->chunk_alloc = 0; 4185 spin_unlock(&space_info->lock); 4186 mutex_unlock(&fs_info->chunk_mutex); 4187 4188 return ret; 4189 } 4190 4191 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) 4192 { 4193 u64 num_dev; 4194 4195 num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; 4196 if (!num_dev) 4197 num_dev = fs_info->fs_devices->rw_devices; 4198 4199 return num_dev; 4200 } 4201 4202 static void reserve_chunk_space(struct btrfs_trans_handle *trans, 4203 u64 bytes, 4204 u64 type) 4205 { 4206 struct btrfs_fs_info *fs_info = trans->fs_info; 4207 struct btrfs_space_info *info; 4208 u64 left; 4209 int ret = 0; 4210 4211 /* 4212 * Needed because we can end up allocating a system chunk and for an 4213 * atomic and race free space reservation in the chunk block reserve. 4214 */ 4215 lockdep_assert_held(&fs_info->chunk_mutex); 4216 4217 info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 4218 spin_lock(&info->lock); 4219 left = info->total_bytes - btrfs_space_info_used(info, true); 4220 spin_unlock(&info->lock); 4221 4222 if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4223 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", 4224 left, bytes, type); 4225 btrfs_dump_space_info(fs_info, info, 0, 0); 4226 } 4227 4228 if (left < bytes) { 4229 u64 flags = btrfs_system_alloc_profile(fs_info); 4230 struct btrfs_block_group *bg; 4231 4232 /* 4233 * Ignore failure to create system chunk. We might end up not 4234 * needing it, as we might not need to COW all nodes/leafs from 4235 * the paths we visit in the chunk tree (they were already COWed 4236 * or created in the current transaction for example). 4237 */ 4238 bg = btrfs_create_chunk(trans, flags); 4239 if (IS_ERR(bg)) { 4240 ret = PTR_ERR(bg); 4241 } else { 4242 /* 4243 * We have a new chunk. We also need to activate it for 4244 * zoned filesystem. 4245 */ 4246 ret = btrfs_zoned_activate_one_bg(fs_info, info, true); 4247 if (ret < 0) 4248 return; 4249 4250 /* 4251 * If we fail to add the chunk item here, we end up 4252 * trying again at phase 2 of chunk allocation, at 4253 * btrfs_create_pending_block_groups(). So ignore 4254 * any error here. An ENOSPC here could happen, due to 4255 * the cases described at do_chunk_alloc() - the system 4256 * block group we just created was just turned into RO 4257 * mode by a scrub for example, or a running discard 4258 * temporarily removed its free space entries, etc. 4259 */ 4260 btrfs_chunk_alloc_add_chunk_item(trans, bg); 4261 } 4262 } 4263 4264 if (!ret) { 4265 ret = btrfs_block_rsv_add(fs_info, 4266 &fs_info->chunk_block_rsv, 4267 bytes, BTRFS_RESERVE_NO_FLUSH); 4268 if (!ret) 4269 trans->chunk_bytes_reserved += bytes; 4270 } 4271 } 4272 4273 /* 4274 * Reserve space in the system space for allocating or removing a chunk. 4275 * The caller must be holding fs_info->chunk_mutex. 
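* For instance (an illustrative profile, not the only case), for a RAID1 chunk type get_profile_num_devs() yields 2 (its devs_max), so the calculation below reserves enough metadata space for two device item updates plus one chunk item insertion or removal.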
4276 */ 4277 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) 4278 { 4279 struct btrfs_fs_info *fs_info = trans->fs_info; 4280 const u64 num_devs = get_profile_num_devs(fs_info, type); 4281 u64 bytes; 4282 4283 /* num_devs device items to update and 1 chunk item to add or remove. */ 4284 bytes = btrfs_calc_metadata_size(fs_info, num_devs) + 4285 btrfs_calc_insert_metadata_size(fs_info, 1); 4286 4287 reserve_chunk_space(trans, bytes, type); 4288 } 4289 4290 /* 4291 * Reserve space in the system space, if needed, for doing a modification to the 4292 * chunk btree. 4293 * 4294 * @trans: A transaction handle. 4295 * @is_item_insertion: Indicate if the modification is for inserting a new item 4296 * in the chunk btree or if it's for the deletion or update 4297 * of an existing item. 4298 * 4299 * This is used in a context where we need to update the chunk btree outside 4300 * block group allocation and removal, to avoid a deadlock with a concurrent 4301 * task that is allocating a metadata or data block group and therefore needs to 4302 * update the chunk btree while holding the chunk mutex. After the update to the 4303 * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called. 4304 * 4305 */ 4306 void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans, 4307 bool is_item_insertion) 4308 { 4309 struct btrfs_fs_info *fs_info = trans->fs_info; 4310 u64 bytes; 4311 4312 if (is_item_insertion) 4313 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); 4314 else 4315 bytes = btrfs_calc_metadata_size(fs_info, 1); 4316 4317 mutex_lock(&fs_info->chunk_mutex); 4318 reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM); 4319 mutex_unlock(&fs_info->chunk_mutex); 4320 } 4321 4322 void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 4323 { 4324 struct btrfs_block_group *block_group; 4325 4326 block_group = btrfs_lookup_first_block_group(info, 0); 4327 while (block_group) { 4328 btrfs_wait_block_group_cache_done(block_group); 4329 spin_lock(&block_group->lock); 4330 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, 4331 &block_group->runtime_flags)) { 4332 struct inode *inode = block_group->inode; 4333 4334 block_group->inode = NULL; 4335 spin_unlock(&block_group->lock); 4336 4337 ASSERT(block_group->io_ctl.inode == NULL); 4338 iput(inode); 4339 } else { 4340 spin_unlock(&block_group->lock); 4341 } 4342 block_group = btrfs_next_block_group(block_group); 4343 } 4344 } 4345 4346 /* 4347 * Must be called only after stopping all workers, since we could have block 4348 * group caching kthreads running, and therefore they could race with us if we 4349 * freed the block groups before stopping them. 
4350 */ 4351 int btrfs_free_block_groups(struct btrfs_fs_info *info) 4352 { 4353 struct btrfs_block_group *block_group; 4354 struct btrfs_space_info *space_info; 4355 struct btrfs_caching_control *caching_ctl; 4356 struct rb_node *n; 4357 4358 if (btrfs_is_zoned(info)) { 4359 if (info->active_meta_bg) { 4360 btrfs_put_block_group(info->active_meta_bg); 4361 info->active_meta_bg = NULL; 4362 } 4363 if (info->active_system_bg) { 4364 btrfs_put_block_group(info->active_system_bg); 4365 info->active_system_bg = NULL; 4366 } 4367 } 4368 4369 write_lock(&info->block_group_cache_lock); 4370 while (!list_empty(&info->caching_block_groups)) { 4371 caching_ctl = list_entry(info->caching_block_groups.next, 4372 struct btrfs_caching_control, list); 4373 list_del(&caching_ctl->list); 4374 btrfs_put_caching_control(caching_ctl); 4375 } 4376 write_unlock(&info->block_group_cache_lock); 4377 4378 spin_lock(&info->unused_bgs_lock); 4379 while (!list_empty(&info->unused_bgs)) { 4380 block_group = list_first_entry(&info->unused_bgs, 4381 struct btrfs_block_group, 4382 bg_list); 4383 list_del_init(&block_group->bg_list); 4384 btrfs_put_block_group(block_group); 4385 } 4386 4387 while (!list_empty(&info->reclaim_bgs)) { 4388 block_group = list_first_entry(&info->reclaim_bgs, 4389 struct btrfs_block_group, 4390 bg_list); 4391 list_del_init(&block_group->bg_list); 4392 btrfs_put_block_group(block_group); 4393 } 4394 spin_unlock(&info->unused_bgs_lock); 4395 4396 spin_lock(&info->zone_active_bgs_lock); 4397 while (!list_empty(&info->zone_active_bgs)) { 4398 block_group = list_first_entry(&info->zone_active_bgs, 4399 struct btrfs_block_group, 4400 active_bg_list); 4401 list_del_init(&block_group->active_bg_list); 4402 btrfs_put_block_group(block_group); 4403 } 4404 spin_unlock(&info->zone_active_bgs_lock); 4405 4406 write_lock(&info->block_group_cache_lock); 4407 while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { 4408 block_group = rb_entry(n, struct btrfs_block_group, 4409 cache_node); 4410 rb_erase_cached(&block_group->cache_node, 4411 &info->block_group_cache_tree); 4412 RB_CLEAR_NODE(&block_group->cache_node); 4413 write_unlock(&info->block_group_cache_lock); 4414 4415 down_write(&block_group->space_info->groups_sem); 4416 list_del(&block_group->list); 4417 up_write(&block_group->space_info->groups_sem); 4418 4419 /* 4420 * We haven't cached this block group, which means we could 4421 * possibly have excluded extents on this block group. 4422 */ 4423 if (block_group->cached == BTRFS_CACHE_NO || 4424 block_group->cached == BTRFS_CACHE_ERROR) 4425 btrfs_free_excluded_extents(block_group); 4426 4427 btrfs_remove_free_space_cache(block_group); 4428 ASSERT(block_group->cached != BTRFS_CACHE_STARTED); 4429 ASSERT(list_empty(&block_group->dirty_list)); 4430 ASSERT(list_empty(&block_group->io_list)); 4431 ASSERT(list_empty(&block_group->bg_list)); 4432 ASSERT(refcount_read(&block_group->refs) == 1); 4433 ASSERT(block_group->swap_extents == 0); 4434 btrfs_put_block_group(block_group); 4435 4436 write_lock(&info->block_group_cache_lock); 4437 } 4438 write_unlock(&info->block_group_cache_lock); 4439 4440 btrfs_release_global_block_rsv(info); 4441 4442 while (!list_empty(&info->space_info)) { 4443 space_info = list_entry(info->space_info.next, 4444 struct btrfs_space_info, 4445 list); 4446 4447 /* 4448 * Do not hide this behind enospc_debug, this is actually 4449 * important and indicates a real bug if this happens. 
4450 */ 4451 if (WARN_ON(space_info->bytes_pinned > 0 || 4452 space_info->bytes_may_use > 0)) 4453 btrfs_dump_space_info(info, space_info, 0, 0); 4454 4455 /* 4456 * If there was a failure to cleanup a log tree, very likely due 4457 * to an IO failure on a writeback attempt of one or more of its 4458 * extent buffers, we could not do proper (and cheap) unaccounting 4459 * of their reserved space, so don't warn on bytes_reserved > 0 in 4460 * that case. 4461 */ 4462 if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || 4463 !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { 4464 if (WARN_ON(space_info->bytes_reserved > 0)) 4465 btrfs_dump_space_info(info, space_info, 0, 0); 4466 } 4467 4468 WARN_ON(space_info->reclaim_size > 0); 4469 list_del(&space_info->list); 4470 btrfs_sysfs_remove_space_info(space_info); 4471 } 4472 return 0; 4473 } 4474 4475 void btrfs_freeze_block_group(struct btrfs_block_group *cache) 4476 { 4477 atomic_inc(&cache->frozen); 4478 } 4479 4480 void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) 4481 { 4482 struct btrfs_fs_info *fs_info = block_group->fs_info; 4483 bool cleanup; 4484 4485 spin_lock(&block_group->lock); 4486 cleanup = (atomic_dec_and_test(&block_group->frozen) && 4487 test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)); 4488 spin_unlock(&block_group->lock); 4489 4490 if (cleanup) { 4491 struct btrfs_chunk_map *map; 4492 4493 map = btrfs_find_chunk_map(fs_info, block_group->start, 1); 4494 /* Logic error, can't happen. */ 4495 ASSERT(map); 4496 4497 btrfs_remove_chunk_map(fs_info, map); 4498 4499 /* Once for our lookup reference. */ 4500 btrfs_free_chunk_map(map); 4501 4502 /* 4503 * We may have left one free space entry and other possible 4504 * tasks trimming this block group have left 1 entry each one. 4505 * Free them if any. 4506 */ 4507 btrfs_remove_free_space_cache(block_group); 4508 } 4509 } 4510 4511 bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg) 4512 { 4513 bool ret = true; 4514 4515 spin_lock(&bg->lock); 4516 if (bg->ro) 4517 ret = false; 4518 else 4519 bg->swap_extents++; 4520 spin_unlock(&bg->lock); 4521 4522 return ret; 4523 } 4524 4525 void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount) 4526 { 4527 spin_lock(&bg->lock); 4528 ASSERT(!bg->ro); 4529 ASSERT(bg->swap_extents >= amount); 4530 bg->swap_extents -= amount; 4531 spin_unlock(&bg->lock); 4532 } 4533 4534 enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size) 4535 { 4536 if (size <= SZ_128K) 4537 return BTRFS_BG_SZ_SMALL; 4538 if (size <= SZ_8M) 4539 return BTRFS_BG_SZ_MEDIUM; 4540 return BTRFS_BG_SZ_LARGE; 4541 } 4542 4543 /* 4544 * Handle a block group allocating an extent in a size class 4545 * 4546 * @bg: The block group we allocated in. 4547 * @size_class: The size class of the allocation. 4548 * @force_wrong_size_class: Whether we are desperate enough to allow 4549 * mismatched size classes. 4550 * 4551 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the 4552 * case of a race that leads to the wrong size class without 4553 * force_wrong_size_class set. 4554 * 4555 * find_free_extent will skip block groups with a mismatched size class until 4556 * it really needs to avoid ENOSPC. In that case it will set 4557 * force_wrong_size_class. However, if a block group is newly allocated and 4558 * doesn't yet have a size class, then it is possible for two allocations of 4559 * different sizes to race and both try to use it. 
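* (For reference, the size classes used here come from btrfs_calc_block_group_size_class() above: allocations up to 128K are small, up to 8M are medium, and anything larger is large.)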
The loser is caught here and 4560 * has to retry. 4561 */ 4562 int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, 4563 enum btrfs_block_group_size_class size_class, 4564 bool force_wrong_size_class) 4565 { 4566 ASSERT(size_class != BTRFS_BG_SZ_NONE); 4567 4568 /* The new allocation is in the right size class, do nothing */ 4569 if (bg->size_class == size_class) 4570 return 0; 4571 /* 4572 * The new allocation is in a mismatched size class. 4573 * This means one of two things: 4574 * 4575 * 1. Two tasks in find_free_extent for different size_classes raced 4576 * and hit the same empty block_group. Make the loser try again. 4577 * 2. A call to find_free_extent got desperate enough to set 4578 * 'force_wrong_size_class'. Don't change the size_class, but allow the 4579 * allocation. 4580 */ 4581 if (bg->size_class != BTRFS_BG_SZ_NONE) { 4582 if (force_wrong_size_class) 4583 return 0; 4584 return -EAGAIN; 4585 } 4586 /* 4587 * The happy new block group case: the new allocation is the first 4588 * one in the block_group so we set size_class. 4589 */ 4590 bg->size_class = size_class; 4591 4592 return 0; 4593 } 4594 4595 bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg) 4596 { 4597 if (btrfs_is_zoned(bg->fs_info)) 4598 return false; 4599 if (!btrfs_is_block_group_data_only(bg)) 4600 return false; 4601 return true; 4602 } 4603