// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "misc.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"
#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "orphan.h"
#include "tree-checker.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
			      u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;

	set_extent_bits(&fs_info->excluded_extents, start, end,
			EXTENT_UPTODATE);
	return 0;
}

void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 start, end;

	start = cache->start;
	end = start + cache->length - 1;

	clear_extent_bits(&fs_info->excluded_extents, start, end,
			  EXTENT_UPTODATE);
}

/* Simple helper to search for an existing data extent at a given offset. */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	struct btrfs_root *root = btrfs_extent_root(fs_info, start);
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}
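
/*
 * Illustrative usage note (added for clarity, not part of the original
 * file): the return convention above is inherited from btrfs_search_slot(),
 * so 0 means the EXTENT_ITEM exists, > 0 means it was not found, and < 0 is
 * an error.  A hypothetical caller only interested in existence could do:
 *
 *	if (btrfs_lookup_data_extent(fs_info, start, len) == 0)
 *		... the data extent item is present in the extent tree ...
 */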

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node may
 * also store the extent flags to set.  This way you can check to see what
 * the reference count and extent flags would be if all of the delayed refs
 * were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_root *extent_root;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different.
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	extent_root = btrfs_extent_root(fs_info, bytenr);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again.
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
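
/*
 * Usage sketch (hypothetical caller, for illustration only): combining the
 * committed refcount with the pending delayed-ref modifications is what
 * lets a caller ask "is this block still shared?" without forcing the
 * delayed refs to run first:
 *
 *	u64 refs, flags;
 *
 *	ret = btrfs_lookup_extent_info(trans, fs_info, eb->start, level,
 *				       1, &refs, &flags);
 *	if (ret == 0 && refs > 1)
 *		... block is shared, COW conversion may be needed ...
 *
 * (With skinny metadata, the offset argument carries the block's level.)
 */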

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase lower level extents' reference counts.  The original
 * implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block and increase lower level extents' reference counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
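
/*
 * Worked example of the key composition described above (all numbers made
 * up for illustration): a file extent at bytenr 137M referenced by inode
 * 257 at file offset 0 in subvolume 5 gets an implicit back ref keyed as
 *
 *     (137M, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a shared (e.g. relocated) leaf
 * at bytenr 30M would instead carry a full back ref keyed as
 *
 *     (137M, BTRFS_SHARED_DATA_REF_KEY, 30M)
 */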

/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has a parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has a parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info,
		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
		  eb->start, (unsigned long)iref, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}

u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
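
/*
 * Note on collisions (added for clarity): hash_extent_data_ref() can map
 * two different (root, owner, offset) triples to the same key offset.  The
 * lookup above therefore keeps scanning forward until
 * match_extent_data_ref() confirms the payload, and insert_extent_data_ref()
 * below resolves -EEXIST by linearly probing with key.offset++.
 */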

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;

		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
		btrfs_print_v0_err(trans->fs_info);
		btrfs_abort_transaction(trans, -EINVAL);
		return -EINVAL;
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * Look for an inline back ref.  If the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->search_for_extension = 1;
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			if (ptr > end) {
				err = -EUCLEAN;
				btrfs_print_leaf(path->nodes[0]);
				btrfs_crit(fs_info,
"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
					   path->slots[0], root_objectid,
					   owner, offset, parent);
			}
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;

			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		path->search_for_extension = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * Helper to add a new inline back ref.
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;

		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;

		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
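
/*
 * Note (added for clarity): back refs for an extent live in two places --
 * packed inline in the extent item when they fit, and as separate keyed
 * items once the extent item would overflow.  That is why the lookup above
 * tries the inline form first and only falls back to the keyed
 * tree-block/data ref items on -ENOENT, mirroring the -EAGAIN fallback in
 * __btrfs_inc_extent_ref() below.
 */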

/*
 * Helper to update/remove an inline back ref.
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/*
		 * We're adding refs to a tree block we already own, this
		 * should not happen at all.
		 */
		if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu",
				   bytenr, num_bytes, root_objectid);
			if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
				WARN_ON(1);
				btrfs_crit(trans->fs_info,
			"path->slots[0]=%d path->nodes[0]:", path->slots[0]);
				btrfs_print_leaf(path->nodes[0]);
			}
			return -EUCLEAN;
		}
		update_inline_extent_backref(path, iref, refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref)
		update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
	else if (is_data)
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	else
		ret = btrfs_del_item(trans, root, path);
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}

static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
	struct btrfs_device *dev = stripe->dev;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 phys = stripe->physical;
	u64 len = stripe->length;
	u64 discarded = 0;
	int ret = 0;

	/* Zone reset on a zoned filesystem */
	if (btrfs_can_zone_reset(dev, phys, len)) {
		u64 src_disc;

		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
		if (ret)
			goto out;

		if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
		    dev != dev_replace->srcdev)
			goto out;

		src_disc = discarded;

		/* Send to replace target as well */
		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
					      &discarded);
		discarded += src_disc;
	} else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
	} else {
		ret = 0;
		*bytes = 0;
	}

out:
	*bytes = discarded;
	return ret;
}
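
/*
 * Example of the unit conversion used above (added for illustration): byte
 * ranges handed to blkdev_issue_discard() are expressed in 512-byte
 * sectors, hence the `>> 9` shifts; e.g. a 1MiB discard starting at byte
 * offset 4096 becomes sector 8, count 2048.
 */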

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;

	/*
	 * Avoid races with device replace and make sure the devices in the
	 * stripes don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_discard_stripe *stripes;
		unsigned int num_stripes;
		int i;

		num_bytes = end - cur;
		stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
		if (IS_ERR(stripes)) {
			ret = PTR_ERR(stripes);
			if (ret == -EOPNOTSUPP)
				ret = 0;
			break;
		}

		for (i = 0; i < num_stripes; i++) {
			struct btrfs_discard_stripe *stripe = stripes + i;
			u64 bytes;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}

			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
				      &stripe->dev->dev_state))
				continue;

			ret = do_discard_extent(stripe, &bytes);
			if (ret) {
				/*
				 * Keep going if discard is not supported by
				 * the device.
				 */
				if (ret != -EOPNOTSUPP)
					break;
				ret = 0;
			} else {
				discarded_bytes += bytes;
			}
		}
		kfree(stripes);
		if (ret)
			break;
		cur += num_bytes;
	}
	btrfs_bio_counter_dec(fs_info);
	if (actual_bytes)
		*actual_bytes = discarded_bytes;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	return ret;
}
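
/*
 * Usage sketch (hypothetical caller, for illustration only; the
 * btrfs_init_generic_ref()/btrfs_init_tree_ref() helpers are assumed from
 * delayed-ref.h): queuing one more reference to a tree block held by root
 * 'ref_root' at 'level':
 *
 *	struct btrfs_ref generic_ref = { 0 };
 *
 *	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF,
 *			       buf->start, buf->len, 0);
 *	btrfs_init_tree_ref(&generic_ref, level, ref_root, root_owner, false);
 *	ret = btrfs_inc_extent_ref(trans, &generic_ref);
 *
 * Nothing touches the extent tree here; the actual insert happens later
 * when the delayed ref is run.
 */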

/*
 * __btrfs_inc_extent_ref - insert a backreference for a given extent
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more
 * details on how it works.
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for the
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block.  Since new extents are always
 *		    created with indirect references, this will only be the
 *		    case when relocating a shared extent.  In that case,
 *		    root_objectid will be BTRFS_TREE_RELOC_OBJECTID.
 *		    Otherwise, parent must be 0.
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is
 *		    currently always passed as 0.  For data extents it is the
 *		    file offset this extent belongs to.
 *
 * @refs_to_add:    Number of references to add
 *
 * @extent_op:	    Pointer to a structure, holding information necessary
 *		    when updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* This will set up the path even if it fails to insert the back ref. */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* Now insert the actual backref. */
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);

	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;

		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = 1;

	if (TRANS_ABORTED(trans))
		return 0;

	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

	root = btrfs_extent_root(fs_info, key.objectid);
again:
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->ref_mod != 1) {
		btrfs_err(trans->fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* Helper function to actually process a single delayed ref entry. */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (TRANS_ABORTED(trans)) {
		if (insert_reserved)
			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, node, extent_op,
					   insert_reserved);
	else
		BUG();
	if (ret && insert_reserved)
		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
	return ret;
}

static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This is to prevent a ref count from going down to zero, which deletes
	 * the extent item from the extent tree, when there still are references
	 * to add, which would fail because they would not find the extent item.
	 */
	if (!list_empty(&head->ref_add_list))
		return list_first_entry(&head->ref_add_list,
					struct btrfs_delayed_ref_node,
					add_list);

	ref = rb_entry(rb_first_cached(&head->ref_tree),
		       struct btrfs_delayed_ref_node, ref_node);
	ASSERT(list_empty(&ref->add_list));
	return ref;
}

static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}

static struct btrfs_delayed_extent_op *cleanup_extent_op(
				struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;

	if (!extent_op)
		return NULL;

	if (head->must_insert_reserved) {
		head->extent_op = NULL;
		btrfs_free_delayed_extent_op(extent_op);
		return NULL;
	}
	return extent_op;
}
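
/*
 * Note (added for clarity): if must_insert_reserved is still set by the
 * time the head is cleaned up, the reserved extent was never actually
 * inserted (cleanup_ref_head() below pins it instead), so a pending
 * flags/key update has no extent item to apply to and cleanup_extent_op()
 * above can simply drop it.
 */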

static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = cleanup_extent_op(head);
	if (!extent_op)
		return 0;
	head->extent_op = NULL;
	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
	return ret ? ret : 1;
}

void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_delayed_ref_root *delayed_refs,
				  struct btrfs_delayed_ref_head *head)
{
	int nr_items = 1;	/* Dropping this ref head update. */

	/*
	 * We had csum deletions accounted for in our delayed refs rsv, we need
	 * to drop the csum leaves for this update from our delayed_refs_rsv.
	 */
	if (head->total_ref_mod < 0 && head->is_data) {
		spin_lock(&delayed_refs->lock);
		delayed_refs->pending_csums -= head->num_bytes;
		spin_unlock(&delayed_refs->lock);
		nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
	}

	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
}

static int cleanup_ref_head(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	ret = run_and_cleanup_extent_op(trans, head);
	if (ret < 0) {
		unselect_delayed_ref_head(delayed_refs, head);
		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
		return ret;
	} else if (ret) {
		return ret;
	}

	/*
	 * Need to drop our head ref lock and re-acquire the delayed ref lock
	 * and then re-check to make sure nobody got added.
	 */
	spin_unlock(&head->lock);
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		return 1;
	}
	btrfs_delete_ref_head(delayed_refs, head);
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	if (head->must_insert_reserved) {
		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
		if (head->is_data) {
			struct btrfs_root *csum_root;

			csum_root = btrfs_csum_root(fs_info, head->bytenr);
			ret = btrfs_del_csums(trans, csum_root, head->bytenr,
					      head->num_bytes);
		}
	}

	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);

	trace_run_delayed_ref_head(fs_info, head, 0);
	btrfs_delayed_ref_unlock(head);
	btrfs_put_delayed_ref_head(head);
	return ret;
}
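
/*
 * Note (added for clarity): the dance above follows the lock order used
 * throughout this file -- delayed_refs->lock nests outside head->lock, so
 * the head lock must be dropped before the delayed_refs lock can be taken,
 * and the emptiness of the ref tree has to be re-checked afterwards in case
 * a new ref was queued in the window.
 */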
1885 */ 1886 if (ret == -EAGAIN) 1887 head = ERR_PTR(-EAGAIN); 1888 1889 return head; 1890 } 1891 1892 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, 1893 struct btrfs_delayed_ref_head *locked_ref, 1894 unsigned long *run_refs) 1895 { 1896 struct btrfs_fs_info *fs_info = trans->fs_info; 1897 struct btrfs_delayed_ref_root *delayed_refs; 1898 struct btrfs_delayed_extent_op *extent_op; 1899 struct btrfs_delayed_ref_node *ref; 1900 int must_insert_reserved = 0; 1901 int ret; 1902 1903 delayed_refs = &trans->transaction->delayed_refs; 1904 1905 lockdep_assert_held(&locked_ref->mutex); 1906 lockdep_assert_held(&locked_ref->lock); 1907 1908 while ((ref = select_delayed_ref(locked_ref))) { 1909 if (ref->seq && 1910 btrfs_check_delayed_seq(fs_info, ref->seq)) { 1911 spin_unlock(&locked_ref->lock); 1912 unselect_delayed_ref_head(delayed_refs, locked_ref); 1913 return -EAGAIN; 1914 } 1915 1916 (*run_refs)++; 1917 ref->in_tree = 0; 1918 rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); 1919 RB_CLEAR_NODE(&ref->ref_node); 1920 if (!list_empty(&ref->add_list)) 1921 list_del(&ref->add_list); 1922 /* 1923 * When we play the delayed ref, also correct the ref_mod on 1924 * head 1925 */ 1926 switch (ref->action) { 1927 case BTRFS_ADD_DELAYED_REF: 1928 case BTRFS_ADD_DELAYED_EXTENT: 1929 locked_ref->ref_mod -= ref->ref_mod; 1930 break; 1931 case BTRFS_DROP_DELAYED_REF: 1932 locked_ref->ref_mod += ref->ref_mod; 1933 break; 1934 default: 1935 WARN_ON(1); 1936 } 1937 atomic_dec(&delayed_refs->num_entries); 1938 1939 /* 1940 * Record the must_insert_reserved flag before we drop the 1941 * spin lock. 1942 */ 1943 must_insert_reserved = locked_ref->must_insert_reserved; 1944 locked_ref->must_insert_reserved = 0; 1945 1946 extent_op = locked_ref->extent_op; 1947 locked_ref->extent_op = NULL; 1948 spin_unlock(&locked_ref->lock); 1949 1950 ret = run_one_delayed_ref(trans, ref, extent_op, 1951 must_insert_reserved); 1952 1953 btrfs_free_delayed_extent_op(extent_op); 1954 if (ret) { 1955 unselect_delayed_ref_head(delayed_refs, locked_ref); 1956 btrfs_put_delayed_ref(ref); 1957 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", 1958 ret); 1959 return ret; 1960 } 1961 1962 btrfs_put_delayed_ref(ref); 1963 cond_resched(); 1964 1965 spin_lock(&locked_ref->lock); 1966 btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref); 1967 } 1968 1969 return 0; 1970 } 1971 1972 /* 1973 * Returns 0 on success or if called with an already aborted transaction. 1974 * Returns -ENOMEM or -EIO on failure and will abort the transaction. 1975 */ 1976 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 1977 unsigned long nr) 1978 { 1979 struct btrfs_fs_info *fs_info = trans->fs_info; 1980 struct btrfs_delayed_ref_root *delayed_refs; 1981 struct btrfs_delayed_ref_head *locked_ref = NULL; 1982 ktime_t start = ktime_get(); 1983 int ret; 1984 unsigned long count = 0; 1985 unsigned long actual_count = 0; 1986 1987 delayed_refs = &trans->transaction->delayed_refs; 1988 do { 1989 if (!locked_ref) { 1990 locked_ref = btrfs_obtain_ref_head(trans); 1991 if (IS_ERR_OR_NULL(locked_ref)) { 1992 if (PTR_ERR(locked_ref) == -EAGAIN) { 1993 continue; 1994 } else { 1995 break; 1996 } 1997 } 1998 count++; 1999 } 2000 /* 2001 * We need to try and merge add/drops of the same ref since we 2002 * can run into issues with relocate dropping the implicit ref 2003 * and then it being added back again before the drop can 2004 * finish. 
If we merged anything we need to re-loop so we can 2005 * get a good ref. 2006 * Or we can get node references of the same type that weren't 2007 * merged when created due to bumps in the tree mod seq, and 2008 * we need to merge them to prevent adding an inline extent 2009 * backref before dropping it (triggering a BUG_ON at 2010 * insert_inline_extent_backref()). 2011 */ 2012 spin_lock(&locked_ref->lock); 2013 btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref); 2014 2015 ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, 2016 &actual_count); 2017 if (ret < 0 && ret != -EAGAIN) { 2018 /* 2019 * Error, btrfs_run_delayed_refs_for_head already 2020 * unlocked everything so just bail out 2021 */ 2022 return ret; 2023 } else if (!ret) { 2024 /* 2025 * Success, perform the usual cleanup of a processed 2026 * head 2027 */ 2028 ret = cleanup_ref_head(trans, locked_ref); 2029 if (ret > 0 ) { 2030 /* We dropped our lock, we need to loop. */ 2031 ret = 0; 2032 continue; 2033 } else if (ret) { 2034 return ret; 2035 } 2036 } 2037 2038 /* 2039 * Either success case or btrfs_run_delayed_refs_for_head 2040 * returned -EAGAIN, meaning we need to select another head 2041 */ 2042 2043 locked_ref = NULL; 2044 cond_resched(); 2045 } while ((nr != -1 && count < nr) || locked_ref); 2046 2047 /* 2048 * We don't want to include ref heads since we can have empty ref heads 2049 * and those will drastically skew our runtime down since we just do 2050 * accounting, no actual extent tree updates. 2051 */ 2052 if (actual_count > 0) { 2053 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start)); 2054 u64 avg; 2055 2056 /* 2057 * We weigh the current average higher than our current runtime 2058 * to avoid large swings in the average. 2059 */ 2060 spin_lock(&delayed_refs->lock); 2061 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime; 2062 fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */ 2063 spin_unlock(&delayed_refs->lock); 2064 } 2065 return 0; 2066 } 2067 2068 #ifdef SCRAMBLE_DELAYED_REFS 2069 /* 2070 * Normally delayed refs get processed in ascending bytenr order. This 2071 * correlates in most cases to the order added. To expose dependencies on this 2072 * order, we start to process the tree in the middle instead of the beginning 2073 */ 2074 static u64 find_middle(struct rb_root *root) 2075 { 2076 struct rb_node *n = root->rb_node; 2077 struct btrfs_delayed_ref_node *entry; 2078 int alt = 1; 2079 u64 middle; 2080 u64 first = 0, last = 0; 2081 2082 n = rb_first(root); 2083 if (n) { 2084 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2085 first = entry->bytenr; 2086 } 2087 n = rb_last(root); 2088 if (n) { 2089 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2090 last = entry->bytenr; 2091 } 2092 n = root->rb_node; 2093 2094 while (n) { 2095 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2096 WARN_ON(!entry->in_tree); 2097 2098 middle = entry->bytenr; 2099 2100 if (alt) 2101 n = n->rb_left; 2102 else 2103 n = n->rb_right; 2104 2105 alt = 1 - alt; 2106 } 2107 return middle; 2108 } 2109 #endif 2110 2111 /* 2112 * this starts processing the delayed reference count updates and 2113 * extent insertions we have queued up so far. count can be 2114 * 0, which means to process everything in the tree at the start 2115 * of the run (but not newly added entries), or it can be some target 2116 * number you'd like to process. 
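 *
 * A count of (unsigned long)-1 means: keep going until no delayed refs are
 * left at all (the run_all case below). As an illustrative sketch (not a
 * verbatim caller from this file), flushing everything would look like:
 *
 *	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
 *	if (ret < 0)
 *		return ret;
 *
 * where a negative return means the transaction has already been aborted
 * for us.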
2117 * 2118 * Returns 0 on success or if called with an aborted transaction 2119 * Returns <0 on error and aborts the transaction 2120 */ 2121 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2122 unsigned long count) 2123 { 2124 struct btrfs_fs_info *fs_info = trans->fs_info; 2125 struct rb_node *node; 2126 struct btrfs_delayed_ref_root *delayed_refs; 2127 struct btrfs_delayed_ref_head *head; 2128 int ret; 2129 int run_all = count == (unsigned long)-1; 2130 2131 /* We'll clean this up in btrfs_cleanup_transaction */ 2132 if (TRANS_ABORTED(trans)) 2133 return 0; 2134 2135 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) 2136 return 0; 2137 2138 delayed_refs = &trans->transaction->delayed_refs; 2139 if (count == 0) 2140 count = delayed_refs->num_heads_ready; 2141 2142 again: 2143 #ifdef SCRAMBLE_DELAYED_REFS 2144 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2145 #endif 2146 ret = __btrfs_run_delayed_refs(trans, count); 2147 if (ret < 0) { 2148 btrfs_abort_transaction(trans, ret); 2149 return ret; 2150 } 2151 2152 if (run_all) { 2153 btrfs_create_pending_block_groups(trans); 2154 2155 spin_lock(&delayed_refs->lock); 2156 node = rb_first_cached(&delayed_refs->href_root); 2157 if (!node) { 2158 spin_unlock(&delayed_refs->lock); 2159 goto out; 2160 } 2161 head = rb_entry(node, struct btrfs_delayed_ref_head, 2162 href_node); 2163 refcount_inc(&head->refs); 2164 spin_unlock(&delayed_refs->lock); 2165 2166 /* Mutex was contended, block until it's released and retry. */ 2167 mutex_lock(&head->mutex); 2168 mutex_unlock(&head->mutex); 2169 2170 btrfs_put_delayed_ref_head(head); 2171 cond_resched(); 2172 goto again; 2173 } 2174 out: 2175 return 0; 2176 } 2177 2178 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2179 struct extent_buffer *eb, u64 flags, 2180 int level) 2181 { 2182 struct btrfs_delayed_extent_op *extent_op; 2183 int ret; 2184 2185 extent_op = btrfs_alloc_delayed_extent_op(); 2186 if (!extent_op) 2187 return -ENOMEM; 2188 2189 extent_op->flags_to_set = flags; 2190 extent_op->update_flags = true; 2191 extent_op->update_key = false; 2192 extent_op->level = level; 2193 2194 ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op); 2195 if (ret) 2196 btrfs_free_delayed_extent_op(extent_op); 2197 return ret; 2198 } 2199 2200 static noinline int check_delayed_ref(struct btrfs_root *root, 2201 struct btrfs_path *path, 2202 u64 objectid, u64 offset, u64 bytenr) 2203 { 2204 struct btrfs_delayed_ref_head *head; 2205 struct btrfs_delayed_ref_node *ref; 2206 struct btrfs_delayed_data_ref *data_ref; 2207 struct btrfs_delayed_ref_root *delayed_refs; 2208 struct btrfs_transaction *cur_trans; 2209 struct rb_node *node; 2210 int ret = 0; 2211 2212 spin_lock(&root->fs_info->trans_lock); 2213 cur_trans = root->fs_info->running_transaction; 2214 if (cur_trans) 2215 refcount_inc(&cur_trans->use_count); 2216 spin_unlock(&root->fs_info->trans_lock); 2217 if (!cur_trans) 2218 return 0; 2219 2220 delayed_refs = &cur_trans->delayed_refs; 2221 spin_lock(&delayed_refs->lock); 2222 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); 2223 if (!head) { 2224 spin_unlock(&delayed_refs->lock); 2225 btrfs_put_transaction(cur_trans); 2226 return 0; 2227 } 2228 2229 if (!mutex_trylock(&head->mutex)) { 2230 if (path->nowait) { 2231 spin_unlock(&delayed_refs->lock); 2232 btrfs_put_transaction(cur_trans); 2233 return -EAGAIN; 2234 } 2235 2236 refcount_inc(&head->refs); 2237 spin_unlock(&delayed_refs->lock); 2238 2239 
btrfs_release_path(path); 2240 2241 /* 2242 * Mutex was contended, block until it's released and let 2243 * caller try again 2244 */ 2245 mutex_lock(&head->mutex); 2246 mutex_unlock(&head->mutex); 2247 btrfs_put_delayed_ref_head(head); 2248 btrfs_put_transaction(cur_trans); 2249 return -EAGAIN; 2250 } 2251 spin_unlock(&delayed_refs->lock); 2252 2253 spin_lock(&head->lock); 2254 /* 2255 * XXX: We should replace this with a proper search function in the 2256 * future. 2257 */ 2258 for (node = rb_first_cached(&head->ref_tree); node; 2259 node = rb_next(node)) { 2260 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); 2261 /* If it's a shared ref we know a cross reference exists */ 2262 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { 2263 ret = 1; 2264 break; 2265 } 2266 2267 data_ref = btrfs_delayed_node_to_data_ref(ref); 2268 2269 /* 2270 * If our ref doesn't match the one we're currently looking at 2271 * then we have a cross reference. 2272 */ 2273 if (data_ref->root != root->root_key.objectid || 2274 data_ref->objectid != objectid || 2275 data_ref->offset != offset) { 2276 ret = 1; 2277 break; 2278 } 2279 } 2280 spin_unlock(&head->lock); 2281 mutex_unlock(&head->mutex); 2282 btrfs_put_transaction(cur_trans); 2283 return ret; 2284 } 2285 2286 static noinline int check_committed_ref(struct btrfs_root *root, 2287 struct btrfs_path *path, 2288 u64 objectid, u64 offset, u64 bytenr, 2289 bool strict) 2290 { 2291 struct btrfs_fs_info *fs_info = root->fs_info; 2292 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); 2293 struct extent_buffer *leaf; 2294 struct btrfs_extent_data_ref *ref; 2295 struct btrfs_extent_inline_ref *iref; 2296 struct btrfs_extent_item *ei; 2297 struct btrfs_key key; 2298 u32 item_size; 2299 int type; 2300 int ret; 2301 2302 key.objectid = bytenr; 2303 key.offset = (u64)-1; 2304 key.type = BTRFS_EXTENT_ITEM_KEY; 2305 2306 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2307 if (ret < 0) 2308 goto out; 2309 BUG_ON(ret == 0); /* Corruption */ 2310 2311 ret = -ENOENT; 2312 if (path->slots[0] == 0) 2313 goto out; 2314 2315 path->slots[0]--; 2316 leaf = path->nodes[0]; 2317 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2318 2319 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) 2320 goto out; 2321 2322 ret = 1; 2323 item_size = btrfs_item_size(leaf, path->slots[0]); 2324 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2325 2326 /* If extent item has more than 1 inline ref then it's shared */ 2327 if (item_size != sizeof(*ei) + 2328 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) 2329 goto out; 2330 2331 /* 2332 * If extent created before last snapshot => it's shared unless the 2333 * snapshot has been deleted. Use the heuristic if strict is false. 
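	 *
	 * (With strict == true the generation heuristic is skipped and we
	 * fall through to the exact backref match below.)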
2334 */ 2335 if (!strict && 2336 (btrfs_extent_generation(leaf, ei) <= 2337 btrfs_root_last_snapshot(&root->root_item))) 2338 goto out; 2339 2340 iref = (struct btrfs_extent_inline_ref *)(ei + 1); 2341 2342 /* If this extent has SHARED_DATA_REF then it's shared */ 2343 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); 2344 if (type != BTRFS_EXTENT_DATA_REF_KEY) 2345 goto out; 2346 2347 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 2348 if (btrfs_extent_refs(leaf, ei) != 2349 btrfs_extent_data_ref_count(leaf, ref) || 2350 btrfs_extent_data_ref_root(leaf, ref) != 2351 root->root_key.objectid || 2352 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || 2353 btrfs_extent_data_ref_offset(leaf, ref) != offset) 2354 goto out; 2355 2356 ret = 0; 2357 out: 2358 return ret; 2359 } 2360 2361 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, 2362 u64 bytenr, bool strict, struct btrfs_path *path) 2363 { 2364 int ret; 2365 2366 do { 2367 ret = check_committed_ref(root, path, objectid, 2368 offset, bytenr, strict); 2369 if (ret && ret != -ENOENT) 2370 goto out; 2371 2372 ret = check_delayed_ref(root, path, objectid, offset, bytenr); 2373 } while (ret == -EAGAIN); 2374 2375 out: 2376 btrfs_release_path(path); 2377 if (btrfs_is_data_reloc_root(root)) 2378 WARN_ON(ret > 0); 2379 return ret; 2380 } 2381 2382 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, 2383 struct btrfs_root *root, 2384 struct extent_buffer *buf, 2385 int full_backref, int inc) 2386 { 2387 struct btrfs_fs_info *fs_info = root->fs_info; 2388 u64 bytenr; 2389 u64 num_bytes; 2390 u64 parent; 2391 u64 ref_root; 2392 u32 nritems; 2393 struct btrfs_key key; 2394 struct btrfs_file_extent_item *fi; 2395 struct btrfs_ref generic_ref = { 0 }; 2396 bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC); 2397 int i; 2398 int action; 2399 int level; 2400 int ret = 0; 2401 2402 if (btrfs_is_testing(fs_info)) 2403 return 0; 2404 2405 ref_root = btrfs_header_owner(buf); 2406 nritems = btrfs_header_nritems(buf); 2407 level = btrfs_header_level(buf); 2408 2409 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) 2410 return 0; 2411 2412 if (full_backref) 2413 parent = buf->start; 2414 else 2415 parent = 0; 2416 if (inc) 2417 action = BTRFS_ADD_DELAYED_REF; 2418 else 2419 action = BTRFS_DROP_DELAYED_REF; 2420 2421 for (i = 0; i < nritems; i++) { 2422 if (level == 0) { 2423 btrfs_item_key_to_cpu(buf, &key, i); 2424 if (key.type != BTRFS_EXTENT_DATA_KEY) 2425 continue; 2426 fi = btrfs_item_ptr(buf, i, 2427 struct btrfs_file_extent_item); 2428 if (btrfs_file_extent_type(buf, fi) == 2429 BTRFS_FILE_EXTENT_INLINE) 2430 continue; 2431 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 2432 if (bytenr == 0) 2433 continue; 2434 2435 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); 2436 key.offset -= btrfs_file_extent_offset(buf, fi); 2437 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2438 num_bytes, parent); 2439 btrfs_init_data_ref(&generic_ref, ref_root, key.objectid, 2440 key.offset, root->root_key.objectid, 2441 for_reloc); 2442 if (inc) 2443 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2444 else 2445 ret = btrfs_free_extent(trans, &generic_ref); 2446 if (ret) 2447 goto fail; 2448 } else { 2449 bytenr = btrfs_node_blockptr(buf, i); 2450 num_bytes = fs_info->nodesize; 2451 btrfs_init_generic_ref(&generic_ref, action, bytenr, 2452 num_bytes, parent); 2453 btrfs_init_tree_ref(&generic_ref, level - 1, ref_root, 2454 root->root_key.objectid, for_reloc); 
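			/*
			 * Queue the matching add/drop delayed ref for the
			 * child tree block this slot points to.
			 */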
2455 if (inc) 2456 ret = btrfs_inc_extent_ref(trans, &generic_ref); 2457 else 2458 ret = btrfs_free_extent(trans, &generic_ref); 2459 if (ret) 2460 goto fail; 2461 } 2462 } 2463 return 0; 2464 fail: 2465 return ret; 2466 } 2467 2468 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2469 struct extent_buffer *buf, int full_backref) 2470 { 2471 return __btrfs_mod_ref(trans, root, buf, full_backref, 1); 2472 } 2473 2474 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2475 struct extent_buffer *buf, int full_backref) 2476 { 2477 return __btrfs_mod_ref(trans, root, buf, full_backref, 0); 2478 } 2479 2480 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) 2481 { 2482 struct btrfs_fs_info *fs_info = root->fs_info; 2483 u64 flags; 2484 u64 ret; 2485 2486 if (data) 2487 flags = BTRFS_BLOCK_GROUP_DATA; 2488 else if (root == fs_info->chunk_root) 2489 flags = BTRFS_BLOCK_GROUP_SYSTEM; 2490 else 2491 flags = BTRFS_BLOCK_GROUP_METADATA; 2492 2493 ret = btrfs_get_alloc_profile(fs_info, flags); 2494 return ret; 2495 } 2496 2497 static u64 first_logical_byte(struct btrfs_fs_info *fs_info) 2498 { 2499 struct rb_node *leftmost; 2500 u64 bytenr = 0; 2501 2502 read_lock(&fs_info->block_group_cache_lock); 2503 /* Get the block group with the lowest logical start address. */ 2504 leftmost = rb_first_cached(&fs_info->block_group_cache_tree); 2505 if (leftmost) { 2506 struct btrfs_block_group *bg; 2507 2508 bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); 2509 bytenr = bg->start; 2510 } 2511 read_unlock(&fs_info->block_group_cache_lock); 2512 2513 return bytenr; 2514 } 2515 2516 static int pin_down_extent(struct btrfs_trans_handle *trans, 2517 struct btrfs_block_group *cache, 2518 u64 bytenr, u64 num_bytes, int reserved) 2519 { 2520 struct btrfs_fs_info *fs_info = cache->fs_info; 2521 2522 spin_lock(&cache->space_info->lock); 2523 spin_lock(&cache->lock); 2524 cache->pinned += num_bytes; 2525 btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info, 2526 num_bytes); 2527 if (reserved) { 2528 cache->reserved -= num_bytes; 2529 cache->space_info->bytes_reserved -= num_bytes; 2530 } 2531 spin_unlock(&cache->lock); 2532 spin_unlock(&cache->space_info->lock); 2533 2534 set_extent_dirty(&trans->transaction->pinned_extents, bytenr, 2535 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); 2536 return 0; 2537 } 2538 2539 int btrfs_pin_extent(struct btrfs_trans_handle *trans, 2540 u64 bytenr, u64 num_bytes, int reserved) 2541 { 2542 struct btrfs_block_group *cache; 2543 2544 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2545 BUG_ON(!cache); /* Logic error */ 2546 2547 pin_down_extent(trans, cache, bytenr, num_bytes, reserved); 2548 2549 btrfs_put_block_group(cache); 2550 return 0; 2551 } 2552 2553 /* 2554 * this function must be called within transaction 2555 */ 2556 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, 2557 u64 bytenr, u64 num_bytes) 2558 { 2559 struct btrfs_block_group *cache; 2560 int ret; 2561 2562 cache = btrfs_lookup_block_group(trans->fs_info, bytenr); 2563 if (!cache) 2564 return -EINVAL; 2565 2566 /* 2567 * Fully cache the free space first so that our pin removes the free space 2568 * from the cache. 
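	 *
	 * (btrfs_cache_block_group(cache, true) below waits for caching to
	 * finish, so btrfs_remove_free_space() will actually find the range
	 * we are about to pin.)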
2569 */ 2570 ret = btrfs_cache_block_group(cache, true); 2571 if (ret) 2572 goto out; 2573 2574 pin_down_extent(trans, cache, bytenr, num_bytes, 0); 2575 2576 /* remove us from the free space cache (if we're there at all) */ 2577 ret = btrfs_remove_free_space(cache, bytenr, num_bytes); 2578 out: 2579 btrfs_put_block_group(cache); 2580 return ret; 2581 } 2582 2583 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, 2584 u64 start, u64 num_bytes) 2585 { 2586 int ret; 2587 struct btrfs_block_group *block_group; 2588 2589 block_group = btrfs_lookup_block_group(fs_info, start); 2590 if (!block_group) 2591 return -EINVAL; 2592 2593 ret = btrfs_cache_block_group(block_group, true); 2594 if (ret) 2595 goto out; 2596 2597 ret = btrfs_remove_free_space(block_group, start, num_bytes); 2598 out: 2599 btrfs_put_block_group(block_group); 2600 return ret; 2601 } 2602 2603 int btrfs_exclude_logged_extents(struct extent_buffer *eb) 2604 { 2605 struct btrfs_fs_info *fs_info = eb->fs_info; 2606 struct btrfs_file_extent_item *item; 2607 struct btrfs_key key; 2608 int found_type; 2609 int i; 2610 int ret = 0; 2611 2612 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) 2613 return 0; 2614 2615 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2616 btrfs_item_key_to_cpu(eb, &key, i); 2617 if (key.type != BTRFS_EXTENT_DATA_KEY) 2618 continue; 2619 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 2620 found_type = btrfs_file_extent_type(eb, item); 2621 if (found_type == BTRFS_FILE_EXTENT_INLINE) 2622 continue; 2623 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 2624 continue; 2625 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); 2626 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); 2627 ret = __exclude_logged_extent(fs_info, key.objectid, key.offset); 2628 if (ret) 2629 break; 2630 } 2631 2632 return ret; 2633 } 2634 2635 static void 2636 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) 2637 { 2638 atomic_inc(&bg->reservations); 2639 } 2640 2641 /* 2642 * Returns the free cluster for the given space info and sets empty_cluster to 2643 * what it should be based on the mount options. 
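 *
 * (Concretely, per the logic below: metadata gets a 2M empty_cluster on
 * SSD and 64K otherwise; data gets a cluster only with the ssd_spread
 * mount option, also 2M.)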
2644 */ 2645 static struct btrfs_free_cluster * 2646 fetch_cluster_info(struct btrfs_fs_info *fs_info, 2647 struct btrfs_space_info *space_info, u64 *empty_cluster) 2648 { 2649 struct btrfs_free_cluster *ret = NULL; 2650 2651 *empty_cluster = 0; 2652 if (btrfs_mixed_space_info(space_info)) 2653 return ret; 2654 2655 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 2656 ret = &fs_info->meta_alloc_cluster; 2657 if (btrfs_test_opt(fs_info, SSD)) 2658 *empty_cluster = SZ_2M; 2659 else 2660 *empty_cluster = SZ_64K; 2661 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && 2662 btrfs_test_opt(fs_info, SSD_SPREAD)) { 2663 *empty_cluster = SZ_2M; 2664 ret = &fs_info->data_alloc_cluster; 2665 } 2666 2667 return ret; 2668 } 2669 2670 static int unpin_extent_range(struct btrfs_fs_info *fs_info, 2671 u64 start, u64 end, 2672 const bool return_free_space) 2673 { 2674 struct btrfs_block_group *cache = NULL; 2675 struct btrfs_space_info *space_info; 2676 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 2677 struct btrfs_free_cluster *cluster = NULL; 2678 u64 len; 2679 u64 total_unpinned = 0; 2680 u64 empty_cluster = 0; 2681 bool readonly; 2682 2683 while (start <= end) { 2684 readonly = false; 2685 if (!cache || 2686 start >= cache->start + cache->length) { 2687 if (cache) 2688 btrfs_put_block_group(cache); 2689 total_unpinned = 0; 2690 cache = btrfs_lookup_block_group(fs_info, start); 2691 BUG_ON(!cache); /* Logic error */ 2692 2693 cluster = fetch_cluster_info(fs_info, 2694 cache->space_info, 2695 &empty_cluster); 2696 empty_cluster <<= 1; 2697 } 2698 2699 len = cache->start + cache->length - start; 2700 len = min(len, end + 1 - start); 2701 2702 if (return_free_space) 2703 btrfs_add_free_space(cache, start, len); 2704 2705 start += len; 2706 total_unpinned += len; 2707 space_info = cache->space_info; 2708 2709 /* 2710 * If this space cluster has been marked as fragmented and we've 2711 * unpinned enough in this block group to potentially allow a 2712 * cluster to be created inside of it go ahead and clear the 2713 * fragmented check. 
2714 */ 2715 if (cluster && cluster->fragmented && 2716 total_unpinned > empty_cluster) { 2717 spin_lock(&cluster->lock); 2718 cluster->fragmented = 0; 2719 spin_unlock(&cluster->lock); 2720 } 2721 2722 spin_lock(&space_info->lock); 2723 spin_lock(&cache->lock); 2724 cache->pinned -= len; 2725 btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); 2726 space_info->max_extent_size = 0; 2727 if (cache->ro) { 2728 space_info->bytes_readonly += len; 2729 readonly = true; 2730 } else if (btrfs_is_zoned(fs_info)) { 2731 /* Need reset before reusing in a zoned block group */ 2732 space_info->bytes_zone_unusable += len; 2733 readonly = true; 2734 } 2735 spin_unlock(&cache->lock); 2736 if (!readonly && return_free_space && 2737 global_rsv->space_info == space_info) { 2738 spin_lock(&global_rsv->lock); 2739 if (!global_rsv->full) { 2740 u64 to_add = min(len, global_rsv->size - 2741 global_rsv->reserved); 2742 2743 global_rsv->reserved += to_add; 2744 btrfs_space_info_update_bytes_may_use(fs_info, 2745 space_info, to_add); 2746 if (global_rsv->reserved >= global_rsv->size) 2747 global_rsv->full = 1; 2748 len -= to_add; 2749 } 2750 spin_unlock(&global_rsv->lock); 2751 } 2752 /* Add to any tickets we may have */ 2753 if (!readonly && return_free_space && len) 2754 btrfs_try_granting_tickets(fs_info, space_info); 2755 spin_unlock(&space_info->lock); 2756 } 2757 2758 if (cache) 2759 btrfs_put_block_group(cache); 2760 return 0; 2761 } 2762 2763 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) 2764 { 2765 struct btrfs_fs_info *fs_info = trans->fs_info; 2766 struct btrfs_block_group *block_group, *tmp; 2767 struct list_head *deleted_bgs; 2768 struct extent_io_tree *unpin; 2769 u64 start; 2770 u64 end; 2771 int ret; 2772 2773 unpin = &trans->transaction->pinned_extents; 2774 2775 while (!TRANS_ABORTED(trans)) { 2776 struct extent_state *cached_state = NULL; 2777 2778 mutex_lock(&fs_info->unused_bg_unpin_mutex); 2779 ret = find_first_extent_bit(unpin, 0, &start, &end, 2780 EXTENT_DIRTY, &cached_state); 2781 if (ret) { 2782 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2783 break; 2784 } 2785 2786 if (btrfs_test_opt(fs_info, DISCARD_SYNC)) 2787 ret = btrfs_discard_extent(fs_info, start, 2788 end + 1 - start, NULL); 2789 2790 clear_extent_dirty(unpin, start, end, &cached_state); 2791 unpin_extent_range(fs_info, start, end, true); 2792 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 2793 free_extent_state(cached_state); 2794 cond_resched(); 2795 } 2796 2797 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { 2798 btrfs_discard_calc_delay(&fs_info->discard_ctl); 2799 btrfs_discard_schedule_work(&fs_info->discard_ctl, true); 2800 } 2801 2802 /* 2803 * Transaction is finished. We don't need the lock anymore. We 2804 * do need to clean up the block groups in case of a transaction 2805 * abort. 
 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!TRANS_ABORTED(trans))
			ret = btrfs_discard_extent(fs_info,
						   block_group->start,
						   block_group->length,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_unfreeze_block_group(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
				   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
		}
	}

	return 0;
}

static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
				     u64 bytenr, u64 num_bytes, bool is_data)
{
	int ret;

	if (is_data) {
		struct btrfs_root *csum_root;

		csum_root = btrfs_csum_root(trans->fs_info, bytenr);
		ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	ret = add_to_free_space_tree(trans, bytenr, num_bytes);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

/*
 * Drop one or more refs of @node.
 *
 * 1. Locate the extent refs.
 *    It's either inline in EXTENT/METADATA_ITEM or in a keyed SHARED_* item.
 *    Locate it, then reduce the refs number or remove the ref line completely.
 *
 * 2. Update the refs count in EXTENT/METADATA_ITEM.
 *
 * Inline backref case:
 *
 * in extent tree we have:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
 *		refs 2 gen 6 flags DATA
 *		extent data backref root FS_TREE objectid 258 offset 0 count 1
 *		extent data backref root FS_TREE objectid 257 offset 0 count 1
 *
 * This function gets called with:
 *
 *	node->bytenr = 13631488
 *	node->num_bytes = 1048576
 *	root_objectid = FS_TREE
 *	owner_objectid = 257
 *	owner_offset = 0
 *	refs_to_drop = 1
 *
 * Then we should get something like:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
 *		refs 1 gen 6 flags DATA
 *		extent data backref root FS_TREE objectid 258 offset 0 count 1
 *
 * Keyed backref case:
 *
 * in extent tree we have:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
 *		refs 754 gen 6 flags DATA
 *	[...]
 *	item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28
 *		extent data backref root FS_TREE objectid 866 offset 0 count 1
 *
 * This function gets called with:
 *
 *	node->bytenr = 13631488
 *	node->num_bytes = 1048576
 *	root_objectid = FS_TREE
 *	owner_objectid = 866
 *	owner_offset = 0
 *	refs_to_drop = 1
 *
 * Then we should get something like:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
 *		refs 753 gen 6 flags DATA
 *
 * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
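 *
 * Illustrative path into this function (a sketch of the flow in this file,
 * not literal code):
 *
 *	__btrfs_run_delayed_refs()
 *	  btrfs_run_delayed_refs_for_head()
 *	    run_one_delayed_ref()
 *	      run_delayed_tree_ref() / run_delayed_data_ref()
 *	        __btrfs_free_extent()
 *
 * For tree blocks, run_delayed_tree_ref() above always passes
 * refs_to_drop == 1.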
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_root *extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);

	extent_root = btrfs_extent_root(info, bytenr);
	ASSERT(extent_root);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;

	if (!is_data && refs_to_drop != 1) {
		btrfs_crit(info,
"invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
			   node->bytenr, refs_to_drop);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (is_data)
		skinny_metadata = false;

	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
				    parent, root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		/*
		 * Either the inline backref or the SHARED_DATA_REF/
		 * SHARED_BLOCK_REF is found
		 *
		 * Here is a quick path to locate EXTENT/METADATA_ITEM.
		 * It's possible the EXTENT/METADATA_ITEM is near current slot.
		 */
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}

			/* Quick path didn't find the EXTENT/METADATA_ITEM */
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}

		if (!found_extent) {
			if (iref) {
				btrfs_crit(info,
"invalid iref, no EXTENT/METADATA_ITEM found but has inline extent ref");
				btrfs_abort_transaction(trans, -EUCLEAN);
				goto err_dump;
			}
			/* Must be SHARED_* item, remove the backref first */
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop, is_data);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);

			/* Slow path to locate EXTENT/METADATA_ITEM */
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
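				 * (A skinny metadata item has the key
				 * (bytenr, METADATA_ITEM, level), while the
				 * old style is (bytenr, EXTENT_ITEM,
				 * nodesize); the fallback below checks for
				 * the latter.)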
3032 */ 3033 path->slots[0]--; 3034 btrfs_item_key_to_cpu(path->nodes[0], &key, 3035 path->slots[0]); 3036 if (key.objectid == bytenr && 3037 key.type == BTRFS_EXTENT_ITEM_KEY && 3038 key.offset == num_bytes) 3039 ret = 0; 3040 } 3041 3042 if (ret > 0 && skinny_metadata) { 3043 skinny_metadata = false; 3044 key.objectid = bytenr; 3045 key.type = BTRFS_EXTENT_ITEM_KEY; 3046 key.offset = num_bytes; 3047 btrfs_release_path(path); 3048 ret = btrfs_search_slot(trans, extent_root, 3049 &key, path, -1, 1); 3050 } 3051 3052 if (ret) { 3053 btrfs_err(info, 3054 "umm, got %d back from search, was looking for %llu", 3055 ret, bytenr); 3056 if (ret > 0) 3057 btrfs_print_leaf(path->nodes[0]); 3058 } 3059 if (ret < 0) { 3060 btrfs_abort_transaction(trans, ret); 3061 goto out; 3062 } 3063 extent_slot = path->slots[0]; 3064 } 3065 } else if (WARN_ON(ret == -ENOENT)) { 3066 btrfs_print_leaf(path->nodes[0]); 3067 btrfs_err(info, 3068 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu", 3069 bytenr, parent, root_objectid, owner_objectid, 3070 owner_offset); 3071 btrfs_abort_transaction(trans, ret); 3072 goto out; 3073 } else { 3074 btrfs_abort_transaction(trans, ret); 3075 goto out; 3076 } 3077 3078 leaf = path->nodes[0]; 3079 item_size = btrfs_item_size(leaf, extent_slot); 3080 if (unlikely(item_size < sizeof(*ei))) { 3081 ret = -EINVAL; 3082 btrfs_print_v0_err(info); 3083 btrfs_abort_transaction(trans, ret); 3084 goto out; 3085 } 3086 ei = btrfs_item_ptr(leaf, extent_slot, 3087 struct btrfs_extent_item); 3088 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && 3089 key.type == BTRFS_EXTENT_ITEM_KEY) { 3090 struct btrfs_tree_block_info *bi; 3091 if (item_size < sizeof(*ei) + sizeof(*bi)) { 3092 btrfs_crit(info, 3093 "invalid extent item size for key (%llu, %u, %llu) owner %llu, has %u expect >= %zu", 3094 key.objectid, key.type, key.offset, 3095 owner_objectid, item_size, 3096 sizeof(*ei) + sizeof(*bi)); 3097 btrfs_abort_transaction(trans, -EUCLEAN); 3098 goto err_dump; 3099 } 3100 bi = (struct btrfs_tree_block_info *)(ei + 1); 3101 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); 3102 } 3103 3104 refs = btrfs_extent_refs(leaf, ei); 3105 if (refs < refs_to_drop) { 3106 btrfs_crit(info, 3107 "trying to drop %d refs but we only have %llu for bytenr %llu", 3108 refs_to_drop, refs, bytenr); 3109 btrfs_abort_transaction(trans, -EUCLEAN); 3110 goto err_dump; 3111 } 3112 refs -= refs_to_drop; 3113 3114 if (refs > 0) { 3115 if (extent_op) 3116 __run_delayed_extent_op(extent_op, leaf, ei); 3117 /* 3118 * In the case of inline back ref, reference count will 3119 * be updated by remove_extent_backref 3120 */ 3121 if (iref) { 3122 if (!found_extent) { 3123 btrfs_crit(info, 3124 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found"); 3125 btrfs_abort_transaction(trans, -EUCLEAN); 3126 goto err_dump; 3127 } 3128 } else { 3129 btrfs_set_extent_refs(leaf, ei, refs); 3130 btrfs_mark_buffer_dirty(leaf); 3131 } 3132 if (found_extent) { 3133 ret = remove_extent_backref(trans, extent_root, path, 3134 iref, refs_to_drop, is_data); 3135 if (ret) { 3136 btrfs_abort_transaction(trans, ret); 3137 goto out; 3138 } 3139 } 3140 } else { 3141 /* In this branch refs == 1 */ 3142 if (found_extent) { 3143 if (is_data && refs_to_drop != 3144 extent_data_ref_count(path, iref)) { 3145 btrfs_crit(info, 3146 "invalid refs_to_drop, current refs %u refs_to_drop %u", 3147 extent_data_ref_count(path, iref), 3148 refs_to_drop); 3149 btrfs_abort_transaction(trans, -EUCLEAN); 3150 goto err_dump; 
			}
			if (iref) {
				if (path->slots[0] != extent_slot) {
					btrfs_crit(info,
"invalid iref, extent item key (%llu %u %llu) doesn't have wanted iref",
						   key.objectid, key.type,
						   key.offset);
					btrfs_abort_transaction(trans, -EUCLEAN);
					goto err_dump;
				}
			} else {
				/*
				 * No inline ref, we must be at SHARED_* item,
				 * and it's a single ref, it must be:
				 * |	extent_slot  ||extent_slot + 1|
				 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
				 */
				if (path->slots[0] != extent_slot + 1) {
					btrfs_crit(info,
	"invalid SHARED_* item, previous item is not EXTENT/METADATA_ITEM");
					btrfs_abort_transaction(trans, -EUCLEAN);
					goto err_dump;
				}
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_release_path(path);

		ret = do_free_extent_accounting(trans, bytenr, num_bytes, is_data);
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
err_dump:
	/*
	 * Leaf dump can take up a lot of log buffer, so we only do full leaf
	 * dump for debug build.
	 */
	if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
		btrfs_crit(info, "path->slots[0]=%d extent_slot=%d",
			   path->slots[0], extent_slot);
		btrfs_print_leaf(path->nodes[0]);
	}

	btrfs_free_path(path);
	return -EUCLEAN;
}

/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		goto out;

	if (cleanup_extent_op(head) != NULL)
		goto out;

	/*
	 * waiting for the lock here would deadlock. 
If someone else has it 3237 * locked they are already in the process of dropping it anyway 3238 */ 3239 if (!mutex_trylock(&head->mutex)) 3240 goto out; 3241 3242 btrfs_delete_ref_head(delayed_refs, head); 3243 head->processing = 0; 3244 3245 spin_unlock(&head->lock); 3246 spin_unlock(&delayed_refs->lock); 3247 3248 BUG_ON(head->extent_op); 3249 if (head->must_insert_reserved) 3250 ret = 1; 3251 3252 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); 3253 mutex_unlock(&head->mutex); 3254 btrfs_put_delayed_ref_head(head); 3255 return ret; 3256 out: 3257 spin_unlock(&head->lock); 3258 3259 out_delayed_unlock: 3260 spin_unlock(&delayed_refs->lock); 3261 return 0; 3262 } 3263 3264 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3265 u64 root_id, 3266 struct extent_buffer *buf, 3267 u64 parent, int last_ref) 3268 { 3269 struct btrfs_fs_info *fs_info = trans->fs_info; 3270 struct btrfs_ref generic_ref = { 0 }; 3271 int ret; 3272 3273 btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, 3274 buf->start, buf->len, parent); 3275 btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 3276 root_id, 0, false); 3277 3278 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3279 btrfs_ref_tree_mod(fs_info, &generic_ref); 3280 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL); 3281 BUG_ON(ret); /* -ENOMEM */ 3282 } 3283 3284 if (last_ref && btrfs_header_generation(buf) == trans->transid) { 3285 struct btrfs_block_group *cache; 3286 bool must_pin = false; 3287 3288 if (root_id != BTRFS_TREE_LOG_OBJECTID) { 3289 ret = check_ref_cleanup(trans, buf->start); 3290 if (!ret) { 3291 btrfs_redirty_list_add(trans->transaction, buf); 3292 goto out; 3293 } 3294 } 3295 3296 cache = btrfs_lookup_block_group(fs_info, buf->start); 3297 3298 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 3299 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3300 btrfs_put_block_group(cache); 3301 goto out; 3302 } 3303 3304 /* 3305 * If there are tree mod log users we may have recorded mod log 3306 * operations for this node. If we re-allocate this node we 3307 * could replay operations on this node that happened when it 3308 * existed in a completely different root. For example if it 3309 * was part of root A, then was reallocated to root B, and we 3310 * are doing a btrfs_old_search_slot(root b), we could replay 3311 * operations that happened when the block was part of root A, 3312 * giving us an inconsistent view of the btree. 3313 * 3314 * We are safe from races here because at this point no other 3315 * node or root points to this extent buffer, so if after this 3316 * check a new tree mod log user joins we will not have an 3317 * existing log of operations on this node that we have to 3318 * contend with. 3319 */ 3320 if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) 3321 must_pin = true; 3322 3323 if (must_pin || btrfs_is_zoned(fs_info)) { 3324 btrfs_redirty_list_add(trans->transaction, buf); 3325 pin_down_extent(trans, cache, buf->start, buf->len, 1); 3326 btrfs_put_block_group(cache); 3327 goto out; 3328 } 3329 3330 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 3331 3332 btrfs_add_free_space(cache, buf->start, buf->len); 3333 btrfs_free_reserved_bytes(cache, buf->len, 0); 3334 btrfs_put_block_group(cache); 3335 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); 3336 } 3337 out: 3338 if (last_ref) { 3339 /* 3340 * Deleting the buffer, clear the corrupt flag since it doesn't 3341 * matter anymore. 
3342 */ 3343 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); 3344 } 3345 } 3346 3347 /* Can return -ENOMEM */ 3348 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) 3349 { 3350 struct btrfs_fs_info *fs_info = trans->fs_info; 3351 int ret; 3352 3353 if (btrfs_is_testing(fs_info)) 3354 return 0; 3355 3356 /* 3357 * tree log blocks never actually go into the extent allocation 3358 * tree, just update pinning info and exit early. 3359 */ 3360 if ((ref->type == BTRFS_REF_METADATA && 3361 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3362 (ref->type == BTRFS_REF_DATA && 3363 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) { 3364 /* unlocks the pinned mutex */ 3365 btrfs_pin_extent(trans, ref->bytenr, ref->len, 1); 3366 ret = 0; 3367 } else if (ref->type == BTRFS_REF_METADATA) { 3368 ret = btrfs_add_delayed_tree_ref(trans, ref, NULL); 3369 } else { 3370 ret = btrfs_add_delayed_data_ref(trans, ref, 0); 3371 } 3372 3373 if (!((ref->type == BTRFS_REF_METADATA && 3374 ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || 3375 (ref->type == BTRFS_REF_DATA && 3376 ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID))) 3377 btrfs_ref_tree_mod(fs_info, ref); 3378 3379 return ret; 3380 } 3381 3382 enum btrfs_loop_type { 3383 LOOP_CACHING_NOWAIT, 3384 LOOP_CACHING_WAIT, 3385 LOOP_ALLOC_CHUNK, 3386 LOOP_NO_EMPTY_SIZE, 3387 }; 3388 3389 static inline void 3390 btrfs_lock_block_group(struct btrfs_block_group *cache, 3391 int delalloc) 3392 { 3393 if (delalloc) 3394 down_read(&cache->data_rwsem); 3395 } 3396 3397 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, 3398 int delalloc) 3399 { 3400 btrfs_get_block_group(cache); 3401 if (delalloc) 3402 down_read(&cache->data_rwsem); 3403 } 3404 3405 static struct btrfs_block_group *btrfs_lock_cluster( 3406 struct btrfs_block_group *block_group, 3407 struct btrfs_free_cluster *cluster, 3408 int delalloc) 3409 __acquires(&cluster->refill_lock) 3410 { 3411 struct btrfs_block_group *used_bg = NULL; 3412 3413 spin_lock(&cluster->refill_lock); 3414 while (1) { 3415 used_bg = cluster->block_group; 3416 if (!used_bg) 3417 return NULL; 3418 3419 if (used_bg == block_group) 3420 return used_bg; 3421 3422 btrfs_get_block_group(used_bg); 3423 3424 if (!delalloc) 3425 return used_bg; 3426 3427 if (down_read_trylock(&used_bg->data_rwsem)) 3428 return used_bg; 3429 3430 spin_unlock(&cluster->refill_lock); 3431 3432 /* We should only have one-level nested. */ 3433 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); 3434 3435 spin_lock(&cluster->refill_lock); 3436 if (used_bg == cluster->block_group) 3437 return used_bg; 3438 3439 up_read(&used_bg->data_rwsem); 3440 btrfs_put_block_group(used_bg); 3441 } 3442 } 3443 3444 static inline void 3445 btrfs_release_block_group(struct btrfs_block_group *cache, 3446 int delalloc) 3447 { 3448 if (delalloc) 3449 up_read(&cache->data_rwsem); 3450 btrfs_put_block_group(cache); 3451 } 3452 3453 enum btrfs_extent_allocation_policy { 3454 BTRFS_EXTENT_ALLOC_CLUSTERED, 3455 BTRFS_EXTENT_ALLOC_ZONED, 3456 }; 3457 3458 /* 3459 * Structure used internally for find_free_extent() function. Wraps needed 3460 * parameters. 
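 *
 * As an illustrative sketch (hypothetical values, not a caller from this
 * file), a clustered metadata allocation of a single tree block might
 * start out roughly as:
 *
 *	struct find_free_extent_ctl ffe_ctl = {
 *		.num_bytes	= fs_info->nodesize,
 *		.min_alloc_size	= fs_info->nodesize,
 *		.empty_size	= 0,
 *		.flags		= BTRFS_BLOCK_GROUP_METADATA,
 *		.policy		= BTRFS_EXTENT_ALLOC_CLUSTERED,
 *	};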
3461 */ 3462 struct find_free_extent_ctl { 3463 /* Basic allocation info */ 3464 u64 ram_bytes; 3465 u64 num_bytes; 3466 u64 min_alloc_size; 3467 u64 empty_size; 3468 u64 flags; 3469 int delalloc; 3470 3471 /* Where to start the search inside the bg */ 3472 u64 search_start; 3473 3474 /* For clustered allocation */ 3475 u64 empty_cluster; 3476 struct btrfs_free_cluster *last_ptr; 3477 bool use_cluster; 3478 3479 bool have_caching_bg; 3480 bool orig_have_caching_bg; 3481 3482 /* Allocation is called for tree-log */ 3483 bool for_treelog; 3484 3485 /* Allocation is called for data relocation */ 3486 bool for_data_reloc; 3487 3488 /* RAID index, converted from flags */ 3489 int index; 3490 3491 /* 3492 * Current loop number, check find_free_extent_update_loop() for details 3493 */ 3494 int loop; 3495 3496 /* 3497 * Whether we're refilling a cluster, if true we need to re-search 3498 * current block group but don't try to refill the cluster again. 3499 */ 3500 bool retry_clustered; 3501 3502 /* 3503 * Whether we're updating free space cache, if true we need to re-search 3504 * current block group but don't try updating free space cache again. 3505 */ 3506 bool retry_unclustered; 3507 3508 /* If current block group is cached */ 3509 int cached; 3510 3511 /* Max contiguous hole found */ 3512 u64 max_extent_size; 3513 3514 /* Total free space from free space cache, not always contiguous */ 3515 u64 total_free_space; 3516 3517 /* Found result */ 3518 u64 found_offset; 3519 3520 /* Hint where to start looking for an empty space */ 3521 u64 hint_byte; 3522 3523 /* Allocation policy */ 3524 enum btrfs_extent_allocation_policy policy; 3525 }; 3526 3527 3528 /* 3529 * Helper function for find_free_extent(). 3530 * 3531 * Return -ENOENT to inform caller that we need fallback to unclustered mode. 3532 * Return -EAGAIN to inform caller that we need to re-search this block group 3533 * Return >0 to inform caller that we find nothing 3534 * Return 0 means we have found a location and set ffe_ctl->found_offset. 3535 */ 3536 static int find_free_extent_clustered(struct btrfs_block_group *bg, 3537 struct find_free_extent_ctl *ffe_ctl, 3538 struct btrfs_block_group **cluster_bg_ret) 3539 { 3540 struct btrfs_block_group *cluster_bg; 3541 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3542 u64 aligned_cluster; 3543 u64 offset; 3544 int ret; 3545 3546 cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc); 3547 if (!cluster_bg) 3548 goto refill_cluster; 3549 if (cluster_bg != bg && (cluster_bg->ro || 3550 !block_group_bits(cluster_bg, ffe_ctl->flags))) 3551 goto release_cluster; 3552 3553 offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, 3554 ffe_ctl->num_bytes, cluster_bg->start, 3555 &ffe_ctl->max_extent_size); 3556 if (offset) { 3557 /* We have a block, we're done */ 3558 spin_unlock(&last_ptr->refill_lock); 3559 trace_btrfs_reserve_extent_cluster(cluster_bg, 3560 ffe_ctl->search_start, ffe_ctl->num_bytes); 3561 *cluster_bg_ret = cluster_bg; 3562 ffe_ctl->found_offset = offset; 3563 return 0; 3564 } 3565 WARN_ON(last_ptr->block_group != cluster_bg); 3566 3567 release_cluster: 3568 /* 3569 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new clusters, so 3570 * lets just skip it and let the allocator find whatever block it can 3571 * find. If we reach this point, we will have tried the cluster 3572 * allocator plenty of times and not have found anything, so we are 3573 * likely way too fragmented for the clustering stuff to find anything. 
3574 * 3575 * However, if the cluster is taken from the current block group, 3576 * release the cluster first, so that we stand a better chance of 3577 * succeeding in the unclustered allocation. 3578 */ 3579 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { 3580 spin_unlock(&last_ptr->refill_lock); 3581 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); 3582 return -ENOENT; 3583 } 3584 3585 /* This cluster didn't work out, free it and start over */ 3586 btrfs_return_cluster_to_free_space(NULL, last_ptr); 3587 3588 if (cluster_bg != bg) 3589 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); 3590 3591 refill_cluster: 3592 if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { 3593 spin_unlock(&last_ptr->refill_lock); 3594 return -ENOENT; 3595 } 3596 3597 aligned_cluster = max_t(u64, 3598 ffe_ctl->empty_cluster + ffe_ctl->empty_size, 3599 bg->full_stripe_len); 3600 ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start, 3601 ffe_ctl->num_bytes, aligned_cluster); 3602 if (ret == 0) { 3603 /* Now pull our allocation out of this cluster */ 3604 offset = btrfs_alloc_from_cluster(bg, last_ptr, 3605 ffe_ctl->num_bytes, ffe_ctl->search_start, 3606 &ffe_ctl->max_extent_size); 3607 if (offset) { 3608 /* We found one, proceed */ 3609 spin_unlock(&last_ptr->refill_lock); 3610 trace_btrfs_reserve_extent_cluster(bg, 3611 ffe_ctl->search_start, 3612 ffe_ctl->num_bytes); 3613 ffe_ctl->found_offset = offset; 3614 return 0; 3615 } 3616 } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && 3617 !ffe_ctl->retry_clustered) { 3618 spin_unlock(&last_ptr->refill_lock); 3619 3620 ffe_ctl->retry_clustered = true; 3621 btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + 3622 ffe_ctl->empty_cluster + ffe_ctl->empty_size); 3623 return -EAGAIN; 3624 } 3625 /* 3626 * At this point we either didn't find a cluster or we weren't able to 3627 * allocate a block from our cluster. Free the cluster we've been 3628 * trying to use, and go to the next block group. 3629 */ 3630 btrfs_return_cluster_to_free_space(NULL, last_ptr); 3631 spin_unlock(&last_ptr->refill_lock); 3632 return 1; 3633 } 3634 3635 /* 3636 * Return >0 to inform caller that we find nothing 3637 * Return 0 when we found an free extent and set ffe_ctrl->found_offset 3638 * Return -EAGAIN to inform caller that we need to re-search this block group 3639 */ 3640 static int find_free_extent_unclustered(struct btrfs_block_group *bg, 3641 struct find_free_extent_ctl *ffe_ctl) 3642 { 3643 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3644 u64 offset; 3645 3646 /* 3647 * We are doing an unclustered allocation, set the fragmented flag so 3648 * we don't bother trying to setup a cluster again until we get more 3649 * space. 
3650 */ 3651 if (unlikely(last_ptr)) { 3652 spin_lock(&last_ptr->lock); 3653 last_ptr->fragmented = 1; 3654 spin_unlock(&last_ptr->lock); 3655 } 3656 if (ffe_ctl->cached) { 3657 struct btrfs_free_space_ctl *free_space_ctl; 3658 3659 free_space_ctl = bg->free_space_ctl; 3660 spin_lock(&free_space_ctl->tree_lock); 3661 if (free_space_ctl->free_space < 3662 ffe_ctl->num_bytes + ffe_ctl->empty_cluster + 3663 ffe_ctl->empty_size) { 3664 ffe_ctl->total_free_space = max_t(u64, 3665 ffe_ctl->total_free_space, 3666 free_space_ctl->free_space); 3667 spin_unlock(&free_space_ctl->tree_lock); 3668 return 1; 3669 } 3670 spin_unlock(&free_space_ctl->tree_lock); 3671 } 3672 3673 offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start, 3674 ffe_ctl->num_bytes, ffe_ctl->empty_size, 3675 &ffe_ctl->max_extent_size); 3676 3677 /* 3678 * If we didn't find a chunk, and we haven't failed on this block group 3679 * before, and this block group is in the middle of caching and we are 3680 * ok with waiting, then go ahead and wait for progress to be made, and 3681 * set @retry_unclustered to true. 3682 * 3683 * If @retry_unclustered is true then we've already waited on this 3684 * block group once and should move on to the next block group. 3685 */ 3686 if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached && 3687 ffe_ctl->loop > LOOP_CACHING_NOWAIT) { 3688 btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + 3689 ffe_ctl->empty_size); 3690 ffe_ctl->retry_unclustered = true; 3691 return -EAGAIN; 3692 } else if (!offset) { 3693 return 1; 3694 } 3695 ffe_ctl->found_offset = offset; 3696 return 0; 3697 } 3698 3699 static int do_allocation_clustered(struct btrfs_block_group *block_group, 3700 struct find_free_extent_ctl *ffe_ctl, 3701 struct btrfs_block_group **bg_ret) 3702 { 3703 int ret; 3704 3705 /* We want to try and use the cluster allocator, so lets look there */ 3706 if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { 3707 ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret); 3708 if (ret >= 0 || ret == -EAGAIN) 3709 return ret; 3710 /* ret == -ENOENT case falls through */ 3711 } 3712 3713 return find_free_extent_unclustered(block_group, ffe_ctl); 3714 } 3715 3716 /* 3717 * Tree-log block group locking 3718 * ============================ 3719 * 3720 * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which 3721 * indicates the starting address of a block group, which is reserved only 3722 * for tree-log metadata. 3723 * 3724 * Lock nesting 3725 * ============ 3726 * 3727 * space_info::lock 3728 * block_group::lock 3729 * fs_info::treelog_bg_lock 3730 */ 3731 3732 /* 3733 * Simple allocator for sequential-only block group. It only allows sequential 3734 * allocation. No need to play with trees. This function also reserves the 3735 * bytes as in btrfs_add_reserved_bytes. 
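 *
 * For example (illustrative numbers only): with zone_capacity == 256M and
 * alloc_offset == 96M, a 1M allocation is placed at block_group->start +
 * 96M and alloc_offset advances to 97M; no free space search is involved.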
3736 */ 3737 static int do_allocation_zoned(struct btrfs_block_group *block_group, 3738 struct find_free_extent_ctl *ffe_ctl, 3739 struct btrfs_block_group **bg_ret) 3740 { 3741 struct btrfs_fs_info *fs_info = block_group->fs_info; 3742 struct btrfs_space_info *space_info = block_group->space_info; 3743 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3744 u64 start = block_group->start; 3745 u64 num_bytes = ffe_ctl->num_bytes; 3746 u64 avail; 3747 u64 bytenr = block_group->start; 3748 u64 log_bytenr; 3749 u64 data_reloc_bytenr; 3750 int ret = 0; 3751 bool skip = false; 3752 3753 ASSERT(btrfs_is_zoned(block_group->fs_info)); 3754 3755 /* 3756 * Do not allow non-tree-log blocks in the dedicated tree-log block 3757 * group, and vice versa. 3758 */ 3759 spin_lock(&fs_info->treelog_bg_lock); 3760 log_bytenr = fs_info->treelog_bg; 3761 if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || 3762 (!ffe_ctl->for_treelog && bytenr == log_bytenr))) 3763 skip = true; 3764 spin_unlock(&fs_info->treelog_bg_lock); 3765 if (skip) 3766 return 1; 3767 3768 /* 3769 * Do not allow non-relocation blocks in the dedicated relocation block 3770 * group, and vice versa. 3771 */ 3772 spin_lock(&fs_info->relocation_bg_lock); 3773 data_reloc_bytenr = fs_info->data_reloc_bg; 3774 if (data_reloc_bytenr && 3775 ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) || 3776 (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr))) 3777 skip = true; 3778 spin_unlock(&fs_info->relocation_bg_lock); 3779 if (skip) 3780 return 1; 3781 3782 /* Check RO and no space case before trying to activate it */ 3783 spin_lock(&block_group->lock); 3784 if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) { 3785 ret = 1; 3786 /* 3787 * May need to clear fs_info->{treelog,data_reloc}_bg. 3788 * Return the error after taking the locks. 3789 */ 3790 } 3791 spin_unlock(&block_group->lock); 3792 3793 if (!ret && !btrfs_zone_activate(block_group)) { 3794 ret = 1; 3795 /* 3796 * May need to clear fs_info->{treelog,data_reloc}_bg. 3797 * Return the error after taking the locks. 3798 */ 3799 } 3800 3801 spin_lock(&space_info->lock); 3802 spin_lock(&block_group->lock); 3803 spin_lock(&fs_info->treelog_bg_lock); 3804 spin_lock(&fs_info->relocation_bg_lock); 3805 3806 if (ret) 3807 goto out; 3808 3809 ASSERT(!ffe_ctl->for_treelog || 3810 block_group->start == fs_info->treelog_bg || 3811 fs_info->treelog_bg == 0); 3812 ASSERT(!ffe_ctl->for_data_reloc || 3813 block_group->start == fs_info->data_reloc_bg || 3814 fs_info->data_reloc_bg == 0); 3815 3816 if (block_group->ro || 3817 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) { 3818 ret = 1; 3819 goto out; 3820 } 3821 3822 /* 3823 * Do not allow currently using block group to be tree-log dedicated 3824 * block group. 3825 */ 3826 if (ffe_ctl->for_treelog && !fs_info->treelog_bg && 3827 (block_group->used || block_group->reserved)) { 3828 ret = 1; 3829 goto out; 3830 } 3831 3832 /* 3833 * Do not allow currently used block group to be the data relocation 3834 * dedicated block group. 
3835 */ 3836 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg && 3837 (block_group->used || block_group->reserved)) { 3838 ret = 1; 3839 goto out; 3840 } 3841 3842 WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity); 3843 avail = block_group->zone_capacity - block_group->alloc_offset; 3844 if (avail < num_bytes) { 3845 if (ffe_ctl->max_extent_size < avail) { 3846 /* 3847 * With the sequential allocator, free space is always 3848 * contiguous. 3849 */ 3850 ffe_ctl->max_extent_size = avail; 3851 ffe_ctl->total_free_space = avail; 3852 } 3853 ret = 1; 3854 goto out; 3855 } 3856 3857 if (ffe_ctl->for_treelog && !fs_info->treelog_bg) 3858 fs_info->treelog_bg = block_group->start; 3859 3860 if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg) 3861 fs_info->data_reloc_bg = block_group->start; 3862 3863 ffe_ctl->found_offset = start + block_group->alloc_offset; 3864 block_group->alloc_offset += num_bytes; 3865 spin_lock(&ctl->tree_lock); 3866 ctl->free_space -= num_bytes; 3867 spin_unlock(&ctl->tree_lock); 3868 3869 /* 3870 * We do not check if found_offset is aligned to stripesize. The 3871 * address is anyway rewritten when using zone append writing. 3872 */ 3873 3874 ffe_ctl->search_start = ffe_ctl->found_offset; 3875 3876 out: 3877 if (ret && ffe_ctl->for_treelog) 3878 fs_info->treelog_bg = 0; 3879 if (ret && ffe_ctl->for_data_reloc && 3880 fs_info->data_reloc_bg == block_group->start) { 3881 /* 3882 * Do not allow further allocations from this block group. 3883 * Compared to increasing the ->ro, setting the 3884 * BLOCK_GROUP_FLAG_ZONED_DATA_RELOC flag still allows nocow 3885 * writers to come in. See btrfs_inc_nocow_writers(). 3886 * 3887 * We need to disable allocation here to avoid allocating a 3888 * regular (non-relocation) data extent. With a mix of relocation 3889 * extents and regular extents, we could dispatch WRITE commands 3890 * (for relocation extents) and ZONE APPEND commands (for 3891 * regular extents) to the same zone at the same time, which 3892 * would easily break the write pointer.
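 *
 * (Roughly: a regular WRITE must land exactly at the zone's write
 * pointer, while ZONE APPEND advances the pointer on its own, so
 * interleaving the two can leave a WRITE aimed at a stale offset.)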
3893 */ 3894 set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags); 3895 fs_info->data_reloc_bg = 0; 3896 } 3897 spin_unlock(&fs_info->relocation_bg_lock); 3898 spin_unlock(&fs_info->treelog_bg_lock); 3899 spin_unlock(&block_group->lock); 3900 spin_unlock(&space_info->lock); 3901 return ret; 3902 } 3903 3904 static int do_allocation(struct btrfs_block_group *block_group, 3905 struct find_free_extent_ctl *ffe_ctl, 3906 struct btrfs_block_group **bg_ret) 3907 { 3908 switch (ffe_ctl->policy) { 3909 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3910 return do_allocation_clustered(block_group, ffe_ctl, bg_ret); 3911 case BTRFS_EXTENT_ALLOC_ZONED: 3912 return do_allocation_zoned(block_group, ffe_ctl, bg_ret); 3913 default: 3914 BUG(); 3915 } 3916 } 3917 3918 static void release_block_group(struct btrfs_block_group *block_group, 3919 struct find_free_extent_ctl *ffe_ctl, 3920 int delalloc) 3921 { 3922 switch (ffe_ctl->policy) { 3923 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3924 ffe_ctl->retry_clustered = false; 3925 ffe_ctl->retry_unclustered = false; 3926 break; 3927 case BTRFS_EXTENT_ALLOC_ZONED: 3928 /* Nothing to do */ 3929 break; 3930 default: 3931 BUG(); 3932 } 3933 3934 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != 3935 ffe_ctl->index); 3936 btrfs_release_block_group(block_group, delalloc); 3937 } 3938 3939 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl, 3940 struct btrfs_key *ins) 3941 { 3942 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 3943 3944 if (!ffe_ctl->use_cluster && last_ptr) { 3945 spin_lock(&last_ptr->lock); 3946 last_ptr->window_start = ins->objectid; 3947 spin_unlock(&last_ptr->lock); 3948 } 3949 } 3950 3951 static void found_extent(struct find_free_extent_ctl *ffe_ctl, 3952 struct btrfs_key *ins) 3953 { 3954 switch (ffe_ctl->policy) { 3955 case BTRFS_EXTENT_ALLOC_CLUSTERED: 3956 found_extent_clustered(ffe_ctl, ins); 3957 break; 3958 case BTRFS_EXTENT_ALLOC_ZONED: 3959 /* Nothing to do */ 3960 break; 3961 default: 3962 BUG(); 3963 } 3964 } 3965 3966 static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info, 3967 struct find_free_extent_ctl *ffe_ctl) 3968 { 3969 /* If we can activate a new zone, just allocate a chunk and use it */ 3970 if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags)) 3971 return 0; 3972 3973 /* 3974 * We already reached the max active zones. Try to finish one block 3975 * group to make room for a new block group. This is only possible 3976 * for a data block group because btrfs_zone_finish() may need to wait 3977 * for a running transaction, which can cause a deadlock for metadata 3978 * allocation. 3979 */ 3980 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { 3981 int ret = btrfs_zone_finish_one_bg(fs_info); 3982 3983 if (ret == 1) 3984 return 0; 3985 else if (ret < 0) 3986 return ret; 3987 } 3988 3989 /* 3990 * If we have enough free space left in an already active block group 3991 * and we can't activate any other zone now, do not allow allocating a 3992 * new chunk and let find_free_extent() retry with a smaller size. 3993 */ 3994 if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size) 3995 return -ENOSPC; 3996 3997 /* 3998 * Not even min_alloc_size is left in any block group. Since we cannot 3999 * activate a new block group, allocating one may not help. Let's tell 4000 * the caller to try again and hope it makes progress by writing out 4001 * some parts of the region. That is only possible for data block 4002 * groups, where a part of the region can be written.
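 *
 * A rough summary of the decisions in this function (illustrative):
 *
 *   can activate a new zone               -> 0       (allocate a chunk)
 *   finished one data block group         -> 0       (allocate a chunk)
 *   max_extent_size >= min_alloc_size     -> -ENOSPC (retry a smaller size)
 *   data allocation, nothing else helped  -> -EAGAIN (retry later)
 *   otherwise                             -> 0       (allocate and hope)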
4003 */ 4004 if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) 4005 return -EAGAIN; 4006 4007 /* 4008 * We cannot activate a new block group and there is not enough space 4009 * left in any block group. So allocating a new block group may not 4010 * help. But there is nothing else to do anyway, so let's go with it. 4011 */ 4012 return 0; 4013 } 4014 4015 static int can_allocate_chunk(struct btrfs_fs_info *fs_info, 4016 struct find_free_extent_ctl *ffe_ctl) 4017 { 4018 switch (ffe_ctl->policy) { 4019 case BTRFS_EXTENT_ALLOC_CLUSTERED: 4020 return 0; 4021 case BTRFS_EXTENT_ALLOC_ZONED: 4022 return can_allocate_chunk_zoned(fs_info, ffe_ctl); 4023 default: 4024 BUG(); 4025 } 4026 } 4027 4028 static int chunk_allocation_failed(struct find_free_extent_ctl *ffe_ctl) 4029 { 4030 switch (ffe_ctl->policy) { 4031 case BTRFS_EXTENT_ALLOC_CLUSTERED: 4032 /* 4033 * If we can't allocate a new chunk we've already looped through 4034 * at least once, so move on to the NO_EMPTY_SIZE case. 4035 */ 4036 ffe_ctl->loop = LOOP_NO_EMPTY_SIZE; 4037 return 0; 4038 case BTRFS_EXTENT_ALLOC_ZONED: 4039 /* Give up here */ 4040 return -ENOSPC; 4041 default: 4042 BUG(); 4043 } 4044 } 4045 4046 /* 4047 * Return >0 means the caller needs to re-search for a free extent. 4048 * Return 0 means we have the needed free extent. 4049 * Return <0 means we failed to locate any free extent. 4050 */ 4051 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, 4052 struct btrfs_key *ins, 4053 struct find_free_extent_ctl *ffe_ctl, 4054 bool full_search) 4055 { 4056 struct btrfs_root *root = fs_info->chunk_root; 4057 int ret; 4058 4059 if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && 4060 ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) 4061 ffe_ctl->orig_have_caching_bg = true; 4062 4063 if (ins->objectid) { 4064 found_extent(ffe_ctl, ins); 4065 return 0; 4066 } 4067 4068 if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) 4069 return 1; 4070 4071 ffe_ctl->index++; 4072 if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) 4073 return 1; 4074 4075 /* 4076 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking 4077 * caching kthreads as we move along 4078 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching 4079 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again 4080 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 4081 * again 4082 */ 4083 if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { 4084 ffe_ctl->index = 0; 4085 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) { 4086 /* 4087 * We want to skip the LOOP_CACHING_WAIT step if we 4088 * don't have any uncached bgs and we've already done a 4089 * full search through. 4090 */ 4091 if (ffe_ctl->orig_have_caching_bg || !full_search) 4092 ffe_ctl->loop = LOOP_CACHING_WAIT; 4093 else 4094 ffe_ctl->loop = LOOP_ALLOC_CHUNK; 4095 } else { 4096 ffe_ctl->loop++; 4097 } 4098 4099 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { 4100 struct btrfs_trans_handle *trans; 4101 int exist = 0; 4102 4103 /* Check if the allocation policy allows creating a new chunk */ 4104 ret = can_allocate_chunk(fs_info, ffe_ctl); 4105 if (ret) 4106 return ret; 4107 4108 trans = current->journal_info; 4109 if (trans) 4110 exist = 1; 4111 else 4112 trans = btrfs_join_transaction(root); 4113 4114 if (IS_ERR(trans)) { 4115 ret = PTR_ERR(trans); 4116 return ret; 4117 } 4118 4119 ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, 4120 CHUNK_ALLOC_FORCE_FOR_EXTENT); 4121 4122 /* Do not bail out on ENOSPC since we can do more.
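 * For clustered allocation, chunk_allocation_failed() above falls
 * back to LOOP_NO_EMPTY_SIZE and the search is retried; for zoned
 * allocation it gives up with -ENOSPC.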
*/ 4123 if (ret == -ENOSPC) 4124 ret = chunk_allocation_failed(ffe_ctl); 4125 else if (ret < 0) 4126 btrfs_abort_transaction(trans, ret); 4127 else 4128 ret = 0; 4129 if (!exist) 4130 btrfs_end_transaction(trans); 4131 if (ret) 4132 return ret; 4133 } 4134 4135 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { 4136 if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) 4137 return -ENOSPC; 4138 4139 /* 4140 * Don't loop again if we already have no empty_size and 4141 * no empty_cluster. 4142 */ 4143 if (ffe_ctl->empty_size == 0 && 4144 ffe_ctl->empty_cluster == 0) 4145 return -ENOSPC; 4146 ffe_ctl->empty_size = 0; 4147 ffe_ctl->empty_cluster = 0; 4148 } 4149 return 1; 4150 } 4151 return -ENOSPC; 4152 } 4153 4154 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, 4155 struct find_free_extent_ctl *ffe_ctl, 4156 struct btrfs_space_info *space_info, 4157 struct btrfs_key *ins) 4158 { 4159 /* 4160 * If our free space is heavily fragmented we may not be able to make 4161 * big contiguous allocations, so instead of doing the expensive search 4162 * for free space, simply return ENOSPC with our max_extent_size so we 4163 * can go ahead and search for a more manageable chunk. 4164 * 4165 * If our max_extent_size is large enough for our allocation, simply 4166 * disable clustering, since we will likely not be able to find enough 4167 * space to create a cluster and would only induce latency trying. 4168 */ 4169 if (space_info->max_extent_size) { 4170 spin_lock(&space_info->lock); 4171 if (space_info->max_extent_size && 4172 ffe_ctl->num_bytes > space_info->max_extent_size) { 4173 ins->offset = space_info->max_extent_size; 4174 spin_unlock(&space_info->lock); 4175 return -ENOSPC; 4176 } else if (space_info->max_extent_size) { 4177 ffe_ctl->use_cluster = false; 4178 } 4179 spin_unlock(&space_info->lock); 4180 } 4181 4182 ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, 4183 &ffe_ctl->empty_cluster); 4184 if (ffe_ctl->last_ptr) { 4185 struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; 4186 4187 spin_lock(&last_ptr->lock); 4188 if (last_ptr->block_group) 4189 ffe_ctl->hint_byte = last_ptr->window_start; 4190 if (last_ptr->fragmented) { 4191 /* 4192 * We still set window_start so we can keep track of the 4193 * last place we found an allocation to try and save 4194 * some time. 4195 */ 4196 ffe_ctl->hint_byte = last_ptr->window_start; 4197 ffe_ctl->use_cluster = false; 4198 } 4199 spin_unlock(&last_ptr->lock); 4200 } 4201 4202 return 0; 4203 } 4204 4205 static int prepare_allocation(struct btrfs_fs_info *fs_info, 4206 struct find_free_extent_ctl *ffe_ctl, 4207 struct btrfs_space_info *space_info, 4208 struct btrfs_key *ins) 4209 { 4210 switch (ffe_ctl->policy) { 4211 case BTRFS_EXTENT_ALLOC_CLUSTERED: 4212 return prepare_allocation_clustered(fs_info, ffe_ctl, 4213 space_info, ins); 4214 case BTRFS_EXTENT_ALLOC_ZONED: 4215 if (ffe_ctl->for_treelog) { 4216 spin_lock(&fs_info->treelog_bg_lock); 4217 if (fs_info->treelog_bg) 4218 ffe_ctl->hint_byte = fs_info->treelog_bg; 4219 spin_unlock(&fs_info->treelog_bg_lock); 4220 } 4221 if (ffe_ctl->for_data_reloc) { 4222 spin_lock(&fs_info->relocation_bg_lock); 4223 if (fs_info->data_reloc_bg) 4224 ffe_ctl->hint_byte = fs_info->data_reloc_bg; 4225 spin_unlock(&fs_info->relocation_bg_lock); 4226 } 4227 return 0; 4228 default: 4229 BUG(); 4230 } 4231 } 4232 4233 /* 4234 * Walks the btree of allocated extents and finds a hole of a given size.
4235 * The key ins is changed to record the hole: 4236 * ins->objectid == start position 4237 * ins->flags = BTRFS_EXTENT_ITEM_KEY 4238 * ins->offset == the size of the hole. 4239 * Any available blocks before search_start are skipped. 4240 * 4241 * If there is no suitable free space, we will record the max size of 4242 * the free space extent currently available. 4243 * 4244 * The overall logic and call chain: 4245 * 4246 * find_free_extent() 4247 * |- Iterate through all block groups 4248 * | |- Get a valid block group 4249 * | |- Try to do clustered allocation in that block group 4250 * | |- Try to do unclustered allocation in that block group 4251 * | |- Check if the result is valid 4252 * | | |- If valid, then exit 4253 * | |- Jump to next block group 4254 * | 4255 * |- Push harder to find free extents 4256 * |- If not found, re-iterate all block groups 4257 */ 4258 static noinline int find_free_extent(struct btrfs_root *root, 4259 struct btrfs_key *ins, 4260 struct find_free_extent_ctl *ffe_ctl) 4261 { 4262 struct btrfs_fs_info *fs_info = root->fs_info; 4263 int ret = 0; 4264 int cache_block_group_error = 0; 4265 struct btrfs_block_group *block_group = NULL; 4266 struct btrfs_space_info *space_info; 4267 bool full_search = false; 4268 4269 WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); 4270 4271 ffe_ctl->search_start = 0; 4272 /* For clustered allocation */ 4273 ffe_ctl->empty_cluster = 0; 4274 ffe_ctl->last_ptr = NULL; 4275 ffe_ctl->use_cluster = true; 4276 ffe_ctl->have_caching_bg = false; 4277 ffe_ctl->orig_have_caching_bg = false; 4278 ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags); 4279 ffe_ctl->loop = 0; 4280 /* For clustered allocation */ 4281 ffe_ctl->retry_clustered = false; 4282 ffe_ctl->retry_unclustered = false; 4283 ffe_ctl->cached = 0; 4284 ffe_ctl->max_extent_size = 0; 4285 ffe_ctl->total_free_space = 0; 4286 ffe_ctl->found_offset = 0; 4287 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; 4288 4289 if (btrfs_is_zoned(fs_info)) 4290 ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; 4291 4292 ins->type = BTRFS_EXTENT_ITEM_KEY; 4293 ins->objectid = 0; 4294 ins->offset = 0; 4295 4296 trace_find_free_extent(root, ffe_ctl->num_bytes, ffe_ctl->empty_size, 4297 ffe_ctl->flags); 4298 4299 space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags); 4300 if (!space_info) { 4301 btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags); 4302 return -ENOSPC; 4303 } 4304 4305 ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins); 4306 if (ret < 0) 4307 return ret; 4308 4309 ffe_ctl->search_start = max(ffe_ctl->search_start, 4310 first_logical_byte(fs_info)); 4311 ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); 4312 if (ffe_ctl->search_start == ffe_ctl->hint_byte) { 4313 block_group = btrfs_lookup_block_group(fs_info, 4314 ffe_ctl->search_start); 4315 /* 4316 * We don't want to use the block group if it doesn't match our 4317 * allocation bits, or if it's not cached. 4318 * 4319 * However if we are re-searching with an ideal block group 4320 * picked out then we don't care that the block group is cached.
4321 */ 4322 if (block_group && block_group_bits(block_group, ffe_ctl->flags) && 4323 block_group->cached != BTRFS_CACHE_NO) { 4324 down_read(&space_info->groups_sem); 4325 if (list_empty(&block_group->list) || 4326 block_group->ro) { 4327 /* 4328 * someone is removing this block group, 4329 * we can't jump into the have_block_group 4330 * target because our list pointers are not 4331 * valid 4332 */ 4333 btrfs_put_block_group(block_group); 4334 up_read(&space_info->groups_sem); 4335 } else { 4336 ffe_ctl->index = btrfs_bg_flags_to_raid_index( 4337 block_group->flags); 4338 btrfs_lock_block_group(block_group, 4339 ffe_ctl->delalloc); 4340 goto have_block_group; 4341 } 4342 } else if (block_group) { 4343 btrfs_put_block_group(block_group); 4344 } 4345 } 4346 search: 4347 ffe_ctl->have_caching_bg = false; 4348 if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) || 4349 ffe_ctl->index == 0) 4350 full_search = true; 4351 down_read(&space_info->groups_sem); 4352 list_for_each_entry(block_group, 4353 &space_info->block_groups[ffe_ctl->index], list) { 4354 struct btrfs_block_group *bg_ret; 4355 4356 /* If the block group is read-only, we can skip it entirely. */ 4357 if (unlikely(block_group->ro)) { 4358 if (ffe_ctl->for_treelog) 4359 btrfs_clear_treelog_bg(block_group); 4360 if (ffe_ctl->for_data_reloc) 4361 btrfs_clear_data_reloc_bg(block_group); 4362 continue; 4363 } 4364 4365 btrfs_grab_block_group(block_group, ffe_ctl->delalloc); 4366 ffe_ctl->search_start = block_group->start; 4367 4368 /* 4369 * this can happen if we end up cycling through all the 4370 * raid types, but we want to make sure we only allocate 4371 * for the proper type. 4372 */ 4373 if (!block_group_bits(block_group, ffe_ctl->flags)) { 4374 u64 extra = BTRFS_BLOCK_GROUP_DUP | 4375 BTRFS_BLOCK_GROUP_RAID1_MASK | 4376 BTRFS_BLOCK_GROUP_RAID56_MASK | 4377 BTRFS_BLOCK_GROUP_RAID10; 4378 4379 /* 4380 * if they asked for extra copies and this block group 4381 * doesn't provide them, bail. This does allow us to 4382 * fill raid0 from raid1. 4383 */ 4384 if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) 4385 goto loop; 4386 4387 /* 4388 * This block group has different flags than we want. 4389 * It's possible that we have MIXED_GROUP flag but no 4390 * block group is mixed. Just skip such block group. 4391 */ 4392 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4393 continue; 4394 } 4395 4396 have_block_group: 4397 ffe_ctl->cached = btrfs_block_group_done(block_group); 4398 if (unlikely(!ffe_ctl->cached)) { 4399 ffe_ctl->have_caching_bg = true; 4400 ret = btrfs_cache_block_group(block_group, false); 4401 4402 /* 4403 * If we get ENOMEM here or something else we want to 4404 * try other block groups, because it may not be fatal. 4405 * However if we can't find anything else we need to 4406 * save our return here so that we return the actual 4407 * error that caused problems, not ENOSPC. 
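 * (cache_block_group_error is consulted at the very end of the
 * search: if the final result would be -ENOSPC but caching failed
 * earlier, e.g. with -ENOMEM, that earlier error is returned instead.)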
4408 */ 4409 if (ret < 0) { 4410 if (!cache_block_group_error) 4411 cache_block_group_error = ret; 4412 ret = 0; 4413 goto loop; 4414 } 4415 ret = 0; 4416 } 4417 4418 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) 4419 goto loop; 4420 4421 bg_ret = NULL; 4422 ret = do_allocation(block_group, ffe_ctl, &bg_ret); 4423 if (ret == 0) { 4424 if (bg_ret && bg_ret != block_group) { 4425 btrfs_release_block_group(block_group, 4426 ffe_ctl->delalloc); 4427 block_group = bg_ret; 4428 } 4429 } else if (ret == -EAGAIN) { 4430 goto have_block_group; 4431 } else if (ret > 0) { 4432 goto loop; 4433 } 4434 4435 /* Checks */ 4436 ffe_ctl->search_start = round_up(ffe_ctl->found_offset, 4437 fs_info->stripesize); 4438 4439 /* move on to the next group */ 4440 if (ffe_ctl->search_start + ffe_ctl->num_bytes > 4441 block_group->start + block_group->length) { 4442 btrfs_add_free_space_unused(block_group, 4443 ffe_ctl->found_offset, 4444 ffe_ctl->num_bytes); 4445 goto loop; 4446 } 4447 4448 if (ffe_ctl->found_offset < ffe_ctl->search_start) 4449 btrfs_add_free_space_unused(block_group, 4450 ffe_ctl->found_offset, 4451 ffe_ctl->search_start - ffe_ctl->found_offset); 4452 4453 ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes, 4454 ffe_ctl->num_bytes, 4455 ffe_ctl->delalloc); 4456 if (ret == -EAGAIN) { 4457 btrfs_add_free_space_unused(block_group, 4458 ffe_ctl->found_offset, 4459 ffe_ctl->num_bytes); 4460 goto loop; 4461 } 4462 btrfs_inc_block_group_reservations(block_group); 4463 4464 /* we are all good, let's return */ 4465 ins->objectid = ffe_ctl->search_start; 4466 ins->offset = ffe_ctl->num_bytes; 4467 4468 trace_btrfs_reserve_extent(block_group, ffe_ctl->search_start, 4469 ffe_ctl->num_bytes); 4470 btrfs_release_block_group(block_group, ffe_ctl->delalloc); 4471 break; 4472 loop: 4473 release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc); 4474 cond_resched(); 4475 } 4476 up_read(&space_info->groups_sem); 4477 4478 ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search); 4479 if (ret > 0) 4480 goto search; 4481 4482 if (ret == -ENOSPC && !cache_block_group_error) { 4483 /* 4484 * Use ffe_ctl->total_free_space as a fallback if we can't find 4485 * any contiguous hole. 4486 */ 4487 if (!ffe_ctl->max_extent_size) 4488 ffe_ctl->max_extent_size = ffe_ctl->total_free_space; 4489 spin_lock(&space_info->lock); 4490 space_info->max_extent_size = ffe_ctl->max_extent_size; 4491 spin_unlock(&space_info->lock); 4492 ins->offset = ffe_ctl->max_extent_size; 4493 } else if (ret == -ENOSPC) { 4494 ret = cache_block_group_error; 4495 } 4496 return ret; 4497 } 4498 4499 /* 4500 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a 4501 * hole that is at least as big as @num_bytes. 4502 * 4503 * @root - The root that will contain this extent 4504 * 4505 * @ram_bytes - The amount of space in ram that @num_bytes takes. This 4506 * is used for accounting purposes. This value differs 4507 * from @num_bytes only in the case of compressed extents. 4508 * 4509 * @num_bytes - Number of bytes to allocate on-disk. 4510 * 4511 * @min_alloc_size - Indicates the minimum amount of space that the 4512 * allocator should try to satisfy. In some cases 4513 * @num_bytes may be larger than what is required and if 4514 * the filesystem is fragmented then allocation fails. 4515 * However, the presence of @min_alloc_size gives a 4516 * chance to try and satisfy the smaller allocation. 4517 * 4518 * @empty_size - A hint that you plan on doing more COW.
This is the 4519 * size in bytes the allocator should try to find free 4520 * next to the block it returns. This is just a hint and 4521 * may be ignored by the allocator. 4522 * 4523 * @hint_byte - Hint to the allocator to start searching above the byte 4524 * address passed. It might be ignored. 4525 * 4526 * @ins - This key is modified to record the found hole. It will 4527 * have the following values: 4528 * ins->objectid == start position 4529 * ins->flags = BTRFS_EXTENT_ITEM_KEY 4530 * ins->offset == the size of the hole. 4531 * 4532 * @is_data - Boolean flag indicating whether an extent is 4533 * allocated for data (true) or metadata (false) 4534 * 4535 * @delalloc - Boolean flag indicating whether this allocation is for 4536 * delalloc or not. If 'true' data_rwsem of block groups 4537 * is going to be acquired. 4538 * 4539 * 4540 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In 4541 * case -ENOSPC is returned then @ins->offset will contain the size of the 4542 * largest available hole the allocator managed to find. 4543 */ 4544 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, 4545 u64 num_bytes, u64 min_alloc_size, 4546 u64 empty_size, u64 hint_byte, 4547 struct btrfs_key *ins, int is_data, int delalloc) 4548 { 4549 struct btrfs_fs_info *fs_info = root->fs_info; 4550 struct find_free_extent_ctl ffe_ctl = {}; 4551 bool final_tried = num_bytes == min_alloc_size; 4552 u64 flags; 4553 int ret; 4554 bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4555 bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data); 4556 4557 flags = get_alloc_profile_by_root(root, is_data); 4558 again: 4559 WARN_ON(num_bytes < fs_info->sectorsize); 4560 4561 ffe_ctl.ram_bytes = ram_bytes; 4562 ffe_ctl.num_bytes = num_bytes; 4563 ffe_ctl.min_alloc_size = min_alloc_size; 4564 ffe_ctl.empty_size = empty_size; 4565 ffe_ctl.flags = flags; 4566 ffe_ctl.delalloc = delalloc; 4567 ffe_ctl.hint_byte = hint_byte; 4568 ffe_ctl.for_treelog = for_treelog; 4569 ffe_ctl.for_data_reloc = for_data_reloc; 4570 4571 ret = find_free_extent(root, ins, &ffe_ctl); 4572 if (!ret && !is_data) { 4573 btrfs_dec_block_group_reservations(fs_info, ins->objectid); 4574 } else if (ret == -ENOSPC) { 4575 if (!final_tried && ins->offset) { 4576 num_bytes = min(num_bytes >> 1, ins->offset); 4577 num_bytes = round_down(num_bytes, 4578 fs_info->sectorsize); 4579 num_bytes = max(num_bytes, min_alloc_size); 4580 ram_bytes = num_bytes; 4581 if (num_bytes == min_alloc_size) 4582 final_tried = true; 4583 goto again; 4584 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { 4585 struct btrfs_space_info *sinfo; 4586 4587 sinfo = btrfs_find_space_info(fs_info, flags); 4588 btrfs_err(fs_info, 4589 "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d", 4590 flags, num_bytes, for_treelog, for_data_reloc); 4591 if (sinfo) 4592 btrfs_dump_space_info(fs_info, sinfo, 4593 num_bytes, 1); 4594 } 4595 } 4596 4597 return ret; 4598 } 4599 4600 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, 4601 u64 start, u64 len, int delalloc) 4602 { 4603 struct btrfs_block_group *cache; 4604 4605 cache = btrfs_lookup_block_group(fs_info, start); 4606 if (!cache) { 4607 btrfs_err(fs_info, "Unable to find block group for %llu", 4608 start); 4609 return -ENOSPC; 4610 } 4611 4612 btrfs_add_free_space(cache, start, len); 4613 btrfs_free_reserved_bytes(cache, len, delalloc); 4614 trace_btrfs_reserved_extent_free(fs_info, start, len); 4615 4616 btrfs_put_block_group(cache); 4617 
return 0; 4618 } 4619 4620 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, 4621 u64 len) 4622 { 4623 struct btrfs_block_group *cache; 4624 int ret = 0; 4625 4626 cache = btrfs_lookup_block_group(trans->fs_info, start); 4627 if (!cache) { 4628 btrfs_err(trans->fs_info, "unable to find block group for %llu", 4629 start); 4630 return -ENOSPC; 4631 } 4632 4633 ret = pin_down_extent(trans, cache, start, len, 1); 4634 btrfs_put_block_group(cache); 4635 return ret; 4636 } 4637 4638 static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr, 4639 u64 num_bytes) 4640 { 4641 struct btrfs_fs_info *fs_info = trans->fs_info; 4642 int ret; 4643 4644 ret = remove_from_free_space_tree(trans, bytenr, num_bytes); 4645 if (ret) 4646 return ret; 4647 4648 ret = btrfs_update_block_group(trans, bytenr, num_bytes, true); 4649 if (ret) { 4650 ASSERT(!ret); 4651 btrfs_err(fs_info, "update block group failed for %llu %llu", 4652 bytenr, num_bytes); 4653 return ret; 4654 } 4655 4656 trace_btrfs_reserved_extent_alloc(fs_info, bytenr, num_bytes); 4657 return 0; 4658 } 4659 4660 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4661 u64 parent, u64 root_objectid, 4662 u64 flags, u64 owner, u64 offset, 4663 struct btrfs_key *ins, int ref_mod) 4664 { 4665 struct btrfs_fs_info *fs_info = trans->fs_info; 4666 struct btrfs_root *extent_root; 4667 int ret; 4668 struct btrfs_extent_item *extent_item; 4669 struct btrfs_extent_inline_ref *iref; 4670 struct btrfs_path *path; 4671 struct extent_buffer *leaf; 4672 int type; 4673 u32 size; 4674 4675 if (parent > 0) 4676 type = BTRFS_SHARED_DATA_REF_KEY; 4677 else 4678 type = BTRFS_EXTENT_DATA_REF_KEY; 4679 4680 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); 4681 4682 path = btrfs_alloc_path(); 4683 if (!path) 4684 return -ENOMEM; 4685 4686 extent_root = btrfs_extent_root(fs_info, ins->objectid); 4687 ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size); 4688 if (ret) { 4689 btrfs_free_path(path); 4690 return ret; 4691 } 4692 4693 leaf = path->nodes[0]; 4694 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4695 struct btrfs_extent_item); 4696 btrfs_set_extent_refs(leaf, extent_item, ref_mod); 4697 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4698 btrfs_set_extent_flags(leaf, extent_item, 4699 flags | BTRFS_EXTENT_FLAG_DATA); 4700 4701 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4702 btrfs_set_extent_inline_ref_type(leaf, iref, type); 4703 if (parent > 0) { 4704 struct btrfs_shared_data_ref *ref; 4705 ref = (struct btrfs_shared_data_ref *)(iref + 1); 4706 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 4707 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); 4708 } else { 4709 struct btrfs_extent_data_ref *ref; 4710 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 4711 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); 4712 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); 4713 btrfs_set_extent_data_ref_offset(leaf, ref, offset); 4714 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); 4715 } 4716 4717 btrfs_mark_buffer_dirty(path->nodes[0]); 4718 btrfs_free_path(path); 4719 4720 return alloc_reserved_extent(trans, ins->objectid, ins->offset); 4721 } 4722 4723 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 4724 struct btrfs_delayed_ref_node *node, 4725 struct btrfs_delayed_extent_op *extent_op) 4726 { 4727 struct btrfs_fs_info *fs_info = trans->fs_info; 4728 struct btrfs_root *extent_root; 4729 int 
ret; 4730 struct btrfs_extent_item *extent_item; 4731 struct btrfs_key extent_key; 4732 struct btrfs_tree_block_info *block_info; 4733 struct btrfs_extent_inline_ref *iref; 4734 struct btrfs_path *path; 4735 struct extent_buffer *leaf; 4736 struct btrfs_delayed_tree_ref *ref; 4737 u32 size = sizeof(*extent_item) + sizeof(*iref); 4738 u64 flags = extent_op->flags_to_set; 4739 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4740 4741 ref = btrfs_delayed_node_to_tree_ref(node); 4742 4743 extent_key.objectid = node->bytenr; 4744 if (skinny_metadata) { 4745 extent_key.offset = ref->level; 4746 extent_key.type = BTRFS_METADATA_ITEM_KEY; 4747 } else { 4748 extent_key.offset = node->num_bytes; 4749 extent_key.type = BTRFS_EXTENT_ITEM_KEY; 4750 size += sizeof(*block_info); 4751 } 4752 4753 path = btrfs_alloc_path(); 4754 if (!path) 4755 return -ENOMEM; 4756 4757 extent_root = btrfs_extent_root(fs_info, extent_key.objectid); 4758 ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key, 4759 size); 4760 if (ret) { 4761 btrfs_free_path(path); 4762 return ret; 4763 } 4764 4765 leaf = path->nodes[0]; 4766 extent_item = btrfs_item_ptr(leaf, path->slots[0], 4767 struct btrfs_extent_item); 4768 btrfs_set_extent_refs(leaf, extent_item, 1); 4769 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 4770 btrfs_set_extent_flags(leaf, extent_item, 4771 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); 4772 4773 if (skinny_metadata) { 4774 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 4775 } else { 4776 block_info = (struct btrfs_tree_block_info *)(extent_item + 1); 4777 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key); 4778 btrfs_set_tree_block_level(leaf, block_info, ref->level); 4779 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 4780 } 4781 4782 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { 4783 btrfs_set_extent_inline_ref_type(leaf, iref, 4784 BTRFS_SHARED_BLOCK_REF_KEY); 4785 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent); 4786 } else { 4787 btrfs_set_extent_inline_ref_type(leaf, iref, 4788 BTRFS_TREE_BLOCK_REF_KEY); 4789 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root); 4790 } 4791 4792 btrfs_mark_buffer_dirty(leaf); 4793 btrfs_free_path(path); 4794 4795 return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize); 4796 } 4797 4798 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 4799 struct btrfs_root *root, u64 owner, 4800 u64 offset, u64 ram_bytes, 4801 struct btrfs_key *ins) 4802 { 4803 struct btrfs_ref generic_ref = { 0 }; 4804 4805 BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 4806 4807 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 4808 ins->objectid, ins->offset, 0); 4809 btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, 4810 offset, 0, false); 4811 btrfs_ref_tree_mod(root->fs_info, &generic_ref); 4812 4813 return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes); 4814 } 4815 4816 /* 4817 * this is used by the tree logging recovery code. 
It records that 4818 * an extent has been allocated and makes sure to clear the free 4819 * space cache bits as well 4820 */ 4821 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 4822 u64 root_objectid, u64 owner, u64 offset, 4823 struct btrfs_key *ins) 4824 { 4825 struct btrfs_fs_info *fs_info = trans->fs_info; 4826 int ret; 4827 struct btrfs_block_group *block_group; 4828 struct btrfs_space_info *space_info; 4829 4830 /* 4831 * Mixed block groups will exclude before processing the log so we only 4832 * need to do the exclude dance if this fs isn't mixed. 4833 */ 4834 if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { 4835 ret = __exclude_logged_extent(fs_info, ins->objectid, 4836 ins->offset); 4837 if (ret) 4838 return ret; 4839 } 4840 4841 block_group = btrfs_lookup_block_group(fs_info, ins->objectid); 4842 if (!block_group) 4843 return -EINVAL; 4844 4845 space_info = block_group->space_info; 4846 spin_lock(&space_info->lock); 4847 spin_lock(&block_group->lock); 4848 space_info->bytes_reserved += ins->offset; 4849 block_group->reserved += ins->offset; 4850 spin_unlock(&block_group->lock); 4851 spin_unlock(&space_info->lock); 4852 4853 ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner, 4854 offset, ins, 1); 4855 if (ret) 4856 btrfs_pin_extent(trans, ins->objectid, ins->offset, 1); 4857 btrfs_put_block_group(block_group); 4858 return ret; 4859 } 4860 4861 static struct extent_buffer * 4862 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4863 u64 bytenr, int level, u64 owner, 4864 enum btrfs_lock_nesting nest) 4865 { 4866 struct btrfs_fs_info *fs_info = root->fs_info; 4867 struct extent_buffer *buf; 4868 u64 lockdep_owner = owner; 4869 4870 buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level); 4871 if (IS_ERR(buf)) 4872 return buf; 4873 4874 /* 4875 * Extra safety check in case the extent tree is corrupted and extent 4876 * allocator chooses to use a tree block which is already used and 4877 * locked. 4878 */ 4879 if (buf->lock_owner == current->pid) { 4880 btrfs_err_rl(fs_info, 4881 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected", 4882 buf->start, btrfs_header_owner(buf), current->pid); 4883 free_extent_buffer(buf); 4884 return ERR_PTR(-EUCLEAN); 4885 } 4886 4887 /* 4888 * The reloc trees are just snapshots, so we need them to appear to be 4889 * just like any other fs tree WRT lockdep. 4890 * 4891 * The exception however is in replace_path() in relocation, where we 4892 * hold the lock on the original fs root and then search for the reloc 4893 * root. At that point we need to make sure any reloc root buffers are 4894 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make 4895 * lockdep happy. 4896 */ 4897 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID && 4898 !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) 4899 lockdep_owner = BTRFS_FS_TREE_OBJECTID; 4900 4901 /* btrfs_clean_tree_block() accesses generation field. */ 4902 btrfs_set_header_generation(buf, trans->transid); 4903 4904 /* 4905 * This needs to stay, because we could allocate a freed block from an 4906 * old tree into a new tree, so we need to make sure this new block is 4907 * set to the appropriate level and owner. 
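 * (Both values feed btrfs_set_buffer_lockdep_class() right below;
 * reusing a class from the block's previous life could produce false
 * lockdep reports.)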
4908 */ 4909 btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level); 4910 4911 __btrfs_tree_lock(buf, nest); 4912 btrfs_clean_tree_block(buf); 4913 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); 4914 clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags); 4915 4916 set_extent_buffer_uptodate(buf); 4917 4918 memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header)); 4919 btrfs_set_header_level(buf, level); 4920 btrfs_set_header_bytenr(buf, buf->start); 4921 btrfs_set_header_generation(buf, trans->transid); 4922 btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV); 4923 btrfs_set_header_owner(buf, owner); 4924 write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid); 4925 write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid); 4926 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 4927 buf->log_index = root->log_transid % 2; 4928 /* 4929 * we allow two log transactions at a time, use different 4930 * EXTENT bit to differentiate dirty pages. 4931 */ 4932 if (buf->log_index == 0) 4933 set_extent_dirty(&root->dirty_log_pages, buf->start, 4934 buf->start + buf->len - 1, GFP_NOFS); 4935 else 4936 set_extent_new(&root->dirty_log_pages, buf->start, 4937 buf->start + buf->len - 1); 4938 } else { 4939 buf->log_index = -1; 4940 set_extent_dirty(&trans->transaction->dirty_pages, buf->start, 4941 buf->start + buf->len - 1, GFP_NOFS); 4942 } 4943 /* this returns a buffer locked for blocking */ 4944 return buf; 4945 } 4946 4947 /* 4948 * finds a free extent and does all the dirty work required for allocation 4949 * returns the tree buffer or an ERR_PTR on error. 4950 */ 4951 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 4952 struct btrfs_root *root, 4953 u64 parent, u64 root_objectid, 4954 const struct btrfs_disk_key *key, 4955 int level, u64 hint, 4956 u64 empty_size, 4957 enum btrfs_lock_nesting nest) 4958 { 4959 struct btrfs_fs_info *fs_info = root->fs_info; 4960 struct btrfs_key ins; 4961 struct btrfs_block_rsv *block_rsv; 4962 struct extent_buffer *buf; 4963 struct btrfs_delayed_extent_op *extent_op; 4964 struct btrfs_ref generic_ref = { 0 }; 4965 u64 flags = 0; 4966 int ret; 4967 u32 blocksize = fs_info->nodesize; 4968 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 4969 4970 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4971 if (btrfs_is_testing(fs_info)) { 4972 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, 4973 level, root_objectid, nest); 4974 if (!IS_ERR(buf)) 4975 root->alloc_bytenr += blocksize; 4976 return buf; 4977 } 4978 #endif 4979 4980 block_rsv = btrfs_use_block_rsv(trans, root, blocksize); 4981 if (IS_ERR(block_rsv)) 4982 return ERR_CAST(block_rsv); 4983 4984 ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize, 4985 empty_size, hint, &ins, 0, 0); 4986 if (ret) 4987 goto out_unuse; 4988 4989 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level, 4990 root_objectid, nest); 4991 if (IS_ERR(buf)) { 4992 ret = PTR_ERR(buf); 4993 goto out_free_reserved; 4994 } 4995 4996 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 4997 if (parent == 0) 4998 parent = ins.objectid; 4999 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 5000 } else 5001 BUG_ON(parent > 0); 5002 5003 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 5004 extent_op = btrfs_alloc_delayed_extent_op(); 5005 if (!extent_op) { 5006 ret = -ENOMEM; 5007 goto out_free_buf; 5008 } 5009 if (key) 5010 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 5011 else 5012 memset(&extent_op->key, 0, sizeof(extent_op->key)); 5013 
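/*
 * Queue the flags/key update to run together with the delayed ref
 * added below. Skinny metadata items do not store the key, so
 * update_key is skipped for that format.
 */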
extent_op->flags_to_set = flags; 5014 extent_op->update_key = skinny_metadata ? false : true; 5015 extent_op->update_flags = true; 5016 extent_op->level = level; 5017 5018 btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, 5019 ins.objectid, ins.offset, parent); 5020 btrfs_init_tree_ref(&generic_ref, level, root_objectid, 5021 root->root_key.objectid, false); 5022 btrfs_ref_tree_mod(fs_info, &generic_ref); 5023 ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op); 5024 if (ret) 5025 goto out_free_delayed; 5026 } 5027 return buf; 5028 5029 out_free_delayed: 5030 btrfs_free_delayed_extent_op(extent_op); 5031 out_free_buf: 5032 btrfs_tree_unlock(buf); 5033 free_extent_buffer(buf); 5034 out_free_reserved: 5035 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0); 5036 out_unuse: 5037 btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize); 5038 return ERR_PTR(ret); 5039 } 5040 5041 struct walk_control { 5042 u64 refs[BTRFS_MAX_LEVEL]; 5043 u64 flags[BTRFS_MAX_LEVEL]; 5044 struct btrfs_key update_progress; 5045 struct btrfs_key drop_progress; 5046 int drop_level; 5047 int stage; 5048 int level; 5049 int shared_level; 5050 int update_ref; 5051 int keep_locks; 5052 int reada_slot; 5053 int reada_count; 5054 int restarted; 5055 }; 5056 5057 #define DROP_REFERENCE 1 5058 #define UPDATE_BACKREF 2 5059 5060 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, 5061 struct btrfs_root *root, 5062 struct walk_control *wc, 5063 struct btrfs_path *path) 5064 { 5065 struct btrfs_fs_info *fs_info = root->fs_info; 5066 u64 bytenr; 5067 u64 generation; 5068 u64 refs; 5069 u64 flags; 5070 u32 nritems; 5071 struct btrfs_key key; 5072 struct extent_buffer *eb; 5073 int ret; 5074 int slot; 5075 int nread = 0; 5076 5077 if (path->slots[wc->level] < wc->reada_slot) { 5078 wc->reada_count = wc->reada_count * 2 / 3; 5079 wc->reada_count = max(wc->reada_count, 2); 5080 } else { 5081 wc->reada_count = wc->reada_count * 3 / 2; 5082 wc->reada_count = min_t(int, wc->reada_count, 5083 BTRFS_NODEPTRS_PER_BLOCK(fs_info)); 5084 } 5085 5086 eb = path->nodes[wc->level]; 5087 nritems = btrfs_header_nritems(eb); 5088 5089 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 5090 if (nread >= wc->reada_count) 5091 break; 5092 5093 cond_resched(); 5094 bytenr = btrfs_node_blockptr(eb, slot); 5095 generation = btrfs_node_ptr_generation(eb, slot); 5096 5097 if (slot == path->slots[wc->level]) 5098 goto reada; 5099 5100 if (wc->stage == UPDATE_BACKREF && 5101 generation <= root->root_key.offset) 5102 continue; 5103 5104 /* We don't lock the tree block, it's OK to be racy here */ 5105 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, 5106 wc->level - 1, 1, &refs, 5107 &flags); 5108 /* We don't care about errors in readahead. */ 5109 if (ret < 0) 5110 continue; 5111 BUG_ON(refs == 0); 5112 5113 if (wc->stage == DROP_REFERENCE) { 5114 if (refs == 1) 5115 goto reada; 5116 5117 if (wc->level == 1 && 5118 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5119 continue; 5120 if (!wc->update_ref || 5121 generation <= root->root_key.offset) 5122 continue; 5123 btrfs_node_key_to_cpu(eb, &key, slot); 5124 ret = btrfs_comp_cpu_keys(&key, 5125 &wc->update_progress); 5126 if (ret < 0) 5127 continue; 5128 } else { 5129 if (wc->level == 1 && 5130 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5131 continue; 5132 } 5133 reada: 5134 btrfs_readahead_node_child(eb, slot); 5135 nread++; 5136 } 5137 wc->reada_slot = slot; 5138 } 5139 5140 /* 5141 * helper to process tree block while walking down the tree. 
5142 * 5143 * when wc->stage == UPDATE_BACKREF, this function updates 5144 * back refs for pointers in the block. 5145 * 5146 * NOTE: return value 1 means we should stop walking down. 5147 */ 5148 static noinline int walk_down_proc(struct btrfs_trans_handle *trans, 5149 struct btrfs_root *root, 5150 struct btrfs_path *path, 5151 struct walk_control *wc, int lookup_info) 5152 { 5153 struct btrfs_fs_info *fs_info = root->fs_info; 5154 int level = wc->level; 5155 struct extent_buffer *eb = path->nodes[level]; 5156 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; 5157 int ret; 5158 5159 if (wc->stage == UPDATE_BACKREF && 5160 btrfs_header_owner(eb) != root->root_key.objectid) 5161 return 1; 5162 5163 /* 5164 * When the reference count of a tree block is 1, it won't increase 5165 * again. Once the full backref flag is set, we never clear it. 5166 */ 5167 if (lookup_info && 5168 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || 5169 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { 5170 BUG_ON(!path->locks[level]); 5171 ret = btrfs_lookup_extent_info(trans, fs_info, 5172 eb->start, level, 1, 5173 &wc->refs[level], 5174 &wc->flags[level]); 5175 BUG_ON(ret == -ENOMEM); 5176 if (ret) 5177 return ret; 5178 BUG_ON(wc->refs[level] == 0); 5179 } 5180 5181 if (wc->stage == DROP_REFERENCE) { 5182 if (wc->refs[level] > 1) 5183 return 1; 5184 5185 if (path->locks[level] && !wc->keep_locks) { 5186 btrfs_tree_unlock_rw(eb, path->locks[level]); 5187 path->locks[level] = 0; 5188 } 5189 return 0; 5190 } 5191 5192 /* wc->stage == UPDATE_BACKREF */ 5193 if (!(wc->flags[level] & flag)) { 5194 BUG_ON(!path->locks[level]); 5195 ret = btrfs_inc_ref(trans, root, eb, 1); 5196 BUG_ON(ret); /* -ENOMEM */ 5197 ret = btrfs_dec_ref(trans, root, eb, 0); 5198 BUG_ON(ret); /* -ENOMEM */ 5199 ret = btrfs_set_disk_extent_flags(trans, eb, flag, 5200 btrfs_header_level(eb)); 5201 BUG_ON(ret); /* -ENOMEM */ 5202 wc->flags[level] |= flag; 5203 } 5204 5205 /* 5206 * the block is shared by multiple trees, so it's not good to 5207 * keep the tree lock 5208 */ 5209 if (path->locks[level] && level > 0) { 5210 btrfs_tree_unlock_rw(eb, path->locks[level]); 5211 path->locks[level] = 0; 5212 } 5213 return 0; 5214 } 5215 5216 /* 5217 * This is used to verify a ref exists for this root to deal with a bug where we 5218 * would have a drop_progress key that hadn't been updated properly. 5219 */ 5220 static int check_ref_exists(struct btrfs_trans_handle *trans, 5221 struct btrfs_root *root, u64 bytenr, u64 parent, 5222 int level) 5223 { 5224 struct btrfs_path *path; 5225 struct btrfs_extent_inline_ref *iref; 5226 int ret; 5227 5228 path = btrfs_alloc_path(); 5229 if (!path) 5230 return -ENOMEM; 5231 5232 ret = lookup_extent_backref(trans, path, &iref, bytenr, 5233 root->fs_info->nodesize, parent, 5234 root->root_key.objectid, level, 0); 5235 btrfs_free_path(path); 5236 if (ret == -ENOENT) 5237 return 0; 5238 if (ret < 0) 5239 return ret; 5240 return 1; 5241 } 5242 5243 /* 5244 * helper to process tree block pointer. 5245 * 5246 * when wc->stage == DROP_REFERENCE, this function checks the 5247 * reference count of the block pointed to. If the block 5248 * is shared and we need to update back refs for the subtree 5249 * rooted at the block, this function changes wc->stage to 5250 * UPDATE_BACKREF. If the block is shared and there is no 5251 * need to update back refs, this function drops the reference 5252 * to the block. 5253 * 5254 * NOTE: return value 1 means we should stop walking down.
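 *
 * Return-value summary (illustrative): 0 means we descended one level
 * and wc->level was decremented, 1 means the caller should skip this
 * subtree and advance to the next slot, < 0 is an error.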
5255 */ 5256 static noinline int do_walk_down(struct btrfs_trans_handle *trans, 5257 struct btrfs_root *root, 5258 struct btrfs_path *path, 5259 struct walk_control *wc, int *lookup_info) 5260 { 5261 struct btrfs_fs_info *fs_info = root->fs_info; 5262 u64 bytenr; 5263 u64 generation; 5264 u64 parent; 5265 struct btrfs_tree_parent_check check = { 0 }; 5266 struct btrfs_key key; 5267 struct btrfs_ref ref = { 0 }; 5268 struct extent_buffer *next; 5269 int level = wc->level; 5270 int reada = 0; 5271 int ret = 0; 5272 bool need_account = false; 5273 5274 generation = btrfs_node_ptr_generation(path->nodes[level], 5275 path->slots[level]); 5276 /* 5277 * if the lower level block was created before the snapshot 5278 * was created, we know there is no need to update back refs 5279 * for the subtree 5280 */ 5281 if (wc->stage == UPDATE_BACKREF && 5282 generation <= root->root_key.offset) { 5283 *lookup_info = 1; 5284 return 1; 5285 } 5286 5287 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 5288 5289 check.level = level - 1; 5290 check.transid = generation; 5291 check.owner_root = root->root_key.objectid; 5292 check.has_first_key = true; 5293 btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, 5294 path->slots[level]); 5295 5296 next = find_extent_buffer(fs_info, bytenr); 5297 if (!next) { 5298 next = btrfs_find_create_tree_block(fs_info, bytenr, 5299 root->root_key.objectid, level - 1); 5300 if (IS_ERR(next)) 5301 return PTR_ERR(next); 5302 reada = 1; 5303 } 5304 btrfs_tree_lock(next); 5305 5306 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1, 5307 &wc->refs[level - 1], 5308 &wc->flags[level - 1]); 5309 if (ret < 0) 5310 goto out_unlock; 5311 5312 if (unlikely(wc->refs[level - 1] == 0)) { 5313 btrfs_err(fs_info, "Missing references."); 5314 ret = -EIO; 5315 goto out_unlock; 5316 } 5317 *lookup_info = 0; 5318 5319 if (wc->stage == DROP_REFERENCE) { 5320 if (wc->refs[level - 1] > 1) { 5321 need_account = true; 5322 if (level == 1 && 5323 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5324 goto skip; 5325 5326 if (!wc->update_ref || 5327 generation <= root->root_key.offset) 5328 goto skip; 5329 5330 btrfs_node_key_to_cpu(path->nodes[level], &key, 5331 path->slots[level]); 5332 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); 5333 if (ret < 0) 5334 goto skip; 5335 5336 wc->stage = UPDATE_BACKREF; 5337 wc->shared_level = level - 1; 5338 } 5339 } else { 5340 if (level == 1 && 5341 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5342 goto skip; 5343 } 5344 5345 if (!btrfs_buffer_uptodate(next, generation, 0)) { 5346 btrfs_tree_unlock(next); 5347 free_extent_buffer(next); 5348 next = NULL; 5349 *lookup_info = 1; 5350 } 5351 5352 if (!next) { 5353 if (reada && level == 1) 5354 reada_walk_down(trans, root, wc, path); 5355 next = read_tree_block(fs_info, bytenr, &check); 5356 if (IS_ERR(next)) { 5357 return PTR_ERR(next); 5358 } else if (!extent_buffer_uptodate(next)) { 5359 free_extent_buffer(next); 5360 return -EIO; 5361 } 5362 btrfs_tree_lock(next); 5363 } 5364 5365 level--; 5366 ASSERT(level == btrfs_header_level(next)); 5367 if (level != btrfs_header_level(next)) { 5368 btrfs_err(root->fs_info, "mismatched level"); 5369 ret = -EIO; 5370 goto out_unlock; 5371 } 5372 path->nodes[level] = next; 5373 path->slots[level] = 0; 5374 path->locks[level] = BTRFS_WRITE_LOCK; 5375 wc->level = level; 5376 if (wc->level == 1) 5377 wc->reada_slot = 0; 5378 return 0; 5379 skip: 5380 wc->refs[level - 1] = 0; 5381 wc->flags[level - 1] = 0; 5382 if (wc->stage == 
DROP_REFERENCE) { 5383 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 5384 parent = path->nodes[level]->start; 5385 } else { 5386 ASSERT(root->root_key.objectid == 5387 btrfs_header_owner(path->nodes[level])); 5388 if (root->root_key.objectid != 5389 btrfs_header_owner(path->nodes[level])) { 5390 btrfs_err(root->fs_info, 5391 "mismatched block owner"); 5392 ret = -EIO; 5393 goto out_unlock; 5394 } 5395 parent = 0; 5396 } 5397 5398 /* 5399 * If we had a drop_progress we need to verify the refs are set 5400 * as expected. If we find our ref then we know that from here 5401 * on out everything should be correct, and we can clear the 5402 * ->restarted flag. 5403 */ 5404 if (wc->restarted) { 5405 ret = check_ref_exists(trans, root, bytenr, parent, 5406 level - 1); 5407 if (ret < 0) 5408 goto out_unlock; 5409 if (ret == 0) 5410 goto no_delete; 5411 ret = 0; 5412 wc->restarted = 0; 5413 } 5414 5415 /* 5416 * Reloc tree doesn't contribute to qgroup numbers, and we have 5417 * already accounted them at merge time (replace_path), 5418 * thus we could skip expensive subtree trace here. 5419 */ 5420 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && 5421 need_account) { 5422 ret = btrfs_qgroup_trace_subtree(trans, next, 5423 generation, level - 1); 5424 if (ret) { 5425 btrfs_err_rl(fs_info, 5426 "Error %d accounting shared subtree. Quota is out of sync, rescan required.", 5427 ret); 5428 } 5429 } 5430 5431 /* 5432 * We need to update the next key in our walk control so we can 5433 * update the drop_progress key accordingly. We don't care if 5434 * find_next_key doesn't find a key because that means we're at 5435 * the end and are going to clean up now. 5436 */ 5437 wc->drop_level = level; 5438 find_next_key(path, level, &wc->drop_progress); 5439 5440 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr, 5441 fs_info->nodesize, parent); 5442 btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid, 5443 0, false); 5444 ret = btrfs_free_extent(trans, &ref); 5445 if (ret) 5446 goto out_unlock; 5447 } 5448 no_delete: 5449 *lookup_info = 1; 5450 ret = 1; 5451 5452 out_unlock: 5453 btrfs_tree_unlock(next); 5454 free_extent_buffer(next); 5455 5456 return ret; 5457 } 5458 5459 /* 5460 * helper to process tree block while walking up the tree. 5461 * 5462 * when wc->stage == DROP_REFERENCE, this function drops 5463 * reference count on the block. 5464 * 5465 * when wc->stage == UPDATE_BACKREF, this function changes 5466 * wc->stage back to DROP_REFERENCE if we changed wc->stage 5467 * to UPDATE_BACKREF previously while processing the block. 5468 * 5469 * NOTE: return value 1 means we should stop walking up. 5470 */ 5471 static noinline int walk_up_proc(struct btrfs_trans_handle *trans, 5472 struct btrfs_root *root, 5473 struct btrfs_path *path, 5474 struct walk_control *wc) 5475 { 5476 struct btrfs_fs_info *fs_info = root->fs_info; 5477 int ret; 5478 int level = wc->level; 5479 struct extent_buffer *eb = path->nodes[level]; 5480 u64 parent = 0; 5481 5482 if (wc->stage == UPDATE_BACKREF) { 5483 BUG_ON(wc->shared_level < level); 5484 if (level < wc->shared_level) 5485 goto out; 5486 5487 ret = find_next_key(path, level + 1, &wc->update_progress); 5488 if (ret > 0) 5489 wc->update_ref = 0; 5490 5491 wc->stage = DROP_REFERENCE; 5492 wc->shared_level = -1; 5493 path->slots[level] = 0; 5494 5495 /* 5496 * check reference count again if the block isn't locked. 5497 * we should start walking down the tree again if reference 5498 * count is one. 
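 * (The block was left unlocked while the UPDATE_BACKREF stage walked
 * the subtree below it, so the cached reference count may be stale.)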
5499 */ 5500 if (!path->locks[level]) { 5501 BUG_ON(level == 0); 5502 btrfs_tree_lock(eb); 5503 path->locks[level] = BTRFS_WRITE_LOCK; 5504 5505 ret = btrfs_lookup_extent_info(trans, fs_info, 5506 eb->start, level, 1, 5507 &wc->refs[level], 5508 &wc->flags[level]); 5509 if (ret < 0) { 5510 btrfs_tree_unlock_rw(eb, path->locks[level]); 5511 path->locks[level] = 0; 5512 return ret; 5513 } 5514 BUG_ON(wc->refs[level] == 0); 5515 if (wc->refs[level] == 1) { 5516 btrfs_tree_unlock_rw(eb, path->locks[level]); 5517 path->locks[level] = 0; 5518 return 1; 5519 } 5520 } 5521 } 5522 5523 /* wc->stage == DROP_REFERENCE */ 5524 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); 5525 5526 if (wc->refs[level] == 1) { 5527 if (level == 0) { 5528 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5529 ret = btrfs_dec_ref(trans, root, eb, 1); 5530 else 5531 ret = btrfs_dec_ref(trans, root, eb, 0); 5532 BUG_ON(ret); /* -ENOMEM */ 5533 if (is_fstree(root->root_key.objectid)) { 5534 ret = btrfs_qgroup_trace_leaf_items(trans, eb); 5535 if (ret) { 5536 btrfs_err_rl(fs_info, 5537 "error %d accounting leaf items, quota is out of sync, rescan required", 5538 ret); 5539 } 5540 } 5541 } 5542 /* make block locked assertion in btrfs_clean_tree_block happy */ 5543 if (!path->locks[level] && 5544 btrfs_header_generation(eb) == trans->transid) { 5545 btrfs_tree_lock(eb); 5546 path->locks[level] = BTRFS_WRITE_LOCK; 5547 } 5548 btrfs_clean_tree_block(eb); 5549 } 5550 5551 if (eb == root->node) { 5552 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5553 parent = eb->start; 5554 else if (root->root_key.objectid != btrfs_header_owner(eb)) 5555 goto owner_mismatch; 5556 } else { 5557 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 5558 parent = path->nodes[level + 1]->start; 5559 else if (root->root_key.objectid != 5560 btrfs_header_owner(path->nodes[level + 1])) 5561 goto owner_mismatch; 5562 } 5563 5564 btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, 5565 wc->refs[level] == 1); 5566 out: 5567 wc->refs[level] = 0; 5568 wc->flags[level] = 0; 5569 return 0; 5570 5571 owner_mismatch: 5572 btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu", 5573 btrfs_header_owner(eb), root->root_key.objectid); 5574 return -EUCLEAN; 5575 } 5576 5577 static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 5578 struct btrfs_root *root, 5579 struct btrfs_path *path, 5580 struct walk_control *wc) 5581 { 5582 int level = wc->level; 5583 int lookup_info = 1; 5584 int ret; 5585 5586 while (level >= 0) { 5587 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5588 if (ret > 0) 5589 break; 5590 5591 if (level == 0) 5592 break; 5593 5594 if (path->slots[level] >= 5595 btrfs_header_nritems(path->nodes[level])) 5596 break; 5597 5598 ret = do_walk_down(trans, root, path, wc, &lookup_info); 5599 if (ret > 0) { 5600 path->slots[level]++; 5601 continue; 5602 } else if (ret < 0) 5603 return ret; 5604 level = wc->level; 5605 } 5606 return 0; 5607 } 5608 5609 static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 5610 struct btrfs_root *root, 5611 struct btrfs_path *path, 5612 struct walk_control *wc, int max_level) 5613 { 5614 int level = wc->level; 5615 int ret; 5616 5617 path->slots[level] = btrfs_header_nritems(path->nodes[level]); 5618 while (level < max_level && path->nodes[level]) { 5619 wc->level = level; 5620 if (path->slots[level] + 1 < 5621 btrfs_header_nritems(path->nodes[level])) { 5622 path->slots[level]++; 5623 return 0; 5624 } else { 5625 ret = walk_up_proc(trans, 
root, path, wc); 5626 if (ret > 0) 5627 return 0; 5628 if (ret < 0) 5629 return ret; 5630 5631 if (path->locks[level]) { 5632 btrfs_tree_unlock_rw(path->nodes[level], 5633 path->locks[level]); 5634 path->locks[level] = 0; 5635 } 5636 free_extent_buffer(path->nodes[level]); 5637 path->nodes[level] = NULL; 5638 level++; 5639 } 5640 } 5641 return 1; 5642 } 5643 5644 /* 5645 * drop a subvolume tree. 5646 * 5647 * this function traverses the tree freeing any blocks that are only 5648 * referenced by the tree. 5649 * 5650 * when a shared tree block is found, this function decreases its 5651 * reference count by one. if update_ref is true, this function 5652 * also makes sure backrefs for the shared block and all lower level 5653 * blocks are properly updated. 5654 * 5655 * If called with for_reloc == 0, may exit early with -EAGAIN 5656 */ 5657 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) 5658 { 5659 const bool is_reloc_root = (root->root_key.objectid == 5660 BTRFS_TREE_RELOC_OBJECTID); 5661 struct btrfs_fs_info *fs_info = root->fs_info; 5662 struct btrfs_path *path; 5663 struct btrfs_trans_handle *trans; 5664 struct btrfs_root *tree_root = fs_info->tree_root; 5665 struct btrfs_root_item *root_item = &root->root_item; 5666 struct walk_control *wc; 5667 struct btrfs_key key; 5668 int err = 0; 5669 int ret; 5670 int level; 5671 bool root_dropped = false; 5672 bool unfinished_drop = false; 5673 5674 btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid); 5675 5676 path = btrfs_alloc_path(); 5677 if (!path) { 5678 err = -ENOMEM; 5679 goto out; 5680 } 5681 5682 wc = kzalloc(sizeof(*wc), GFP_NOFS); 5683 if (!wc) { 5684 btrfs_free_path(path); 5685 err = -ENOMEM; 5686 goto out; 5687 } 5688 5689 /* 5690 * Use join to avoid potential EINTR from transaction start. See 5691 * wait_reserve_ticket and the whole reservation callchain. 5692 */ 5693 if (for_reloc) 5694 trans = btrfs_join_transaction(tree_root); 5695 else 5696 trans = btrfs_start_transaction(tree_root, 0); 5697 if (IS_ERR(trans)) { 5698 err = PTR_ERR(trans); 5699 goto out_free; 5700 } 5701 5702 err = btrfs_run_delayed_items(trans); 5703 if (err) 5704 goto out_end_trans; 5705 5706 /* 5707 * This will help us catch people modifying the fs tree while we're 5708 * dropping it. It is unsafe to mess with the fs tree while it's being 5709 * dropped as we unlock the root node and parent nodes as we walk down 5710 * the tree, assuming nothing will change. If something does change 5711 * then we'll have stale information and drop references to blocks we've 5712 * already dropped.
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       path->nodes[level]->start,
						       level, 1, &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == btrfs_root_drop_level(root_item))
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
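
	/*
	 * Main drop loop: walk down dropping references, walk back up
	 * freeing blocks, and record how far we got in drop_progress and
	 * drop_level so the drop can resume if we bail out.  Periodically
	 * end the transaction (and optionally yield to the cleaner) so a
	 * huge snapshot drop doesn't pin one transaction forever.
	 */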
	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			wc->drop_level = wc->level;
			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
					      &wc->drop_progress,
					      path->slots[wc->drop_level]);
		}
		btrfs_cpu_key_to_disk(&root_item->drop_progress,
				      &wc->drop_progress);
		btrfs_set_root_drop_level(root_item, wc->drop_level);

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			if (!is_reloc_root)
				btrfs_set_last_root_drop_gen(fs_info, trans->transid);

			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}

			/*
			 * Use join to avoid potential EINTR from transaction
			 * start. See wait_reserve_ticket and the whole
			 * reservation callchain.
			 */
			if (for_reloc)
				trans = btrfs_join_transaction(tree_root);
			else
				trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}

	if (!is_reloc_root) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/*
			 * if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	/*
	 * This subvolume is going to be completely dropped, and won't be
	 * recorded as dirty roots, thus pertrans meta rsv will not be freed at
	 * commit transaction time.  So free it here manually.
	 */
	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
	btrfs_qgroup_free_meta_all_pertrans(root);

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
		btrfs_add_dropped_root(trans, root);
	else
		btrfs_put_root(root);
	root_dropped = true;
out_end_trans:
	if (!is_reloc_root)
		btrfs_set_last_root_drop_gen(fs_info, trans->transid);

	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * If we were an unfinished drop root, check to see if there are any
	 * others pending, and if not clear and wake up any waiters.
	 */
	if (!err && unfinished_drop)
		btrfs_maybe_wake_unfinished_drop(fs_info);

	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later.  This also cleans up roots if we
	 * don't have them in the radix (like when we recover after a power
	 * fail or unmount) so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	return err;
}
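
/*
 * The subtree variant below reuses the same walk_control machinery:
 * wc->flags for the parent block is primed with FULL_BACKREF and a ref
 * count of 1, so lower blocks are dropped against the parent's bytenr
 * rather than a root objectid.
 */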
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * It is only used by relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_write_locked(parent);
	parent_level = btrfs_header_level(parent);
	atomic_inc(&parent->refs);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_write_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space.  Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time.  We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses.  For that, we need to take a reference to the
 * transaction and hold the commit root sem.  We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
	u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!bdev_max_discard_sectors(device->bdev))
		return 0;

	/* Not writable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;
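
	/*
	 * Each pass takes chunk_mutex, finds the first device range with
	 * neither CHUNK_TRIMMED nor CHUNK_ALLOCATED set, clamps it to the
	 * device size, discards it, and marks it CHUNK_TRIMMED so it is
	 * skipped on the next pass.
	 */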
	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		find_first_clear_extent_bit(&device->alloc_state, start,
					    &start, &end,
					    CHUNK_TRIMMED | CHUNK_ALLOCATED);

		/* Check if there are any CHUNK_* bits left */
		if (start > device->total_bytes) {
			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
			btrfs_warn_in_rcu(fs_info,
"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
					  start, end - start + 1,
					  btrfs_dev_name(device),
					  device->total_bytes);
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		/* Ensure we skip the reserved space on each device. */
		start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);

		/*
		 * If find_first_clear_extent_bit finds a range that spans the
		 * end of the device it will set end to -1, in this case it's
		 * up to the caller to trim the value to the size of the
		 * device.
		 */
		end = min(end, device->total_bytes - 1);

		len = end - start + 1;

		/* We didn't find any extents */
		if (!len) {
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len,
					  &bytes);
		if (!ret)
			set_extent_bits(&device->alloc_state, start,
					start + bytes - 1,
					CHUNK_TRIMMED);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
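
/*
 * Note that btrfs_trim_free_extents() only handles space not owned by
 * any block group; space inside block groups is trimmed separately via
 * btrfs_trim_block_group() in btrfs_trim_fs() below.
 */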
/*
 * Trim the whole filesystem by:
 * 1) trimming the free space in each block group
 * 2) trimming the unallocated space on each device
 *
 * This will also continue trimming even if a block group or device encounters
 * an error.  The return value will be the last error, or 0 if nothing bad
 * happens.
 */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_block_group *cache = NULL;
	struct btrfs_device *device;
	u64 group_trimmed;
	u64 range_end = U64_MAX;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 bg_failed = 0;
	u64 dev_failed = 0;
	int bg_ret = 0;
	int dev_ret = 0;
	int ret = 0;

	if (range->start == U64_MAX)
		return -EINVAL;

	/*
	 * Check range overflow if range->len is set.
	 * The default range->len is U64_MAX.
	 */
	if (range->len != U64_MAX &&
	    check_add_overflow(range->start, range->len, &range_end))
		return -EINVAL;

	cache = btrfs_lookup_first_block_group(fs_info, range->start);
	for (; cache; cache = btrfs_next_block_group(cache)) {
		if (cache->start >= range_end) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->start);
		end = min(range_end, cache->start + cache->length);

		if (end - start >= range->minlen) {
			if (!btrfs_block_group_done(cache)) {
				ret = btrfs_cache_block_group(cache, true);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				bg_failed++;
				bg_ret = ret;
				continue;
			}
		}
	}

	if (bg_failed)
		btrfs_warn(fs_info,
			   "failed to trim %llu block group(s), last error %d",
			   bg_failed, bg_ret);

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
			continue;

		ret = btrfs_trim_free_extents(device, &group_trimmed);
		if (ret) {
			dev_failed++;
			dev_ret = ret;
			break;
		}

		trimmed += group_trimmed;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (dev_failed)
		btrfs_warn(fs_info,
			   "failed to trim %llu device(s), last error %d",
			   dev_failed, dev_ret);
	range->len = trimmed;
	if (bg_ret)
		return bg_ret;
	return dev_ret;
}
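
/*
 * Example (userspace, a rough sketch): btrfs_trim_fs() is normally
 * reached via the FITRIM ioctl, which is what fstrim(8) issues:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (__u64)-1,	// whole filesystem
 *		.minlen = 0,		// the ioctl handler raises this to
 *					// the device discard granularity
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);
 *
 * On return, range.len holds the total bytes trimmed, as set at the end
 * of btrfs_trim_fs() above.
 */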