/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
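
/*
 * Illustrative sketch (editor's note, not part of the original file): a
 * metadata path that runs out of room might force a chunk allocation
 * roughly like this.  do_chunk_alloc() and the CHUNK_ALLOC_* values are
 * the ones declared in this file; the surrounding context, including the
 * use of btrfs_get_alloc_profile(), is hypothetical.
 *
 *	u64 alloc_flags = btrfs_get_alloc_profile(root, 0);
 *
 *	ret = do_chunk_alloc(trans, root->fs_info->extent_root, alloc_flags,
 *			     CHUNK_ALLOC_FORCE);
 *	if (ret < 0 && ret != -ENOSPC)
 *		return ret;
 */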

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
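
/*
 * Illustrative sketch (editor's note, not in the original source): the
 * RESERVE_* values above are passed to btrfs_update_reserved_bytes(),
 * declared just below.  An allocate/free pair might look like this;
 * "cache", "num_bytes" and "delalloc" are hypothetical.
 *
 *	ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC,
 *					  delalloc);
 *	...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, delalloc);
 */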

static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op,
			       int no_quota);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins,
				     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve,
				       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * this is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, since their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
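
/*
 * Worked example (editor's illustration, not in the original source): if
 * the caller passes [start=0, end=100) and pinned_extents holds a single
 * pinned range [30, 40], the loop above adds [0, 30) to the free space
 * cache, advances start past the pinned range to 41, and the trailing
 * check then adds [41, 100).  The pinned bytes only become free space
 * once the transaction commits.
 */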

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = -ENOMEM;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto err;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->commit_root_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->caching_ctl = NULL;
		block_group->cached = BTRFS_CACHE_ERROR;
		spin_unlock(&block_group->lock);
	}
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
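
/*
 * Illustrative sketch (editor's note, not in the original source): a caller
 * that needs a fully cached block group can kick off caching and then wait
 * on the control's waitqueue using the block_group_cache_done() helper
 * defined above.  "cache" is hypothetical; get_caching_control() returns
 * NULL if caching has already finished.
 *
 *	struct btrfs_caching_control *ctl;
 *
 *	ret = cache_block_group(cache, 0);
 *	ctl = get_caching_control(cache);
 *	if (ctl) {
 *		wait_event(ctl->wait, block_group_cache_done(cache));
 *		put_caching_control(ctl);
 *	}
 */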

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
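
/*
 * Illustrative sketch (editor's note, not in the original source): reading
 * the effective reference count and flags of a metadata block, including
 * pending delayed-ref modifications.  "trans", "root", "bytenr" and
 * "level" are hypothetical; with skinny metadata, the level is passed as
 * the offset argument.
 *
 *	u64 refs;
 *	u64 flags;
 *
 *	ret = btrfs_lookup_extent_info(trans, root, bytenr, level, 1,
 *				       &refs, &flags);
 *
 * A refs value greater than one here means the block is shared, counting
 * delayed ref modifications that have not been run yet.
 */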

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs is generic, and
 * can be used in all cases the implicit back refs is used. The major
 * shortcoming of the full back refs is its overhead. Every time a tree
 * block gets COWed, we have to update back refs entries for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs is used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */
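
/*
 * Worked example (editor's illustration, not in the original source): a
 * data extent at bytenr 13631488 written by inode 257 at file offset 0 in
 * the default subvolume (root 5) gets an implicit back ref keyed as
 *
 *     (13631488, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent shared through a snapshot's leaf at bytenr P would
 * use a full back ref keyed as (13631488, BTRFS_SHARED_DATA_REF_KEY, P).
 * The bytenr values here are made up for illustration.
 */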

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL, last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
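
/*
 * Worked example (editor's illustration, not in the original source): the
 * right shift by 9 above converts byte units to 512-byte sectors, which is
 * what blkdev_issue_discard() expects.  Discarding len = 1 MiB starting at
 * start = 1 MiB issues sector 2048 with a count of 2048 sectors, since
 * 1048576 >> 9 == 2048.
 */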
1925 */ 1926 ret = 0; 1927 } 1928 kfree(bbio); 1929 } 1930 1931 if (actual_bytes) 1932 *actual_bytes = discarded_bytes; 1933 1934 1935 if (ret == -EOPNOTSUPP) 1936 ret = 0; 1937 return ret; 1938 } 1939 1940 /* Can return -ENOMEM */ 1941 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 1942 struct btrfs_root *root, 1943 u64 bytenr, u64 num_bytes, u64 parent, 1944 u64 root_objectid, u64 owner, u64 offset, 1945 int no_quota) 1946 { 1947 int ret; 1948 struct btrfs_fs_info *fs_info = root->fs_info; 1949 1950 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID && 1951 root_objectid == BTRFS_TREE_LOG_OBJECTID); 1952 1953 if (owner < BTRFS_FIRST_FREE_OBJECTID) { 1954 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, 1955 num_bytes, 1956 parent, root_objectid, (int)owner, 1957 BTRFS_ADD_DELAYED_REF, NULL, no_quota); 1958 } else { 1959 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr, 1960 num_bytes, 1961 parent, root_objectid, owner, offset, 1962 BTRFS_ADD_DELAYED_REF, NULL, no_quota); 1963 } 1964 return ret; 1965 } 1966 1967 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 1968 struct btrfs_root *root, 1969 u64 bytenr, u64 num_bytes, 1970 u64 parent, u64 root_objectid, 1971 u64 owner, u64 offset, int refs_to_add, 1972 int no_quota, 1973 struct btrfs_delayed_extent_op *extent_op) 1974 { 1975 struct btrfs_fs_info *fs_info = root->fs_info; 1976 struct btrfs_path *path; 1977 struct extent_buffer *leaf; 1978 struct btrfs_extent_item *item; 1979 struct btrfs_key key; 1980 u64 refs; 1981 int ret; 1982 enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL; 1983 1984 path = btrfs_alloc_path(); 1985 if (!path) 1986 return -ENOMEM; 1987 1988 if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled) 1989 no_quota = 1; 1990 1991 path->reada = 1; 1992 path->leave_spinning = 1; 1993 /* this will set up the path even if it fails to insert the back ref */ 1994 ret = insert_inline_extent_backref(trans, fs_info->extent_root, path, 1995 bytenr, num_bytes, parent, 1996 root_objectid, owner, offset, 1997 refs_to_add, extent_op); 1998 if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota)) 1999 goto out; 2000 /* 2001 * Ok we were able to insert an inline extent ref and it appears to be 2002 * a new reference, deal with the qgroup accounting. 2003 */ 2004 if (!ret && !no_quota) { 2005 ASSERT(root->fs_info->quota_enabled); 2006 leaf = path->nodes[0]; 2007 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2008 item = btrfs_item_ptr(leaf, path->slots[0], 2009 struct btrfs_extent_item); 2010 if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add) 2011 type = BTRFS_QGROUP_OPER_ADD_SHARED; 2012 btrfs_release_path(path); 2013 2014 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid, 2015 bytenr, num_bytes, type, 0); 2016 goto out; 2017 } 2018 2019 /* 2020 * Ok we had -EAGAIN which means we didn't have space to insert an 2021 * inline extent ref, so just update the reference count and add a 2022 * normal backref.
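 *
 * Illustrative sketch (not part of the original source): an inline ref
 * lives inside the extent item itself, roughly
 *
 *   key (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes)
 *       -> btrfs_extent_item { refs, ... } + inline
 *          BTRFS_EXTENT_DATA_REF_KEY { root, objectid, offset, count }
 *
 * while the keyed fallback below stores the reference as a separate
 * item keyed by (bytenr, BTRFS_EXTENT_DATA_REF_KEY, hash), so it can be
 * inserted even when the extent item has no room left for inline refs.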
2023 */ 2024 leaf = path->nodes[0]; 2025 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2026 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2027 refs = btrfs_extent_refs(leaf, item); 2028 if (refs) 2029 type = BTRFS_QGROUP_OPER_ADD_SHARED; 2030 btrfs_set_extent_refs(leaf, item, refs + refs_to_add); 2031 if (extent_op) 2032 __run_delayed_extent_op(extent_op, leaf, item); 2033 2034 btrfs_mark_buffer_dirty(leaf); 2035 btrfs_release_path(path); 2036 2037 if (!no_quota) { 2038 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid, 2039 bytenr, num_bytes, type, 0); 2040 if (ret) 2041 goto out; 2042 } 2043 2044 path->reada = 1; 2045 path->leave_spinning = 1; 2046 /* now insert the actual backref */ 2047 ret = insert_extent_backref(trans, root->fs_info->extent_root, 2048 path, bytenr, parent, root_objectid, 2049 owner, offset, refs_to_add); 2050 if (ret) 2051 btrfs_abort_transaction(trans, root, ret); 2052 out: 2053 btrfs_free_path(path); 2054 return ret; 2055 } 2056 2057 static int run_delayed_data_ref(struct btrfs_trans_handle *trans, 2058 struct btrfs_root *root, 2059 struct btrfs_delayed_ref_node *node, 2060 struct btrfs_delayed_extent_op *extent_op, 2061 int insert_reserved) 2062 { 2063 int ret = 0; 2064 struct btrfs_delayed_data_ref *ref; 2065 struct btrfs_key ins; 2066 u64 parent = 0; 2067 u64 ref_root = 0; 2068 u64 flags = 0; 2069 2070 ins.objectid = node->bytenr; 2071 ins.offset = node->num_bytes; 2072 ins.type = BTRFS_EXTENT_ITEM_KEY; 2073 2074 ref = btrfs_delayed_node_to_data_ref(node); 2075 trace_run_delayed_data_ref(node, ref, node->action); 2076 2077 if (node->type == BTRFS_SHARED_DATA_REF_KEY) 2078 parent = ref->parent; 2079 ref_root = ref->root; 2080 2081 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { 2082 if (extent_op) 2083 flags |= extent_op->flags_to_set; 2084 ret = alloc_reserved_file_extent(trans, root, 2085 parent, ref_root, flags, 2086 ref->objectid, ref->offset, 2087 &ins, node->ref_mod); 2088 } else if (node->action == BTRFS_ADD_DELAYED_REF) { 2089 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr, 2090 node->num_bytes, parent, 2091 ref_root, ref->objectid, 2092 ref->offset, node->ref_mod, 2093 node->no_quota, extent_op); 2094 } else if (node->action == BTRFS_DROP_DELAYED_REF) { 2095 ret = __btrfs_free_extent(trans, root, node->bytenr, 2096 node->num_bytes, parent, 2097 ref_root, ref->objectid, 2098 ref->offset, node->ref_mod, 2099 extent_op, node->no_quota); 2100 } else { 2101 BUG(); 2102 } 2103 return ret; 2104 } 2105 2106 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, 2107 struct extent_buffer *leaf, 2108 struct btrfs_extent_item *ei) 2109 { 2110 u64 flags = btrfs_extent_flags(leaf, ei); 2111 if (extent_op->update_flags) { 2112 flags |= extent_op->flags_to_set; 2113 btrfs_set_extent_flags(leaf, ei, flags); 2114 } 2115 2116 if (extent_op->update_key) { 2117 struct btrfs_tree_block_info *bi; 2118 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)); 2119 bi = (struct btrfs_tree_block_info *)(ei + 1); 2120 btrfs_set_tree_block_key(leaf, bi, &extent_op->key); 2121 } 2122 } 2123 2124 static int run_delayed_extent_op(struct btrfs_trans_handle *trans, 2125 struct btrfs_root *root, 2126 struct btrfs_delayed_ref_node *node, 2127 struct btrfs_delayed_extent_op *extent_op) 2128 { 2129 struct btrfs_key key; 2130 struct btrfs_path *path; 2131 struct btrfs_extent_item *ei; 2132 struct extent_buffer *leaf; 2133 u32 item_size; 2134 int ret; 2135 int err = 0; 2136 int metadata = !extent_op->is_data; 2137 
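	/*
	 * Key layout sketch (illustrative, not from the original source):
	 * with the SKINNY_METADATA incompat bit, a tree block's extent item
	 * is keyed (bytenr, BTRFS_METADATA_ITEM_KEY, level); without it,
	 * the classic (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes) key is
	 * used, which is why the search below retries with the old-style
	 * key on a miss.
	 */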
2138 if (trans->aborted) 2139 return 0; 2140 2141 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) 2142 metadata = 0; 2143 2144 path = btrfs_alloc_path(); 2145 if (!path) 2146 return -ENOMEM; 2147 2148 key.objectid = node->bytenr; 2149 2150 if (metadata) { 2151 key.type = BTRFS_METADATA_ITEM_KEY; 2152 key.offset = extent_op->level; 2153 } else { 2154 key.type = BTRFS_EXTENT_ITEM_KEY; 2155 key.offset = node->num_bytes; 2156 } 2157 2158 again: 2159 path->reada = 1; 2160 path->leave_spinning = 1; 2161 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, 2162 path, 0, 1); 2163 if (ret < 0) { 2164 err = ret; 2165 goto out; 2166 } 2167 if (ret > 0) { 2168 if (metadata) { 2169 if (path->slots[0] > 0) { 2170 path->slots[0]--; 2171 btrfs_item_key_to_cpu(path->nodes[0], &key, 2172 path->slots[0]); 2173 if (key.objectid == node->bytenr && 2174 key.type == BTRFS_EXTENT_ITEM_KEY && 2175 key.offset == node->num_bytes) 2176 ret = 0; 2177 } 2178 if (ret > 0) { 2179 btrfs_release_path(path); 2180 metadata = 0; 2181 2182 key.objectid = node->bytenr; 2183 key.offset = node->num_bytes; 2184 key.type = BTRFS_EXTENT_ITEM_KEY; 2185 goto again; 2186 } 2187 } else { 2188 err = -EIO; 2189 goto out; 2190 } 2191 } 2192 2193 leaf = path->nodes[0]; 2194 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 2195 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 2196 if (item_size < sizeof(*ei)) { 2197 ret = convert_extent_item_v0(trans, root->fs_info->extent_root, 2198 path, (u64)-1, 0); 2199 if (ret < 0) { 2200 err = ret; 2201 goto out; 2202 } 2203 leaf = path->nodes[0]; 2204 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 2205 } 2206 #endif 2207 BUG_ON(item_size < sizeof(*ei)); 2208 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2209 __run_delayed_extent_op(extent_op, leaf, ei); 2210 2211 btrfs_mark_buffer_dirty(leaf); 2212 out: 2213 btrfs_free_path(path); 2214 return err; 2215 } 2216 2217 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, 2218 struct btrfs_root *root, 2219 struct btrfs_delayed_ref_node *node, 2220 struct btrfs_delayed_extent_op *extent_op, 2221 int insert_reserved) 2222 { 2223 int ret = 0; 2224 struct btrfs_delayed_tree_ref *ref; 2225 struct btrfs_key ins; 2226 u64 parent = 0; 2227 u64 ref_root = 0; 2228 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 2229 SKINNY_METADATA); 2230 2231 ref = btrfs_delayed_node_to_tree_ref(node); 2232 trace_run_delayed_tree_ref(node, ref, node->action); 2233 2234 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) 2235 parent = ref->parent; 2236 ref_root = ref->root; 2237 2238 ins.objectid = node->bytenr; 2239 if (skinny_metadata) { 2240 ins.offset = ref->level; 2241 ins.type = BTRFS_METADATA_ITEM_KEY; 2242 } else { 2243 ins.offset = node->num_bytes; 2244 ins.type = BTRFS_EXTENT_ITEM_KEY; 2245 } 2246 2247 BUG_ON(node->ref_mod != 1); 2248 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { 2249 BUG_ON(!extent_op || !extent_op->update_flags); 2250 ret = alloc_reserved_tree_block(trans, root, 2251 parent, ref_root, 2252 extent_op->flags_to_set, 2253 &extent_op->key, 2254 ref->level, &ins, 2255 node->no_quota); 2256 } else if (node->action == BTRFS_ADD_DELAYED_REF) { 2257 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr, 2258 node->num_bytes, parent, ref_root, 2259 ref->level, 0, 1, node->no_quota, 2260 extent_op); 2261 } else if (node->action == BTRFS_DROP_DELAYED_REF) { 2262 ret = __btrfs_free_extent(trans, root, node->bytenr, 2263 node->num_bytes, parent, ref_root, 2264 ref->level, 0, 1, 
extent_op, 2265 node->no_quota); 2266 } else { 2267 BUG(); 2268 } 2269 return ret; 2270 } 2271 2272 /* helper function to actually process a single delayed ref entry */ 2273 static int run_one_delayed_ref(struct btrfs_trans_handle *trans, 2274 struct btrfs_root *root, 2275 struct btrfs_delayed_ref_node *node, 2276 struct btrfs_delayed_extent_op *extent_op, 2277 int insert_reserved) 2278 { 2279 int ret = 0; 2280 2281 if (trans->aborted) { 2282 if (insert_reserved) 2283 btrfs_pin_extent(root, node->bytenr, 2284 node->num_bytes, 1); 2285 return 0; 2286 } 2287 2288 if (btrfs_delayed_ref_is_head(node)) { 2289 struct btrfs_delayed_ref_head *head; 2290 /* 2291 * we've hit the end of the chain and we were supposed 2292 * to insert this extent into the tree. But it got 2293 * deleted before we ever needed to insert it, so all 2294 * we have to do is clean up the accounting 2295 */ 2296 BUG_ON(extent_op); 2297 head = btrfs_delayed_node_to_head(node); 2298 trace_run_delayed_ref_head(node, head, node->action); 2299 2300 if (insert_reserved) { 2301 btrfs_pin_extent(root, node->bytenr, 2302 node->num_bytes, 1); 2303 if (head->is_data) { 2304 ret = btrfs_del_csums(trans, root, 2305 node->bytenr, 2306 node->num_bytes); 2307 } 2308 } 2309 return ret; 2310 } 2311 2312 if (node->type == BTRFS_TREE_BLOCK_REF_KEY || 2313 node->type == BTRFS_SHARED_BLOCK_REF_KEY) 2314 ret = run_delayed_tree_ref(trans, root, node, extent_op, 2315 insert_reserved); 2316 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || 2317 node->type == BTRFS_SHARED_DATA_REF_KEY) 2318 ret = run_delayed_data_ref(trans, root, node, extent_op, 2319 insert_reserved); 2320 else 2321 BUG(); 2322 return ret; 2323 } 2324 2325 static noinline struct btrfs_delayed_ref_node * 2326 select_delayed_ref(struct btrfs_delayed_ref_head *head) 2327 { 2328 struct rb_node *node; 2329 struct btrfs_delayed_ref_node *ref, *last = NULL; 2330 2331 /* 2332 * select delayed ref of type BTRFS_ADD_DELAYED_REF first. 2333 * this prevents ref count from going down to zero when 2334 * there still are pending delayed refs. 2335 */ 2336 node = rb_first(&head->ref_root); 2337 while (node) { 2338 ref = rb_entry(node, struct btrfs_delayed_ref_node, 2339 rb_node); 2340 if (ref->action == BTRFS_ADD_DELAYED_REF) 2341 return ref; 2342 else if (last == NULL) 2343 last = ref; 2344 node = rb_next(node); 2345 } 2346 return last; 2347 } 2348 2349 /* 2350 * Returns 0 on success or if called with an already aborted transaction. 2351 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2352 */ 2353 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2354 struct btrfs_root *root, 2355 unsigned long nr) 2356 { 2357 struct btrfs_delayed_ref_root *delayed_refs; 2358 struct btrfs_delayed_ref_node *ref; 2359 struct btrfs_delayed_ref_head *locked_ref = NULL; 2360 struct btrfs_delayed_extent_op *extent_op; 2361 struct btrfs_fs_info *fs_info = root->fs_info; 2362 ktime_t start = ktime_get(); 2363 int ret; 2364 unsigned long count = 0; 2365 unsigned long actual_count = 0; 2366 int must_insert_reserved = 0; 2367 2368 delayed_refs = &trans->transaction->delayed_refs; 2369 while (1) { 2370 if (!locked_ref) { 2371 if (count >= nr) 2372 break; 2373 2374 spin_lock(&delayed_refs->lock); 2375 locked_ref = btrfs_select_ref_head(trans); 2376 if (!locked_ref) { 2377 spin_unlock(&delayed_refs->lock); 2378 break; 2379 } 2380 2381 /* grab the lock that says we are going to process 2382 * all the refs for this head */ 2383 ret = btrfs_delayed_ref_lock(trans, locked_ref); 2384 spin_unlock(&delayed_refs->lock); 2385 /* 2386 * we may have dropped the spin lock to get the head 2387 * mutex lock, and that might have given someone else 2388 * time to free the head. If that's true, it has been 2389 * removed from our list and we can move on. 2390 */ 2391 if (ret == -EAGAIN) { 2392 locked_ref = NULL; 2393 count++; 2394 continue; 2395 } 2396 } 2397 2398 /* 2399 * We need to try and merge add/drops of the same ref since we 2400 * can run into issues with relocate dropping the implicit ref 2401 * and then it being added back again before the drop can 2402 * finish. If we merged anything we need to re-loop so we can 2403 * get a good ref. 2404 */ 2405 spin_lock(&locked_ref->lock); 2406 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs, 2407 locked_ref); 2408 2409 /* 2410 * locked_ref is the head node, so we have to go one 2411 * node back for any delayed ref updates 2412 */ 2413 ref = select_delayed_ref(locked_ref); 2414 2415 if (ref && ref->seq && 2416 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { 2417 spin_unlock(&locked_ref->lock); 2418 btrfs_delayed_ref_unlock(locked_ref); 2419 spin_lock(&delayed_refs->lock); 2420 locked_ref->processing = 0; 2421 delayed_refs->num_heads_ready++; 2422 spin_unlock(&delayed_refs->lock); 2423 locked_ref = NULL; 2424 cond_resched(); 2425 count++; 2426 continue; 2427 } 2428 2429 /* 2430 * record the must insert reserved flag before we 2431 * drop the spin lock. 2432 */ 2433 must_insert_reserved = locked_ref->must_insert_reserved; 2434 locked_ref->must_insert_reserved = 0; 2435 2436 extent_op = locked_ref->extent_op; 2437 locked_ref->extent_op = NULL; 2438 2439 if (!ref) { 2440 2441 2442 /* All delayed refs have been processed, go ahead 2443 * and send the head node to run_one_delayed_ref, 2444 * so that any accounting fixes can happen 2445 */ 2446 ref = &locked_ref->node; 2447 2448 if (extent_op && must_insert_reserved) { 2449 btrfs_free_delayed_extent_op(extent_op); 2450 extent_op = NULL; 2451 } 2452 2453 if (extent_op) { 2454 spin_unlock(&locked_ref->lock); 2455 ret = run_delayed_extent_op(trans, root, 2456 ref, extent_op); 2457 btrfs_free_delayed_extent_op(extent_op); 2458 2459 if (ret) { 2460 /* 2461 * Need to reset must_insert_reserved if 2462 * there was an error so the abort stuff 2463 * can clean up the reserved space 2464 * properly.
2465 */ 2466 if (must_insert_reserved) 2467 locked_ref->must_insert_reserved = 1; 2468 locked_ref->processing = 0; 2469 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret); 2470 btrfs_delayed_ref_unlock(locked_ref); 2471 return ret; 2472 } 2473 continue; 2474 } 2475 2476 /* 2477 * Need to drop our head ref lock and re-acquire the 2478 * delayed ref lock and then re-check to make sure 2479 * nobody got added. 2480 */ 2481 spin_unlock(&locked_ref->lock); 2482 spin_lock(&delayed_refs->lock); 2483 spin_lock(&locked_ref->lock); 2484 if (rb_first(&locked_ref->ref_root) || 2485 locked_ref->extent_op) { 2486 spin_unlock(&locked_ref->lock); 2487 spin_unlock(&delayed_refs->lock); 2488 continue; 2489 } 2490 ref->in_tree = 0; 2491 delayed_refs->num_heads--; 2492 rb_erase(&locked_ref->href_node, 2493 &delayed_refs->href_root); 2494 spin_unlock(&delayed_refs->lock); 2495 } else { 2496 actual_count++; 2497 ref->in_tree = 0; 2498 rb_erase(&ref->rb_node, &locked_ref->ref_root); 2499 } 2500 atomic_dec(&delayed_refs->num_entries); 2501 2502 if (!btrfs_delayed_ref_is_head(ref)) { 2503 /* 2504 * when we play the delayed ref, also correct the 2505 * ref_mod on head 2506 */ 2507 switch (ref->action) { 2508 case BTRFS_ADD_DELAYED_REF: 2509 case BTRFS_ADD_DELAYED_EXTENT: 2510 locked_ref->node.ref_mod -= ref->ref_mod; 2511 break; 2512 case BTRFS_DROP_DELAYED_REF: 2513 locked_ref->node.ref_mod += ref->ref_mod; 2514 break; 2515 default: 2516 WARN_ON(1); 2517 } 2518 } 2519 spin_unlock(&locked_ref->lock); 2520 2521 ret = run_one_delayed_ref(trans, root, ref, extent_op, 2522 must_insert_reserved); 2523 2524 btrfs_free_delayed_extent_op(extent_op); 2525 if (ret) { 2526 locked_ref->processing = 0; 2527 btrfs_delayed_ref_unlock(locked_ref); 2528 btrfs_put_delayed_ref(ref); 2529 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret); 2530 return ret; 2531 } 2532 2533 /* 2534 * If this node is a head, that means all the refs in this head 2535 * have been dealt with, and we will pick the next head to deal 2536 * with, so we must unlock the head and drop it from the cluster 2537 * list before we release it. 2538 */ 2539 if (btrfs_delayed_ref_is_head(ref)) { 2540 btrfs_delayed_ref_unlock(locked_ref); 2541 locked_ref = NULL; 2542 } 2543 btrfs_put_delayed_ref(ref); 2544 count++; 2545 cond_resched(); 2546 } 2547 2548 /* 2549 * We don't want to include ref heads since we can have empty ref heads 2550 * and those will drastically skew our runtime down since we just do 2551 * accounting, no actual extent tree updates. 2552 */ 2553 if (actual_count > 0) { 2554 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start)); 2555 u64 avg; 2556 2557 /* 2558 * We weigh the current average higher than our current runtime 2559 * to avoid large swings in the average. 2560 */ 2561 spin_lock(&delayed_refs->lock); 2562 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime; 2563 avg = div64_u64(avg, 4); 2564 fs_info->avg_delayed_ref_runtime = avg; 2565 spin_unlock(&delayed_refs->lock); 2566 } 2567 return 0; 2568 } 2569 2570 #ifdef SCRAMBLE_DELAYED_REFS 2571 /* 2572 * Normally delayed refs get processed in ascending bytenr order. This 2573 * correlates in most cases to the order added.
To expose dependencies on this 2574 * order, we start to process the tree in the middle instead of the beginning. 2575 */ 2576 static u64 find_middle(struct rb_root *root) 2577 { 2578 struct rb_node *n = root->rb_node; 2579 struct btrfs_delayed_ref_node *entry; 2580 int alt = 1; 2581 u64 middle; 2582 u64 first = 0, last = 0; 2583 2584 n = rb_first(root); 2585 if (n) { 2586 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2587 first = entry->bytenr; 2588 } 2589 n = rb_last(root); 2590 if (n) { 2591 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2592 last = entry->bytenr; 2593 } 2594 n = root->rb_node; 2595 2596 while (n) { 2597 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); 2598 WARN_ON(!entry->in_tree); 2599 2600 middle = entry->bytenr; 2601 2602 if (alt) 2603 n = n->rb_left; 2604 else 2605 n = n->rb_right; 2606 2607 alt = 1 - alt; 2608 } 2609 return middle; 2610 } 2611 #endif 2612 2613 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads) 2614 { 2615 u64 num_bytes; 2616 2617 num_bytes = heads * (sizeof(struct btrfs_extent_item) + 2618 sizeof(struct btrfs_extent_inline_ref)); 2619 if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) 2620 num_bytes += heads * sizeof(struct btrfs_tree_block_info); 2621 2622 /* 2623 * We don't ever fill up leaves all the way so multiply by 2 just to be 2624 * closer to what we're really going to want to use. 2625 */ 2626 return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root)); 2627 } 2628 2629 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, 2630 struct btrfs_root *root) 2631 { 2632 struct btrfs_block_rsv *global_rsv; 2633 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready; 2634 u64 num_bytes; 2635 int ret = 0; 2636 2637 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 2638 num_heads = heads_to_leaves(root, num_heads); 2639 if (num_heads > 1) 2640 num_bytes += (num_heads - 1) * root->nodesize; 2641 num_bytes <<= 1; 2642 global_rsv = &root->fs_info->global_block_rsv; 2643 2644 /* 2645 * If we can't allocate any more chunks let's make sure we have _lots_ of 2646 * wiggle room since running delayed refs can create more delayed refs.
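 *
 * Worked example (assumed sizes, illustrative only): with skinny
 * metadata each head costs ~33 bytes of leaf space here, so 1000 ready
 * heads come to ~33000 bytes, i.e. roughly 2 leaves of ~16K of usable
 * leaf data; we then charge one tree path plus (leaves - 1) nodes,
 * double it, and double once more below when no new chunks can be
 * allocated.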
2647 */ 2648 if (global_rsv->space_info->full) 2649 num_bytes <<= 1; 2650 2651 spin_lock(&global_rsv->lock); 2652 if (global_rsv->reserved <= num_bytes) 2653 ret = 1; 2654 spin_unlock(&global_rsv->lock); 2655 return ret; 2656 } 2657 2658 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 2659 struct btrfs_root *root) 2660 { 2661 struct btrfs_fs_info *fs_info = root->fs_info; 2662 u64 num_entries = 2663 atomic_read(&trans->transaction->delayed_refs.num_entries); 2664 u64 avg_runtime; 2665 u64 val; 2666 2667 smp_mb(); 2668 avg_runtime = fs_info->avg_delayed_ref_runtime; 2669 val = num_entries * avg_runtime; 2670 if (num_entries * avg_runtime >= NSEC_PER_SEC) 2671 return 1; 2672 if (val >= NSEC_PER_SEC / 2) 2673 return 2; 2674 2675 return btrfs_check_space_for_delayed_refs(trans, root); 2676 } 2677 2678 struct async_delayed_refs { 2679 struct btrfs_root *root; 2680 int count; 2681 int error; 2682 int sync; 2683 struct completion wait; 2684 struct btrfs_work work; 2685 }; 2686 2687 static void delayed_ref_async_start(struct btrfs_work *work) 2688 { 2689 struct async_delayed_refs *async; 2690 struct btrfs_trans_handle *trans; 2691 int ret; 2692 2693 async = container_of(work, struct async_delayed_refs, work); 2694 2695 trans = btrfs_join_transaction(async->root); 2696 if (IS_ERR(trans)) { 2697 async->error = PTR_ERR(trans); 2698 goto done; 2699 } 2700 2701 /* 2702 * trans->sync means that when we call end_transaction, we won't 2703 * wait on delayed refs 2704 */ 2705 trans->sync = true; 2706 ret = btrfs_run_delayed_refs(trans, async->root, async->count); 2707 if (ret) 2708 async->error = ret; 2709 2710 ret = btrfs_end_transaction(trans, async->root); 2711 if (ret && !async->error) 2712 async->error = ret; 2713 done: 2714 if (async->sync) 2715 complete(&async->wait); 2716 else 2717 kfree(async); 2718 } 2719 2720 int btrfs_async_run_delayed_refs(struct btrfs_root *root, 2721 unsigned long count, int wait) 2722 { 2723 struct async_delayed_refs *async; 2724 int ret; 2725 2726 async = kmalloc(sizeof(*async), GFP_NOFS); 2727 if (!async) 2728 return -ENOMEM; 2729 2730 async->root = root->fs_info->tree_root; 2731 async->count = count; 2732 async->error = 0; 2733 if (wait) 2734 async->sync = 1; 2735 else 2736 async->sync = 0; 2737 init_completion(&async->wait); 2738 2739 btrfs_init_work(&async->work, btrfs_extent_refs_helper, 2740 delayed_ref_async_start, NULL, NULL); 2741 2742 btrfs_queue_work(root->fs_info->extent_workers, &async->work); 2743 2744 if (wait) { 2745 wait_for_completion(&async->wait); 2746 ret = async->error; 2747 kfree(async); 2748 return ret; 2749 } 2750 return 0; 2751 } 2752 2753 /* 2754 * this starts processing the delayed reference count updates and 2755 * extent insertions we have queued up so far. count can be 2756 * 0, which means to process everything in the tree at the start 2757 * of the run (but not newly added entries), or it can be some target 2758 * number you'd like to process.
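 *
 * Usage sketch (illustrative): btrfs_run_delayed_refs(trans, root, 0)
 * sizes the pass from the current number of entries, while a count of
 * (unsigned long)-1 keeps looping below until the ref head tree is
 * empty, picking up newly added heads as well.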
2759 * 2760 * Returns 0 on success or if called with an aborted transaction 2761 * Returns <0 on error and aborts the transaction 2762 */ 2763 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2764 struct btrfs_root *root, unsigned long count) 2765 { 2766 struct rb_node *node; 2767 struct btrfs_delayed_ref_root *delayed_refs; 2768 struct btrfs_delayed_ref_head *head; 2769 int ret; 2770 int run_all = count == (unsigned long)-1; 2771 int run_most = 0; 2772 2773 /* We'll clean this up in btrfs_cleanup_transaction */ 2774 if (trans->aborted) 2775 return 0; 2776 2777 if (root == root->fs_info->extent_root) 2778 root = root->fs_info->tree_root; 2779 2780 delayed_refs = &trans->transaction->delayed_refs; 2781 if (count == 0) { 2782 count = atomic_read(&delayed_refs->num_entries) * 2; 2783 run_most = 1; 2784 } 2785 2786 again: 2787 #ifdef SCRAMBLE_DELAYED_REFS 2788 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2789 #endif 2790 ret = __btrfs_run_delayed_refs(trans, root, count); 2791 if (ret < 0) { 2792 btrfs_abort_transaction(trans, root, ret); 2793 return ret; 2794 } 2795 2796 if (run_all) { 2797 if (!list_empty(&trans->new_bgs)) 2798 btrfs_create_pending_block_groups(trans, root); 2799 2800 spin_lock(&delayed_refs->lock); 2801 node = rb_first(&delayed_refs->href_root); 2802 if (!node) { 2803 spin_unlock(&delayed_refs->lock); 2804 goto out; 2805 } 2806 count = (unsigned long)-1; 2807 2808 while (node) { 2809 head = rb_entry(node, struct btrfs_delayed_ref_head, 2810 href_node); 2811 if (btrfs_delayed_ref_is_head(&head->node)) { 2812 struct btrfs_delayed_ref_node *ref; 2813 2814 ref = &head->node; 2815 atomic_inc(&ref->refs); 2816 2817 spin_unlock(&delayed_refs->lock); 2818 /* 2819 * Mutex was contended, block until it's 2820 * released and try again 2821 */ 2822 mutex_lock(&head->mutex); 2823 mutex_unlock(&head->mutex); 2824 2825 btrfs_put_delayed_ref(ref); 2826 cond_resched(); 2827 goto again; 2828 } else { 2829 WARN_ON(1); 2830 } 2831 node = rb_next(node); 2832 } 2833 spin_unlock(&delayed_refs->lock); 2834 cond_resched(); 2835 goto again; 2836 } 2837 out: 2838 ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info); 2839 if (ret) 2840 return ret; 2841 assert_qgroups_uptodate(trans); 2842 return 0; 2843 } 2844 2845 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2846 struct btrfs_root *root, 2847 u64 bytenr, u64 num_bytes, u64 flags, 2848 int level, int is_data) 2849 { 2850 struct btrfs_delayed_extent_op *extent_op; 2851 int ret; 2852 2853 extent_op = btrfs_alloc_delayed_extent_op(); 2854 if (!extent_op) 2855 return -ENOMEM; 2856 2857 extent_op->flags_to_set = flags; 2858 extent_op->update_flags = 1; 2859 extent_op->update_key = 0; 2860 extent_op->is_data = is_data ? 
1 : 0; 2861 extent_op->level = level; 2862 2863 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr, 2864 num_bytes, extent_op); 2865 if (ret) 2866 btrfs_free_delayed_extent_op(extent_op); 2867 return ret; 2868 } 2869 2870 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, 2871 struct btrfs_root *root, 2872 struct btrfs_path *path, 2873 u64 objectid, u64 offset, u64 bytenr) 2874 { 2875 struct btrfs_delayed_ref_head *head; 2876 struct btrfs_delayed_ref_node *ref; 2877 struct btrfs_delayed_data_ref *data_ref; 2878 struct btrfs_delayed_ref_root *delayed_refs; 2879 struct rb_node *node; 2880 int ret = 0; 2881 2882 delayed_refs = &trans->transaction->delayed_refs; 2883 spin_lock(&delayed_refs->lock); 2884 head = btrfs_find_delayed_ref_head(trans, bytenr); 2885 if (!head) { 2886 spin_unlock(&delayed_refs->lock); 2887 return 0; 2888 } 2889 2890 if (!mutex_trylock(&head->mutex)) { 2891 atomic_inc(&head->node.refs); 2892 spin_unlock(&delayed_refs->lock); 2893 2894 btrfs_release_path(path); 2895 2896 /* 2897 * Mutex was contended, block until it's released and let 2898 * caller try again 2899 */ 2900 mutex_lock(&head->mutex); 2901 mutex_unlock(&head->mutex); 2902 btrfs_put_delayed_ref(&head->node); 2903 return -EAGAIN; 2904 } 2905 spin_unlock(&delayed_refs->lock); 2906 2907 spin_lock(&head->lock); 2908 node = rb_first(&head->ref_root); 2909 while (node) { 2910 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 2911 node = rb_next(node); 2912 2913 /* If it's a shared ref we know a cross reference exists */ 2914 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { 2915 ret = 1; 2916 break; 2917 } 2918 2919 data_ref = btrfs_delayed_node_to_data_ref(ref); 2920 2921 /* 2922 * If our ref doesn't match the one we're currently looking at 2923 * then we have a cross reference. 
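 *
 * E.g. (illustrative): if a snapshot root still holds a pending ref to
 * this extent, data_ref->root differs from our root_key.objectid, so we
 * report a cross reference and the caller has to fall back to COW.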
2924 */ 2925 if (data_ref->root != root->root_key.objectid || 2926 data_ref->objectid != objectid || 2927 data_ref->offset != offset) { 2928 ret = 1; 2929 break; 2930 } 2931 } 2932 spin_unlock(&head->lock); 2933 mutex_unlock(&head->mutex); 2934 return ret; 2935 } 2936 2937 static noinline int check_committed_ref(struct btrfs_trans_handle *trans, 2938 struct btrfs_root *root, 2939 struct btrfs_path *path, 2940 u64 objectid, u64 offset, u64 bytenr) 2941 { 2942 struct btrfs_root *extent_root = root->fs_info->extent_root; 2943 struct extent_buffer *leaf; 2944 struct btrfs_extent_data_ref *ref; 2945 struct btrfs_extent_inline_ref *iref; 2946 struct btrfs_extent_item *ei; 2947 struct btrfs_key key; 2948 u32 item_size; 2949 int ret; 2950 2951 key.objectid = bytenr; 2952 key.offset = (u64)-1; 2953 key.type = BTRFS_EXTENT_ITEM_KEY; 2954 2955 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2956 if (ret < 0) 2957 goto out; 2958 BUG_ON(ret == 0); /* Corruption */ 2959 2960 ret = -ENOENT; 2961 if (path->slots[0] == 0) 2962 goto out; 2963 2964 path->slots[0]--; 2965 leaf = path->nodes[0]; 2966 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2967 2968 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) 2969 goto out; 2970 2971 ret = 1; 2972 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 2973 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 2974 if (item_size < sizeof(*ei)) { 2975 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 2976 goto out; 2977 } 2978 #endif 2979 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2980 2981 if (item_size != sizeof(*ei) + 2982 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) 2983 goto out; 2984 2985 if (btrfs_extent_generation(leaf, ei) <= 2986 btrfs_root_last_snapshot(&root->root_item)) 2987 goto out; 2988 2989 iref = (struct btrfs_extent_inline_ref *)(ei + 1); 2990 if (btrfs_extent_inline_ref_type(leaf, iref) != 2991 BTRFS_EXTENT_DATA_REF_KEY) 2992 goto out; 2993 2994 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 2995 if (btrfs_extent_refs(leaf, ei) != 2996 btrfs_extent_data_ref_count(leaf, ref) || 2997 btrfs_extent_data_ref_root(leaf, ref) != 2998 root->root_key.objectid || 2999 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || 3000 btrfs_extent_data_ref_offset(leaf, ref) != offset) 3001 goto out; 3002 3003 ret = 0; 3004 out: 3005 return ret; 3006 } 3007 3008 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 3009 struct btrfs_root *root, 3010 u64 objectid, u64 offset, u64 bytenr) 3011 { 3012 struct btrfs_path *path; 3013 int ret; 3014 int ret2; 3015 3016 path = btrfs_alloc_path(); 3017 if (!path) 3018 return -ENOENT; 3019 3020 do { 3021 ret = check_committed_ref(trans, root, path, objectid, 3022 offset, bytenr); 3023 if (ret && ret != -ENOENT) 3024 goto out; 3025 3026 ret2 = check_delayed_ref(trans, root, path, objectid, 3027 offset, bytenr); 3028 } while (ret2 == -EAGAIN); 3029 3030 if (ret2 && ret2 != -ENOENT) { 3031 ret = ret2; 3032 goto out; 3033 } 3034 3035 if (ret != -ENOENT || ret2 != -ENOENT) 3036 ret = 0; 3037 out: 3038 btrfs_free_path(path); 3039 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 3040 WARN_ON(ret > 0); 3041 return ret; 3042 } 3043 3044 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, 3045 struct btrfs_root *root, 3046 struct extent_buffer *buf, 3047 int full_backref, int inc) 3048 { 3049 u64 bytenr; 3050 u64 num_bytes; 3051 u64 parent; 3052 u64 ref_root; 3053 u32 nritems; 3054 struct btrfs_key key; 3055 struct btrfs_file_extent_item 
*fi; 3056 int i; 3057 int level; 3058 int ret = 0; 3059 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *, 3060 u64, u64, u64, u64, u64, u64, int); 3061 3062 3063 if (btrfs_test_is_dummy_root(root)) 3064 return 0; 3065 3066 ref_root = btrfs_header_owner(buf); 3067 nritems = btrfs_header_nritems(buf); 3068 level = btrfs_header_level(buf); 3069 3070 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0) 3071 return 0; 3072 3073 if (inc) 3074 process_func = btrfs_inc_extent_ref; 3075 else 3076 process_func = btrfs_free_extent; 3077 3078 if (full_backref) 3079 parent = buf->start; 3080 else 3081 parent = 0; 3082 3083 for (i = 0; i < nritems; i++) { 3084 if (level == 0) { 3085 btrfs_item_key_to_cpu(buf, &key, i); 3086 if (key.type != BTRFS_EXTENT_DATA_KEY) 3087 continue; 3088 fi = btrfs_item_ptr(buf, i, 3089 struct btrfs_file_extent_item); 3090 if (btrfs_file_extent_type(buf, fi) == 3091 BTRFS_FILE_EXTENT_INLINE) 3092 continue; 3093 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 3094 if (bytenr == 0) 3095 continue; 3096 3097 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); 3098 key.offset -= btrfs_file_extent_offset(buf, fi); 3099 ret = process_func(trans, root, bytenr, num_bytes, 3100 parent, ref_root, key.objectid, 3101 key.offset, 1); 3102 if (ret) 3103 goto fail; 3104 } else { 3105 bytenr = btrfs_node_blockptr(buf, i); 3106 num_bytes = root->nodesize; 3107 ret = process_func(trans, root, bytenr, num_bytes, 3108 parent, ref_root, level - 1, 0, 3109 1); 3110 if (ret) 3111 goto fail; 3112 } 3113 } 3114 return 0; 3115 fail: 3116 return ret; 3117 } 3118 3119 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3120 struct extent_buffer *buf, int full_backref) 3121 { 3122 return __btrfs_mod_ref(trans, root, buf, full_backref, 1); 3123 } 3124 3125 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3126 struct extent_buffer *buf, int full_backref) 3127 { 3128 return __btrfs_mod_ref(trans, root, buf, full_backref, 0); 3129 } 3130 3131 static int write_one_cache_group(struct btrfs_trans_handle *trans, 3132 struct btrfs_root *root, 3133 struct btrfs_path *path, 3134 struct btrfs_block_group_cache *cache) 3135 { 3136 int ret; 3137 struct btrfs_root *extent_root = root->fs_info->extent_root; 3138 unsigned long bi; 3139 struct extent_buffer *leaf; 3140 3141 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 3142 if (ret) { 3143 if (ret > 0) 3144 ret = -ENOENT; 3145 goto fail; 3146 } 3147 3148 leaf = path->nodes[0]; 3149 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 3150 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); 3151 btrfs_mark_buffer_dirty(leaf); 3152 btrfs_release_path(path); 3153 fail: 3154 if (ret) 3155 btrfs_abort_transaction(trans, root, ret); 3156 return ret; 3157 3158 } 3159 3160 static struct btrfs_block_group_cache * 3161 next_block_group(struct btrfs_root *root, 3162 struct btrfs_block_group_cache *cache) 3163 { 3164 struct rb_node *node; 3165 3166 spin_lock(&root->fs_info->block_group_cache_lock); 3167 3168 /* If our block group was removed, we need a full search. 
*/ 3169 if (RB_EMPTY_NODE(&cache->cache_node)) { 3170 const u64 next_bytenr = cache->key.objectid + cache->key.offset; 3171 3172 spin_unlock(&root->fs_info->block_group_cache_lock); 3173 btrfs_put_block_group(cache); 3174 cache = btrfs_lookup_first_block_group(root->fs_info, 3175 next_bytenr); 3176 return cache; 3177 } 3178 node = rb_next(&cache->cache_node); 3179 btrfs_put_block_group(cache); 3180 if (node) { 3181 cache = rb_entry(node, struct btrfs_block_group_cache, 3182 cache_node); 3183 btrfs_get_block_group(cache); 3184 } else 3185 cache = NULL; 3186 spin_unlock(&root->fs_info->block_group_cache_lock); 3187 return cache; 3188 } 3189 3190 static int cache_save_setup(struct btrfs_block_group_cache *block_group, 3191 struct btrfs_trans_handle *trans, 3192 struct btrfs_path *path) 3193 { 3194 struct btrfs_root *root = block_group->fs_info->tree_root; 3195 struct inode *inode = NULL; 3196 u64 alloc_hint = 0; 3197 int dcs = BTRFS_DC_ERROR; 3198 int num_pages = 0; 3199 int retries = 0; 3200 int ret = 0; 3201 3202 /* 3203 * If this block group is smaller than 100 megs, don't bother caching 3204 * the block group. 3205 */ 3206 if (block_group->key.offset < (100 * 1024 * 1024)) { 3207 spin_lock(&block_group->lock); 3208 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 3209 spin_unlock(&block_group->lock); 3210 return 0; 3211 } 3212 3213 again: 3214 inode = lookup_free_space_inode(root, block_group, path); 3215 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 3216 ret = PTR_ERR(inode); 3217 btrfs_release_path(path); 3218 goto out; 3219 } 3220 3221 if (IS_ERR(inode)) { 3222 BUG_ON(retries); 3223 retries++; 3224 3225 if (block_group->ro) 3226 goto out_free; 3227 3228 ret = create_free_space_inode(root, trans, block_group, path); 3229 if (ret) 3230 goto out_free; 3231 goto again; 3232 } 3233 3234 /* We've already set up this transaction, go ahead and exit */ 3235 if (block_group->cache_generation == trans->transid && 3236 i_size_read(inode)) { 3237 dcs = BTRFS_DC_SETUP; 3238 goto out_put; 3239 } 3240 3241 /* 3242 * We want to set the generation to 0, that way if anything goes wrong 3243 * from here on out we know not to trust this cache when we load up next 3244 * time. 3245 */ 3246 BTRFS_I(inode)->generation = 0; 3247 ret = btrfs_update_inode(trans, root, inode); 3248 WARN_ON(ret); 3249 3250 if (i_size_read(inode) > 0) { 3251 ret = btrfs_check_trunc_cache_free_space(root, 3252 &root->fs_info->global_block_rsv); 3253 if (ret) 3254 goto out_put; 3255 3256 ret = btrfs_truncate_free_space_cache(root, trans, inode); 3257 if (ret) 3258 goto out_put; 3259 } 3260 3261 spin_lock(&block_group->lock); 3262 if (block_group->cached != BTRFS_CACHE_FINISHED || 3263 !btrfs_test_opt(root, SPACE_CACHE) || 3264 block_group->delalloc_bytes) { 3265 /* 3266 * don't bother trying to write stuff out _if_ 3267 * a) we're not cached, 3268 * b) we were mounted with the nospace_cache option. 3269 */ 3270 dcs = BTRFS_DC_WRITTEN; 3271 spin_unlock(&block_group->lock); 3272 goto out_put; 3273 } 3274 spin_unlock(&block_group->lock); 3275 3276 /* 3277 * Try to preallocate enough space based on how big the block group is. 3278 * Keep in mind this has to include any pinned space which could end up 3279 * taking up quite a bit since it's not folded into the other space 3280 * cache.
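 *
 * Worked example (assuming 4K pages, illustrative only): a 1GiB block
 * group gives num_pages = 1GiB / 256MiB = 4, then 4 * 16 = 64 pages,
 * i.e. 64 * 4096 = 256KiB preallocated for the free space cache file.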
3281 */ 3282 num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024); 3283 if (!num_pages) 3284 num_pages = 1; 3285 3286 num_pages *= 16; 3287 num_pages *= PAGE_CACHE_SIZE; 3288 3289 ret = btrfs_check_data_free_space(inode, num_pages); 3290 if (ret) 3291 goto out_put; 3292 3293 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, 3294 num_pages, num_pages, 3295 &alloc_hint); 3296 if (!ret) 3297 dcs = BTRFS_DC_SETUP; 3298 btrfs_free_reserved_data_space(inode, num_pages); 3299 3300 out_put: 3301 iput(inode); 3302 out_free: 3303 btrfs_release_path(path); 3304 out: 3305 spin_lock(&block_group->lock); 3306 if (!ret && dcs == BTRFS_DC_SETUP) 3307 block_group->cache_generation = trans->transid; 3308 block_group->disk_cache_state = dcs; 3309 spin_unlock(&block_group->lock); 3310 3311 return ret; 3312 } 3313 3314 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 3315 struct btrfs_root *root) 3316 { 3317 struct btrfs_block_group_cache *cache; 3318 int err = 0; 3319 struct btrfs_path *path; 3320 u64 last = 0; 3321 3322 path = btrfs_alloc_path(); 3323 if (!path) 3324 return -ENOMEM; 3325 3326 again: 3327 while (1) { 3328 cache = btrfs_lookup_first_block_group(root->fs_info, last); 3329 while (cache) { 3330 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 3331 break; 3332 cache = next_block_group(root, cache); 3333 } 3334 if (!cache) { 3335 if (last == 0) 3336 break; 3337 last = 0; 3338 continue; 3339 } 3340 err = cache_save_setup(cache, trans, path); 3341 last = cache->key.objectid + cache->key.offset; 3342 btrfs_put_block_group(cache); 3343 } 3344 3345 while (1) { 3346 if (last == 0) { 3347 err = btrfs_run_delayed_refs(trans, root, 3348 (unsigned long)-1); 3349 if (err) /* File system offline */ 3350 goto out; 3351 } 3352 3353 cache = btrfs_lookup_first_block_group(root->fs_info, last); 3354 while (cache) { 3355 if (cache->disk_cache_state == BTRFS_DC_CLEAR) { 3356 btrfs_put_block_group(cache); 3357 goto again; 3358 } 3359 3360 if (cache->dirty) 3361 break; 3362 cache = next_block_group(root, cache); 3363 } 3364 if (!cache) { 3365 if (last == 0) 3366 break; 3367 last = 0; 3368 continue; 3369 } 3370 3371 if (cache->disk_cache_state == BTRFS_DC_SETUP) 3372 cache->disk_cache_state = BTRFS_DC_NEED_WRITE; 3373 cache->dirty = 0; 3374 last = cache->key.objectid + cache->key.offset; 3375 3376 err = write_one_cache_group(trans, root, path, cache); 3377 btrfs_put_block_group(cache); 3378 if (err) /* File system offline */ 3379 goto out; 3380 } 3381 3382 while (1) { 3383 /* 3384 * I don't think this is needed since we're just marking our 3385 * preallocated extent as written, but it can't hurt, just 3386 * in case. 3387 */ 3388 if (last == 0) { 3389 err = btrfs_run_delayed_refs(trans, root, 3390 (unsigned long)-1); 3391 if (err) /* File system offline */ 3392 goto out; 3393 } 3394 3395 cache = btrfs_lookup_first_block_group(root->fs_info, last); 3396 while (cache) { 3397 /* 3398 * Really this shouldn't happen, but it could if we 3399 * couldn't write the entire preallocated extent and 3400 * splitting the extent resulted in a new block.
3401 */ 3402 if (cache->dirty) { 3403 btrfs_put_block_group(cache); 3404 goto again; 3405 } 3406 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) 3407 break; 3408 cache = next_block_group(root, cache); 3409 } 3410 if (!cache) { 3411 if (last == 0) 3412 break; 3413 last = 0; 3414 continue; 3415 } 3416 3417 err = btrfs_write_out_cache(root, trans, cache, path); 3418 3419 /* 3420 * If we didn't have an error then the cache state is still 3421 * NEED_WRITE, so we can set it to WRITTEN. 3422 */ 3423 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE) 3424 cache->disk_cache_state = BTRFS_DC_WRITTEN; 3425 last = cache->key.objectid + cache->key.offset; 3426 btrfs_put_block_group(cache); 3427 } 3428 out: 3429 3430 btrfs_free_path(path); 3431 return err; 3432 } 3433 3434 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) 3435 { 3436 struct btrfs_block_group_cache *block_group; 3437 int readonly = 0; 3438 3439 block_group = btrfs_lookup_block_group(root->fs_info, bytenr); 3440 if (!block_group || block_group->ro) 3441 readonly = 1; 3442 if (block_group) 3443 btrfs_put_block_group(block_group); 3444 return readonly; 3445 } 3446 3447 static const char *alloc_name(u64 flags) 3448 { 3449 switch (flags) { 3450 case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA: 3451 return "mixed"; 3452 case BTRFS_BLOCK_GROUP_METADATA: 3453 return "metadata"; 3454 case BTRFS_BLOCK_GROUP_DATA: 3455 return "data"; 3456 case BTRFS_BLOCK_GROUP_SYSTEM: 3457 return "system"; 3458 default: 3459 WARN_ON(1); 3460 return "invalid-combination"; 3461 }; 3462 } 3463 3464 static int update_space_info(struct btrfs_fs_info *info, u64 flags, 3465 u64 total_bytes, u64 bytes_used, 3466 struct btrfs_space_info **space_info) 3467 { 3468 struct btrfs_space_info *found; 3469 int i; 3470 int factor; 3471 int ret; 3472 3473 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 3474 BTRFS_BLOCK_GROUP_RAID10)) 3475 factor = 2; 3476 else 3477 factor = 1; 3478 3479 found = __find_space_info(info, flags); 3480 if (found) { 3481 spin_lock(&found->lock); 3482 found->total_bytes += total_bytes; 3483 found->disk_total += total_bytes * factor; 3484 found->bytes_used += bytes_used; 3485 found->disk_used += bytes_used * factor; 3486 found->full = 0; 3487 spin_unlock(&found->lock); 3488 *space_info = found; 3489 return 0; 3490 } 3491 found = kzalloc(sizeof(*found), GFP_NOFS); 3492 if (!found) 3493 return -ENOMEM; 3494 3495 ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL); 3496 if (ret) { 3497 kfree(found); 3498 return ret; 3499 } 3500 3501 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 3502 INIT_LIST_HEAD(&found->block_groups[i]); 3503 init_rwsem(&found->groups_sem); 3504 spin_lock_init(&found->lock); 3505 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; 3506 found->total_bytes = total_bytes; 3507 found->disk_total = total_bytes * factor; 3508 found->bytes_used = bytes_used; 3509 found->disk_used = bytes_used * factor; 3510 found->bytes_pinned = 0; 3511 found->bytes_reserved = 0; 3512 found->bytes_readonly = 0; 3513 found->bytes_may_use = 0; 3514 found->full = 0; 3515 found->force_alloc = CHUNK_ALLOC_NO_FORCE; 3516 found->chunk_alloc = 0; 3517 found->flush = 0; 3518 init_waitqueue_head(&found->wait); 3519 INIT_LIST_HEAD(&found->ro_bgs); 3520 3521 ret = kobject_init_and_add(&found->kobj, &space_info_ktype, 3522 info->space_info_kobj, "%s", 3523 alloc_name(found->flags)); 3524 if (ret) { 3525 kfree(found); 3526 return ret; 3527 } 3528 3529 *space_info = found; 3530 list_add_rcu(&found->list, 
&info->space_info); 3531 if (flags & BTRFS_BLOCK_GROUP_DATA) 3532 info->data_sinfo = found; 3533 3534 return ret; 3535 } 3536 3537 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 3538 { 3539 u64 extra_flags = chunk_to_extended(flags) & 3540 BTRFS_EXTENDED_PROFILE_MASK; 3541 3542 write_seqlock(&fs_info->profiles_lock); 3543 if (flags & BTRFS_BLOCK_GROUP_DATA) 3544 fs_info->avail_data_alloc_bits |= extra_flags; 3545 if (flags & BTRFS_BLOCK_GROUP_METADATA) 3546 fs_info->avail_metadata_alloc_bits |= extra_flags; 3547 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 3548 fs_info->avail_system_alloc_bits |= extra_flags; 3549 write_sequnlock(&fs_info->profiles_lock); 3550 } 3551 3552 /* 3553 * returns target flags in extended format or 0 if restripe for this 3554 * chunk_type is not in progress 3555 * 3556 * should be called with either volume_mutex or balance_lock held 3557 */ 3558 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) 3559 { 3560 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3561 u64 target = 0; 3562 3563 if (!bctl) 3564 return 0; 3565 3566 if (flags & BTRFS_BLOCK_GROUP_DATA && 3567 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3568 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; 3569 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM && 3570 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3571 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; 3572 } else if (flags & BTRFS_BLOCK_GROUP_METADATA && 3573 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { 3574 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; 3575 } 3576 3577 return target; 3578 } 3579 3580 /* 3581 * @flags: available profiles in extended format (see ctree.h) 3582 * 3583 * Returns reduced profile in chunk format. If profile changing is in 3584 * progress (either running or paused) picks the target profile (if it's 3585 * already available), otherwise falls back to plain reducing. 
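 *
 * E.g. (illustrative): with two rw devices and both RAID1 and RAID0
 * set in @flags, RAID6 and RAID10 are masked out below and RAID1 wins
 * the priority ladder, so the reduced result is RAID1 in chunk format.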
3586 */ 3587 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 3588 { 3589 u64 num_devices = root->fs_info->fs_devices->rw_devices; 3590 u64 target; 3591 u64 tmp; 3592 3593 /* 3594 * see if restripe for this chunk_type is in progress, if so 3595 * try to reduce to the target profile 3596 */ 3597 spin_lock(&root->fs_info->balance_lock); 3598 target = get_restripe_target(root->fs_info, flags); 3599 if (target) { 3600 /* pick target profile only if it's already available */ 3601 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) { 3602 spin_unlock(&root->fs_info->balance_lock); 3603 return extended_to_chunk(target); 3604 } 3605 } 3606 spin_unlock(&root->fs_info->balance_lock); 3607 3608 /* First, mask out the RAID levels which aren't possible */ 3609 if (num_devices == 1) 3610 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 | 3611 BTRFS_BLOCK_GROUP_RAID5); 3612 if (num_devices < 3) 3613 flags &= ~BTRFS_BLOCK_GROUP_RAID6; 3614 if (num_devices < 4) 3615 flags &= ~BTRFS_BLOCK_GROUP_RAID10; 3616 3617 tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 | 3618 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 | 3619 BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10); 3620 flags &= ~tmp; 3621 3622 if (tmp & BTRFS_BLOCK_GROUP_RAID6) 3623 tmp = BTRFS_BLOCK_GROUP_RAID6; 3624 else if (tmp & BTRFS_BLOCK_GROUP_RAID5) 3625 tmp = BTRFS_BLOCK_GROUP_RAID5; 3626 else if (tmp & BTRFS_BLOCK_GROUP_RAID10) 3627 tmp = BTRFS_BLOCK_GROUP_RAID10; 3628 else if (tmp & BTRFS_BLOCK_GROUP_RAID1) 3629 tmp = BTRFS_BLOCK_GROUP_RAID1; 3630 else if (tmp & BTRFS_BLOCK_GROUP_RAID0) 3631 tmp = BTRFS_BLOCK_GROUP_RAID0; 3632 3633 return extended_to_chunk(flags | tmp); 3634 } 3635 3636 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags) 3637 { 3638 unsigned seq; 3639 u64 flags; 3640 3641 do { 3642 flags = orig_flags; 3643 seq = read_seqbegin(&root->fs_info->profiles_lock); 3644 3645 if (flags & BTRFS_BLOCK_GROUP_DATA) 3646 flags |= root->fs_info->avail_data_alloc_bits; 3647 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 3648 flags |= root->fs_info->avail_system_alloc_bits; 3649 else if (flags & BTRFS_BLOCK_GROUP_METADATA) 3650 flags |= root->fs_info->avail_metadata_alloc_bits; 3651 } while (read_seqretry(&root->fs_info->profiles_lock, seq)); 3652 3653 return btrfs_reduce_alloc_profile(root, flags); 3654 } 3655 3656 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) 3657 { 3658 u64 flags; 3659 u64 ret; 3660 3661 if (data) 3662 flags = BTRFS_BLOCK_GROUP_DATA; 3663 else if (root == root->fs_info->chunk_root) 3664 flags = BTRFS_BLOCK_GROUP_SYSTEM; 3665 else 3666 flags = BTRFS_BLOCK_GROUP_METADATA; 3667 3668 ret = get_alloc_profile(root, flags); 3669 return ret; 3670 } 3671 3672 /* 3673 * This will check the space that the inode allocates from to make sure we have 3674 * enough space for bytes. 
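 *
 * E.g. (illustrative): a buffered write of 5000 bytes on a filesystem
 * with a 4K sectorsize reserves ALIGN(5000, 4096) = 8192 bytes of data
 * space below.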
3675 */ 3676 int btrfs_check_data_free_space(struct inode *inode, u64 bytes) 3677 { 3678 struct btrfs_space_info *data_sinfo; 3679 struct btrfs_root *root = BTRFS_I(inode)->root; 3680 struct btrfs_fs_info *fs_info = root->fs_info; 3681 u64 used; 3682 int ret = 0, committed = 0, alloc_chunk = 1; 3683 3684 /* make sure bytes are sectorsize aligned */ 3685 bytes = ALIGN(bytes, root->sectorsize); 3686 3687 if (btrfs_is_free_space_inode(inode)) { 3688 committed = 1; 3689 ASSERT(current->journal_info); 3690 } 3691 3692 data_sinfo = fs_info->data_sinfo; 3693 if (!data_sinfo) 3694 goto alloc; 3695 3696 again: 3697 /* make sure we have enough space to handle the data first */ 3698 spin_lock(&data_sinfo->lock); 3699 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved + 3700 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly + 3701 data_sinfo->bytes_may_use; 3702 3703 if (used + bytes > data_sinfo->total_bytes) { 3704 struct btrfs_trans_handle *trans; 3705 3706 /* 3707 * if we don't have enough free bytes in this space then we need 3708 * to alloc a new chunk. 3709 */ 3710 if (!data_sinfo->full && alloc_chunk) { 3711 u64 alloc_target; 3712 3713 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE; 3714 spin_unlock(&data_sinfo->lock); 3715 alloc: 3716 alloc_target = btrfs_get_alloc_profile(root, 1); 3717 /* 3718 * It is ugly that we don't call nolock join 3719 * transaction for the free space inode case here. 3720 * But it is safe because we only do the data space 3721 * reservation for the free space cache in the 3722 * transaction context; the common join transaction 3723 * just increases the counter of the current transaction 3724 * handle and doesn't try to acquire the trans_lock of 3725 * the fs. 3726 */ 3727 trans = btrfs_join_transaction(root); 3728 if (IS_ERR(trans)) 3729 return PTR_ERR(trans); 3730 3731 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 3732 alloc_target, 3733 CHUNK_ALLOC_NO_FORCE); 3734 btrfs_end_transaction(trans, root); 3735 if (ret < 0) { 3736 if (ret != -ENOSPC) 3737 return ret; 3738 else 3739 goto commit_trans; 3740 } 3741 3742 if (!data_sinfo) 3743 data_sinfo = fs_info->data_sinfo; 3744 3745 goto again; 3746 } 3747 3748 /* 3749 * If we don't have enough pinned space to deal with this 3750 * allocation, don't bother committing the transaction. 3751 */ 3752 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned, 3753 bytes) < 0) 3754 committed = 1; 3755 spin_unlock(&data_sinfo->lock); 3756 3757 /* commit the current transaction and try again */ 3758 commit_trans: 3759 if (!committed && 3760 !atomic_read(&root->fs_info->open_ioctl_trans)) { 3761 committed = 1; 3762 3763 trans = btrfs_join_transaction(root); 3764 if (IS_ERR(trans)) 3765 return PTR_ERR(trans); 3766 ret = btrfs_commit_transaction(trans, root); 3767 if (ret) 3768 return ret; 3769 goto again; 3770 } 3771 3772 trace_btrfs_space_reservation(root->fs_info, 3773 "space_info:enospc", 3774 data_sinfo->flags, bytes, 1); 3775 return -ENOSPC; 3776 } 3777 data_sinfo->bytes_may_use += bytes; 3778 trace_btrfs_space_reservation(root->fs_info, "space_info", 3779 data_sinfo->flags, bytes, 1); 3780 spin_unlock(&data_sinfo->lock); 3781 3782 return 0; 3783 } 3784 3785 /* 3786 * Called if we need to clear a data reservation for this inode.
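 *
 * Usage sketch (illustrative, names as in this file): this is the undo
 * for a successful btrfs_check_data_free_space(), e.g.
 *
 *   ret = btrfs_check_data_free_space(inode, 8192);
 *   if (ret)
 *           return ret;
 *   ...the write fails or is abandoned...
 *   btrfs_free_reserved_data_space(inode, 8192);
 *
 * so bytes_may_use is balanced with the same sectorsize-aligned count.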
3787 */ 3788 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes) 3789 { 3790 struct btrfs_root *root = BTRFS_I(inode)->root; 3791 struct btrfs_space_info *data_sinfo; 3792 3793 /* make sure bytes are sectorsize aligned */ 3794 bytes = ALIGN(bytes, root->sectorsize); 3795 3796 data_sinfo = root->fs_info->data_sinfo; 3797 spin_lock(&data_sinfo->lock); 3798 WARN_ON(data_sinfo->bytes_may_use < bytes); 3799 data_sinfo->bytes_may_use -= bytes; 3800 trace_btrfs_space_reservation(root->fs_info, "space_info", 3801 data_sinfo->flags, bytes, 0); 3802 spin_unlock(&data_sinfo->lock); 3803 } 3804 3805 static void force_metadata_allocation(struct btrfs_fs_info *info) 3806 { 3807 struct list_head *head = &info->space_info; 3808 struct btrfs_space_info *found; 3809 3810 rcu_read_lock(); 3811 list_for_each_entry_rcu(found, head, list) { 3812 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3813 found->force_alloc = CHUNK_ALLOC_FORCE; 3814 } 3815 rcu_read_unlock(); 3816 } 3817 3818 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global) 3819 { 3820 return (global->size << 1); 3821 } 3822 3823 static int should_alloc_chunk(struct btrfs_root *root, 3824 struct btrfs_space_info *sinfo, int force) 3825 { 3826 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 3827 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; 3828 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved; 3829 u64 thresh; 3830 3831 if (force == CHUNK_ALLOC_FORCE) 3832 return 1; 3833 3834 /* 3835 * We need to take into account the global rsv because for all intents 3836 * and purposes it's used space. Don't worry about locking the 3837 * global_rsv, it doesn't change except when the transaction commits. 3838 */ 3839 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA) 3840 num_allocated += calc_global_rsv_need_space(global_rsv); 3841 3842 /* 3843 * in limited mode, we want to have some free space up to 3844 * about 1% of the FS size. 
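 *
 * E.g. (illustrative): on a 1TiB filesystem the threshold below works
 * out to max(64MiB, 1% of 1TiB) ~= 10.9GiB, so a chunk is allocated
 * once less than that much room is left in this space_info.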
3845 */ 3846 if (force == CHUNK_ALLOC_LIMITED) { 3847 thresh = btrfs_super_total_bytes(root->fs_info->super_copy); 3848 thresh = max_t(u64, 64 * 1024 * 1024, 3849 div_factor_fine(thresh, 1)); 3850 3851 if (num_bytes - num_allocated < thresh) 3852 return 1; 3853 } 3854 3855 if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8)) 3856 return 0; 3857 return 1; 3858 } 3859 3860 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type) 3861 { 3862 u64 num_dev; 3863 3864 if (type & (BTRFS_BLOCK_GROUP_RAID10 | 3865 BTRFS_BLOCK_GROUP_RAID0 | 3866 BTRFS_BLOCK_GROUP_RAID5 | 3867 BTRFS_BLOCK_GROUP_RAID6)) 3868 num_dev = root->fs_info->fs_devices->rw_devices; 3869 else if (type & BTRFS_BLOCK_GROUP_RAID1) 3870 num_dev = 2; 3871 else 3872 num_dev = 1; /* DUP or single */ 3873 3874 /* metadata for updating devices and chunk tree */ 3875 return btrfs_calc_trans_metadata_size(root, num_dev + 1); 3876 } 3877 3878 static void check_system_chunk(struct btrfs_trans_handle *trans, 3879 struct btrfs_root *root, u64 type) 3880 { 3881 struct btrfs_space_info *info; 3882 u64 left; 3883 u64 thresh; 3884 3885 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 3886 spin_lock(&info->lock); 3887 left = info->total_bytes - info->bytes_used - info->bytes_pinned - 3888 info->bytes_reserved - info->bytes_readonly; 3889 spin_unlock(&info->lock); 3890 3891 thresh = get_system_chunk_thresh(root, type); 3892 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) { 3893 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu", 3894 left, thresh, type); 3895 dump_space_info(info, 0, 0); 3896 } 3897 3898 if (left < thresh) { 3899 u64 flags; 3900 3901 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0); 3902 btrfs_alloc_chunk(trans, root, flags); 3903 } 3904 } 3905 3906 static int do_chunk_alloc(struct btrfs_trans_handle *trans, 3907 struct btrfs_root *extent_root, u64 flags, int force) 3908 { 3909 struct btrfs_space_info *space_info; 3910 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3911 int wait_for_alloc = 0; 3912 int ret = 0; 3913 3914 /* Don't re-enter if we're already allocating a chunk */ 3915 if (trans->allocating_chunk) 3916 return -ENOSPC; 3917 3918 space_info = __find_space_info(extent_root->fs_info, flags); 3919 if (!space_info) { 3920 ret = update_space_info(extent_root->fs_info, flags, 3921 0, 0, &space_info); 3922 BUG_ON(ret); /* -ENOMEM */ 3923 } 3924 BUG_ON(!space_info); /* Logic error */ 3925 3926 again: 3927 spin_lock(&space_info->lock); 3928 if (force < space_info->force_alloc) 3929 force = space_info->force_alloc; 3930 if (space_info->full) { 3931 if (should_alloc_chunk(extent_root, space_info, force)) 3932 ret = -ENOSPC; 3933 else 3934 ret = 0; 3935 spin_unlock(&space_info->lock); 3936 return ret; 3937 } 3938 3939 if (!should_alloc_chunk(extent_root, space_info, force)) { 3940 spin_unlock(&space_info->lock); 3941 return 0; 3942 } else if (space_info->chunk_alloc) { 3943 wait_for_alloc = 1; 3944 } else { 3945 space_info->chunk_alloc = 1; 3946 } 3947 3948 spin_unlock(&space_info->lock); 3949 3950 mutex_lock(&fs_info->chunk_mutex); 3951 3952 /* 3953 * The chunk_mutex is held throughout the entirety of a chunk 3954 * allocation, so once we've acquired the chunk_mutex we know that the 3955 * other guy is done and we need to recheck and see if we should 3956 * allocate.
3957 */ 3958 if (wait_for_alloc) { 3959 mutex_unlock(&fs_info->chunk_mutex); 3960 wait_for_alloc = 0; 3961 goto again; 3962 } 3963 3964 trans->allocating_chunk = true; 3965 3966 /* 3967 * If we have mixed data/metadata chunks we want to make sure we keep 3968 * allocating mixed chunks instead of individual chunks. 3969 */ 3970 if (btrfs_mixed_space_info(space_info)) 3971 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 3972 3973 /* 3974 * if we're doing a data chunk, go ahead and make sure that 3975 * we keep a reasonable number of metadata chunks allocated in the 3976 * FS as well. 3977 */ 3978 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 3979 fs_info->data_chunk_allocations++; 3980 if (!(fs_info->data_chunk_allocations % 3981 fs_info->metadata_ratio)) 3982 force_metadata_allocation(fs_info); 3983 } 3984 3985 /* 3986 * Check if we have enough space in SYSTEM chunk because we may need 3987 * to update devices. 3988 */ 3989 check_system_chunk(trans, extent_root, flags); 3990 3991 ret = btrfs_alloc_chunk(trans, extent_root, flags); 3992 trans->allocating_chunk = false; 3993 3994 spin_lock(&space_info->lock); 3995 if (ret < 0 && ret != -ENOSPC) 3996 goto out; 3997 if (ret) 3998 space_info->full = 1; 3999 else 4000 ret = 1; 4001 4002 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; 4003 out: 4004 space_info->chunk_alloc = 0; 4005 spin_unlock(&space_info->lock); 4006 mutex_unlock(&fs_info->chunk_mutex); 4007 return ret; 4008 } 4009 4010 static int can_overcommit(struct btrfs_root *root, 4011 struct btrfs_space_info *space_info, u64 bytes, 4012 enum btrfs_reserve_flush_enum flush) 4013 { 4014 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 4015 u64 profile = btrfs_get_alloc_profile(root, 0); 4016 u64 space_size; 4017 u64 avail; 4018 u64 used; 4019 4020 used = space_info->bytes_used + space_info->bytes_reserved + 4021 space_info->bytes_pinned + space_info->bytes_readonly; 4022 4023 /* 4024 * We only want to allow over committing if we have lots of actual space 4025 * free, but if we don't have enough space to handle the global reserve 4026 * space then we could end up having a real enospc problem when trying 4027 * to allocate a chunk or some other such important allocation. 4028 */ 4029 spin_lock(&global_rsv->lock); 4030 space_size = calc_global_rsv_need_space(global_rsv); 4031 spin_unlock(&global_rsv->lock); 4032 if (used + space_size >= space_info->total_bytes) 4033 return 0; 4034 4035 used += space_info->bytes_may_use; 4036 4037 spin_lock(&root->fs_info->free_chunk_lock); 4038 avail = root->fs_info->free_chunk_space; 4039 spin_unlock(&root->fs_info->free_chunk_lock); 4040 4041 /* 4042 * If we have dup, raid1 or raid10 then only half of the free 4043 * space is actually usable. For raid56, the space info used 4044 * doesn't include the parity drive, so we don't have to 4045 * change the math 4046 */ 4047 if (profile & (BTRFS_BLOCK_GROUP_DUP | 4048 BTRFS_BLOCK_GROUP_RAID1 | 4049 BTRFS_BLOCK_GROUP_RAID10)) 4050 avail >>= 1; 4051 4052 /* 4053 * If we aren't flushing all things, let us overcommit up to 4054 * half of the space. If we can flush, don't let us overcommit 4055 * too much, let it overcommit up to 1/8 of the space.
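 * (Editor's worked example: with 100 GiB of free_chunk_space and a RAID1 profile, avail is first halved to 50 GiB, then becomes 6.25 GiB for BTRFS_RESERVE_FLUSH_ALL reservations (>>= 3) or 25 GiB otherwise (>>= 1).)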
4056 */ 4057 if (flush == BTRFS_RESERVE_FLUSH_ALL) 4058 avail >>= 3; 4059 else 4060 avail >>= 1; 4061 4062 if (used + bytes < space_info->total_bytes + avail) 4063 return 1; 4064 return 0; 4065 } 4066 4067 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root, 4068 unsigned long nr_pages, int nr_items) 4069 { 4070 struct super_block *sb = root->fs_info->sb; 4071 4072 if (down_read_trylock(&sb->s_umount)) { 4073 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE); 4074 up_read(&sb->s_umount); 4075 } else { 4076 /* 4077 * We needn't worry about the filesystem going from r/w to r/o 4078 * even though we don't acquire the ->s_umount mutex, because the 4079 * filesystem should guarantee that the delalloc inode list is 4080 * empty after the filesystem becomes read-only (all dirty pages 4081 * have been written to disk). 4082 */ 4083 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items); 4084 if (!current->journal_info) 4085 btrfs_wait_ordered_roots(root->fs_info, nr_items); 4086 } 4087 } 4088 4089 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim) 4090 { 4091 u64 bytes; 4092 int nr; 4093 4094 bytes = btrfs_calc_trans_metadata_size(root, 1); 4095 nr = (int)div64_u64(to_reclaim, bytes); 4096 if (!nr) 4097 nr = 1; 4098 return nr; 4099 } 4100 4101 #define EXTENT_SIZE_PER_ITEM (256 * 1024) 4102 4103 /* 4104 * shrink metadata reservation for delalloc 4105 */ 4106 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, 4107 bool wait_ordered) 4108 { 4109 struct btrfs_block_rsv *block_rsv; 4110 struct btrfs_space_info *space_info; 4111 struct btrfs_trans_handle *trans; 4112 u64 delalloc_bytes; 4113 u64 max_reclaim; 4114 long time_left; 4115 unsigned long nr_pages; 4116 int loops; 4117 int items; 4118 enum btrfs_reserve_flush_enum flush; 4119 4120 /* Calculate the number of items we need to flush for this reservation */ 4121 items = calc_reclaim_items_nr(root, to_reclaim); 4122 to_reclaim = items * EXTENT_SIZE_PER_ITEM; 4123 4124 trans = (struct btrfs_trans_handle *)current->journal_info; 4125 block_rsv = &root->fs_info->delalloc_block_rsv; 4126 space_info = block_rsv->space_info; 4127 4128 delalloc_bytes = percpu_counter_sum_positive( 4129 &root->fs_info->delalloc_bytes); 4130 if (delalloc_bytes == 0) { 4131 if (trans) 4132 return; 4133 if (wait_ordered) 4134 btrfs_wait_ordered_roots(root->fs_info, items); 4135 return; 4136 } 4137 4138 loops = 0; 4139 while (delalloc_bytes && loops < 3) { 4140 max_reclaim = min(delalloc_bytes, to_reclaim); 4141 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT; 4142 btrfs_writeback_inodes_sb_nr(root, nr_pages, items); 4143 /* 4144 * We need to wait for the async pages to actually start before 4145 * we do anything.
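 * (Editor's note: we sample async_delalloc_pages first, then wait only until it has dropped by roughly the nr_pages we just queued, or to zero, so we don't run ahead of the async writeback we kicked off.)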
4146 */ 4147 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages); 4148 if (!max_reclaim) 4149 goto skip_async; 4150 4151 if (max_reclaim <= nr_pages) 4152 max_reclaim = 0; 4153 else 4154 max_reclaim -= nr_pages; 4155 4156 wait_event(root->fs_info->async_submit_wait, 4157 atomic_read(&root->fs_info->async_delalloc_pages) <= 4158 (int)max_reclaim); 4159 skip_async: 4160 if (!trans) 4161 flush = BTRFS_RESERVE_FLUSH_ALL; 4162 else 4163 flush = BTRFS_RESERVE_NO_FLUSH; 4164 spin_lock(&space_info->lock); 4165 if (can_overcommit(root, space_info, orig, flush)) { 4166 spin_unlock(&space_info->lock); 4167 break; 4168 } 4169 spin_unlock(&space_info->lock); 4170 4171 loops++; 4172 if (wait_ordered && !trans) { 4173 btrfs_wait_ordered_roots(root->fs_info, items); 4174 } else { 4175 time_left = schedule_timeout_killable(1); 4176 if (time_left) 4177 break; 4178 } 4179 delalloc_bytes = percpu_counter_sum_positive( 4180 &root->fs_info->delalloc_bytes); 4181 } 4182 } 4183 4184 /** 4185 * may_commit_transaction - possibly commit the transaction if it's OK to 4186 * @root - the root we're allocating for * @space_info - the space_info we are allocating from 4187 * @bytes - the number of bytes we want to reserve 4188 * @force - force the commit 4189 * 4190 * This will check to make sure that committing the transaction will actually 4191 * get us somewhere and then commit the transaction if it does. Otherwise it 4192 * will return -ENOSPC. 4193 */ 4194 static int may_commit_transaction(struct btrfs_root *root, 4195 struct btrfs_space_info *space_info, 4196 u64 bytes, int force) 4197 { 4198 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv; 4199 struct btrfs_trans_handle *trans; 4200 4201 trans = (struct btrfs_trans_handle *)current->journal_info; 4202 if (trans) 4203 return -EAGAIN; 4204 4205 if (force) 4206 goto commit; 4207 4208 /* See if there is enough pinned space to make this reservation */ 4209 if (percpu_counter_compare(&space_info->total_bytes_pinned, 4210 bytes) >= 0) 4211 goto commit; 4212 4213 /* 4214 * See if there is some space in the delayed insertion reservation for 4215 * this reservation.
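 * (Editor's note: i.e. if the pinned byte count plus what the delayed-items reservation will return at commit time covers the request, committing is worthwhile; otherwise we return -ENOSPC below rather than forcing a commit.)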
4216 */ 4217 if (space_info != delayed_rsv->space_info) 4218 return -ENOSPC; 4219 4220 spin_lock(&delayed_rsv->lock); 4221 if (percpu_counter_compare(&space_info->total_bytes_pinned, 4222 bytes - delayed_rsv->size) >= 0) { 4223 spin_unlock(&delayed_rsv->lock); 4224 return -ENOSPC; 4225 } 4226 spin_unlock(&delayed_rsv->lock); 4227 4228 commit: 4229 trans = btrfs_join_transaction(root); 4230 if (IS_ERR(trans)) 4231 return -ENOSPC; 4232 4233 return btrfs_commit_transaction(trans, root); 4234 } 4235 4236 enum flush_state { 4237 FLUSH_DELAYED_ITEMS_NR = 1, 4238 FLUSH_DELAYED_ITEMS = 2, 4239 FLUSH_DELALLOC = 3, 4240 FLUSH_DELALLOC_WAIT = 4, 4241 ALLOC_CHUNK = 5, 4242 COMMIT_TRANS = 6, 4243 }; 4244 4245 static int flush_space(struct btrfs_root *root, 4246 struct btrfs_space_info *space_info, u64 num_bytes, 4247 u64 orig_bytes, int state) 4248 { 4249 struct btrfs_trans_handle *trans; 4250 int nr; 4251 int ret = 0; 4252 4253 switch (state) { 4254 case FLUSH_DELAYED_ITEMS_NR: 4255 case FLUSH_DELAYED_ITEMS: 4256 if (state == FLUSH_DELAYED_ITEMS_NR) 4257 nr = calc_reclaim_items_nr(root, num_bytes) * 2; 4258 else 4259 nr = -1; 4260 4261 trans = btrfs_join_transaction(root); 4262 if (IS_ERR(trans)) { 4263 ret = PTR_ERR(trans); 4264 break; 4265 } 4266 ret = btrfs_run_delayed_items_nr(trans, root, nr); 4267 btrfs_end_transaction(trans, root); 4268 break; 4269 case FLUSH_DELALLOC: 4270 case FLUSH_DELALLOC_WAIT: 4271 shrink_delalloc(root, num_bytes * 2, orig_bytes, 4272 state == FLUSH_DELALLOC_WAIT); 4273 break; 4274 case ALLOC_CHUNK: 4275 trans = btrfs_join_transaction(root); 4276 if (IS_ERR(trans)) { 4277 ret = PTR_ERR(trans); 4278 break; 4279 } 4280 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 4281 btrfs_get_alloc_profile(root, 0), 4282 CHUNK_ALLOC_NO_FORCE); 4283 btrfs_end_transaction(trans, root); 4284 if (ret == -ENOSPC) 4285 ret = 0; 4286 break; 4287 case COMMIT_TRANS: 4288 ret = may_commit_transaction(root, space_info, orig_bytes, 0); 4289 break; 4290 default: 4291 ret = -ENOSPC; 4292 break; 4293 } 4294 4295 return ret; 4296 } 4297 4298 static inline u64 4299 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root, 4300 struct btrfs_space_info *space_info) 4301 { 4302 u64 used; 4303 u64 expected; 4304 u64 to_reclaim; 4305 4306 to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024, 4307 16 * 1024 * 1024); 4308 spin_lock(&space_info->lock); 4309 if (can_overcommit(root, space_info, to_reclaim, 4310 BTRFS_RESERVE_FLUSH_ALL)) { 4311 to_reclaim = 0; 4312 goto out; 4313 } 4314 4315 used = space_info->bytes_used + space_info->bytes_reserved + 4316 space_info->bytes_pinned + space_info->bytes_readonly + 4317 space_info->bytes_may_use; 4318 if (can_overcommit(root, space_info, 1024 * 1024, 4319 BTRFS_RESERVE_FLUSH_ALL)) 4320 expected = div_factor_fine(space_info->total_bytes, 95); 4321 else 4322 expected = div_factor_fine(space_info->total_bytes, 90); 4323 4324 if (used > expected) 4325 to_reclaim = used - expected; 4326 else 4327 to_reclaim = 0; 4328 to_reclaim = min(to_reclaim, space_info->bytes_may_use + 4329 space_info->bytes_reserved); 4330 out: 4331 spin_unlock(&space_info->lock); 4332 4333 return to_reclaim; 4334 } 4335 4336 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info, 4337 struct btrfs_fs_info *fs_info, u64 used) 4338 { 4339 return (used >= div_factor_fine(space_info->total_bytes, 98) && 4340 !btrfs_fs_closing(fs_info) && 4341 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); 4342 } 4343 4344 static int btrfs_need_do_async_reclaim(struct 
btrfs_space_info *space_info, 4345 struct btrfs_fs_info *fs_info, 4346 int flush_state) 4347 { 4348 u64 used; 4349 4350 spin_lock(&space_info->lock); 4351 /* 4352 * We have run out of space and flush_space has not freed any for us, 4353 * so don't bother doing async reclaim. 4354 */ 4355 if (flush_state > COMMIT_TRANS && space_info->full) { 4356 spin_unlock(&space_info->lock); 4357 return 0; 4358 } 4359 4360 used = space_info->bytes_used + space_info->bytes_reserved + 4361 space_info->bytes_pinned + space_info->bytes_readonly + 4362 space_info->bytes_may_use; 4363 if (need_do_async_reclaim(space_info, fs_info, used)) { 4364 spin_unlock(&space_info->lock); 4365 return 1; 4366 } 4367 spin_unlock(&space_info->lock); 4368 4369 return 0; 4370 } 4371 4372 static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 4373 { 4374 struct btrfs_fs_info *fs_info; 4375 struct btrfs_space_info *space_info; 4376 u64 to_reclaim; 4377 int flush_state; 4378 4379 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 4380 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 4381 4382 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root, 4383 space_info); 4384 if (!to_reclaim) 4385 return; 4386 4387 flush_state = FLUSH_DELAYED_ITEMS_NR; 4388 do { 4389 flush_space(fs_info->fs_root, space_info, to_reclaim, 4390 to_reclaim, flush_state); 4391 flush_state++; 4392 if (!btrfs_need_do_async_reclaim(space_info, fs_info, 4393 flush_state)) 4394 return; 4395 } while (flush_state <= COMMIT_TRANS); 4396 4397 if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state)) 4398 queue_work(system_unbound_wq, work); 4399 } 4400 4401 void btrfs_init_async_reclaim_work(struct work_struct *work) 4402 { 4403 INIT_WORK(work, btrfs_async_reclaim_metadata_space); 4404 } 4405 4406 /** 4407 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space 4408 * @root - the root we're allocating for 4409 * @block_rsv - the block_rsv we're allocating for 4410 * @orig_bytes - the number of bytes we want 4411 * @flush - whether or not we can flush to make our reservation 4412 * 4413 * This will reserve orig_bytes number of bytes from the space info associated 4414 * with the block_rsv. If there is not enough space it will make an attempt to 4415 * flush out space to make room. It will do this by flushing delalloc if 4416 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH 4417 * then no attempts to regain reservations will be made and this will fail if 4418 * there is not enough space already. 4419 */ 4420 static int reserve_metadata_bytes(struct btrfs_root *root, 4421 struct btrfs_block_rsv *block_rsv, 4422 u64 orig_bytes, 4423 enum btrfs_reserve_flush_enum flush) 4424 { 4425 struct btrfs_space_info *space_info = block_rsv->space_info; 4426 u64 used; 4427 u64 num_bytes = orig_bytes; 4428 int flush_state = FLUSH_DELAYED_ITEMS_NR; 4429 int ret = 0; 4430 bool flushing = false; 4431 4432 again: 4433 ret = 0; 4434 spin_lock(&space_info->lock); 4435 /* 4436 * We only want to wait if somebody other than us is flushing and we 4437 * are actually allowed to flush all things. 4438 */ 4439 while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing && 4440 space_info->flush) { 4441 spin_unlock(&space_info->lock); 4442 /* 4443 * If we have a trans handle we can't wait because the flusher 4444 * may have to commit the transaction, which would mean we would 4445 * deadlock since we are waiting for the flusher to finish, but 4446 * hold the current transaction open.
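 * (Editor's example of the deadlock: task A holds a transaction handle open and waits here for the flusher, while the flusher tries to commit that very transaction and so waits for A to release its handle; hence the -EAGAIN below.)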
4447 */ 4448 if (current->journal_info) 4449 return -EAGAIN; 4450 ret = wait_event_killable(space_info->wait, !space_info->flush); 4451 /* Must have been killed, return */ 4452 if (ret) 4453 return -EINTR; 4454 4455 spin_lock(&space_info->lock); 4456 } 4457 4458 ret = -ENOSPC; 4459 used = space_info->bytes_used + space_info->bytes_reserved + 4460 space_info->bytes_pinned + space_info->bytes_readonly + 4461 space_info->bytes_may_use; 4462 4463 /* 4464 * The idea here is that if we've not already over-reserved the space 4465 * then we can go ahead and save our reservation first and then start 4466 * flushing if we need to. Otherwise, if we've already overcommitted, 4467 * let's start flushing stuff first and then come back and try to make 4468 * our reservation. 4469 */ 4470 if (used <= space_info->total_bytes) { 4471 if (used + orig_bytes <= space_info->total_bytes) { 4472 space_info->bytes_may_use += orig_bytes; 4473 trace_btrfs_space_reservation(root->fs_info, 4474 "space_info", space_info->flags, orig_bytes, 1); 4475 ret = 0; 4476 } else { 4477 /* 4478 * OK, set num_bytes to orig_bytes since we aren't 4479 * overcommitted; this way we only try to reclaim what 4480 * we need. 4481 */ 4482 num_bytes = orig_bytes; 4483 } 4484 } else { 4485 /* 4486 * OK, we're overcommitted, so set num_bytes to the overcommitted 4487 * amount plus the amount of bytes that we need for this 4488 * reservation. 4489 */ 4490 num_bytes = used - space_info->total_bytes + 4491 (orig_bytes * 2); 4492 } 4493 4494 if (ret && can_overcommit(root, space_info, orig_bytes, flush)) { 4495 space_info->bytes_may_use += orig_bytes; 4496 trace_btrfs_space_reservation(root->fs_info, "space_info", 4497 space_info->flags, orig_bytes, 4498 1); 4499 ret = 0; 4500 } 4501 4502 /* 4503 * Couldn't make our reservation, save our place so while we're trying 4504 * to reclaim space we can actually use it instead of somebody else 4505 * stealing it from us. 4506 * 4507 * We make the other tasks wait for the flush only when we can flush 4508 * all things. 4509 */ 4510 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 4511 flushing = true; 4512 space_info->flush = 1; 4513 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 4514 used += orig_bytes; 4515 /* 4516 * We will do the space reservation dance during log replay, 4517 * which means we won't have fs_info->fs_root set, so don't do 4518 * the async reclaim as we will panic. 4519 */ 4520 if (!root->fs_info->log_root_recovering && 4521 need_do_async_reclaim(space_info, root->fs_info, used) && 4522 !work_busy(&root->fs_info->async_reclaim_work)) 4523 queue_work(system_unbound_wq, 4524 &root->fs_info->async_reclaim_work); 4525 } 4526 spin_unlock(&space_info->lock); 4527 4528 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH) 4529 goto out; 4530 4531 ret = flush_space(root, space_info, num_bytes, orig_bytes, 4532 flush_state); 4533 flush_state++; 4534 4535 /* 4536 * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock 4537 * would happen. So skip the delalloc flush.
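 * (Editor's inference, not a claim from the original comment: flushing delalloc may itself need to join a transaction and reserve metadata, potentially the very reservation we are blocked on under BTRFS_RESERVE_FLUSH_LIMIT, so the state machine below skips straight to ALLOC_CHUNK.)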
4538 */ 4539 if (flush == BTRFS_RESERVE_FLUSH_LIMIT && 4540 (flush_state == FLUSH_DELALLOC || 4541 flush_state == FLUSH_DELALLOC_WAIT)) 4542 flush_state = ALLOC_CHUNK; 4543 4544 if (!ret) 4545 goto again; 4546 else if (flush == BTRFS_RESERVE_FLUSH_LIMIT && 4547 flush_state < COMMIT_TRANS) 4548 goto again; 4549 else if (flush == BTRFS_RESERVE_FLUSH_ALL && 4550 flush_state <= COMMIT_TRANS) 4551 goto again; 4552 4553 out: 4554 if (ret == -ENOSPC && 4555 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) { 4556 struct btrfs_block_rsv *global_rsv = 4557 &root->fs_info->global_block_rsv; 4558 4559 if (block_rsv != global_rsv && 4560 !block_rsv_use_bytes(global_rsv, orig_bytes)) 4561 ret = 0; 4562 } 4563 if (ret == -ENOSPC) 4564 trace_btrfs_space_reservation(root->fs_info, 4565 "space_info:enospc", 4566 space_info->flags, orig_bytes, 1); 4567 if (flushing) { 4568 spin_lock(&space_info->lock); 4569 space_info->flush = 0; 4570 wake_up_all(&space_info->wait); 4571 spin_unlock(&space_info->lock); 4572 } 4573 return ret; 4574 } 4575 4576 static struct btrfs_block_rsv *get_block_rsv( 4577 const struct btrfs_trans_handle *trans, 4578 const struct btrfs_root *root) 4579 { 4580 struct btrfs_block_rsv *block_rsv = NULL; 4581 4582 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4583 block_rsv = trans->block_rsv; 4584 4585 if (root == root->fs_info->csum_root && trans->adding_csums) 4586 block_rsv = trans->block_rsv; 4587 4588 if (root == root->fs_info->uuid_root) 4589 block_rsv = trans->block_rsv; 4590 4591 if (!block_rsv) 4592 block_rsv = root->block_rsv; 4593 4594 if (!block_rsv) 4595 block_rsv = &root->fs_info->empty_block_rsv; 4596 4597 return block_rsv; 4598 } 4599 4600 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, 4601 u64 num_bytes) 4602 { 4603 int ret = -ENOSPC; 4604 spin_lock(&block_rsv->lock); 4605 if (block_rsv->reserved >= num_bytes) { 4606 block_rsv->reserved -= num_bytes; 4607 if (block_rsv->reserved < block_rsv->size) 4608 block_rsv->full = 0; 4609 ret = 0; 4610 } 4611 spin_unlock(&block_rsv->lock); 4612 return ret; 4613 } 4614 4615 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, 4616 u64 num_bytes, int update_size) 4617 { 4618 spin_lock(&block_rsv->lock); 4619 block_rsv->reserved += num_bytes; 4620 if (update_size) 4621 block_rsv->size += num_bytes; 4622 else if (block_rsv->reserved >= block_rsv->size) 4623 block_rsv->full = 1; 4624 spin_unlock(&block_rsv->lock); 4625 } 4626 4627 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info, 4628 struct btrfs_block_rsv *dest, u64 num_bytes, 4629 int min_factor) 4630 { 4631 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 4632 u64 min_bytes; 4633 4634 if (global_rsv->space_info != dest->space_info) 4635 return -ENOSPC; 4636 4637 spin_lock(&global_rsv->lock); 4638 min_bytes = div_factor(global_rsv->size, min_factor); 4639 if (global_rsv->reserved < min_bytes + num_bytes) { 4640 spin_unlock(&global_rsv->lock); 4641 return -ENOSPC; 4642 } 4643 global_rsv->reserved -= num_bytes; 4644 if (global_rsv->reserved < global_rsv->size) 4645 global_rsv->full = 0; 4646 spin_unlock(&global_rsv->lock); 4647 4648 block_rsv_add_bytes(dest, num_bytes, 1); 4649 return 0; 4650 } 4651 4652 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info, 4653 struct btrfs_block_rsv *block_rsv, 4654 struct btrfs_block_rsv *dest, u64 num_bytes) 4655 { 4656 struct btrfs_space_info *space_info = block_rsv->space_info; 4657 4658 spin_lock(&block_rsv->lock); 4659 if (num_bytes == (u64)-1) 4660 num_bytes 
= block_rsv->size; 4661 block_rsv->size -= num_bytes; 4662 if (block_rsv->reserved >= block_rsv->size) { 4663 num_bytes = block_rsv->reserved - block_rsv->size; 4664 block_rsv->reserved = block_rsv->size; 4665 block_rsv->full = 1; 4666 } else { 4667 num_bytes = 0; 4668 } 4669 spin_unlock(&block_rsv->lock); 4670 4671 if (num_bytes > 0) { 4672 if (dest) { 4673 spin_lock(&dest->lock); 4674 if (!dest->full) { 4675 u64 bytes_to_add; 4676 4677 bytes_to_add = dest->size - dest->reserved; 4678 bytes_to_add = min(num_bytes, bytes_to_add); 4679 dest->reserved += bytes_to_add; 4680 if (dest->reserved >= dest->size) 4681 dest->full = 1; 4682 num_bytes -= bytes_to_add; 4683 } 4684 spin_unlock(&dest->lock); 4685 } 4686 if (num_bytes) { 4687 spin_lock(&space_info->lock); 4688 space_info->bytes_may_use -= num_bytes; 4689 trace_btrfs_space_reservation(fs_info, "space_info", 4690 space_info->flags, num_bytes, 0); 4691 spin_unlock(&space_info->lock); 4692 } 4693 } 4694 } 4695 4696 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src, 4697 struct btrfs_block_rsv *dst, u64 num_bytes) 4698 { 4699 int ret; 4700 4701 ret = block_rsv_use_bytes(src, num_bytes); 4702 if (ret) 4703 return ret; 4704 4705 block_rsv_add_bytes(dst, num_bytes, 1); 4706 return 0; 4707 } 4708 4709 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type) 4710 { 4711 memset(rsv, 0, sizeof(*rsv)); 4712 spin_lock_init(&rsv->lock); 4713 rsv->type = type; 4714 } 4715 4716 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root, 4717 unsigned short type) 4718 { 4719 struct btrfs_block_rsv *block_rsv; 4720 struct btrfs_fs_info *fs_info = root->fs_info; 4721 4722 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); 4723 if (!block_rsv) 4724 return NULL; 4725 4726 btrfs_init_block_rsv(block_rsv, type); 4727 block_rsv->space_info = __find_space_info(fs_info, 4728 BTRFS_BLOCK_GROUP_METADATA); 4729 return block_rsv; 4730 } 4731 4732 void btrfs_free_block_rsv(struct btrfs_root *root, 4733 struct btrfs_block_rsv *rsv) 4734 { 4735 if (!rsv) 4736 return; 4737 btrfs_block_rsv_release(root, rsv, (u64)-1); 4738 kfree(rsv); 4739 } 4740 4741 int btrfs_block_rsv_add(struct btrfs_root *root, 4742 struct btrfs_block_rsv *block_rsv, u64 num_bytes, 4743 enum btrfs_reserve_flush_enum flush) 4744 { 4745 int ret; 4746 4747 if (num_bytes == 0) 4748 return 0; 4749 4750 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); 4751 if (!ret) { 4752 block_rsv_add_bytes(block_rsv, num_bytes, 1); 4753 return 0; 4754 } 4755 4756 return ret; 4757 } 4758 4759 int btrfs_block_rsv_check(struct btrfs_root *root, 4760 struct btrfs_block_rsv *block_rsv, int min_factor) 4761 { 4762 u64 num_bytes = 0; 4763 int ret = -ENOSPC; 4764 4765 if (!block_rsv) 4766 return 0; 4767 4768 spin_lock(&block_rsv->lock); 4769 num_bytes = div_factor(block_rsv->size, min_factor); 4770 if (block_rsv->reserved >= num_bytes) 4771 ret = 0; 4772 spin_unlock(&block_rsv->lock); 4773 4774 return ret; 4775 } 4776 4777 int btrfs_block_rsv_refill(struct btrfs_root *root, 4778 struct btrfs_block_rsv *block_rsv, u64 min_reserved, 4779 enum btrfs_reserve_flush_enum flush) 4780 { 4781 u64 num_bytes = 0; 4782 int ret = -ENOSPC; 4783 4784 if (!block_rsv) 4785 return 0; 4786 4787 spin_lock(&block_rsv->lock); 4788 num_bytes = min_reserved; 4789 if (block_rsv->reserved >= num_bytes) 4790 ret = 0; 4791 else 4792 num_bytes -= block_rsv->reserved; 4793 spin_unlock(&block_rsv->lock); 4794 4795 if (!ret) 4796 return 0; 4797 4798 ret = reserve_metadata_bytes(root, block_rsv, 
num_bytes, flush); 4799 if (!ret) { 4800 block_rsv_add_bytes(block_rsv, num_bytes, 0); 4801 return 0; 4802 } 4803 4804 return ret; 4805 } 4806 4807 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 4808 struct btrfs_block_rsv *dst_rsv, 4809 u64 num_bytes) 4810 { 4811 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 4812 } 4813 4814 void btrfs_block_rsv_release(struct btrfs_root *root, 4815 struct btrfs_block_rsv *block_rsv, 4816 u64 num_bytes) 4817 { 4818 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 4819 if (global_rsv == block_rsv || 4820 block_rsv->space_info != global_rsv->space_info) 4821 global_rsv = NULL; 4822 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv, 4823 num_bytes); 4824 } 4825 4826 /* 4827 * helper to calculate size of global block reservation. 4828 * the desired value is sum of space used by extent tree, 4829 * checksum tree and root tree 4830 */ 4831 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) 4832 { 4833 struct btrfs_space_info *sinfo; 4834 u64 num_bytes; 4835 u64 meta_used; 4836 u64 data_used; 4837 int csum_size = btrfs_super_csum_size(fs_info->super_copy); 4838 4839 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); 4840 spin_lock(&sinfo->lock); 4841 data_used = sinfo->bytes_used; 4842 spin_unlock(&sinfo->lock); 4843 4844 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 4845 spin_lock(&sinfo->lock); 4846 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) 4847 data_used = 0; 4848 meta_used = sinfo->bytes_used; 4849 spin_unlock(&sinfo->lock); 4850 4851 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) * 4852 csum_size * 2; 4853 num_bytes += div64_u64(data_used + meta_used, 50); 4854 4855 if (num_bytes * 3 > meta_used) 4856 num_bytes = div64_u64(meta_used, 3); 4857 4858 return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10); 4859 } 4860 4861 static void update_global_block_rsv(struct btrfs_fs_info *fs_info) 4862 { 4863 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; 4864 struct btrfs_space_info *sinfo = block_rsv->space_info; 4865 u64 num_bytes; 4866 4867 num_bytes = calc_global_metadata_size(fs_info); 4868 4869 spin_lock(&sinfo->lock); 4870 spin_lock(&block_rsv->lock); 4871 4872 block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024); 4873 4874 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + 4875 sinfo->bytes_reserved + sinfo->bytes_readonly + 4876 sinfo->bytes_may_use; 4877 4878 if (sinfo->total_bytes > num_bytes) { 4879 num_bytes = sinfo->total_bytes - num_bytes; 4880 block_rsv->reserved += num_bytes; 4881 sinfo->bytes_may_use += num_bytes; 4882 trace_btrfs_space_reservation(fs_info, "space_info", 4883 sinfo->flags, num_bytes, 1); 4884 } 4885 4886 if (block_rsv->reserved >= block_rsv->size) { 4887 num_bytes = block_rsv->reserved - block_rsv->size; 4888 sinfo->bytes_may_use -= num_bytes; 4889 trace_btrfs_space_reservation(fs_info, "space_info", 4890 sinfo->flags, num_bytes, 0); 4891 block_rsv->reserved = block_rsv->size; 4892 block_rsv->full = 1; 4893 } 4894 4895 spin_unlock(&block_rsv->lock); 4896 spin_unlock(&sinfo->lock); 4897 } 4898 4899 static void init_global_block_rsv(struct btrfs_fs_info *fs_info) 4900 { 4901 struct btrfs_space_info *space_info; 4902 4903 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 4904 fs_info->chunk_block_rsv.space_info = space_info; 4905 4906 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 4907 fs_info->global_block_rsv.space_info = space_info; 4908 
fs_info->delalloc_block_rsv.space_info = space_info; 4909 fs_info->trans_block_rsv.space_info = space_info; 4910 fs_info->empty_block_rsv.space_info = space_info; 4911 fs_info->delayed_block_rsv.space_info = space_info; 4912 4913 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv; 4914 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; 4915 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; 4916 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; 4917 if (fs_info->quota_root) 4918 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv; 4919 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; 4920 4921 update_global_block_rsv(fs_info); 4922 } 4923 4924 static void release_global_block_rsv(struct btrfs_fs_info *fs_info) 4925 { 4926 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL, 4927 (u64)-1); 4928 WARN_ON(fs_info->delalloc_block_rsv.size > 0); 4929 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0); 4930 WARN_ON(fs_info->trans_block_rsv.size > 0); 4931 WARN_ON(fs_info->trans_block_rsv.reserved > 0); 4932 WARN_ON(fs_info->chunk_block_rsv.size > 0); 4933 WARN_ON(fs_info->chunk_block_rsv.reserved > 0); 4934 WARN_ON(fs_info->delayed_block_rsv.size > 0); 4935 WARN_ON(fs_info->delayed_block_rsv.reserved > 0); 4936 } 4937 4938 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, 4939 struct btrfs_root *root) 4940 { 4941 if (!trans->block_rsv) 4942 return; 4943 4944 if (!trans->bytes_reserved) 4945 return; 4946 4947 trace_btrfs_space_reservation(root->fs_info, "transaction", 4948 trans->transid, trans->bytes_reserved, 0); 4949 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); 4950 trans->bytes_reserved = 0; 4951 } 4952 4953 /* Can only return 0 or -ENOSPC */ 4954 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, 4955 struct inode *inode) 4956 { 4957 struct btrfs_root *root = BTRFS_I(inode)->root; 4958 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); 4959 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; 4960 4961 /* 4962 * We need to hold space in order to delete our orphan item once we've 4963 * added it, so this takes the reservation so we can release it later 4964 * when we are truly done with the orphan item. 4965 */ 4966 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 4967 trace_btrfs_space_reservation(root->fs_info, "orphan", 4968 btrfs_ino(inode), num_bytes, 1); 4969 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 4970 } 4971 4972 void btrfs_orphan_release_metadata(struct inode *inode) 4973 { 4974 struct btrfs_root *root = BTRFS_I(inode)->root; 4975 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 4976 trace_btrfs_space_reservation(root->fs_info, "orphan", 4977 btrfs_ino(inode), num_bytes, 0); 4978 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); 4979 } 4980 4981 /* 4982 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation 4983 * root: the root of the parent directory 4984 * rsv: block reservation 4985 * items: the number of items we need to reserve 4986 * qgroup_reserved: used to return the reserved size in qgroup 4987 * 4988 * This function is used to reserve space for snapshot/subvolume 4989 * creation and deletion. Those operations differ from the 4990 * common file/directory operations: they change two fs/file trees 4991 * and the root tree, and the number of items that the qgroup reserves is 4992 * different from the free space reservation.
So we cannot use 4993 * the space reservation mechanism in start_transaction(). 4994 */ 4995 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, 4996 struct btrfs_block_rsv *rsv, 4997 int items, 4998 u64 *qgroup_reserved, 4999 bool use_global_rsv) 5000 { 5001 u64 num_bytes; 5002 int ret; 5003 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 5004 5005 if (root->fs_info->quota_enabled) { 5006 /* One for parent inode, two for dir entries */ 5007 num_bytes = 3 * root->nodesize; 5008 ret = btrfs_qgroup_reserve(root, num_bytes); 5009 if (ret) 5010 return ret; 5011 } else { 5012 num_bytes = 0; 5013 } 5014 5015 *qgroup_reserved = num_bytes; 5016 5017 num_bytes = btrfs_calc_trans_metadata_size(root, items); 5018 rsv->space_info = __find_space_info(root->fs_info, 5019 BTRFS_BLOCK_GROUP_METADATA); 5020 ret = btrfs_block_rsv_add(root, rsv, num_bytes, 5021 BTRFS_RESERVE_FLUSH_ALL); 5022 5023 if (ret == -ENOSPC && use_global_rsv) 5024 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes); 5025 5026 if (ret) { 5027 if (*qgroup_reserved) 5028 btrfs_qgroup_free(root, *qgroup_reserved); 5029 } 5030 5031 return ret; 5032 } 5033 5034 void btrfs_subvolume_release_metadata(struct btrfs_root *root, 5035 struct btrfs_block_rsv *rsv, 5036 u64 qgroup_reserved) 5037 { 5038 btrfs_block_rsv_release(root, rsv, (u64)-1); 5039 if (qgroup_reserved) 5040 btrfs_qgroup_free(root, qgroup_reserved); 5041 } 5042 5043 /** 5044 * drop_outstanding_extent - drop an outstanding extent 5045 * @inode: the inode we're dropping the extent for 5046 * 5047 * This is called when we are freeing up an outstanding extent, either called 5048 * after an error or after an extent is written. This will return the number of 5049 * reserved extents that need to be freed. This must be called with 5050 * BTRFS_I(inode)->lock held. 5051 */ 5052 static unsigned drop_outstanding_extent(struct inode *inode) 5053 { 5054 unsigned drop_inode_space = 0; 5055 unsigned dropped_extents = 0; 5056 5057 BUG_ON(!BTRFS_I(inode)->outstanding_extents); 5058 BTRFS_I(inode)->outstanding_extents--; 5059 5060 if (BTRFS_I(inode)->outstanding_extents == 0 && 5061 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, 5062 &BTRFS_I(inode)->runtime_flags)) 5063 drop_inode_space = 1; 5064 5065 /* 5066 * If we have at least as many outstanding extents as we have 5067 * reserved then we need to leave the reserved extents count alone. 5068 */ 5069 if (BTRFS_I(inode)->outstanding_extents >= 5070 BTRFS_I(inode)->reserved_extents) 5071 return drop_inode_space; 5072 5073 dropped_extents = BTRFS_I(inode)->reserved_extents - 5074 BTRFS_I(inode)->outstanding_extents; 5075 BTRFS_I(inode)->reserved_extents -= dropped_extents; 5076 return dropped_extents + drop_inode_space; 5077 } 5078 5079 /** 5080 * calc_csum_metadata_size - return the amount of metadata space that must be 5081 * reserved/free'd for the given bytes. 5082 * @inode: the inode we're manipulating 5083 * @num_bytes: the number of bytes in question 5084 * @reserve: 1 if we are reserving space, 0 if we are freeing space 5085 * 5086 * This adjusts the number of csum_bytes in the inode and then returns the 5087 * correct amount of metadata that must either be reserved or freed. We 5088 * calculate how many checksums we can fit into one leaf and then divide the 5089 * number of bytes that will need to be checksummed by this value to figure out 5090 * how many checksums will be required.
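 * (Editor's worked example, assuming 4 KiB sectors: 1 MiB of csum_bytes covers 256 blocks and therefore 256 checksums, which is then rounded up to a whole number of csum leaves.)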
If we are adding bytes then the number 5091 * may go up and we will return the number of additional bytes that must be 5092 * reserved. If it is going down we will return the number of bytes that must 5093 * be freed. 5094 * 5095 * This must be called with BTRFS_I(inode)->lock held. 5096 */ 5097 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, 5098 int reserve) 5099 { 5100 struct btrfs_root *root = BTRFS_I(inode)->root; 5101 u64 csum_size; 5102 int num_csums_per_leaf; 5103 int num_csums; 5104 int old_csums; 5105 5106 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM && 5107 BTRFS_I(inode)->csum_bytes == 0) 5108 return 0; 5109 5110 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize); 5111 if (reserve) 5112 BTRFS_I(inode)->csum_bytes += num_bytes; 5113 else 5114 BTRFS_I(inode)->csum_bytes -= num_bytes; 5115 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item); 5116 num_csums_per_leaf = (int)div64_u64(csum_size, 5117 sizeof(struct btrfs_csum_item) + 5118 sizeof(struct btrfs_disk_key)); 5119 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize); 5120 num_csums = num_csums + num_csums_per_leaf - 1; 5121 num_csums = num_csums / num_csums_per_leaf; 5122 5123 old_csums = old_csums + num_csums_per_leaf - 1; 5124 old_csums = old_csums / num_csums_per_leaf; 5125 5126 /* No change, no need to reserve more */ 5127 if (old_csums == num_csums) 5128 return 0; 5129 5130 if (reserve) 5131 return btrfs_calc_trans_metadata_size(root, 5132 num_csums - old_csums); 5133 5134 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums); 5135 } 5136 5137 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) 5138 { 5139 struct btrfs_root *root = BTRFS_I(inode)->root; 5140 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; 5141 u64 to_reserve = 0; 5142 u64 csum_bytes; 5143 unsigned nr_extents = 0; 5144 int extra_reserve = 0; 5145 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; 5146 int ret = 0; 5147 bool delalloc_lock = true; 5148 u64 to_free = 0; 5149 unsigned dropped; 5150 5151 /* If we are a free space inode we need to not flush since we will be in 5152 * the middle of a transaction commit. We also don't need the delalloc 5153 * mutex since we won't race with anybody. We need this mostly to make 5154 * lockdep shut its filthy mouth. 5155 */ 5156 if (btrfs_is_free_space_inode(inode)) { 5157 flush = BTRFS_RESERVE_NO_FLUSH; 5158 delalloc_lock = false; 5159 } 5160 5161 if (flush != BTRFS_RESERVE_NO_FLUSH && 5162 btrfs_transaction_in_commit(root->fs_info)) 5163 schedule_timeout(1); 5164 5165 if (delalloc_lock) 5166 mutex_lock(&BTRFS_I(inode)->delalloc_mutex); 5167 5168 num_bytes = ALIGN(num_bytes, root->sectorsize); 5169 5170 spin_lock(&BTRFS_I(inode)->lock); 5171 BTRFS_I(inode)->outstanding_extents++; 5172 5173 if (BTRFS_I(inode)->outstanding_extents > 5174 BTRFS_I(inode)->reserved_extents) 5175 nr_extents = BTRFS_I(inode)->outstanding_extents - 5176 BTRFS_I(inode)->reserved_extents; 5177 5178 /* 5179 * Add an item to reserve for updating the inode when we complete the 5180 * delalloc io. 
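 * (Editor's note: the BTRFS_INODE_DELALLOC_META_RESERVED bit tested below makes only the first reservation on an inode pay for this inode item update; later reservations see the bit already set and skip the extra item, and drop_outstanding_extent() returns it when the last outstanding extent goes away.)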
5181 */ 5182 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED, 5183 &BTRFS_I(inode)->runtime_flags)) { 5184 nr_extents++; 5185 extra_reserve = 1; 5186 } 5187 5188 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); 5189 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); 5190 csum_bytes = BTRFS_I(inode)->csum_bytes; 5191 spin_unlock(&BTRFS_I(inode)->lock); 5192 5193 if (root->fs_info->quota_enabled) { 5194 ret = btrfs_qgroup_reserve(root, num_bytes + 5195 nr_extents * root->nodesize); 5196 if (ret) 5197 goto out_fail; 5198 } 5199 5200 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); 5201 if (unlikely(ret)) { 5202 if (root->fs_info->quota_enabled) 5203 btrfs_qgroup_free(root, num_bytes + 5204 nr_extents * root->nodesize); 5205 goto out_fail; 5206 } 5207 5208 spin_lock(&BTRFS_I(inode)->lock); 5209 if (extra_reserve) { 5210 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED, 5211 &BTRFS_I(inode)->runtime_flags); 5212 nr_extents--; 5213 } 5214 BTRFS_I(inode)->reserved_extents += nr_extents; 5215 spin_unlock(&BTRFS_I(inode)->lock); 5216 5217 if (delalloc_lock) 5218 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); 5219 5220 if (to_reserve) 5221 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5222 btrfs_ino(inode), to_reserve, 1); 5223 block_rsv_add_bytes(block_rsv, to_reserve, 1); 5224 5225 return 0; 5226 5227 out_fail: 5228 spin_lock(&BTRFS_I(inode)->lock); 5229 dropped = drop_outstanding_extent(inode); 5230 /* 5231 * If the inode's csum_bytes is the same as the original 5232 * csum_bytes then we know we haven't raced with any free()ers 5233 * so we can just reduce our inode's csum bytes and carry on. 5234 */ 5235 if (BTRFS_I(inode)->csum_bytes == csum_bytes) { 5236 calc_csum_metadata_size(inode, num_bytes, 0); 5237 } else { 5238 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes; 5239 u64 bytes; 5240 5241 /* 5242 * This is tricky, but first we need to figure out how much we 5243 * free'd from any free-ers that occurred during this 5244 * reservation, so we reset ->csum_bytes to the csum_bytes 5245 * before we dropped our lock, and then call the free for the 5246 * number of bytes that were freed while we were trying our 5247 * reservation. 5248 */ 5249 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes; 5250 BTRFS_I(inode)->csum_bytes = csum_bytes; 5251 to_free = calc_csum_metadata_size(inode, bytes, 0); 5252 5253 5254 /* 5255 * Now we need to see how much we would have freed had we not 5256 * been making this reservation and our ->csum_bytes were not 5257 * artificially inflated. 5258 */ 5259 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes; 5260 bytes = csum_bytes - orig_csum_bytes; 5261 bytes = calc_csum_metadata_size(inode, bytes, 0); 5262 5263 /* 5264 * Now reset ->csum_bytes to what it should be. If bytes is 5265 * more than to_free then we would have free'd more space had we 5266 * not had an artificially high ->csum_bytes, so we need to free 5267 * the remainder. If bytes is the same or less then we don't 5268 * need to do anything, the other free-ers did the correct 5269 * thing.
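 * (Editor's symbolic sketch: let C be the csum_bytes snapshot taken before we dropped the lock and suppose racing free()ers removed F bytes, so ->csum_bytes is now C - F. The first calc_csum_metadata_size() call frees F from the inflated base C, the second computes what freeing F would have cost from the uninflated base C - num_bytes, and any positive difference between the two is extra metadata we still have to give back.)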
5270 */ 5271 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes; 5272 if (bytes > to_free) 5273 to_free = bytes - to_free; 5274 else 5275 to_free = 0; 5276 } 5277 spin_unlock(&BTRFS_I(inode)->lock); 5278 if (dropped) 5279 to_free += btrfs_calc_trans_metadata_size(root, dropped); 5280 5281 if (to_free) { 5282 btrfs_block_rsv_release(root, block_rsv, to_free); 5283 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5284 btrfs_ino(inode), to_free, 0); 5285 } 5286 if (delalloc_lock) 5287 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); 5288 return ret; 5289 } 5290 5291 /** 5292 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode 5293 * @inode: the inode to release the reservation for 5294 * @num_bytes: the number of bytes we're releasing 5295 * 5296 * This will release the metadata reservation for an inode. This can be called 5297 * once we complete IO for a given set of bytes to release their metadata 5298 * reservations. 5299 */ 5300 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) 5301 { 5302 struct btrfs_root *root = BTRFS_I(inode)->root; 5303 u64 to_free = 0; 5304 unsigned dropped; 5305 5306 num_bytes = ALIGN(num_bytes, root->sectorsize); 5307 spin_lock(&BTRFS_I(inode)->lock); 5308 dropped = drop_outstanding_extent(inode); 5309 5310 if (num_bytes) 5311 to_free = calc_csum_metadata_size(inode, num_bytes, 0); 5312 spin_unlock(&BTRFS_I(inode)->lock); 5313 if (dropped > 0) 5314 to_free += btrfs_calc_trans_metadata_size(root, dropped); 5315 5316 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5317 btrfs_ino(inode), to_free, 0); 5318 if (root->fs_info->quota_enabled) { 5319 btrfs_qgroup_free(root, num_bytes + 5320 dropped * root->nodesize); 5321 } 5322 5323 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, 5324 to_free); 5325 } 5326 5327 /** 5328 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc 5329 * @inode: inode we're writing to 5330 * @num_bytes: the number of bytes we want to allocate 5331 * 5332 * This will do the following things: 5333 * 5334 * o reserve space in the data space info for num_bytes 5335 * o reserve space in the metadata space info based on the number of outstanding 5336 * extents and how many csums will be needed 5337 * o add to the inode's ->delalloc_bytes 5338 * o add it to the fs_info's delalloc inodes list. 5339 * 5340 * This will return 0 for success and -ENOSPC if there is no space left. 5341 */ 5342 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes) 5343 { 5344 int ret; 5345 5346 ret = btrfs_check_data_free_space(inode, num_bytes); 5347 if (ret) 5348 return ret; 5349 5350 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes); 5351 if (ret) { 5352 btrfs_free_reserved_data_space(inode, num_bytes); 5353 return ret; 5354 } 5355 5356 return 0; 5357 } 5358 5359 /** 5360 * btrfs_delalloc_release_space - release data and metadata space for delalloc 5361 * @inode: inode we're releasing space for 5362 * @num_bytes: the number of bytes we want to free up 5363 * 5364 * This must be matched with a call to btrfs_delalloc_reserve_space. This is 5365 * called in the case that we don't need the metadata AND data reservations 5366 * anymore, e.g. if there is an error or we insert an inline extent. 5367 * 5368 * This function will release the metadata space that was not used and will 5369 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes 5370 * list if there are no delalloc bytes left.
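 *
 * A minimal usage sketch (editor's addition; do_the_write() is a
 * hypothetical stand-in for the real write path):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, len);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode, len);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, len);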
5371 */ 5372 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes) 5373 { 5374 btrfs_delalloc_release_metadata(inode, num_bytes); 5375 btrfs_free_reserved_data_space(inode, num_bytes); 5376 } 5377 5378 static int update_block_group(struct btrfs_root *root, 5379 u64 bytenr, u64 num_bytes, int alloc) 5380 { 5381 struct btrfs_block_group_cache *cache = NULL; 5382 struct btrfs_fs_info *info = root->fs_info; 5383 u64 total = num_bytes; 5384 u64 old_val; 5385 u64 byte_in_group; 5386 int factor; 5387 5388 /* block accounting for super block */ 5389 spin_lock(&info->delalloc_root_lock); 5390 old_val = btrfs_super_bytes_used(info->super_copy); 5391 if (alloc) 5392 old_val += num_bytes; 5393 else 5394 old_val -= num_bytes; 5395 btrfs_set_super_bytes_used(info->super_copy, old_val); 5396 spin_unlock(&info->delalloc_root_lock); 5397 5398 while (total) { 5399 cache = btrfs_lookup_block_group(info, bytenr); 5400 if (!cache) 5401 return -ENOENT; 5402 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | 5403 BTRFS_BLOCK_GROUP_RAID1 | 5404 BTRFS_BLOCK_GROUP_RAID10)) 5405 factor = 2; 5406 else 5407 factor = 1; 5408 /* 5409 * If this block group has free space cache written out, we 5410 * need to make sure to load it if we are removing space. This 5411 * is because we need the unpinning stage to actually add the 5412 * space back to the block group, otherwise we will leak space. 5413 */ 5414 if (!alloc && cache->cached == BTRFS_CACHE_NO) 5415 cache_block_group(cache, 1); 5416 5417 byte_in_group = bytenr - cache->key.objectid; 5418 WARN_ON(byte_in_group > cache->key.offset); 5419 5420 spin_lock(&cache->space_info->lock); 5421 spin_lock(&cache->lock); 5422 5423 if (btrfs_test_opt(root, SPACE_CACHE) && 5424 cache->disk_cache_state < BTRFS_DC_CLEAR) 5425 cache->disk_cache_state = BTRFS_DC_CLEAR; 5426 5427 cache->dirty = 1; 5428 old_val = btrfs_block_group_used(&cache->item); 5429 num_bytes = min(total, cache->key.offset - byte_in_group); 5430 if (alloc) { 5431 old_val += num_bytes; 5432 btrfs_set_block_group_used(&cache->item, old_val); 5433 cache->reserved -= num_bytes; 5434 cache->space_info->bytes_reserved -= num_bytes; 5435 cache->space_info->bytes_used += num_bytes; 5436 cache->space_info->disk_used += num_bytes * factor; 5437 spin_unlock(&cache->lock); 5438 spin_unlock(&cache->space_info->lock); 5439 } else { 5440 old_val -= num_bytes; 5441 btrfs_set_block_group_used(&cache->item, old_val); 5442 cache->pinned += num_bytes; 5443 cache->space_info->bytes_pinned += num_bytes; 5444 cache->space_info->bytes_used -= num_bytes; 5445 cache->space_info->disk_used -= num_bytes * factor; 5446 spin_unlock(&cache->lock); 5447 spin_unlock(&cache->space_info->lock); 5448 5449 set_extent_dirty(info->pinned_extents, 5450 bytenr, bytenr + num_bytes - 1, 5451 GFP_NOFS | __GFP_NOFAIL); 5452 /* 5453 * No longer have used bytes in this block group, queue 5454 * it for deletion. 
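 * (Editor's note: as far as this file shows, the unused_bgs list appended to here is consumed elsewhere to remove now-empty block groups; the btrfs_get_block_group() reference keeps the cache alive until that consumer drops it.)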
5455 */ 5456 if (old_val == 0) { 5457 spin_lock(&info->unused_bgs_lock); 5458 if (list_empty(&cache->bg_list)) { 5459 btrfs_get_block_group(cache); 5460 list_add_tail(&cache->bg_list, 5461 &info->unused_bgs); 5462 } 5463 spin_unlock(&info->unused_bgs_lock); 5464 } 5465 } 5466 btrfs_put_block_group(cache); 5467 total -= num_bytes; 5468 bytenr += num_bytes; 5469 } 5470 return 0; 5471 } 5472 5473 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) 5474 { 5475 struct btrfs_block_group_cache *cache; 5476 u64 bytenr; 5477 5478 spin_lock(&root->fs_info->block_group_cache_lock); 5479 bytenr = root->fs_info->first_logical_byte; 5480 spin_unlock(&root->fs_info->block_group_cache_lock); 5481 5482 if (bytenr < (u64)-1) 5483 return bytenr; 5484 5485 cache = btrfs_lookup_first_block_group(root->fs_info, search_start); 5486 if (!cache) 5487 return 0; 5488 5489 bytenr = cache->key.objectid; 5490 btrfs_put_block_group(cache); 5491 5492 return bytenr; 5493 } 5494 5495 static int pin_down_extent(struct btrfs_root *root, 5496 struct btrfs_block_group_cache *cache, 5497 u64 bytenr, u64 num_bytes, int reserved) 5498 { 5499 spin_lock(&cache->space_info->lock); 5500 spin_lock(&cache->lock); 5501 cache->pinned += num_bytes; 5502 cache->space_info->bytes_pinned += num_bytes; 5503 if (reserved) { 5504 cache->reserved -= num_bytes; 5505 cache->space_info->bytes_reserved -= num_bytes; 5506 } 5507 spin_unlock(&cache->lock); 5508 spin_unlock(&cache->space_info->lock); 5509 5510 set_extent_dirty(root->fs_info->pinned_extents, bytenr, 5511 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); 5512 if (reserved) 5513 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes); 5514 return 0; 5515 } 5516 5517 /* 5518 * this function must be called within transaction 5519 */ 5520 int btrfs_pin_extent(struct btrfs_root *root, 5521 u64 bytenr, u64 num_bytes, int reserved) 5522 { 5523 struct btrfs_block_group_cache *cache; 5524 5525 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 5526 BUG_ON(!cache); /* Logic error */ 5527 5528 pin_down_extent(root, cache, bytenr, num_bytes, reserved); 5529 5530 btrfs_put_block_group(cache); 5531 return 0; 5532 } 5533 5534 /* 5535 * this function must be called within transaction 5536 */ 5537 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root, 5538 u64 bytenr, u64 num_bytes) 5539 { 5540 struct btrfs_block_group_cache *cache; 5541 int ret; 5542 5543 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 5544 if (!cache) 5545 return -EINVAL; 5546 5547 /* 5548 * pull in the free space cache (if any) so that our pin 5549 * removes the free space from the cache. We have load_only set 5550 * to one because the slow code to read in the free extents does check 5551 * the pinned extents. 
5552 */ 5553 cache_block_group(cache, 1); 5554 5555 pin_down_extent(root, cache, bytenr, num_bytes, 0); 5556 5557 /* remove us from the free space cache (if we're there at all) */ 5558 ret = btrfs_remove_free_space(cache, bytenr, num_bytes); 5559 btrfs_put_block_group(cache); 5560 return ret; 5561 } 5562 5563 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes) 5564 { 5565 int ret; 5566 struct btrfs_block_group_cache *block_group; 5567 struct btrfs_caching_control *caching_ctl; 5568 5569 block_group = btrfs_lookup_block_group(root->fs_info, start); 5570 if (!block_group) 5571 return -EINVAL; 5572 5573 cache_block_group(block_group, 0); 5574 caching_ctl = get_caching_control(block_group); 5575 5576 if (!caching_ctl) { 5577 /* Logic error */ 5578 BUG_ON(!block_group_cache_done(block_group)); 5579 ret = btrfs_remove_free_space(block_group, start, num_bytes); 5580 } else { 5581 mutex_lock(&caching_ctl->mutex); 5582 5583 if (start >= caching_ctl->progress) { 5584 ret = add_excluded_extent(root, start, num_bytes); 5585 } else if (start + num_bytes <= caching_ctl->progress) { 5586 ret = btrfs_remove_free_space(block_group, 5587 start, num_bytes); 5588 } else { 5589 num_bytes = caching_ctl->progress - start; 5590 ret = btrfs_remove_free_space(block_group, 5591 start, num_bytes); 5592 if (ret) 5593 goto out_lock; 5594 5595 num_bytes = (start + num_bytes) - 5596 caching_ctl->progress; 5597 start = caching_ctl->progress; 5598 ret = add_excluded_extent(root, start, num_bytes); 5599 } 5600 out_lock: 5601 mutex_unlock(&caching_ctl->mutex); 5602 put_caching_control(caching_ctl); 5603 } 5604 btrfs_put_block_group(block_group); 5605 return ret; 5606 } 5607 5608 int btrfs_exclude_logged_extents(struct btrfs_root *log, 5609 struct extent_buffer *eb) 5610 { 5611 struct btrfs_file_extent_item *item; 5612 struct btrfs_key key; 5613 int found_type; 5614 int i; 5615 5616 if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) 5617 return 0; 5618 5619 for (i = 0; i < btrfs_header_nritems(eb); i++) { 5620 btrfs_item_key_to_cpu(eb, &key, i); 5621 if (key.type != BTRFS_EXTENT_DATA_KEY) 5622 continue; 5623 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 5624 found_type = btrfs_file_extent_type(eb, item); 5625 if (found_type == BTRFS_FILE_EXTENT_INLINE) 5626 continue; 5627 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 5628 continue; 5629 key.objectid = btrfs_file_extent_disk_bytenr(eb, item); 5630 key.offset = btrfs_file_extent_disk_num_bytes(eb, item); 5631 __exclude_logged_extent(log, key.objectid, key.offset); 5632 } 5633 5634 return 0; 5635 } 5636 5637 /** 5638 * btrfs_update_reserved_bytes - update the block_group and space info counters 5639 * @cache: The cache we are manipulating 5640 * @num_bytes: The number of bytes in question 5641 * @reserve: One of the reservation enums 5642 * @delalloc: The blocks are allocated for the delalloc write 5643 * 5644 * This is called by the allocator when it reserves space, or by somebody who is 5645 * freeing space that was never actually used on disk. For example if you 5646 * reserve some space for a new leaf in transaction A and before transaction A 5647 * commits you free that leaf, you call this with reserve set to 0 in order to 5648 * clear the reservation. 5649 * 5650 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper 5651 * ENOSPC accounting. For data we handle the reservation through clearing the 5652 * delalloc bits in the io_tree. 
We have to do this since we could end up 5653 * allocating less disk space for the amount of data we have reserved in the 5654 * case of compression. 5655 * 5656 * If this is a reservation and the block group has become read only we cannot 5657 * make the reservation and return -EAGAIN, otherwise this function always 5658 * succeeds. 5659 */ 5660 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 5661 u64 num_bytes, int reserve, int delalloc) 5662 { 5663 struct btrfs_space_info *space_info = cache->space_info; 5664 int ret = 0; 5665 5666 spin_lock(&space_info->lock); 5667 spin_lock(&cache->lock); 5668 if (reserve != RESERVE_FREE) { 5669 if (cache->ro) { 5670 ret = -EAGAIN; 5671 } else { 5672 cache->reserved += num_bytes; 5673 space_info->bytes_reserved += num_bytes; 5674 if (reserve == RESERVE_ALLOC) { 5675 trace_btrfs_space_reservation(cache->fs_info, 5676 "space_info", space_info->flags, 5677 num_bytes, 0); 5678 space_info->bytes_may_use -= num_bytes; 5679 } 5680 5681 if (delalloc) 5682 cache->delalloc_bytes += num_bytes; 5683 } 5684 } else { 5685 if (cache->ro) 5686 space_info->bytes_readonly += num_bytes; 5687 cache->reserved -= num_bytes; 5688 space_info->bytes_reserved -= num_bytes; 5689 5690 if (delalloc) 5691 cache->delalloc_bytes -= num_bytes; 5692 } 5693 spin_unlock(&cache->lock); 5694 spin_unlock(&space_info->lock); 5695 return ret; 5696 } 5697 5698 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 5699 struct btrfs_root *root) 5700 { 5701 struct btrfs_fs_info *fs_info = root->fs_info; 5702 struct btrfs_caching_control *next; 5703 struct btrfs_caching_control *caching_ctl; 5704 struct btrfs_block_group_cache *cache; 5705 5706 down_write(&fs_info->commit_root_sem); 5707 5708 list_for_each_entry_safe(caching_ctl, next, 5709 &fs_info->caching_block_groups, list) { 5710 cache = caching_ctl->block_group; 5711 if (block_group_cache_done(cache)) { 5712 cache->last_byte_to_unpin = (u64)-1; 5713 list_del_init(&caching_ctl->list); 5714 put_caching_control(caching_ctl); 5715 } else { 5716 cache->last_byte_to_unpin = caching_ctl->progress; 5717 } 5718 } 5719 5720 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 5721 fs_info->pinned_extents = &fs_info->freed_extents[1]; 5722 else 5723 fs_info->pinned_extents = &fs_info->freed_extents[0]; 5724 5725 up_write(&fs_info->commit_root_sem); 5726 5727 update_global_block_rsv(fs_info); 5728 } 5729 5730 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end, 5731 const bool return_free_space) 5732 { 5733 struct btrfs_fs_info *fs_info = root->fs_info; 5734 struct btrfs_block_group_cache *cache = NULL; 5735 struct btrfs_space_info *space_info; 5736 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 5737 u64 len; 5738 bool readonly; 5739 5740 while (start <= end) { 5741 readonly = false; 5742 if (!cache || 5743 start >= cache->key.objectid + cache->key.offset) { 5744 if (cache) 5745 btrfs_put_block_group(cache); 5746 cache = btrfs_lookup_block_group(fs_info, start); 5747 BUG_ON(!cache); /* Logic error */ 5748 } 5749 5750 len = cache->key.objectid + cache->key.offset - start; 5751 len = min(len, end + 1 - start); 5752 5753 if (start < cache->last_byte_to_unpin) { 5754 len = min(len, cache->last_byte_to_unpin - start); 5755 if (return_free_space) 5756 btrfs_add_free_space(cache, start, len); 5757 } 5758 5759 start += len; 5760 space_info = cache->space_info; 5761 5762 spin_lock(&space_info->lock); 5763 spin_lock(&cache->lock); 5764 cache->pinned -= len; 5765 
space_info->bytes_pinned -= len; 5766 percpu_counter_add(&space_info->total_bytes_pinned, -len); 5767 if (cache->ro) { 5768 space_info->bytes_readonly += len; 5769 readonly = true; 5770 } 5771 spin_unlock(&cache->lock); 5772 if (!readonly && global_rsv->space_info == space_info) { 5773 spin_lock(&global_rsv->lock); 5774 if (!global_rsv->full) { 5775 len = min(len, global_rsv->size - 5776 global_rsv->reserved); 5777 global_rsv->reserved += len; 5778 space_info->bytes_may_use += len; 5779 if (global_rsv->reserved >= global_rsv->size) 5780 global_rsv->full = 1; 5781 } 5782 spin_unlock(&global_rsv->lock); 5783 } 5784 spin_unlock(&space_info->lock); 5785 } 5786 5787 if (cache) 5788 btrfs_put_block_group(cache); 5789 return 0; 5790 } 5791 5792 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 5793 struct btrfs_root *root) 5794 { 5795 struct btrfs_fs_info *fs_info = root->fs_info; 5796 struct extent_io_tree *unpin; 5797 u64 start; 5798 u64 end; 5799 int ret; 5800 5801 if (trans->aborted) 5802 return 0; 5803 5804 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 5805 unpin = &fs_info->freed_extents[1]; 5806 else 5807 unpin = &fs_info->freed_extents[0]; 5808 5809 while (1) { 5810 ret = find_first_extent_bit(unpin, 0, &start, &end, 5811 EXTENT_DIRTY, NULL); 5812 if (ret) 5813 break; 5814 5815 if (btrfs_test_opt(root, DISCARD)) 5816 ret = btrfs_discard_extent(root, start, 5817 end + 1 - start, NULL); 5818 5819 clear_extent_dirty(unpin, start, end, GFP_NOFS); 5820 unpin_extent_range(root, start, end, true); 5821 cond_resched(); 5822 } 5823 5824 return 0; 5825 } 5826 5827 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes, 5828 u64 owner, u64 root_objectid) 5829 { 5830 struct btrfs_space_info *space_info; 5831 u64 flags; 5832 5833 if (owner < BTRFS_FIRST_FREE_OBJECTID) { 5834 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID) 5835 flags = BTRFS_BLOCK_GROUP_SYSTEM; 5836 else 5837 flags = BTRFS_BLOCK_GROUP_METADATA; 5838 } else { 5839 flags = BTRFS_BLOCK_GROUP_DATA; 5840 } 5841 5842 space_info = __find_space_info(fs_info, flags); 5843 BUG_ON(!space_info); /* Logic bug */ 5844 percpu_counter_add(&space_info->total_bytes_pinned, num_bytes); 5845 } 5846 5847 5848 static int __btrfs_free_extent(struct btrfs_trans_handle *trans, 5849 struct btrfs_root *root, 5850 u64 bytenr, u64 num_bytes, u64 parent, 5851 u64 root_objectid, u64 owner_objectid, 5852 u64 owner_offset, int refs_to_drop, 5853 struct btrfs_delayed_extent_op *extent_op, 5854 int no_quota) 5855 { 5856 struct btrfs_key key; 5857 struct btrfs_path *path; 5858 struct btrfs_fs_info *info = root->fs_info; 5859 struct btrfs_root *extent_root = info->extent_root; 5860 struct extent_buffer *leaf; 5861 struct btrfs_extent_item *ei; 5862 struct btrfs_extent_inline_ref *iref; 5863 int ret; 5864 int is_data; 5865 int extent_slot = 0; 5866 int found_extent = 0; 5867 int num_to_del = 1; 5868 u32 item_size; 5869 u64 refs; 5870 int last_ref = 0; 5871 enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL; 5872 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 5873 SKINNY_METADATA); 5874 5875 if (!info->quota_enabled || !is_fstree(root_objectid)) 5876 no_quota = 1; 5877 5878 path = btrfs_alloc_path(); 5879 if (!path) 5880 return -ENOMEM; 5881 5882 path->reada = 1; 5883 path->leave_spinning = 1; 5884 5885 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; 5886 BUG_ON(!is_data && refs_to_drop != 1); 5887 5888 if (is_data) 5889 skinny_metadata = 0; 5890 5891 ret = lookup_extent_backref(trans, 
extent_root, path, &iref, 5892 bytenr, num_bytes, parent, 5893 root_objectid, owner_objectid, 5894 owner_offset); 5895 if (ret == 0) { 5896 extent_slot = path->slots[0]; 5897 while (extent_slot >= 0) { 5898 btrfs_item_key_to_cpu(path->nodes[0], &key, 5899 extent_slot); 5900 if (key.objectid != bytenr) 5901 break; 5902 if (key.type == BTRFS_EXTENT_ITEM_KEY && 5903 key.offset == num_bytes) { 5904 found_extent = 1; 5905 break; 5906 } 5907 if (key.type == BTRFS_METADATA_ITEM_KEY && 5908 key.offset == owner_objectid) { 5909 found_extent = 1; 5910 break; 5911 } 5912 if (path->slots[0] - extent_slot > 5) 5913 break; 5914 extent_slot--; 5915 } 5916 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 5917 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot); 5918 if (found_extent && item_size < sizeof(*ei)) 5919 found_extent = 0; 5920 #endif 5921 if (!found_extent) { 5922 BUG_ON(iref); 5923 ret = remove_extent_backref(trans, extent_root, path, 5924 NULL, refs_to_drop, 5925 is_data, &last_ref); 5926 if (ret) { 5927 btrfs_abort_transaction(trans, extent_root, ret); 5928 goto out; 5929 } 5930 btrfs_release_path(path); 5931 path->leave_spinning = 1; 5932 5933 key.objectid = bytenr; 5934 key.type = BTRFS_EXTENT_ITEM_KEY; 5935 key.offset = num_bytes; 5936 5937 if (!is_data && skinny_metadata) { 5938 key.type = BTRFS_METADATA_ITEM_KEY; 5939 key.offset = owner_objectid; 5940 } 5941 5942 ret = btrfs_search_slot(trans, extent_root, 5943 &key, path, -1, 1); 5944 if (ret > 0 && skinny_metadata && path->slots[0]) { 5945 /* 5946 * Couldn't find our skinny metadata item, 5947 * see if we have ye olde extent item. 5948 */ 5949 path->slots[0]--; 5950 btrfs_item_key_to_cpu(path->nodes[0], &key, 5951 path->slots[0]); 5952 if (key.objectid == bytenr && 5953 key.type == BTRFS_EXTENT_ITEM_KEY && 5954 key.offset == num_bytes) 5955 ret = 0; 5956 } 5957 5958 if (ret > 0 && skinny_metadata) { 5959 skinny_metadata = false; 5960 key.objectid = bytenr; 5961 key.type = BTRFS_EXTENT_ITEM_KEY; 5962 key.offset = num_bytes; 5963 btrfs_release_path(path); 5964 ret = btrfs_search_slot(trans, extent_root, 5965 &key, path, -1, 1); 5966 } 5967 5968 if (ret) { 5969 btrfs_err(info, "umm, got %d back from search, was looking for %llu", 5970 ret, bytenr); 5971 if (ret > 0) 5972 btrfs_print_leaf(extent_root, 5973 path->nodes[0]); 5974 } 5975 if (ret < 0) { 5976 btrfs_abort_transaction(trans, extent_root, ret); 5977 goto out; 5978 } 5979 extent_slot = path->slots[0]; 5980 } 5981 } else if (WARN_ON(ret == -ENOENT)) { 5982 btrfs_print_leaf(extent_root, path->nodes[0]); 5983 btrfs_err(info, 5984 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu", 5985 bytenr, parent, root_objectid, owner_objectid, 5986 owner_offset); 5987 btrfs_abort_transaction(trans, extent_root, ret); 5988 goto out; 5989 } else { 5990 btrfs_abort_transaction(trans, extent_root, ret); 5991 goto out; 5992 } 5993 5994 leaf = path->nodes[0]; 5995 item_size = btrfs_item_size_nr(leaf, extent_slot); 5996 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 5997 if (item_size < sizeof(*ei)) { 5998 BUG_ON(found_extent || extent_slot != path->slots[0]); 5999 ret = convert_extent_item_v0(trans, extent_root, path, 6000 owner_objectid, 0); 6001 if (ret < 0) { 6002 btrfs_abort_transaction(trans, extent_root, ret); 6003 goto out; 6004 } 6005 6006 btrfs_release_path(path); 6007 path->leave_spinning = 1; 6008 6009 key.objectid = bytenr; 6010 key.type = BTRFS_EXTENT_ITEM_KEY; 6011 key.offset = num_bytes; 6012 6013 ret = btrfs_search_slot(trans, extent_root, &key, path, 6014 -1, 1); 6015 if 
(ret) {
6016 			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6017 				ret, bytenr);
6018 			btrfs_print_leaf(extent_root, path->nodes[0]);
6019 		}
6020 		if (ret < 0) {
6021 			btrfs_abort_transaction(trans, extent_root, ret);
6022 			goto out;
6023 		}
6024 
6025 		extent_slot = path->slots[0];
6026 		leaf = path->nodes[0];
6027 		item_size = btrfs_item_size_nr(leaf, extent_slot);
6028 	}
6029 #endif
6030 	BUG_ON(item_size < sizeof(*ei));
6031 	ei = btrfs_item_ptr(leaf, extent_slot,
6032 			    struct btrfs_extent_item);
6033 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6034 	    key.type == BTRFS_EXTENT_ITEM_KEY) {
6035 		struct btrfs_tree_block_info *bi;
6036 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6037 		bi = (struct btrfs_tree_block_info *)(ei + 1);
6038 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6039 	}
6040 
6041 	refs = btrfs_extent_refs(leaf, ei);
6042 	if (refs < refs_to_drop) {
6043 		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6044 			  "for bytenr %Lu", refs_to_drop, refs, bytenr);
6045 		ret = -EINVAL;
6046 		btrfs_abort_transaction(trans, extent_root, ret);
6047 		goto out;
6048 	}
6049 	refs -= refs_to_drop;
6050 
6051 	if (refs > 0) {
6052 		type = BTRFS_QGROUP_OPER_SUB_SHARED;
6053 		if (extent_op)
6054 			__run_delayed_extent_op(extent_op, leaf, ei);
6055 		/*
6056 		 * In the case of an inline back ref, the reference count will
6057 		 * be updated by remove_extent_backref
6058 		 */
6059 		if (iref) {
6060 			BUG_ON(!found_extent);
6061 		} else {
6062 			btrfs_set_extent_refs(leaf, ei, refs);
6063 			btrfs_mark_buffer_dirty(leaf);
6064 		}
6065 		if (found_extent) {
6066 			ret = remove_extent_backref(trans, extent_root, path,
6067 						    iref, refs_to_drop,
6068 						    is_data, &last_ref);
6069 			if (ret) {
6070 				btrfs_abort_transaction(trans, extent_root, ret);
6071 				goto out;
6072 			}
6073 		}
6074 		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6075 				 root_objectid);
6076 	} else {
6077 		if (found_extent) {
6078 			BUG_ON(is_data && refs_to_drop !=
6079 			       extent_data_ref_count(root, path, iref));
6080 			if (iref) {
6081 				BUG_ON(path->slots[0] != extent_slot);
6082 			} else {
6083 				BUG_ON(path->slots[0] != extent_slot + 1);
6084 				path->slots[0] = extent_slot;
6085 				num_to_del = 2;
6086 			}
6087 		}
6088 
6089 		last_ref = 1;
6090 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6091 				      num_to_del);
6092 		if (ret) {
6093 			btrfs_abort_transaction(trans, extent_root, ret);
6094 			goto out;
6095 		}
6096 		btrfs_release_path(path);
6097 
6098 		if (is_data) {
6099 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6100 			if (ret) {
6101 				btrfs_abort_transaction(trans, extent_root, ret);
6102 				goto out;
6103 			}
6104 		}
6105 
6106 		ret = update_block_group(root, bytenr, num_bytes, 0);
6107 		if (ret) {
6108 			btrfs_abort_transaction(trans, extent_root, ret);
6109 			goto out;
6110 		}
6111 	}
6112 	btrfs_release_path(path);
6113 
6114 	/* Deal with the quota accounting */
6115 	if (!ret && last_ref && !no_quota) {
6116 		int mod_seq = 0;
6117 
6118 		if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6119 		    type == BTRFS_QGROUP_OPER_SUB_SHARED)
6120 			mod_seq = 1;
6121 
6122 		ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6123 					      bytenr, num_bytes, type,
6124 					      mod_seq);
6125 	}
6126 out:
6127 	btrfs_free_path(path);
6128 	return ret;
6129 }
6130 
6131 /*
6132  * when we free a block, it is possible (and likely) that we free the last
6133  * delayed ref for that extent as well. This searches the delayed ref tree for
6134  * a given extent, and if there are no other delayed refs to be processed, it
6135  * removes the head from the tree.
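 *
 * A rough sketch of the caller's side (this mirrors the use in
 * btrfs_free_tree_block() below; a return of 1 means the ref head was
 * removed here and the block's space can go straight back to the free
 * space cache):
 *
 *	ret = check_ref_cleanup(trans, root, buf->start);
 *	if (!ret)
 *		goto out;	(other refs remain, keep the extent pinned)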
6136 */ 6137 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, 6138 struct btrfs_root *root, u64 bytenr) 6139 { 6140 struct btrfs_delayed_ref_head *head; 6141 struct btrfs_delayed_ref_root *delayed_refs; 6142 int ret = 0; 6143 6144 delayed_refs = &trans->transaction->delayed_refs; 6145 spin_lock(&delayed_refs->lock); 6146 head = btrfs_find_delayed_ref_head(trans, bytenr); 6147 if (!head) 6148 goto out_delayed_unlock; 6149 6150 spin_lock(&head->lock); 6151 if (rb_first(&head->ref_root)) 6152 goto out; 6153 6154 if (head->extent_op) { 6155 if (!head->must_insert_reserved) 6156 goto out; 6157 btrfs_free_delayed_extent_op(head->extent_op); 6158 head->extent_op = NULL; 6159 } 6160 6161 /* 6162 * waiting for the lock here would deadlock. If someone else has it 6163 * locked they are already in the process of dropping it anyway 6164 */ 6165 if (!mutex_trylock(&head->mutex)) 6166 goto out; 6167 6168 /* 6169 * at this point we have a head with no other entries. Go 6170 * ahead and process it. 6171 */ 6172 head->node.in_tree = 0; 6173 rb_erase(&head->href_node, &delayed_refs->href_root); 6174 6175 atomic_dec(&delayed_refs->num_entries); 6176 6177 /* 6178 * we don't take a ref on the node because we're removing it from the 6179 * tree, so we just steal the ref the tree was holding. 6180 */ 6181 delayed_refs->num_heads--; 6182 if (head->processing == 0) 6183 delayed_refs->num_heads_ready--; 6184 head->processing = 0; 6185 spin_unlock(&head->lock); 6186 spin_unlock(&delayed_refs->lock); 6187 6188 BUG_ON(head->extent_op); 6189 if (head->must_insert_reserved) 6190 ret = 1; 6191 6192 mutex_unlock(&head->mutex); 6193 btrfs_put_delayed_ref(&head->node); 6194 return ret; 6195 out: 6196 spin_unlock(&head->lock); 6197 6198 out_delayed_unlock: 6199 spin_unlock(&delayed_refs->lock); 6200 return 0; 6201 } 6202 6203 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 6204 struct btrfs_root *root, 6205 struct extent_buffer *buf, 6206 u64 parent, int last_ref) 6207 { 6208 struct btrfs_block_group_cache *cache = NULL; 6209 int pin = 1; 6210 int ret; 6211 6212 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 6213 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, 6214 buf->start, buf->len, 6215 parent, root->root_key.objectid, 6216 btrfs_header_level(buf), 6217 BTRFS_DROP_DELAYED_REF, NULL, 0); 6218 BUG_ON(ret); /* -ENOMEM */ 6219 } 6220 6221 if (!last_ref) 6222 return; 6223 6224 cache = btrfs_lookup_block_group(root->fs_info, buf->start); 6225 6226 if (btrfs_header_generation(buf) == trans->transid) { 6227 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 6228 ret = check_ref_cleanup(trans, root, buf->start); 6229 if (!ret) 6230 goto out; 6231 } 6232 6233 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 6234 pin_down_extent(root, cache, buf->start, buf->len, 1); 6235 goto out; 6236 } 6237 6238 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 6239 6240 btrfs_add_free_space(cache, buf->start, buf->len); 6241 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0); 6242 trace_btrfs_reserved_extent_free(root, buf->start, buf->len); 6243 pin = 0; 6244 } 6245 out: 6246 if (pin) 6247 add_pinned_bytes(root->fs_info, buf->len, 6248 btrfs_header_level(buf), 6249 root->root_key.objectid); 6250 6251 /* 6252 * Deleting the buffer, clear the corrupt flag since it doesn't matter 6253 * anymore. 
6254 	 */
6255 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6256 	btrfs_put_block_group(cache);
6257 }
6258 
6259 /* Can return -ENOMEM */
6260 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6261 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6262 		      u64 owner, u64 offset, int no_quota)
6263 {
6264 	int ret;
6265 	struct btrfs_fs_info *fs_info = root->fs_info;
6266 
6267 	if (btrfs_test_is_dummy_root(root))
6268 		return 0;
6269 
6270 	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6271 
6272 	/*
6273 	 * tree log blocks never actually go into the extent allocation
6274 	 * tree, just update pinning info and exit early.
6275 	 */
6276 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6277 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6278 		/* unlocks the pinned mutex */
6279 		btrfs_pin_extent(root, bytenr, num_bytes, 1);
6280 		ret = 0;
6281 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6282 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6283 					num_bytes,
6284 					parent, root_objectid, (int)owner,
6285 					BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6286 	} else {
6287 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6288 						num_bytes,
6289 						parent, root_objectid, owner,
6290 						offset, BTRFS_DROP_DELAYED_REF,
6291 						NULL, no_quota);
6292 	}
6293 	return ret;
6294 }
6295 
6296 /*
6297  * when we wait for progress in the block group caching, it's because
6298  * our allocation attempt failed at least once. So, we must sleep
6299  * and let some progress happen before we try again.
6300  *
6301  * This function will sleep at least once waiting for new free space to
6302  * show up, and then it will check the block group free space numbers
6303  * for our min num_bytes. Another option is to have it go ahead
6304  * and look in the rbtree for a free extent of a given size, but this
6305  * is a good start.
6306  *
6307  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6308  * any of the information in this block group.
6309  */
6310 static noinline void
6311 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6312 				u64 num_bytes)
6313 {
6314 	struct btrfs_caching_control *caching_ctl;
6315 
6316 	caching_ctl = get_caching_control(cache);
6317 	if (!caching_ctl)
6318 		return;
6319 
6320 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6321 		   (cache->free_space_ctl->free_space >= num_bytes));
6322 
6323 	put_caching_control(caching_ctl);
6324 }
6325 
6326 static noinline int
6327 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6328 {
6329 	struct btrfs_caching_control *caching_ctl;
6330 	int ret = 0;
6331 
6332 	caching_ctl = get_caching_control(cache);
6333 	if (!caching_ctl)
6334 		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6335 
6336 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
6337 	if (cache->cached == BTRFS_CACHE_ERROR)
6338 		ret = -EIO;
6339 	put_caching_control(caching_ctl);
6340 	return ret;
6341 }
6342 
6343 int __get_raid_index(u64 flags)
6344 {
6345 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
6346 		return BTRFS_RAID_RAID10;
6347 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6348 		return BTRFS_RAID_RAID1;
6349 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
6350 		return BTRFS_RAID_DUP;
6351 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6352 		return BTRFS_RAID_RAID0;
6353 	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6354 		return BTRFS_RAID_RAID5;
6355 	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6356 		return BTRFS_RAID_RAID6;
6357 
6358 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6359 }
6360 
6361 int get_block_group_index(struct btrfs_block_group_cache *cache)
6362 {
6363 	return __get_raid_index(cache->flags);
6364 }
6365 
6366 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6367 	[BTRFS_RAID_RAID10]	= "raid10",
6368 	[BTRFS_RAID_RAID1]	= "raid1",
6369 	[BTRFS_RAID_DUP]	= "dup",
6370 	[BTRFS_RAID_RAID0]	= "raid0",
6371 	[BTRFS_RAID_SINGLE]	= "single",
6372 	[BTRFS_RAID_RAID5]	= "raid5",
6373 	[BTRFS_RAID_RAID6]	= "raid6",
6374 };
6375 
6376 static const char *get_raid_name(enum btrfs_raid_types type)
6377 {
6378 	if (type >= BTRFS_NR_RAID_TYPES)
6379 		return NULL;
6380 
6381 	return btrfs_raid_type_names[type];
6382 }
6383 
6384 enum btrfs_loop_type {
6385 	LOOP_CACHING_NOWAIT = 0,
6386 	LOOP_CACHING_WAIT = 1,
6387 	LOOP_ALLOC_CHUNK = 2,
6388 	LOOP_NO_EMPTY_SIZE = 3,
6389 };
6390 
6391 static inline void
6392 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6393 		       int delalloc)
6394 {
6395 	if (delalloc)
6396 		down_read(&cache->data_rwsem);
6397 }
6398 
6399 static inline void
6400 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6401 		       int delalloc)
6402 {
6403 	btrfs_get_block_group(cache);
6404 	if (delalloc)
6405 		down_read(&cache->data_rwsem);
6406 }
6407 
6408 static struct btrfs_block_group_cache *
6409 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6410 		   struct btrfs_free_cluster *cluster,
6411 		   int delalloc)
6412 {
6413 	struct btrfs_block_group_cache *used_bg;
6414 	bool locked = false;
6415 again:
6416 	spin_lock(&cluster->refill_lock);
6417 	if (locked) {
6418 		if (used_bg == cluster->block_group)
6419 			return used_bg;
6420 
6421 		up_read(&used_bg->data_rwsem);
6422 		btrfs_put_block_group(used_bg);
6423 	}
6424 
6425 	used_bg = cluster->block_group;
6426 	if (!used_bg)
6427 		return NULL;
6428 
6429 	if (used_bg == block_group)
6430 		return used_bg;
6431 
6432 	btrfs_get_block_group(used_bg);
6433 
6434 	if (!delalloc)
6435 		return used_bg;
6436 
6437 	if (down_read_trylock(&used_bg->data_rwsem))
6438 		return used_bg;
6439 
6440 	spin_unlock(&cluster->refill_lock);
6441 	down_read(&used_bg->data_rwsem);
6442 	locked = true;
6443 	goto again;
6444 }
6445 
6446 static inline void
6447 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6448 			  int delalloc)
6449 {
6450 	if (delalloc)
6451 		up_read(&cache->data_rwsem);
6452 	btrfs_put_block_group(cache);
6453 }
6454 
6455 /*
6456  * walks the btree of allocated extents and finds a hole of a given size.
6457  * The key ins is changed to record the hole:
6458  * ins->objectid == start position
6459  * ins->type == BTRFS_EXTENT_ITEM_KEY
6460  * ins->offset == the size of the hole.
6461  * Any available blocks before search_start are skipped.
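 *
 * A rough caller sketch (this mirrors btrfs_reserve_extent() further
 * down; no hint byte, no empty_size, no delalloc):
 *
 *	struct btrfs_key ins;
 *	int ret = find_free_extent(root, num_bytes, 0, 0, &ins, flags, 0);
 *
 * On success, ins.objectid/ins.offset describe the reservation; on
 * -ENOSPC, ins.offset is set to the largest free extent seen.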
6462  *
6463  * If there is no suitable free space, we record the max size of
6464  * the largest free extent seen during the search.
6465  */
6466 static noinline int find_free_extent(struct btrfs_root *orig_root,
6467 				     u64 num_bytes, u64 empty_size,
6468 				     u64 hint_byte, struct btrfs_key *ins,
6469 				     u64 flags, int delalloc)
6470 {
6471 	int ret = 0;
6472 	struct btrfs_root *root = orig_root->fs_info->extent_root;
6473 	struct btrfs_free_cluster *last_ptr = NULL;
6474 	struct btrfs_block_group_cache *block_group = NULL;
6475 	u64 search_start = 0;
6476 	u64 max_extent_size = 0;
6477 	int empty_cluster = 2 * 1024 * 1024;
6478 	struct btrfs_space_info *space_info;
6479 	int loop = 0;
6480 	int index = __get_raid_index(flags);
6481 	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6482 		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6483 	bool failed_cluster_refill = false;
6484 	bool failed_alloc = false;
6485 	bool use_cluster = true;
6486 	bool have_caching_bg = false;
6487 
6488 	WARN_ON(num_bytes < root->sectorsize);
6489 	ins->type = BTRFS_EXTENT_ITEM_KEY;
6490 	ins->objectid = 0;
6491 	ins->offset = 0;
6492 
6493 	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6494 
6495 	space_info = __find_space_info(root->fs_info, flags);
6496 	if (!space_info) {
6497 		btrfs_err(root->fs_info, "No space info for %llu", flags);
6498 		return -ENOSPC;
6499 	}
6500 
6501 	/*
6502 	 * If the space info is for both data and metadata it means we have a
6503 	 * small filesystem and we can't use the clustering stuff.
6504 	 */
6505 	if (btrfs_mixed_space_info(space_info))
6506 		use_cluster = false;
6507 
6508 	if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6509 		last_ptr = &root->fs_info->meta_alloc_cluster;
6510 		if (!btrfs_test_opt(root, SSD))
6511 			empty_cluster = 64 * 1024;
6512 	}
6513 
6514 	if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6515 	    btrfs_test_opt(root, SSD)) {
6516 		last_ptr = &root->fs_info->data_alloc_cluster;
6517 	}
6518 
6519 	if (last_ptr) {
6520 		spin_lock(&last_ptr->lock);
6521 		if (last_ptr->block_group)
6522 			hint_byte = last_ptr->window_start;
6523 		spin_unlock(&last_ptr->lock);
6524 	}
6525 
6526 	search_start = max(search_start, first_logical_byte(root, 0));
6527 	search_start = max(search_start, hint_byte);
6528 
6529 	if (!last_ptr)
6530 		empty_cluster = 0;
6531 
6532 	if (search_start == hint_byte) {
6533 		block_group = btrfs_lookup_block_group(root->fs_info,
6534 						       search_start);
6535 		/*
6536 		 * we don't want to use the block group if it doesn't match our
6537 		 * allocation bits, or if it's not cached.
6538 		 *
6539 		 * However, if we are re-searching with an ideal block group
6540 		 * picked out then we don't care that the block group is cached.
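		 * (BTRFS_CACHE_NO below means caching has not been started at
		 * all, so such a group has no free space data to search yet)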
6541 		 */
6542 		if (block_group && block_group_bits(block_group, flags) &&
6543 		    block_group->cached != BTRFS_CACHE_NO) {
6544 			down_read(&space_info->groups_sem);
6545 			if (list_empty(&block_group->list) ||
6546 			    block_group->ro) {
6547 				/*
6548 				 * someone is removing this block group,
6549 				 * we can't jump into the have_block_group
6550 				 * target because our list pointers are not
6551 				 * valid
6552 				 */
6553 				btrfs_put_block_group(block_group);
6554 				up_read(&space_info->groups_sem);
6555 			} else {
6556 				index = get_block_group_index(block_group);
6557 				btrfs_lock_block_group(block_group, delalloc);
6558 				goto have_block_group;
6559 			}
6560 		} else if (block_group) {
6561 			btrfs_put_block_group(block_group);
6562 		}
6563 	}
6564 search:
6565 	have_caching_bg = false;
6566 	down_read(&space_info->groups_sem);
6567 	list_for_each_entry(block_group, &space_info->block_groups[index],
6568 			    list) {
6569 		u64 offset;
6570 		int cached;
6571 
6572 		btrfs_grab_block_group(block_group, delalloc);
6573 		search_start = block_group->key.objectid;
6574 
6575 		/*
6576 		 * this can happen if we end up cycling through all the
6577 		 * raid types, but we want to make sure we only allocate
6578 		 * for the proper type.
6579 		 */
6580 		if (!block_group_bits(block_group, flags)) {
6581 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
6582 				BTRFS_BLOCK_GROUP_RAID1 |
6583 				BTRFS_BLOCK_GROUP_RAID5 |
6584 				BTRFS_BLOCK_GROUP_RAID6 |
6585 				BTRFS_BLOCK_GROUP_RAID10;
6586 
6587 			/*
6588 			 * if they asked for extra copies and this block group
6589 			 * doesn't provide them, bail. This does allow us to
6590 			 * fill raid0 from raid1.
6591 			 */
6592 			if ((flags & extra) && !(block_group->flags & extra))
6593 				goto loop;
6594 		}
6595 
6596 have_block_group:
6597 		cached = block_group_cache_done(block_group);
6598 		if (unlikely(!cached)) {
6599 			ret = cache_block_group(block_group, 0);
6600 			BUG_ON(ret < 0);
6601 			ret = 0;
6602 		}
6603 
6604 		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6605 			goto loop;
6606 		if (unlikely(block_group->ro))
6607 			goto loop;
6608 
6609 		/*
6610 		 * Ok we want to try and use the cluster allocator, so
6611 		 * let's look there
6612 		 */
6613 		if (last_ptr) {
6614 			struct btrfs_block_group_cache *used_block_group;
6615 			unsigned long aligned_cluster;
6616 			/*
6617 			 * the refill lock keeps out other
6618 			 * people trying to start a new cluster
6619 			 */
6620 			used_block_group = btrfs_lock_cluster(block_group,
6621 							      last_ptr,
6622 							      delalloc);
6623 			if (!used_block_group)
6624 				goto refill_cluster;
6625 
6626 			if (used_block_group != block_group &&
6627 			    (used_block_group->ro ||
6628 			     !block_group_bits(used_block_group, flags)))
6629 				goto release_cluster;
6630 
6631 			offset = btrfs_alloc_from_cluster(used_block_group,
6632 						last_ptr,
6633 						num_bytes,
6634 						used_block_group->key.objectid,
6635 						&max_extent_size);
6636 			if (offset) {
6637 				/* we have a block, we're done */
6638 				spin_unlock(&last_ptr->refill_lock);
6639 				trace_btrfs_reserve_extent_cluster(root,
6640 						used_block_group,
6641 						search_start, num_bytes);
6642 				if (used_block_group != block_group) {
6643 					btrfs_release_block_group(block_group,
6644 								  delalloc);
6645 					block_group = used_block_group;
6646 				}
6647 				goto checks;
6648 			}
6649 
6650 			WARN_ON(last_ptr->block_group != used_block_group);
6651 release_cluster:
6652 			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
6653 			 * set up a new cluster, so let's just skip it
6654 			 * and let the allocator find whatever block
6655 			 * it can find.
If we reach this point, we 6656 * will have tried the cluster allocator 6657 * plenty of times and not have found 6658 * anything, so we are likely way too 6659 * fragmented for the clustering stuff to find 6660 * anything. 6661 * 6662 * However, if the cluster is taken from the 6663 * current block group, release the cluster 6664 * first, so that we stand a better chance of 6665 * succeeding in the unclustered 6666 * allocation. */ 6667 if (loop >= LOOP_NO_EMPTY_SIZE && 6668 used_block_group != block_group) { 6669 spin_unlock(&last_ptr->refill_lock); 6670 btrfs_release_block_group(used_block_group, 6671 delalloc); 6672 goto unclustered_alloc; 6673 } 6674 6675 /* 6676 * this cluster didn't work out, free it and 6677 * start over 6678 */ 6679 btrfs_return_cluster_to_free_space(NULL, last_ptr); 6680 6681 if (used_block_group != block_group) 6682 btrfs_release_block_group(used_block_group, 6683 delalloc); 6684 refill_cluster: 6685 if (loop >= LOOP_NO_EMPTY_SIZE) { 6686 spin_unlock(&last_ptr->refill_lock); 6687 goto unclustered_alloc; 6688 } 6689 6690 aligned_cluster = max_t(unsigned long, 6691 empty_cluster + empty_size, 6692 block_group->full_stripe_len); 6693 6694 /* allocate a cluster in this block group */ 6695 ret = btrfs_find_space_cluster(root, block_group, 6696 last_ptr, search_start, 6697 num_bytes, 6698 aligned_cluster); 6699 if (ret == 0) { 6700 /* 6701 * now pull our allocation out of this 6702 * cluster 6703 */ 6704 offset = btrfs_alloc_from_cluster(block_group, 6705 last_ptr, 6706 num_bytes, 6707 search_start, 6708 &max_extent_size); 6709 if (offset) { 6710 /* we found one, proceed */ 6711 spin_unlock(&last_ptr->refill_lock); 6712 trace_btrfs_reserve_extent_cluster(root, 6713 block_group, search_start, 6714 num_bytes); 6715 goto checks; 6716 } 6717 } else if (!cached && loop > LOOP_CACHING_NOWAIT 6718 && !failed_cluster_refill) { 6719 spin_unlock(&last_ptr->refill_lock); 6720 6721 failed_cluster_refill = true; 6722 wait_block_group_cache_progress(block_group, 6723 num_bytes + empty_cluster + empty_size); 6724 goto have_block_group; 6725 } 6726 6727 /* 6728 * at this point we either didn't find a cluster 6729 * or we weren't able to allocate a block from our 6730 * cluster. Free the cluster we've been trying 6731 * to use, and go to the next block group 6732 */ 6733 btrfs_return_cluster_to_free_space(NULL, last_ptr); 6734 spin_unlock(&last_ptr->refill_lock); 6735 goto loop; 6736 } 6737 6738 unclustered_alloc: 6739 spin_lock(&block_group->free_space_ctl->tree_lock); 6740 if (cached && 6741 block_group->free_space_ctl->free_space < 6742 num_bytes + empty_cluster + empty_size) { 6743 if (block_group->free_space_ctl->free_space > 6744 max_extent_size) 6745 max_extent_size = 6746 block_group->free_space_ctl->free_space; 6747 spin_unlock(&block_group->free_space_ctl->tree_lock); 6748 goto loop; 6749 } 6750 spin_unlock(&block_group->free_space_ctl->tree_lock); 6751 6752 offset = btrfs_find_space_for_alloc(block_group, search_start, 6753 num_bytes, empty_size, 6754 &max_extent_size); 6755 /* 6756 * If we didn't find a chunk, and we haven't failed on this 6757 * block group before, and this block group is in the middle of 6758 * caching and we are ok with waiting, then go ahead and wait 6759 * for progress to be made, and set failed_alloc to true. 6760 * 6761 * If failed_alloc is true then we've already waited on this 6762 * block group once and should move on to the next block group. 
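		 * (in other words, an uncached group may be worth one wait,
		 * while a fully cached group that still has no room just
		 * sends us on to the next group)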
6763 		 */
6764 		if (!offset && !failed_alloc && !cached &&
6765 		    loop > LOOP_CACHING_NOWAIT) {
6766 			wait_block_group_cache_progress(block_group,
6767 						num_bytes + empty_size);
6768 			failed_alloc = true;
6769 			goto have_block_group;
6770 		} else if (!offset) {
6771 			if (!cached)
6772 				have_caching_bg = true;
6773 			goto loop;
6774 		}
6775 checks:
6776 		search_start = ALIGN(offset, root->stripesize);
6777 
6778 		/* move on to the next group */
6779 		if (search_start + num_bytes >
6780 		    block_group->key.objectid + block_group->key.offset) {
6781 			btrfs_add_free_space(block_group, offset, num_bytes);
6782 			goto loop;
6783 		}
6784 
6785 		if (offset < search_start)
6786 			btrfs_add_free_space(block_group, offset,
6787 					     search_start - offset);
6788 		BUG_ON(offset > search_start);
6789 
6790 		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6791 						  alloc_type, delalloc);
6792 		if (ret == -EAGAIN) {
6793 			btrfs_add_free_space(block_group, offset, num_bytes);
6794 			goto loop;
6795 		}
6796 
6797 		/* we are all good, let's return */
6798 		ins->objectid = search_start;
6799 		ins->offset = num_bytes;
6800 
6801 		trace_btrfs_reserve_extent(orig_root, block_group,
6802 					   search_start, num_bytes);
6803 		btrfs_release_block_group(block_group, delalloc);
6804 		break;
6805 loop:
6806 		failed_cluster_refill = false;
6807 		failed_alloc = false;
6808 		BUG_ON(index != get_block_group_index(block_group));
6809 		btrfs_release_block_group(block_group, delalloc);
6810 	}
6811 	up_read(&space_info->groups_sem);
6812 
6813 	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6814 		goto search;
6815 
6816 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6817 		goto search;
6818 
6819 	/*
6820 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6821 	 * caching kthreads as we move along
6822 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6823 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6824 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6825 	 * again
6826 	 */
6827 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6828 		index = 0;
6829 		loop++;
6830 		if (loop == LOOP_ALLOC_CHUNK) {
6831 			struct btrfs_trans_handle *trans;
6832 			int exist = 0;
6833 
6834 			trans = current->journal_info;
6835 			if (trans)
6836 				exist = 1;
6837 			else
6838 				trans = btrfs_join_transaction(root);
6839 
6840 			if (IS_ERR(trans)) {
6841 				ret = PTR_ERR(trans);
6842 				goto out;
6843 			}
6844 
6845 			ret = do_chunk_alloc(trans, root, flags,
6846 					     CHUNK_ALLOC_FORCE);
6847 			/*
6848 			 * Do not bail out on ENOSPC since we
6849 			 * can do more things.
6850 			 */
6851 			if (ret < 0 && ret != -ENOSPC)
6852 				btrfs_abort_transaction(trans,
6853 							root, ret);
6854 			else
6855 				ret = 0;
6856 			if (!exist)
6857 				btrfs_end_transaction(trans, root);
6858 			if (ret)
6859 				goto out;
6860 		}
6861 
6862 		if (loop == LOOP_NO_EMPTY_SIZE) {
6863 			empty_size = 0;
6864 			empty_cluster = 0;
6865 		}
6866 
6867 		goto search;
6868 	} else if (!ins->objectid) {
6869 		ret = -ENOSPC;
6870 	} else if (ins->objectid) {
6871 		ret = 0;
6872 	}
6873 out:
6874 	if (ret == -ENOSPC)
6875 		ins->offset = max_extent_size;
6876 	return ret;
6877 }
6878 
6879 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6880 			    int dump_block_groups)
6881 {
6882 	struct btrfs_block_group_cache *cache;
6883 	int index = 0;
6884 
6885 	spin_lock(&info->lock);
6886 	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6887 	       info->flags,
6888 	       info->total_bytes - info->bytes_used - info->bytes_pinned -
6889 	       info->bytes_reserved - info->bytes_readonly,
6890 	       (info->full) ?
"" : "not "); 6891 printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, " 6892 "reserved=%llu, may_use=%llu, readonly=%llu\n", 6893 info->total_bytes, info->bytes_used, info->bytes_pinned, 6894 info->bytes_reserved, info->bytes_may_use, 6895 info->bytes_readonly); 6896 spin_unlock(&info->lock); 6897 6898 if (!dump_block_groups) 6899 return; 6900 6901 down_read(&info->groups_sem); 6902 again: 6903 list_for_each_entry(cache, &info->block_groups[index], list) { 6904 spin_lock(&cache->lock); 6905 printk(KERN_INFO "BTRFS: " 6906 "block group %llu has %llu bytes, " 6907 "%llu used %llu pinned %llu reserved %s\n", 6908 cache->key.objectid, cache->key.offset, 6909 btrfs_block_group_used(&cache->item), cache->pinned, 6910 cache->reserved, cache->ro ? "[readonly]" : ""); 6911 btrfs_dump_free_space(cache, bytes); 6912 spin_unlock(&cache->lock); 6913 } 6914 if (++index < BTRFS_NR_RAID_TYPES) 6915 goto again; 6916 up_read(&info->groups_sem); 6917 } 6918 6919 int btrfs_reserve_extent(struct btrfs_root *root, 6920 u64 num_bytes, u64 min_alloc_size, 6921 u64 empty_size, u64 hint_byte, 6922 struct btrfs_key *ins, int is_data, int delalloc) 6923 { 6924 bool final_tried = false; 6925 u64 flags; 6926 int ret; 6927 6928 flags = btrfs_get_alloc_profile(root, is_data); 6929 again: 6930 WARN_ON(num_bytes < root->sectorsize); 6931 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins, 6932 flags, delalloc); 6933 6934 if (ret == -ENOSPC) { 6935 if (!final_tried && ins->offset) { 6936 num_bytes = min(num_bytes >> 1, ins->offset); 6937 num_bytes = round_down(num_bytes, root->sectorsize); 6938 num_bytes = max(num_bytes, min_alloc_size); 6939 if (num_bytes == min_alloc_size) 6940 final_tried = true; 6941 goto again; 6942 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) { 6943 struct btrfs_space_info *sinfo; 6944 6945 sinfo = __find_space_info(root->fs_info, flags); 6946 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu", 6947 flags, num_bytes); 6948 if (sinfo) 6949 dump_space_info(sinfo, num_bytes, 1); 6950 } 6951 } 6952 6953 return ret; 6954 } 6955 6956 static int __btrfs_free_reserved_extent(struct btrfs_root *root, 6957 u64 start, u64 len, 6958 int pin, int delalloc) 6959 { 6960 struct btrfs_block_group_cache *cache; 6961 int ret = 0; 6962 6963 cache = btrfs_lookup_block_group(root->fs_info, start); 6964 if (!cache) { 6965 btrfs_err(root->fs_info, "Unable to find block group for %llu", 6966 start); 6967 return -ENOSPC; 6968 } 6969 6970 if (btrfs_test_opt(root, DISCARD)) 6971 ret = btrfs_discard_extent(root, start, len, NULL); 6972 6973 if (pin) 6974 pin_down_extent(root, cache, start, len, 1); 6975 else { 6976 btrfs_add_free_space(cache, start, len); 6977 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc); 6978 } 6979 btrfs_put_block_group(cache); 6980 6981 trace_btrfs_reserved_extent_free(root, start, len); 6982 6983 return ret; 6984 } 6985 6986 int btrfs_free_reserved_extent(struct btrfs_root *root, 6987 u64 start, u64 len, int delalloc) 6988 { 6989 return __btrfs_free_reserved_extent(root, start, len, 0, delalloc); 6990 } 6991 6992 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 6993 u64 start, u64 len) 6994 { 6995 return __btrfs_free_reserved_extent(root, start, len, 1, 0); 6996 } 6997 6998 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 6999 struct btrfs_root *root, 7000 u64 parent, u64 root_objectid, 7001 u64 flags, u64 owner, u64 offset, 7002 struct btrfs_key *ins, int ref_mod) 7003 { 7004 int ret; 7005 struct 
btrfs_fs_info *fs_info = root->fs_info;
7006 	struct btrfs_extent_item *extent_item;
7007 	struct btrfs_extent_inline_ref *iref;
7008 	struct btrfs_path *path;
7009 	struct extent_buffer *leaf;
7010 	int type;
7011 	u32 size;
7012 
7013 	if (parent > 0)
7014 		type = BTRFS_SHARED_DATA_REF_KEY;
7015 	else
7016 		type = BTRFS_EXTENT_DATA_REF_KEY;
7017 
7018 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7019 
7020 	path = btrfs_alloc_path();
7021 	if (!path)
7022 		return -ENOMEM;
7023 
7024 	path->leave_spinning = 1;
7025 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7026 				      ins, size);
7027 	if (ret) {
7028 		btrfs_free_path(path);
7029 		return ret;
7030 	}
7031 
7032 	leaf = path->nodes[0];
7033 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
7034 				     struct btrfs_extent_item);
7035 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7036 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7037 	btrfs_set_extent_flags(leaf, extent_item,
7038 			       flags | BTRFS_EXTENT_FLAG_DATA);
7039 
7040 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7041 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
7042 	if (parent > 0) {
7043 		struct btrfs_shared_data_ref *ref;
7044 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
7045 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7046 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7047 	} else {
7048 		struct btrfs_extent_data_ref *ref;
7049 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7050 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7051 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7052 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7053 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7054 	}
7055 
7056 	btrfs_mark_buffer_dirty(path->nodes[0]);
7057 	btrfs_free_path(path);
7058 
7059 	/* Always set parent to 0 here since it's exclusive anyway.
*/ 7060 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid, 7061 ins->objectid, ins->offset, 7062 BTRFS_QGROUP_OPER_ADD_EXCL, 0); 7063 if (ret) 7064 return ret; 7065 7066 ret = update_block_group(root, ins->objectid, ins->offset, 1); 7067 if (ret) { /* -ENOENT, logic error */ 7068 btrfs_err(fs_info, "update block group failed for %llu %llu", 7069 ins->objectid, ins->offset); 7070 BUG(); 7071 } 7072 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset); 7073 return ret; 7074 } 7075 7076 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 7077 struct btrfs_root *root, 7078 u64 parent, u64 root_objectid, 7079 u64 flags, struct btrfs_disk_key *key, 7080 int level, struct btrfs_key *ins, 7081 int no_quota) 7082 { 7083 int ret; 7084 struct btrfs_fs_info *fs_info = root->fs_info; 7085 struct btrfs_extent_item *extent_item; 7086 struct btrfs_tree_block_info *block_info; 7087 struct btrfs_extent_inline_ref *iref; 7088 struct btrfs_path *path; 7089 struct extent_buffer *leaf; 7090 u32 size = sizeof(*extent_item) + sizeof(*iref); 7091 u64 num_bytes = ins->offset; 7092 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 7093 SKINNY_METADATA); 7094 7095 if (!skinny_metadata) 7096 size += sizeof(*block_info); 7097 7098 path = btrfs_alloc_path(); 7099 if (!path) { 7100 btrfs_free_and_pin_reserved_extent(root, ins->objectid, 7101 root->nodesize); 7102 return -ENOMEM; 7103 } 7104 7105 path->leave_spinning = 1; 7106 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 7107 ins, size); 7108 if (ret) { 7109 btrfs_free_and_pin_reserved_extent(root, ins->objectid, 7110 root->nodesize); 7111 btrfs_free_path(path); 7112 return ret; 7113 } 7114 7115 leaf = path->nodes[0]; 7116 extent_item = btrfs_item_ptr(leaf, path->slots[0], 7117 struct btrfs_extent_item); 7118 btrfs_set_extent_refs(leaf, extent_item, 1); 7119 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 7120 btrfs_set_extent_flags(leaf, extent_item, 7121 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); 7122 7123 if (skinny_metadata) { 7124 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 7125 num_bytes = root->nodesize; 7126 } else { 7127 block_info = (struct btrfs_tree_block_info *)(extent_item + 1); 7128 btrfs_set_tree_block_key(leaf, block_info, key); 7129 btrfs_set_tree_block_level(leaf, block_info, level); 7130 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 7131 } 7132 7133 if (parent > 0) { 7134 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 7135 btrfs_set_extent_inline_ref_type(leaf, iref, 7136 BTRFS_SHARED_BLOCK_REF_KEY); 7137 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 7138 } else { 7139 btrfs_set_extent_inline_ref_type(leaf, iref, 7140 BTRFS_TREE_BLOCK_REF_KEY); 7141 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); 7142 } 7143 7144 btrfs_mark_buffer_dirty(leaf); 7145 btrfs_free_path(path); 7146 7147 if (!no_quota) { 7148 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid, 7149 ins->objectid, num_bytes, 7150 BTRFS_QGROUP_OPER_ADD_EXCL, 0); 7151 if (ret) 7152 return ret; 7153 } 7154 7155 ret = update_block_group(root, ins->objectid, root->nodesize, 1); 7156 if (ret) { /* -ENOENT, logic error */ 7157 btrfs_err(fs_info, "update block group failed for %llu %llu", 7158 ins->objectid, ins->offset); 7159 BUG(); 7160 } 7161 7162 trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize); 7163 return ret; 7164 } 7165 7166 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 7167 struct btrfs_root *root, 
7168 				     u64 root_objectid, u64 owner,
7169 				     u64 offset, struct btrfs_key *ins)
7170 {
7171 	int ret;
7172 
7173 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7174 
7175 	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7176 					 ins->offset, 0,
7177 					 root_objectid, owner, offset,
7178 					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7179 	return ret;
7180 }
7181 
7182 /*
7183  * this is used by the tree logging recovery code. It records that
7184  * an extent has been allocated and makes sure to clear the free
7185  * space cache bits as well
7186  */
7187 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7188 				   struct btrfs_root *root,
7189 				   u64 root_objectid, u64 owner, u64 offset,
7190 				   struct btrfs_key *ins)
7191 {
7192 	int ret;
7193 	struct btrfs_block_group_cache *block_group;
7194 
7195 	/*
7196 	 * Mixed block groups will exclude before processing the log so we only
7197 	 * need to do the exclude dance if this fs isn't mixed.
7198 	 */
7199 	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7200 		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7201 		if (ret)
7202 			return ret;
7203 	}
7204 
7205 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7206 	if (!block_group)
7207 		return -EINVAL;
7208 
7209 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7210 					  RESERVE_ALLOC_NO_ACCOUNT, 0);
7211 	BUG_ON(ret); /* logic error */
7212 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7213 					 0, owner, offset, ins, 1);
7214 	btrfs_put_block_group(block_group);
7215 	return ret;
7216 }
7217 
7218 static struct extent_buffer *
7219 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7220 		      u64 bytenr, u32 blocksize, int level)
7221 {
7222 	struct extent_buffer *buf;
7223 
7224 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
7225 	if (!buf)
7226 		return ERR_PTR(-ENOMEM);
7227 	btrfs_set_header_generation(buf, trans->transid);
7228 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7229 	btrfs_tree_lock(buf);
7230 	clean_tree_block(trans, root, buf);
7231 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7232 
7233 	btrfs_set_lock_blocking(buf);
7234 	btrfs_set_buffer_uptodate(buf);
7235 
7236 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7237 		buf->log_index = root->log_transid % 2;
7238 		/*
7239 		 * we allow two log transactions at a time, use different
7240 		 * EXTENT bits to differentiate dirty pages.
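		 * (log_index 0 marks the range EXTENT_DIRTY and log_index 1
		 * marks it EXTENT_NEW in root->dirty_log_pages, as set just
		 * below)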
7241 		 */
7242 		if (buf->log_index == 0)
7243 			set_extent_dirty(&root->dirty_log_pages, buf->start,
7244 					buf->start + buf->len - 1, GFP_NOFS);
7245 		else
7246 			set_extent_new(&root->dirty_log_pages, buf->start,
7247 					buf->start + buf->len - 1, GFP_NOFS);
7248 	} else {
7249 		buf->log_index = -1;
7250 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7251 			 buf->start + buf->len - 1, GFP_NOFS);
7252 	}
7253 	trans->blocks_used++;
7254 	/* this returns a buffer locked for blocking */
7255 	return buf;
7256 }
7257 
7258 static struct btrfs_block_rsv *
7259 use_block_rsv(struct btrfs_trans_handle *trans,
7260 	      struct btrfs_root *root, u32 blocksize)
7261 {
7262 	struct btrfs_block_rsv *block_rsv;
7263 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7264 	int ret;
7265 	bool global_updated = false;
7266 
7267 	block_rsv = get_block_rsv(trans, root);
7268 
7269 	if (unlikely(block_rsv->size == 0))
7270 		goto try_reserve;
7271 again:
7272 	ret = block_rsv_use_bytes(block_rsv, blocksize);
7273 	if (!ret)
7274 		return block_rsv;
7275 
7276 	if (block_rsv->failfast)
7277 		return ERR_PTR(ret);
7278 
7279 	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7280 		global_updated = true;
7281 		update_global_block_rsv(root->fs_info);
7282 		goto again;
7283 	}
7284 
7285 	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7286 		static DEFINE_RATELIMIT_STATE(_rs,
7287 				DEFAULT_RATELIMIT_INTERVAL * 10,
7288 				/*DEFAULT_RATELIMIT_BURST*/ 1);
7289 		if (__ratelimit(&_rs))
7290 			WARN(1, KERN_DEBUG
7291 				"BTRFS: block rsv returned %d\n", ret);
7292 	}
7293 try_reserve:
7294 	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7295 				     BTRFS_RESERVE_NO_FLUSH);
7296 	if (!ret)
7297 		return block_rsv;
7298 	/*
7299 	 * If we couldn't reserve metadata bytes try and use some from
7300 	 * the global reserve if its space type is the same as the global
7301 	 * reservation.
7302 	 */
7303 	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7304 	    block_rsv->space_info == global_rsv->space_info) {
7305 		ret = block_rsv_use_bytes(global_rsv, blocksize);
7306 		if (!ret)
7307 			return global_rsv;
7308 	}
7309 	return ERR_PTR(ret);
7310 }
7311 
7312 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7313 			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
7314 {
7315 	block_rsv_add_bytes(block_rsv, blocksize, 0);
7316 	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7317 }
7318 
7319 /*
7320  * finds a free extent and does all the dirty work required for allocation.
7321  * Returns a locked tree buffer for the first block of the extent.
7322  *
7323  *
7324  * Returns the tree buffer or an ERR_PTR on error.
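 *
 * A rough caller sketch (a non-relocation, non-log tree passes
 * parent == 0 and its own objectid; key/level describe the new block):
 *
 *	buf = btrfs_alloc_tree_block(trans, root, 0,
 *				     root->root_key.objectid, &disk_key,
 *				     level, hint, 0);
 *	if (IS_ERR(buf))
 *		ret = PTR_ERR(buf);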
7325 */ 7326 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 7327 struct btrfs_root *root, 7328 u64 parent, u64 root_objectid, 7329 struct btrfs_disk_key *key, int level, 7330 u64 hint, u64 empty_size) 7331 { 7332 struct btrfs_key ins; 7333 struct btrfs_block_rsv *block_rsv; 7334 struct extent_buffer *buf; 7335 u64 flags = 0; 7336 int ret; 7337 u32 blocksize = root->nodesize; 7338 bool skinny_metadata = btrfs_fs_incompat(root->fs_info, 7339 SKINNY_METADATA); 7340 7341 if (btrfs_test_is_dummy_root(root)) { 7342 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, 7343 blocksize, level); 7344 if (!IS_ERR(buf)) 7345 root->alloc_bytenr += blocksize; 7346 return buf; 7347 } 7348 7349 block_rsv = use_block_rsv(trans, root, blocksize); 7350 if (IS_ERR(block_rsv)) 7351 return ERR_CAST(block_rsv); 7352 7353 ret = btrfs_reserve_extent(root, blocksize, blocksize, 7354 empty_size, hint, &ins, 0, 0); 7355 if (ret) { 7356 unuse_block_rsv(root->fs_info, block_rsv, blocksize); 7357 return ERR_PTR(ret); 7358 } 7359 7360 buf = btrfs_init_new_buffer(trans, root, ins.objectid, 7361 blocksize, level); 7362 BUG_ON(IS_ERR(buf)); /* -ENOMEM */ 7363 7364 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 7365 if (parent == 0) 7366 parent = ins.objectid; 7367 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 7368 } else 7369 BUG_ON(parent > 0); 7370 7371 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 7372 struct btrfs_delayed_extent_op *extent_op; 7373 extent_op = btrfs_alloc_delayed_extent_op(); 7374 BUG_ON(!extent_op); /* -ENOMEM */ 7375 if (key) 7376 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 7377 else 7378 memset(&extent_op->key, 0, sizeof(extent_op->key)); 7379 extent_op->flags_to_set = flags; 7380 if (skinny_metadata) 7381 extent_op->update_key = 0; 7382 else 7383 extent_op->update_key = 1; 7384 extent_op->update_flags = 1; 7385 extent_op->is_data = 0; 7386 extent_op->level = level; 7387 7388 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, 7389 ins.objectid, 7390 ins.offset, parent, root_objectid, 7391 level, BTRFS_ADD_DELAYED_EXTENT, 7392 extent_op, 0); 7393 BUG_ON(ret); /* -ENOMEM */ 7394 } 7395 return buf; 7396 } 7397 7398 struct walk_control { 7399 u64 refs[BTRFS_MAX_LEVEL]; 7400 u64 flags[BTRFS_MAX_LEVEL]; 7401 struct btrfs_key update_progress; 7402 int stage; 7403 int level; 7404 int shared_level; 7405 int update_ref; 7406 int keep_locks; 7407 int reada_slot; 7408 int reada_count; 7409 int for_reloc; 7410 }; 7411 7412 #define DROP_REFERENCE 1 7413 #define UPDATE_BACKREF 2 7414 7415 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, 7416 struct btrfs_root *root, 7417 struct walk_control *wc, 7418 struct btrfs_path *path) 7419 { 7420 u64 bytenr; 7421 u64 generation; 7422 u64 refs; 7423 u64 flags; 7424 u32 nritems; 7425 u32 blocksize; 7426 struct btrfs_key key; 7427 struct extent_buffer *eb; 7428 int ret; 7429 int slot; 7430 int nread = 0; 7431 7432 if (path->slots[wc->level] < wc->reada_slot) { 7433 wc->reada_count = wc->reada_count * 2 / 3; 7434 wc->reada_count = max(wc->reada_count, 2); 7435 } else { 7436 wc->reada_count = wc->reada_count * 3 / 2; 7437 wc->reada_count = min_t(int, wc->reada_count, 7438 BTRFS_NODEPTRS_PER_BLOCK(root)); 7439 } 7440 7441 eb = path->nodes[wc->level]; 7442 nritems = btrfs_header_nritems(eb); 7443 blocksize = root->nodesize; 7444 7445 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 7446 if (nread >= wc->reada_count) 7447 break; 7448 7449 cond_resched(); 7450 bytenr = btrfs_node_blockptr(eb, 
slot);
7451 		generation = btrfs_node_ptr_generation(eb, slot);
7452 
7453 		if (slot == path->slots[wc->level])
7454 			goto reada;
7455 
7456 		if (wc->stage == UPDATE_BACKREF &&
7457 		    generation <= root->root_key.offset)
7458 			continue;
7459 
7460 		/* We don't lock the tree block, it's OK to be racy here */
7461 		ret = btrfs_lookup_extent_info(trans, root, bytenr,
7462 					       wc->level - 1, 1, &refs,
7463 					       &flags);
7464 		/* We don't care about errors in readahead. */
7465 		if (ret < 0)
7466 			continue;
7467 		BUG_ON(refs == 0);
7468 
7469 		if (wc->stage == DROP_REFERENCE) {
7470 			if (refs == 1)
7471 				goto reada;
7472 
7473 			if (wc->level == 1 &&
7474 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7475 				continue;
7476 			if (!wc->update_ref ||
7477 			    generation <= root->root_key.offset)
7478 				continue;
7479 			btrfs_node_key_to_cpu(eb, &key, slot);
7480 			ret = btrfs_comp_cpu_keys(&key,
7481 						  &wc->update_progress);
7482 			if (ret < 0)
7483 				continue;
7484 		} else {
7485 			if (wc->level == 1 &&
7486 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7487 				continue;
7488 		}
7489 reada:
7490 		readahead_tree_block(root, bytenr, blocksize);
7491 		nread++;
7492 	}
7493 	wc->reada_slot = slot;
7494 }
7495 
7496 static int account_leaf_items(struct btrfs_trans_handle *trans,
7497 			      struct btrfs_root *root,
7498 			      struct extent_buffer *eb)
7499 {
7500 	int nr = btrfs_header_nritems(eb);
7501 	int i, extent_type, ret;
7502 	struct btrfs_key key;
7503 	struct btrfs_file_extent_item *fi;
7504 	u64 bytenr, num_bytes;
7505 
7506 	for (i = 0; i < nr; i++) {
7507 		btrfs_item_key_to_cpu(eb, &key, i);
7508 
7509 		if (key.type != BTRFS_EXTENT_DATA_KEY)
7510 			continue;
7511 
7512 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7513 		/* filter out non qgroup-accountable extents */
7514 		extent_type = btrfs_file_extent_type(eb, fi);
7515 
7516 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7517 			continue;
7518 
7519 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7520 		if (!bytenr)
7521 			continue;
7522 
7523 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7524 
7525 		ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7526 					      root->objectid,
7527 					      bytenr, num_bytes,
7528 					      BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7529 		if (ret)
7530 			return ret;
7531 	}
7532 	return 0;
7533 }
7534 
7535 /*
7536  * Walk up the tree from the bottom, freeing leaves and any interior
7537  * nodes which have had all slots visited. If a node (leaf or
7538  * interior) is freed, the node above it will have its slot
7539  * incremented. The root node will never be freed.
7540  *
7541  * At the end of this function, we should have a path which has all
7542  * slots incremented to the next position for a search. If we need to
7543  * read a new node it will be NULL and the node above it will have the
7544  * correct slot selected for a later read.
7545  *
7546  * If we increment the root node's slot counter past the number of
7547  * elements, 1 is returned to signal completion of the search.
7548  */
7549 static int adjust_slots_upwards(struct btrfs_root *root,
7550 				struct btrfs_path *path, int root_level)
7551 {
7552 	int level = 0;
7553 	int nr, slot;
7554 	struct extent_buffer *eb;
7555 
7556 	if (root_level == 0)
7557 		return 1;
7558 
7559 	while (level <= root_level) {
7560 		eb = path->nodes[level];
7561 		nr = btrfs_header_nritems(eb);
7562 		path->slots[level]++;
7563 		slot = path->slots[level];
7564 		if (slot >= nr || level == 0) {
7565 			/*
7566 			 * Don't free the root - we will detect this
7567 			 * condition after our loop and return a
7568 			 * positive value for caller to stop walking the tree.
7569 */ 7570 if (level != root_level) { 7571 btrfs_tree_unlock_rw(eb, path->locks[level]); 7572 path->locks[level] = 0; 7573 7574 free_extent_buffer(eb); 7575 path->nodes[level] = NULL; 7576 path->slots[level] = 0; 7577 } 7578 } else { 7579 /* 7580 * We have a valid slot to walk back down 7581 * from. Stop here so caller can process these 7582 * new nodes. 7583 */ 7584 break; 7585 } 7586 7587 level++; 7588 } 7589 7590 eb = path->nodes[root_level]; 7591 if (path->slots[root_level] >= btrfs_header_nritems(eb)) 7592 return 1; 7593 7594 return 0; 7595 } 7596 7597 /* 7598 * root_eb is the subtree root and is locked before this function is called. 7599 */ 7600 static int account_shared_subtree(struct btrfs_trans_handle *trans, 7601 struct btrfs_root *root, 7602 struct extent_buffer *root_eb, 7603 u64 root_gen, 7604 int root_level) 7605 { 7606 int ret = 0; 7607 int level; 7608 struct extent_buffer *eb = root_eb; 7609 struct btrfs_path *path = NULL; 7610 7611 BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL); 7612 BUG_ON(root_eb == NULL); 7613 7614 if (!root->fs_info->quota_enabled) 7615 return 0; 7616 7617 if (!extent_buffer_uptodate(root_eb)) { 7618 ret = btrfs_read_buffer(root_eb, root_gen); 7619 if (ret) 7620 goto out; 7621 } 7622 7623 if (root_level == 0) { 7624 ret = account_leaf_items(trans, root, root_eb); 7625 goto out; 7626 } 7627 7628 path = btrfs_alloc_path(); 7629 if (!path) 7630 return -ENOMEM; 7631 7632 /* 7633 * Walk down the tree. Missing extent blocks are filled in as 7634 * we go. Metadata is accounted every time we read a new 7635 * extent block. 7636 * 7637 * When we reach a leaf, we account for file extent items in it, 7638 * walk back up the tree (adjusting slot pointers as we go) 7639 * and restart the search process. 7640 */ 7641 extent_buffer_get(root_eb); /* For path */ 7642 path->nodes[root_level] = root_eb; 7643 path->slots[root_level] = 0; 7644 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ 7645 walk_down: 7646 level = root_level; 7647 while (level >= 0) { 7648 if (path->nodes[level] == NULL) { 7649 int parent_slot; 7650 u64 child_gen; 7651 u64 child_bytenr; 7652 7653 /* We need to get child blockptr/gen from 7654 * parent before we can read it. */ 7655 eb = path->nodes[level + 1]; 7656 parent_slot = path->slots[level + 1]; 7657 child_bytenr = btrfs_node_blockptr(eb, parent_slot); 7658 child_gen = btrfs_node_ptr_generation(eb, parent_slot); 7659 7660 eb = read_tree_block(root, child_bytenr, child_gen); 7661 if (!eb || !extent_buffer_uptodate(eb)) { 7662 ret = -EIO; 7663 goto out; 7664 } 7665 7666 path->nodes[level] = eb; 7667 path->slots[level] = 0; 7668 7669 btrfs_tree_read_lock(eb); 7670 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); 7671 path->locks[level] = BTRFS_READ_LOCK_BLOCKING; 7672 7673 ret = btrfs_qgroup_record_ref(trans, root->fs_info, 7674 root->objectid, 7675 child_bytenr, 7676 root->nodesize, 7677 BTRFS_QGROUP_OPER_SUB_SUBTREE, 7678 0); 7679 if (ret) 7680 goto out; 7681 7682 } 7683 7684 if (level == 0) { 7685 ret = account_leaf_items(trans, root, path->nodes[level]); 7686 if (ret) 7687 goto out; 7688 7689 /* Nonzero return here means we completed our search */ 7690 ret = adjust_slots_upwards(root, path, root_level); 7691 if (ret) 7692 break; 7693 7694 /* Restart search with new slots */ 7695 goto walk_down; 7696 } 7697 7698 level--; 7699 } 7700 7701 ret = 0; 7702 out: 7703 btrfs_free_path(path); 7704 7705 return ret; 7706 } 7707 7708 /* 7709 * helper to process tree block while walking down the tree. 
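 * (broadly, the walk has two stages: DROP_REFERENCE drops the
 * reference we hold on each block as we descend, while UPDATE_BACKREF
 * converts the blocks of a shared subtree to use full back refs)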
7710 *
7711 * when wc->stage == UPDATE_BACKREF, this function updates
7712 * back refs for pointers in the block.
7713 *
7714 * NOTE: return value 1 means we should stop walking down.
7715 */
7716 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7717 struct btrfs_root *root,
7718 struct btrfs_path *path,
7719 struct walk_control *wc, int lookup_info)
7720 {
7721 int level = wc->level;
7722 struct extent_buffer *eb = path->nodes[level];
7723 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7724 int ret;
7725
7726 if (wc->stage == UPDATE_BACKREF &&
7727 btrfs_header_owner(eb) != root->root_key.objectid)
7728 return 1;
7729
7730 /*
7731 * when reference count of tree block is 1, it won't increase
7732 * again. once full backref flag is set, we never clear it.
7733 */
7734 if (lookup_info &&
7735 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7736 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7737 BUG_ON(!path->locks[level]);
7738 ret = btrfs_lookup_extent_info(trans, root,
7739 eb->start, level, 1,
7740 &wc->refs[level],
7741 &wc->flags[level]);
7742 BUG_ON(ret == -ENOMEM);
7743 if (ret)
7744 return ret;
7745 BUG_ON(wc->refs[level] == 0);
7746 }
7747
7748 if (wc->stage == DROP_REFERENCE) {
7749 if (wc->refs[level] > 1)
7750 return 1;
7751
7752 if (path->locks[level] && !wc->keep_locks) {
7753 btrfs_tree_unlock_rw(eb, path->locks[level]);
7754 path->locks[level] = 0;
7755 }
7756 return 0;
7757 }
7758
7759 /* wc->stage == UPDATE_BACKREF */
7760 if (!(wc->flags[level] & flag)) {
7761 BUG_ON(!path->locks[level]);
7762 ret = btrfs_inc_ref(trans, root, eb, 1);
7763 BUG_ON(ret); /* -ENOMEM */
7764 ret = btrfs_dec_ref(trans, root, eb, 0);
7765 BUG_ON(ret); /* -ENOMEM */
7766 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7767 eb->len, flag,
7768 btrfs_header_level(eb), 0);
7769 BUG_ON(ret); /* -ENOMEM */
7770 wc->flags[level] |= flag;
7771 }
7772
7773 /*
7774 * the block is shared by multiple trees, so it's not good to
7775 * keep the tree lock
7776 */
7777 if (path->locks[level] && level > 0) {
7778 btrfs_tree_unlock_rw(eb, path->locks[level]);
7779 path->locks[level] = 0;
7780 }
7781 return 0;
7782 }
7783
7784 /*
7785 * helper to process tree block pointer.
7786 *
7787 * when wc->stage == DROP_REFERENCE, this function checks
7788 * reference count of the block pointed to. if the block
7789 * is shared and we need to update back refs for the subtree
7790 * rooted at the block, this function changes wc->stage to
7791 * UPDATE_BACKREF. if the block is shared and there is no
7792 * need to update backrefs, this function drops the reference
7793 * to the block.
7794 *
7795 * NOTE: return value 1 means we should stop walking down.
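* Return <0 on error; 0 means the caller should descend into the
* block pointed to.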
7796 */ 7797 static noinline int do_walk_down(struct btrfs_trans_handle *trans, 7798 struct btrfs_root *root, 7799 struct btrfs_path *path, 7800 struct walk_control *wc, int *lookup_info) 7801 { 7802 u64 bytenr; 7803 u64 generation; 7804 u64 parent; 7805 u32 blocksize; 7806 struct btrfs_key key; 7807 struct extent_buffer *next; 7808 int level = wc->level; 7809 int reada = 0; 7810 int ret = 0; 7811 bool need_account = false; 7812 7813 generation = btrfs_node_ptr_generation(path->nodes[level], 7814 path->slots[level]); 7815 /* 7816 * if the lower level block was created before the snapshot 7817 * was created, we know there is no need to update back refs 7818 * for the subtree 7819 */ 7820 if (wc->stage == UPDATE_BACKREF && 7821 generation <= root->root_key.offset) { 7822 *lookup_info = 1; 7823 return 1; 7824 } 7825 7826 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 7827 blocksize = root->nodesize; 7828 7829 next = btrfs_find_tree_block(root, bytenr); 7830 if (!next) { 7831 next = btrfs_find_create_tree_block(root, bytenr, blocksize); 7832 if (!next) 7833 return -ENOMEM; 7834 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next, 7835 level - 1); 7836 reada = 1; 7837 } 7838 btrfs_tree_lock(next); 7839 btrfs_set_lock_blocking(next); 7840 7841 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1, 7842 &wc->refs[level - 1], 7843 &wc->flags[level - 1]); 7844 if (ret < 0) { 7845 btrfs_tree_unlock(next); 7846 return ret; 7847 } 7848 7849 if (unlikely(wc->refs[level - 1] == 0)) { 7850 btrfs_err(root->fs_info, "Missing references."); 7851 BUG(); 7852 } 7853 *lookup_info = 0; 7854 7855 if (wc->stage == DROP_REFERENCE) { 7856 if (wc->refs[level - 1] > 1) { 7857 need_account = true; 7858 if (level == 1 && 7859 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 7860 goto skip; 7861 7862 if (!wc->update_ref || 7863 generation <= root->root_key.offset) 7864 goto skip; 7865 7866 btrfs_node_key_to_cpu(path->nodes[level], &key, 7867 path->slots[level]); 7868 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); 7869 if (ret < 0) 7870 goto skip; 7871 7872 wc->stage = UPDATE_BACKREF; 7873 wc->shared_level = level - 1; 7874 } 7875 } else { 7876 if (level == 1 && 7877 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 7878 goto skip; 7879 } 7880 7881 if (!btrfs_buffer_uptodate(next, generation, 0)) { 7882 btrfs_tree_unlock(next); 7883 free_extent_buffer(next); 7884 next = NULL; 7885 *lookup_info = 1; 7886 } 7887 7888 if (!next) { 7889 if (reada && level == 1) 7890 reada_walk_down(trans, root, wc, path); 7891 next = read_tree_block(root, bytenr, generation); 7892 if (!next || !extent_buffer_uptodate(next)) { 7893 free_extent_buffer(next); 7894 return -EIO; 7895 } 7896 btrfs_tree_lock(next); 7897 btrfs_set_lock_blocking(next); 7898 } 7899 7900 level--; 7901 BUG_ON(level != btrfs_header_level(next)); 7902 path->nodes[level] = next; 7903 path->slots[level] = 0; 7904 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 7905 wc->level = level; 7906 if (wc->level == 1) 7907 wc->reada_slot = 0; 7908 return 0; 7909 skip: 7910 wc->refs[level - 1] = 0; 7911 wc->flags[level - 1] = 0; 7912 if (wc->stage == DROP_REFERENCE) { 7913 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 7914 parent = path->nodes[level]->start; 7915 } else { 7916 BUG_ON(root->root_key.objectid != 7917 btrfs_header_owner(path->nodes[level])); 7918 parent = 0; 7919 } 7920 7921 if (need_account) { 7922 ret = account_shared_subtree(trans, root, next, 7923 generation, level - 1); 7924 if (ret) { 7925 
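/* Not fatal: the quota numbers simply go stale until a rescan. */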
printk_ratelimited(KERN_ERR "BTRFS: %s Error " 7926 "%d accounting shared subtree. Quota " 7927 "is out of sync, rescan required.\n", 7928 root->fs_info->sb->s_id, ret); 7929 } 7930 } 7931 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent, 7932 root->root_key.objectid, level - 1, 0, 0); 7933 BUG_ON(ret); /* -ENOMEM */ 7934 } 7935 btrfs_tree_unlock(next); 7936 free_extent_buffer(next); 7937 *lookup_info = 1; 7938 return 1; 7939 } 7940 7941 /* 7942 * helper to process tree block while walking up the tree. 7943 * 7944 * when wc->stage == DROP_REFERENCE, this function drops 7945 * reference count on the block. 7946 * 7947 * when wc->stage == UPDATE_BACKREF, this function changes 7948 * wc->stage back to DROP_REFERENCE if we changed wc->stage 7949 * to UPDATE_BACKREF previously while processing the block. 7950 * 7951 * NOTE: return value 1 means we should stop walking up. 7952 */ 7953 static noinline int walk_up_proc(struct btrfs_trans_handle *trans, 7954 struct btrfs_root *root, 7955 struct btrfs_path *path, 7956 struct walk_control *wc) 7957 { 7958 int ret; 7959 int level = wc->level; 7960 struct extent_buffer *eb = path->nodes[level]; 7961 u64 parent = 0; 7962 7963 if (wc->stage == UPDATE_BACKREF) { 7964 BUG_ON(wc->shared_level < level); 7965 if (level < wc->shared_level) 7966 goto out; 7967 7968 ret = find_next_key(path, level + 1, &wc->update_progress); 7969 if (ret > 0) 7970 wc->update_ref = 0; 7971 7972 wc->stage = DROP_REFERENCE; 7973 wc->shared_level = -1; 7974 path->slots[level] = 0; 7975 7976 /* 7977 * check reference count again if the block isn't locked. 7978 * we should start walking down the tree again if reference 7979 * count is one. 7980 */ 7981 if (!path->locks[level]) { 7982 BUG_ON(level == 0); 7983 btrfs_tree_lock(eb); 7984 btrfs_set_lock_blocking(eb); 7985 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 7986 7987 ret = btrfs_lookup_extent_info(trans, root, 7988 eb->start, level, 1, 7989 &wc->refs[level], 7990 &wc->flags[level]); 7991 if (ret < 0) { 7992 btrfs_tree_unlock_rw(eb, path->locks[level]); 7993 path->locks[level] = 0; 7994 return ret; 7995 } 7996 BUG_ON(wc->refs[level] == 0); 7997 if (wc->refs[level] == 1) { 7998 btrfs_tree_unlock_rw(eb, path->locks[level]); 7999 path->locks[level] = 0; 8000 return 1; 8001 } 8002 } 8003 } 8004 8005 /* wc->stage == DROP_REFERENCE */ 8006 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); 8007 8008 if (wc->refs[level] == 1) { 8009 if (level == 0) { 8010 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 8011 ret = btrfs_dec_ref(trans, root, eb, 1); 8012 else 8013 ret = btrfs_dec_ref(trans, root, eb, 0); 8014 BUG_ON(ret); /* -ENOMEM */ 8015 ret = account_leaf_items(trans, root, eb); 8016 if (ret) { 8017 printk_ratelimited(KERN_ERR "BTRFS: %s Error " 8018 "%d accounting leaf items. 
Quota " 8019 "is out of sync, rescan required.\n", 8020 root->fs_info->sb->s_id, ret); 8021 } 8022 } 8023 /* make block locked assertion in clean_tree_block happy */ 8024 if (!path->locks[level] && 8025 btrfs_header_generation(eb) == trans->transid) { 8026 btrfs_tree_lock(eb); 8027 btrfs_set_lock_blocking(eb); 8028 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 8029 } 8030 clean_tree_block(trans, root, eb); 8031 } 8032 8033 if (eb == root->node) { 8034 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 8035 parent = eb->start; 8036 else 8037 BUG_ON(root->root_key.objectid != 8038 btrfs_header_owner(eb)); 8039 } else { 8040 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 8041 parent = path->nodes[level + 1]->start; 8042 else 8043 BUG_ON(root->root_key.objectid != 8044 btrfs_header_owner(path->nodes[level + 1])); 8045 } 8046 8047 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); 8048 out: 8049 wc->refs[level] = 0; 8050 wc->flags[level] = 0; 8051 return 0; 8052 } 8053 8054 static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 8055 struct btrfs_root *root, 8056 struct btrfs_path *path, 8057 struct walk_control *wc) 8058 { 8059 int level = wc->level; 8060 int lookup_info = 1; 8061 int ret; 8062 8063 while (level >= 0) { 8064 ret = walk_down_proc(trans, root, path, wc, lookup_info); 8065 if (ret > 0) 8066 break; 8067 8068 if (level == 0) 8069 break; 8070 8071 if (path->slots[level] >= 8072 btrfs_header_nritems(path->nodes[level])) 8073 break; 8074 8075 ret = do_walk_down(trans, root, path, wc, &lookup_info); 8076 if (ret > 0) { 8077 path->slots[level]++; 8078 continue; 8079 } else if (ret < 0) 8080 return ret; 8081 level = wc->level; 8082 } 8083 return 0; 8084 } 8085 8086 static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 8087 struct btrfs_root *root, 8088 struct btrfs_path *path, 8089 struct walk_control *wc, int max_level) 8090 { 8091 int level = wc->level; 8092 int ret; 8093 8094 path->slots[level] = btrfs_header_nritems(path->nodes[level]); 8095 while (level < max_level && path->nodes[level]) { 8096 wc->level = level; 8097 if (path->slots[level] + 1 < 8098 btrfs_header_nritems(path->nodes[level])) { 8099 path->slots[level]++; 8100 return 0; 8101 } else { 8102 ret = walk_up_proc(trans, root, path, wc); 8103 if (ret > 0) 8104 return 0; 8105 8106 if (path->locks[level]) { 8107 btrfs_tree_unlock_rw(path->nodes[level], 8108 path->locks[level]); 8109 path->locks[level] = 0; 8110 } 8111 free_extent_buffer(path->nodes[level]); 8112 path->nodes[level] = NULL; 8113 level++; 8114 } 8115 } 8116 return 1; 8117 } 8118 8119 /* 8120 * drop a subvolume tree. 8121 * 8122 * this function traverses the tree freeing any blocks that only 8123 * referenced by the tree. 8124 * 8125 * when a shared tree block is found. this function decreases its 8126 * reference count by one. if update_ref is true, this function 8127 * also make sure backrefs for the shared block and all lower level 8128 * blocks are properly updated. 
8129 * 8130 * If called with for_reloc == 0, may exit early with -EAGAIN 8131 */ 8132 int btrfs_drop_snapshot(struct btrfs_root *root, 8133 struct btrfs_block_rsv *block_rsv, int update_ref, 8134 int for_reloc) 8135 { 8136 struct btrfs_path *path; 8137 struct btrfs_trans_handle *trans; 8138 struct btrfs_root *tree_root = root->fs_info->tree_root; 8139 struct btrfs_root_item *root_item = &root->root_item; 8140 struct walk_control *wc; 8141 struct btrfs_key key; 8142 int err = 0; 8143 int ret; 8144 int level; 8145 bool root_dropped = false; 8146 8147 btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid); 8148 8149 path = btrfs_alloc_path(); 8150 if (!path) { 8151 err = -ENOMEM; 8152 goto out; 8153 } 8154 8155 wc = kzalloc(sizeof(*wc), GFP_NOFS); 8156 if (!wc) { 8157 btrfs_free_path(path); 8158 err = -ENOMEM; 8159 goto out; 8160 } 8161 8162 trans = btrfs_start_transaction(tree_root, 0); 8163 if (IS_ERR(trans)) { 8164 err = PTR_ERR(trans); 8165 goto out_free; 8166 } 8167 8168 if (block_rsv) 8169 trans->block_rsv = block_rsv; 8170 8171 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 8172 level = btrfs_header_level(root->node); 8173 path->nodes[level] = btrfs_lock_root_node(root); 8174 btrfs_set_lock_blocking(path->nodes[level]); 8175 path->slots[level] = 0; 8176 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 8177 memset(&wc->update_progress, 0, 8178 sizeof(wc->update_progress)); 8179 } else { 8180 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 8181 memcpy(&wc->update_progress, &key, 8182 sizeof(wc->update_progress)); 8183 8184 level = root_item->drop_level; 8185 BUG_ON(level == 0); 8186 path->lowest_level = level; 8187 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 8188 path->lowest_level = 0; 8189 if (ret < 0) { 8190 err = ret; 8191 goto out_end_trans; 8192 } 8193 WARN_ON(ret > 0); 8194 8195 /* 8196 * unlock our path, this is safe because only this 8197 * function is allowed to delete this snapshot 8198 */ 8199 btrfs_unlock_up_safe(path, 0); 8200 8201 level = btrfs_header_level(root->node); 8202 while (1) { 8203 btrfs_tree_lock(path->nodes[level]); 8204 btrfs_set_lock_blocking(path->nodes[level]); 8205 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 8206 8207 ret = btrfs_lookup_extent_info(trans, root, 8208 path->nodes[level]->start, 8209 level, 1, &wc->refs[level], 8210 &wc->flags[level]); 8211 if (ret < 0) { 8212 err = ret; 8213 goto out_end_trans; 8214 } 8215 BUG_ON(wc->refs[level] == 0); 8216 8217 if (level == root_item->drop_level) 8218 break; 8219 8220 btrfs_tree_unlock(path->nodes[level]); 8221 path->locks[level] = 0; 8222 WARN_ON(wc->refs[level] != 1); 8223 level--; 8224 } 8225 } 8226 8227 wc->level = level; 8228 wc->shared_level = -1; 8229 wc->stage = DROP_REFERENCE; 8230 wc->update_ref = update_ref; 8231 wc->keep_locks = 0; 8232 wc->for_reloc = for_reloc; 8233 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); 8234 8235 while (1) { 8236 8237 ret = walk_down_tree(trans, root, path, wc); 8238 if (ret < 0) { 8239 err = ret; 8240 break; 8241 } 8242 8243 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); 8244 if (ret < 0) { 8245 err = ret; 8246 break; 8247 } 8248 8249 if (ret > 0) { 8250 BUG_ON(wc->stage != DROP_REFERENCE); 8251 break; 8252 } 8253 8254 if (wc->stage == DROP_REFERENCE) { 8255 level = wc->level; 8256 btrfs_node_key(path->nodes[level], 8257 &root_item->drop_progress, 8258 path->slots[level]); 8259 root_item->drop_level = level; 8260 } 8261 8262 BUG_ON(wc->level == 0); 8263 if (btrfs_should_end_transaction(trans, tree_root) 
|| 8264 (!for_reloc && btrfs_need_cleaner_sleep(root))) { 8265 ret = btrfs_update_root(trans, tree_root, 8266 &root->root_key, 8267 root_item); 8268 if (ret) { 8269 btrfs_abort_transaction(trans, tree_root, ret); 8270 err = ret; 8271 goto out_end_trans; 8272 } 8273 8274 /* 8275 * Qgroup update accounting is run from 8276 * delayed ref handling. This usually works 8277 * out because delayed refs are normally the 8278 * only way qgroup updates are added. However, 8279 * we may have added updates during our tree 8280 * walk so run qgroups here to make sure we 8281 * don't lose any updates. 8282 */ 8283 ret = btrfs_delayed_qgroup_accounting(trans, 8284 root->fs_info); 8285 if (ret) 8286 printk_ratelimited(KERN_ERR "BTRFS: Failure %d " 8287 "running qgroup updates " 8288 "during snapshot delete. " 8289 "Quota is out of sync, " 8290 "rescan required.\n", ret); 8291 8292 btrfs_end_transaction_throttle(trans, tree_root); 8293 if (!for_reloc && btrfs_need_cleaner_sleep(root)) { 8294 pr_debug("BTRFS: drop snapshot early exit\n"); 8295 err = -EAGAIN; 8296 goto out_free; 8297 } 8298 8299 trans = btrfs_start_transaction(tree_root, 0); 8300 if (IS_ERR(trans)) { 8301 err = PTR_ERR(trans); 8302 goto out_free; 8303 } 8304 if (block_rsv) 8305 trans->block_rsv = block_rsv; 8306 } 8307 } 8308 btrfs_release_path(path); 8309 if (err) 8310 goto out_end_trans; 8311 8312 ret = btrfs_del_root(trans, tree_root, &root->root_key); 8313 if (ret) { 8314 btrfs_abort_transaction(trans, tree_root, ret); 8315 goto out_end_trans; 8316 } 8317 8318 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 8319 ret = btrfs_find_root(tree_root, &root->root_key, path, 8320 NULL, NULL); 8321 if (ret < 0) { 8322 btrfs_abort_transaction(trans, tree_root, ret); 8323 err = ret; 8324 goto out_end_trans; 8325 } else if (ret > 0) { 8326 /* if we fail to delete the orphan item this time 8327 * around, it'll get picked up the next time. 8328 * 8329 * The most common failure here is just -ENOENT. 8330 */ 8331 btrfs_del_orphan_item(trans, tree_root, 8332 root->root_key.objectid); 8333 } 8334 } 8335 8336 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) { 8337 btrfs_drop_and_free_fs_root(tree_root->fs_info, root); 8338 } else { 8339 free_extent_buffer(root->node); 8340 free_extent_buffer(root->commit_root); 8341 btrfs_put_fs_root(root); 8342 } 8343 root_dropped = true; 8344 out_end_trans: 8345 ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info); 8346 if (ret) 8347 printk_ratelimited(KERN_ERR "BTRFS: Failure %d " 8348 "running qgroup updates " 8349 "during snapshot delete. " 8350 "Quota is out of sync, " 8351 "rescan required.\n", ret); 8352 8353 btrfs_end_transaction_throttle(trans, tree_root); 8354 out_free: 8355 kfree(wc); 8356 btrfs_free_path(path); 8357 out: 8358 /* 8359 * So if we need to stop dropping the snapshot for whatever reason we 8360 * need to make sure to add it back to the dead root list so that we 8361 * keep trying to do the work later. This also cleans up roots if we 8362 * don't have it in the radix (like when we recover after a power fail 8363 * or unmount) so we don't leak memory. 8364 */ 8365 if (!for_reloc && root_dropped == false) 8366 btrfs_add_dead_root(root); 8367 if (err && err != -EAGAIN) 8368 btrfs_std_error(root->fs_info, err); 8369 return err; 8370 } 8371 8372 /* 8373 * drop subtree rooted at tree block 'node'. 
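* this uses the same walk_down_tree()/walk_up_tree() machinery as
* btrfs_drop_snapshot(), but the walk is bounded at 'parent'.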
8374 * 8375 * NOTE: this function will unlock and release tree block 'node' 8376 * only used by relocation code 8377 */ 8378 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 8379 struct btrfs_root *root, 8380 struct extent_buffer *node, 8381 struct extent_buffer *parent) 8382 { 8383 struct btrfs_path *path; 8384 struct walk_control *wc; 8385 int level; 8386 int parent_level; 8387 int ret = 0; 8388 int wret; 8389 8390 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 8391 8392 path = btrfs_alloc_path(); 8393 if (!path) 8394 return -ENOMEM; 8395 8396 wc = kzalloc(sizeof(*wc), GFP_NOFS); 8397 if (!wc) { 8398 btrfs_free_path(path); 8399 return -ENOMEM; 8400 } 8401 8402 btrfs_assert_tree_locked(parent); 8403 parent_level = btrfs_header_level(parent); 8404 extent_buffer_get(parent); 8405 path->nodes[parent_level] = parent; 8406 path->slots[parent_level] = btrfs_header_nritems(parent); 8407 8408 btrfs_assert_tree_locked(node); 8409 level = btrfs_header_level(node); 8410 path->nodes[level] = node; 8411 path->slots[level] = 0; 8412 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 8413 8414 wc->refs[parent_level] = 1; 8415 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; 8416 wc->level = level; 8417 wc->shared_level = -1; 8418 wc->stage = DROP_REFERENCE; 8419 wc->update_ref = 0; 8420 wc->keep_locks = 1; 8421 wc->for_reloc = 1; 8422 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); 8423 8424 while (1) { 8425 wret = walk_down_tree(trans, root, path, wc); 8426 if (wret < 0) { 8427 ret = wret; 8428 break; 8429 } 8430 8431 wret = walk_up_tree(trans, root, path, wc, parent_level); 8432 if (wret < 0) 8433 ret = wret; 8434 if (wret != 0) 8435 break; 8436 } 8437 8438 kfree(wc); 8439 btrfs_free_path(path); 8440 return ret; 8441 } 8442 8443 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) 8444 { 8445 u64 num_devices; 8446 u64 stripped; 8447 8448 /* 8449 * if restripe for this chunk_type is on pick target profile and 8450 * return, otherwise do the usual balance 8451 */ 8452 stripped = get_restripe_target(root->fs_info, flags); 8453 if (stripped) 8454 return extended_to_chunk(stripped); 8455 8456 num_devices = root->fs_info->fs_devices->rw_devices; 8457 8458 stripped = BTRFS_BLOCK_GROUP_RAID0 | 8459 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 | 8460 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10; 8461 8462 if (num_devices == 1) { 8463 stripped |= BTRFS_BLOCK_GROUP_DUP; 8464 stripped = flags & ~stripped; 8465 8466 /* turn raid0 into single device chunks */ 8467 if (flags & BTRFS_BLOCK_GROUP_RAID0) 8468 return stripped; 8469 8470 /* turn mirroring into duplication */ 8471 if (flags & (BTRFS_BLOCK_GROUP_RAID1 | 8472 BTRFS_BLOCK_GROUP_RAID10)) 8473 return stripped | BTRFS_BLOCK_GROUP_DUP; 8474 } else { 8475 /* they already had raid on here, just return */ 8476 if (flags & stripped) 8477 return flags; 8478 8479 stripped |= BTRFS_BLOCK_GROUP_DUP; 8480 stripped = flags & ~stripped; 8481 8482 /* switch duplicated blocks with raid1 */ 8483 if (flags & BTRFS_BLOCK_GROUP_DUP) 8484 return stripped | BTRFS_BLOCK_GROUP_RAID1; 8485 8486 /* this is drive concat, leave it alone */ 8487 } 8488 8489 return flags; 8490 } 8491 8492 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force) 8493 { 8494 struct btrfs_space_info *sinfo = cache->space_info; 8495 u64 num_bytes; 8496 u64 min_allocable_bytes; 8497 int ret = -ENOSPC; 8498 8499 8500 /* 8501 * We need some metadata space and system metadata space for 8502 * allocating chunks in some corner cases 
before we force
8503 * the block group to be read-only.
8504 */
8505 if ((sinfo->flags &
8506 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8507 !force)
8508 min_allocable_bytes = 1 * 1024 * 1024;
8509 else
8510 min_allocable_bytes = 0;
8511
8512 spin_lock(&sinfo->lock);
8513 spin_lock(&cache->lock);
8514
8515 if (cache->ro) {
8516 ret = 0;
8517 goto out;
8518 }
8519
8520 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8521 cache->bytes_super - btrfs_block_group_used(&cache->item);
8522
8523 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8524 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8525 min_allocable_bytes <= sinfo->total_bytes) {
8526 sinfo->bytes_readonly += num_bytes;
8527 cache->ro = 1;
8528 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8529 ret = 0;
8530 }
8531 out:
8532 spin_unlock(&cache->lock);
8533 spin_unlock(&sinfo->lock);
8534 return ret;
8535 }
8536
8537 int btrfs_set_block_group_ro(struct btrfs_root *root,
8538 struct btrfs_block_group_cache *cache)
8539
8540 {
8541 struct btrfs_trans_handle *trans;
8542 u64 alloc_flags;
8543 int ret;
8544
8545 BUG_ON(cache->ro);
8546
8547 trans = btrfs_join_transaction(root);
8548 if (IS_ERR(trans))
8549 return PTR_ERR(trans);
8550
8551 alloc_flags = update_block_group_flags(root, cache->flags);
8552 if (alloc_flags != cache->flags) {
8553 ret = do_chunk_alloc(trans, root, alloc_flags,
8554 CHUNK_ALLOC_FORCE);
8555 if (ret < 0)
8556 goto out;
8557 }
8558
8559 ret = set_block_group_ro(cache, 0);
8560 if (!ret)
8561 goto out;
8562 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8563 ret = do_chunk_alloc(trans, root, alloc_flags,
8564 CHUNK_ALLOC_FORCE);
8565 if (ret < 0)
8566 goto out;
8567 ret = set_block_group_ro(cache, 0);
8568 out:
8569 btrfs_end_transaction(trans, root);
8570 return ret;
8571 }
8572
8573 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8574 struct btrfs_root *root, u64 type)
8575 {
8576 u64 alloc_flags = get_alloc_profile(root, type);
8577 return do_chunk_alloc(trans, root, alloc_flags,
8578 CHUNK_ALLOC_FORCE);
8579 }
8580
8581 /*
8582 * helper to account the unused space of all the readonly block groups in
8583 * the space_info. takes mirrors into account.
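* e.g. a read-only 1G RAID1 block group with 256M used contributes
* (1G - 256M) * 2 = 1.5G to the reported free space.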
8584 */
8585 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8586 {
8587 struct btrfs_block_group_cache *block_group;
8588 u64 free_bytes = 0;
8589 int factor;
8590
8591 /* It's df, we don't care if it's racy */
8592 if (list_empty(&sinfo->ro_bgs))
8593 return 0;
8594
8595 spin_lock(&sinfo->lock);
8596 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8597 spin_lock(&block_group->lock);
8598
8599 if (!block_group->ro) {
8600 spin_unlock(&block_group->lock);
8601 continue;
8602 }
8603
8604 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8605 BTRFS_BLOCK_GROUP_RAID10 |
8606 BTRFS_BLOCK_GROUP_DUP))
8607 factor = 2;
8608 else
8609 factor = 1;
8610
8611 free_bytes += (block_group->key.offset -
8612 btrfs_block_group_used(&block_group->item)) *
8613 factor;
8614
8615 spin_unlock(&block_group->lock);
8616 }
8617 spin_unlock(&sinfo->lock);
8618
8619 return free_bytes;
8620 }
8621
8622 void btrfs_set_block_group_rw(struct btrfs_root *root,
8623 struct btrfs_block_group_cache *cache)
8624 {
8625 struct btrfs_space_info *sinfo = cache->space_info;
8626 u64 num_bytes;
8627
8628 BUG_ON(!cache->ro);
8629
8630 spin_lock(&sinfo->lock);
8631 spin_lock(&cache->lock);
8632 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8633 cache->bytes_super - btrfs_block_group_used(&cache->item);
8634 sinfo->bytes_readonly -= num_bytes;
8635 cache->ro = 0;
8636 list_del_init(&cache->ro_list);
8637 spin_unlock(&cache->lock);
8638 spin_unlock(&sinfo->lock);
8639 }
8640
8641 /*
8642 * checks to see if it's even possible to relocate this block group.
8643 *
8644 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
8645 * ok to go ahead and try.
8646 */
8647 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8648 {
8649 struct btrfs_block_group_cache *block_group;
8650 struct btrfs_space_info *space_info;
8651 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8652 struct btrfs_device *device;
8653 struct btrfs_trans_handle *trans;
8654 u64 min_free;
8655 u64 dev_min = 1;
8656 u64 dev_nr = 0;
8657 u64 target;
8658 int index;
8659 int full = 0;
8660 int ret = 0;
8661
8662 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8663
8664 /* odd, couldn't find the block group, leave it alone */
8665 if (!block_group)
8666 return -1;
8667
8668 min_free = btrfs_block_group_used(&block_group->item);
8669
8670 /* no bytes used, we're good */
8671 if (!min_free)
8672 goto out;
8673
8674 space_info = block_group->space_info;
8675 spin_lock(&space_info->lock);
8676
8677 full = space_info->full;
8678
8679 /*
8680 * if this is the last block group we have in this space, we can't
8681 * relocate it unless we're able to allocate a new chunk below.
8682 *
8683 * Otherwise, we need to make sure we have room in the space to handle
8684 * all of the extents from this block group. If we can, we're good.
8685 */
8686 if ((space_info->total_bytes != block_group->key.offset) &&
8687 (space_info->bytes_used + space_info->bytes_reserved +
8688 space_info->bytes_pinned + space_info->bytes_readonly +
8689 min_free < space_info->total_bytes)) {
8690 spin_unlock(&space_info->lock);
8691 goto out;
8692 }
8693 spin_unlock(&space_info->lock);
8694
8695 /*
8696 * ok we don't have enough space, but maybe we have free space on our
8697 * devices to allocate new chunks for relocation, so loop through our
8698 * alloc devices and guess if we have enough space.
if this block 8699 * group is going to be restriped, run checks against the target 8700 * profile instead of the current one. 8701 */ 8702 ret = -1; 8703 8704 /* 8705 * index: 8706 * 0: raid10 8707 * 1: raid1 8708 * 2: dup 8709 * 3: raid0 8710 * 4: single 8711 */ 8712 target = get_restripe_target(root->fs_info, block_group->flags); 8713 if (target) { 8714 index = __get_raid_index(extended_to_chunk(target)); 8715 } else { 8716 /* 8717 * this is just a balance, so if we were marked as full 8718 * we know there is no space for a new chunk 8719 */ 8720 if (full) 8721 goto out; 8722 8723 index = get_block_group_index(block_group); 8724 } 8725 8726 if (index == BTRFS_RAID_RAID10) { 8727 dev_min = 4; 8728 /* Divide by 2 */ 8729 min_free >>= 1; 8730 } else if (index == BTRFS_RAID_RAID1) { 8731 dev_min = 2; 8732 } else if (index == BTRFS_RAID_DUP) { 8733 /* Multiply by 2 */ 8734 min_free <<= 1; 8735 } else if (index == BTRFS_RAID_RAID0) { 8736 dev_min = fs_devices->rw_devices; 8737 do_div(min_free, dev_min); 8738 } 8739 8740 /* We need to do this so that we can look at pending chunks */ 8741 trans = btrfs_join_transaction(root); 8742 if (IS_ERR(trans)) { 8743 ret = PTR_ERR(trans); 8744 goto out; 8745 } 8746 8747 mutex_lock(&root->fs_info->chunk_mutex); 8748 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { 8749 u64 dev_offset; 8750 8751 /* 8752 * check to make sure we can actually find a chunk with enough 8753 * space to fit our block group in. 8754 */ 8755 if (device->total_bytes > device->bytes_used + min_free && 8756 !device->is_tgtdev_for_dev_replace) { 8757 ret = find_free_dev_extent(trans, device, min_free, 8758 &dev_offset, NULL); 8759 if (!ret) 8760 dev_nr++; 8761 8762 if (dev_nr >= dev_min) 8763 break; 8764 8765 ret = -1; 8766 } 8767 } 8768 mutex_unlock(&root->fs_info->chunk_mutex); 8769 btrfs_end_transaction(trans, root); 8770 out: 8771 btrfs_put_block_group(block_group); 8772 return ret; 8773 } 8774 8775 static int find_first_block_group(struct btrfs_root *root, 8776 struct btrfs_path *path, struct btrfs_key *key) 8777 { 8778 int ret = 0; 8779 struct btrfs_key found_key; 8780 struct extent_buffer *leaf; 8781 int slot; 8782 8783 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 8784 if (ret < 0) 8785 goto out; 8786 8787 while (1) { 8788 slot = path->slots[0]; 8789 leaf = path->nodes[0]; 8790 if (slot >= btrfs_header_nritems(leaf)) { 8791 ret = btrfs_next_leaf(root, path); 8792 if (ret == 0) 8793 continue; 8794 if (ret < 0) 8795 goto out; 8796 break; 8797 } 8798 btrfs_item_key_to_cpu(leaf, &found_key, slot); 8799 8800 if (found_key.objectid >= key->objectid && 8801 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 8802 ret = 0; 8803 goto out; 8804 } 8805 path->slots[0]++; 8806 } 8807 out: 8808 return ret; 8809 } 8810 8811 void btrfs_put_block_group_cache(struct btrfs_fs_info *info) 8812 { 8813 struct btrfs_block_group_cache *block_group; 8814 u64 last = 0; 8815 8816 while (1) { 8817 struct inode *inode; 8818 8819 block_group = btrfs_lookup_first_block_group(info, last); 8820 while (block_group) { 8821 spin_lock(&block_group->lock); 8822 if (block_group->iref) 8823 break; 8824 spin_unlock(&block_group->lock); 8825 block_group = next_block_group(info->tree_root, 8826 block_group); 8827 } 8828 if (!block_group) { 8829 if (last == 0) 8830 break; 8831 last = 0; 8832 continue; 8833 } 8834 8835 inode = block_group->inode; 8836 block_group->iref = 0; 8837 block_group->inode = NULL; 8838 spin_unlock(&block_group->lock); 8839 iput(inode); 8840 last = block_group->key.objectid 
+ block_group->key.offset; 8841 btrfs_put_block_group(block_group); 8842 } 8843 } 8844 8845 int btrfs_free_block_groups(struct btrfs_fs_info *info) 8846 { 8847 struct btrfs_block_group_cache *block_group; 8848 struct btrfs_space_info *space_info; 8849 struct btrfs_caching_control *caching_ctl; 8850 struct rb_node *n; 8851 8852 down_write(&info->commit_root_sem); 8853 while (!list_empty(&info->caching_block_groups)) { 8854 caching_ctl = list_entry(info->caching_block_groups.next, 8855 struct btrfs_caching_control, list); 8856 list_del(&caching_ctl->list); 8857 put_caching_control(caching_ctl); 8858 } 8859 up_write(&info->commit_root_sem); 8860 8861 spin_lock(&info->unused_bgs_lock); 8862 while (!list_empty(&info->unused_bgs)) { 8863 block_group = list_first_entry(&info->unused_bgs, 8864 struct btrfs_block_group_cache, 8865 bg_list); 8866 list_del_init(&block_group->bg_list); 8867 btrfs_put_block_group(block_group); 8868 } 8869 spin_unlock(&info->unused_bgs_lock); 8870 8871 spin_lock(&info->block_group_cache_lock); 8872 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { 8873 block_group = rb_entry(n, struct btrfs_block_group_cache, 8874 cache_node); 8875 rb_erase(&block_group->cache_node, 8876 &info->block_group_cache_tree); 8877 RB_CLEAR_NODE(&block_group->cache_node); 8878 spin_unlock(&info->block_group_cache_lock); 8879 8880 down_write(&block_group->space_info->groups_sem); 8881 list_del(&block_group->list); 8882 up_write(&block_group->space_info->groups_sem); 8883 8884 if (block_group->cached == BTRFS_CACHE_STARTED) 8885 wait_block_group_cache_done(block_group); 8886 8887 /* 8888 * We haven't cached this block group, which means we could 8889 * possibly have excluded extents on this block group. 8890 */ 8891 if (block_group->cached == BTRFS_CACHE_NO || 8892 block_group->cached == BTRFS_CACHE_ERROR) 8893 free_excluded_extents(info->extent_root, block_group); 8894 8895 btrfs_remove_free_space_cache(block_group); 8896 btrfs_put_block_group(block_group); 8897 8898 spin_lock(&info->block_group_cache_lock); 8899 } 8900 spin_unlock(&info->block_group_cache_lock); 8901 8902 /* now that all the block groups are freed, go through and 8903 * free all the space_info structs. This is only called during 8904 * the final stages of unmount, and so we know nobody is 8905 * using them. We call synchronize_rcu() once before we start, 8906 * just to be on the safe side. 
8907 */ 8908 synchronize_rcu(); 8909 8910 release_global_block_rsv(info); 8911 8912 while (!list_empty(&info->space_info)) { 8913 int i; 8914 8915 space_info = list_entry(info->space_info.next, 8916 struct btrfs_space_info, 8917 list); 8918 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) { 8919 if (WARN_ON(space_info->bytes_pinned > 0 || 8920 space_info->bytes_reserved > 0 || 8921 space_info->bytes_may_use > 0)) { 8922 dump_space_info(space_info, 0, 0); 8923 } 8924 } 8925 list_del(&space_info->list); 8926 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { 8927 struct kobject *kobj; 8928 kobj = space_info->block_group_kobjs[i]; 8929 space_info->block_group_kobjs[i] = NULL; 8930 if (kobj) { 8931 kobject_del(kobj); 8932 kobject_put(kobj); 8933 } 8934 } 8935 kobject_del(&space_info->kobj); 8936 kobject_put(&space_info->kobj); 8937 } 8938 return 0; 8939 } 8940 8941 static void __link_block_group(struct btrfs_space_info *space_info, 8942 struct btrfs_block_group_cache *cache) 8943 { 8944 int index = get_block_group_index(cache); 8945 bool first = false; 8946 8947 down_write(&space_info->groups_sem); 8948 if (list_empty(&space_info->block_groups[index])) 8949 first = true; 8950 list_add_tail(&cache->list, &space_info->block_groups[index]); 8951 up_write(&space_info->groups_sem); 8952 8953 if (first) { 8954 struct raid_kobject *rkobj; 8955 int ret; 8956 8957 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS); 8958 if (!rkobj) 8959 goto out_err; 8960 rkobj->raid_type = index; 8961 kobject_init(&rkobj->kobj, &btrfs_raid_ktype); 8962 ret = kobject_add(&rkobj->kobj, &space_info->kobj, 8963 "%s", get_raid_name(index)); 8964 if (ret) { 8965 kobject_put(&rkobj->kobj); 8966 goto out_err; 8967 } 8968 space_info->block_group_kobjs[index] = &rkobj->kobj; 8969 } 8970 8971 return; 8972 out_err: 8973 pr_warn("BTRFS: failed to add kobject for block cache. 
ignoring.\n"); 8974 } 8975 8976 static struct btrfs_block_group_cache * 8977 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size) 8978 { 8979 struct btrfs_block_group_cache *cache; 8980 8981 cache = kzalloc(sizeof(*cache), GFP_NOFS); 8982 if (!cache) 8983 return NULL; 8984 8985 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), 8986 GFP_NOFS); 8987 if (!cache->free_space_ctl) { 8988 kfree(cache); 8989 return NULL; 8990 } 8991 8992 cache->key.objectid = start; 8993 cache->key.offset = size; 8994 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 8995 8996 cache->sectorsize = root->sectorsize; 8997 cache->fs_info = root->fs_info; 8998 cache->full_stripe_len = btrfs_full_stripe_len(root, 8999 &root->fs_info->mapping_tree, 9000 start); 9001 atomic_set(&cache->count, 1); 9002 spin_lock_init(&cache->lock); 9003 init_rwsem(&cache->data_rwsem); 9004 INIT_LIST_HEAD(&cache->list); 9005 INIT_LIST_HEAD(&cache->cluster_list); 9006 INIT_LIST_HEAD(&cache->bg_list); 9007 INIT_LIST_HEAD(&cache->ro_list); 9008 btrfs_init_free_space_ctl(cache); 9009 atomic_set(&cache->trimming, 0); 9010 9011 return cache; 9012 } 9013 9014 int btrfs_read_block_groups(struct btrfs_root *root) 9015 { 9016 struct btrfs_path *path; 9017 int ret; 9018 struct btrfs_block_group_cache *cache; 9019 struct btrfs_fs_info *info = root->fs_info; 9020 struct btrfs_space_info *space_info; 9021 struct btrfs_key key; 9022 struct btrfs_key found_key; 9023 struct extent_buffer *leaf; 9024 int need_clear = 0; 9025 u64 cache_gen; 9026 9027 root = info->extent_root; 9028 key.objectid = 0; 9029 key.offset = 0; 9030 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 9031 path = btrfs_alloc_path(); 9032 if (!path) 9033 return -ENOMEM; 9034 path->reada = 1; 9035 9036 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy); 9037 if (btrfs_test_opt(root, SPACE_CACHE) && 9038 btrfs_super_generation(root->fs_info->super_copy) != cache_gen) 9039 need_clear = 1; 9040 if (btrfs_test_opt(root, CLEAR_CACHE)) 9041 need_clear = 1; 9042 9043 while (1) { 9044 ret = find_first_block_group(root, path, &key); 9045 if (ret > 0) 9046 break; 9047 if (ret != 0) 9048 goto error; 9049 9050 leaf = path->nodes[0]; 9051 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 9052 9053 cache = btrfs_create_block_group_cache(root, found_key.objectid, 9054 found_key.offset); 9055 if (!cache) { 9056 ret = -ENOMEM; 9057 goto error; 9058 } 9059 9060 if (need_clear) { 9061 /* 9062 * When we mount with old space cache, we need to 9063 * set BTRFS_DC_CLEAR and set dirty flag. 9064 * 9065 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we 9066 * truncate the old free space cache inode and 9067 * setup a new one. 9068 * b) Setting 'dirty flag' makes sure that we flush 9069 * the new space cache info onto disk. 9070 */ 9071 cache->disk_cache_state = BTRFS_DC_CLEAR; 9072 if (btrfs_test_opt(root, SPACE_CACHE)) 9073 cache->dirty = 1; 9074 } 9075 9076 read_extent_buffer(leaf, &cache->item, 9077 btrfs_item_ptr_offset(leaf, path->slots[0]), 9078 sizeof(cache->item)); 9079 cache->flags = btrfs_block_group_flags(&cache->item); 9080 9081 key.objectid = found_key.objectid + found_key.offset; 9082 btrfs_release_path(path); 9083 9084 /* 9085 * We need to exclude the super stripes now so that the space 9086 * info has super bytes accounted for, otherwise we'll think 9087 * we have more space than we actually do. 9088 */ 9089 ret = exclude_super_stripes(root, cache); 9090 if (ret) { 9091 /* 9092 * We may have excluded something, so call this just in 9093 * case. 
9094 */
9095 free_excluded_extents(root, cache);
9096 btrfs_put_block_group(cache);
9097 goto error;
9098 }
9099
9100 /*
9101 * check for two cases, either we are full, and therefore
9102 * don't need to bother with the caching work since we won't
9103 * find any space, or we are empty, and we can just add all
9104 * the space in and be done with it. This saves us a lot of
9105 * time, particularly in the full case.
9106 */
9107 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9108 cache->last_byte_to_unpin = (u64)-1;
9109 cache->cached = BTRFS_CACHE_FINISHED;
9110 free_excluded_extents(root, cache);
9111 } else if (btrfs_block_group_used(&cache->item) == 0) {
9112 cache->last_byte_to_unpin = (u64)-1;
9113 cache->cached = BTRFS_CACHE_FINISHED;
9114 add_new_free_space(cache, root->fs_info,
9115 found_key.objectid,
9116 found_key.objectid +
9117 found_key.offset);
9118 free_excluded_extents(root, cache);
9119 }
9120
9121 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9122 if (ret) {
9123 btrfs_remove_free_space_cache(cache);
9124 btrfs_put_block_group(cache);
9125 goto error;
9126 }
9127
9128 ret = update_space_info(info, cache->flags, found_key.offset,
9129 btrfs_block_group_used(&cache->item),
9130 &space_info);
9131 if (ret) {
9132 btrfs_remove_free_space_cache(cache);
9133 spin_lock(&info->block_group_cache_lock);
9134 rb_erase(&cache->cache_node,
9135 &info->block_group_cache_tree);
9136 RB_CLEAR_NODE(&cache->cache_node);
9137 spin_unlock(&info->block_group_cache_lock);
9138 btrfs_put_block_group(cache);
9139 goto error;
9140 }
9141
9142 cache->space_info = space_info;
9143 spin_lock(&cache->space_info->lock);
9144 cache->space_info->bytes_readonly += cache->bytes_super;
9145 spin_unlock(&cache->space_info->lock);
9146
9147 __link_block_group(space_info, cache);
9148
9149 set_avail_alloc_bits(root->fs_info, cache->flags);
9150 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9151 set_block_group_ro(cache, 1);
9152 } else if (btrfs_block_group_used(&cache->item) == 0) {
9153 spin_lock(&info->unused_bgs_lock);
9154 /* Should always be true but just in case. */
9155 if (list_empty(&cache->bg_list)) {
9156 btrfs_get_block_group(cache);
9157 list_add_tail(&cache->bg_list,
9158 &info->unused_bgs);
9159 }
9160 spin_unlock(&info->unused_bgs_lock);
9161 }
9162 }
9163
9164 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9165 if (!(get_alloc_profile(root, space_info->flags) &
9166 (BTRFS_BLOCK_GROUP_RAID10 |
9167 BTRFS_BLOCK_GROUP_RAID1 |
9168 BTRFS_BLOCK_GROUP_RAID5 |
9169 BTRFS_BLOCK_GROUP_RAID6 |
9170 BTRFS_BLOCK_GROUP_DUP)))
9171 continue;
9172 /*
9173 * avoid allocating from un-mirrored block groups if there are
9174 * mirrored block groups.
9175 */ 9176 list_for_each_entry(cache, 9177 &space_info->block_groups[BTRFS_RAID_RAID0], 9178 list) 9179 set_block_group_ro(cache, 1); 9180 list_for_each_entry(cache, 9181 &space_info->block_groups[BTRFS_RAID_SINGLE], 9182 list) 9183 set_block_group_ro(cache, 1); 9184 } 9185 9186 init_global_block_rsv(info); 9187 ret = 0; 9188 error: 9189 btrfs_free_path(path); 9190 return ret; 9191 } 9192 9193 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, 9194 struct btrfs_root *root) 9195 { 9196 struct btrfs_block_group_cache *block_group, *tmp; 9197 struct btrfs_root *extent_root = root->fs_info->extent_root; 9198 struct btrfs_block_group_item item; 9199 struct btrfs_key key; 9200 int ret = 0; 9201 9202 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { 9203 if (ret) 9204 goto next; 9205 9206 spin_lock(&block_group->lock); 9207 memcpy(&item, &block_group->item, sizeof(item)); 9208 memcpy(&key, &block_group->key, sizeof(key)); 9209 spin_unlock(&block_group->lock); 9210 9211 ret = btrfs_insert_item(trans, extent_root, &key, &item, 9212 sizeof(item)); 9213 if (ret) 9214 btrfs_abort_transaction(trans, extent_root, ret); 9215 ret = btrfs_finish_chunk_alloc(trans, extent_root, 9216 key.objectid, key.offset); 9217 if (ret) 9218 btrfs_abort_transaction(trans, extent_root, ret); 9219 next: 9220 list_del_init(&block_group->bg_list); 9221 } 9222 } 9223 9224 int btrfs_make_block_group(struct btrfs_trans_handle *trans, 9225 struct btrfs_root *root, u64 bytes_used, 9226 u64 type, u64 chunk_objectid, u64 chunk_offset, 9227 u64 size) 9228 { 9229 int ret; 9230 struct btrfs_root *extent_root; 9231 struct btrfs_block_group_cache *cache; 9232 9233 extent_root = root->fs_info->extent_root; 9234 9235 btrfs_set_log_full_commit(root->fs_info, trans); 9236 9237 cache = btrfs_create_block_group_cache(root, chunk_offset, size); 9238 if (!cache) 9239 return -ENOMEM; 9240 9241 btrfs_set_block_group_used(&cache->item, bytes_used); 9242 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); 9243 btrfs_set_block_group_flags(&cache->item, type); 9244 9245 cache->flags = type; 9246 cache->last_byte_to_unpin = (u64)-1; 9247 cache->cached = BTRFS_CACHE_FINISHED; 9248 ret = exclude_super_stripes(root, cache); 9249 if (ret) { 9250 /* 9251 * We may have excluded something, so call this just in 9252 * case. 
9253 */
9254 free_excluded_extents(root, cache);
9255 btrfs_put_block_group(cache);
9256 return ret;
9257 }
9258
9259 add_new_free_space(cache, root->fs_info, chunk_offset,
9260 chunk_offset + size);
9261
9262 free_excluded_extents(root, cache);
9263
9264 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9265 if (ret) {
9266 btrfs_remove_free_space_cache(cache);
9267 btrfs_put_block_group(cache);
9268 return ret;
9269 }
9270
9271 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9272 &cache->space_info);
9273 if (ret) {
9274 btrfs_remove_free_space_cache(cache);
9275 spin_lock(&root->fs_info->block_group_cache_lock);
9276 rb_erase(&cache->cache_node,
9277 &root->fs_info->block_group_cache_tree);
9278 RB_CLEAR_NODE(&cache->cache_node);
9279 spin_unlock(&root->fs_info->block_group_cache_lock);
9280 btrfs_put_block_group(cache);
9281 return ret;
9282 }
9283 update_global_block_rsv(root->fs_info);
9284
9285 spin_lock(&cache->space_info->lock);
9286 cache->space_info->bytes_readonly += cache->bytes_super;
9287 spin_unlock(&cache->space_info->lock);
9288
9289 __link_block_group(cache->space_info, cache);
9290
9291 list_add_tail(&cache->bg_list, &trans->new_bgs);
9292
9293 set_avail_alloc_bits(extent_root->fs_info, type);
9294
9295 return 0;
9296 }
9297
9298 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9299 {
9300 u64 extra_flags = chunk_to_extended(flags) &
9301 BTRFS_EXTENDED_PROFILE_MASK;
9302
9303 write_seqlock(&fs_info->profiles_lock);
9304 if (flags & BTRFS_BLOCK_GROUP_DATA)
9305 fs_info->avail_data_alloc_bits &= ~extra_flags;
9306 if (flags & BTRFS_BLOCK_GROUP_METADATA)
9307 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9308 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9309 fs_info->avail_system_alloc_bits &= ~extra_flags;
9310 write_sequnlock(&fs_info->profiles_lock);
9311 }
9312
9313 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9314 struct btrfs_root *root, u64 group_start,
9315 struct extent_map *em)
9316 {
9317 struct btrfs_path *path;
9318 struct btrfs_block_group_cache *block_group;
9319 struct btrfs_free_cluster *cluster;
9320 struct btrfs_root *tree_root = root->fs_info->tree_root;
9321 struct btrfs_key key;
9322 struct inode *inode;
9323 struct kobject *kobj = NULL;
9324 int ret;
9325 int index;
9326 int factor;
9327 struct btrfs_caching_control *caching_ctl = NULL;
9328 bool remove_em;
9329
9330 root = root->fs_info->extent_root;
9331
9332 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9333 BUG_ON(!block_group);
9334 BUG_ON(!block_group->ro);
9335
9336 /*
9337 * Free the reserved super bytes from this block group before
9338 * removing it.
9339 */ 9340 free_excluded_extents(root, block_group); 9341 9342 memcpy(&key, &block_group->key, sizeof(key)); 9343 index = get_block_group_index(block_group); 9344 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP | 9345 BTRFS_BLOCK_GROUP_RAID1 | 9346 BTRFS_BLOCK_GROUP_RAID10)) 9347 factor = 2; 9348 else 9349 factor = 1; 9350 9351 /* make sure this block group isn't part of an allocation cluster */ 9352 cluster = &root->fs_info->data_alloc_cluster; 9353 spin_lock(&cluster->refill_lock); 9354 btrfs_return_cluster_to_free_space(block_group, cluster); 9355 spin_unlock(&cluster->refill_lock); 9356 9357 /* 9358 * make sure this block group isn't part of a metadata 9359 * allocation cluster 9360 */ 9361 cluster = &root->fs_info->meta_alloc_cluster; 9362 spin_lock(&cluster->refill_lock); 9363 btrfs_return_cluster_to_free_space(block_group, cluster); 9364 spin_unlock(&cluster->refill_lock); 9365 9366 path = btrfs_alloc_path(); 9367 if (!path) { 9368 ret = -ENOMEM; 9369 goto out; 9370 } 9371 9372 inode = lookup_free_space_inode(tree_root, block_group, path); 9373 if (!IS_ERR(inode)) { 9374 ret = btrfs_orphan_add(trans, inode); 9375 if (ret) { 9376 btrfs_add_delayed_iput(inode); 9377 goto out; 9378 } 9379 clear_nlink(inode); 9380 /* One for the block groups ref */ 9381 spin_lock(&block_group->lock); 9382 if (block_group->iref) { 9383 block_group->iref = 0; 9384 block_group->inode = NULL; 9385 spin_unlock(&block_group->lock); 9386 iput(inode); 9387 } else { 9388 spin_unlock(&block_group->lock); 9389 } 9390 /* One for our lookup ref */ 9391 btrfs_add_delayed_iput(inode); 9392 } 9393 9394 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 9395 key.offset = block_group->key.objectid; 9396 key.type = 0; 9397 9398 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); 9399 if (ret < 0) 9400 goto out; 9401 if (ret > 0) 9402 btrfs_release_path(path); 9403 if (ret == 0) { 9404 ret = btrfs_del_item(trans, tree_root, path); 9405 if (ret) 9406 goto out; 9407 btrfs_release_path(path); 9408 } 9409 9410 spin_lock(&root->fs_info->block_group_cache_lock); 9411 rb_erase(&block_group->cache_node, 9412 &root->fs_info->block_group_cache_tree); 9413 RB_CLEAR_NODE(&block_group->cache_node); 9414 9415 if (root->fs_info->first_logical_byte == block_group->key.objectid) 9416 root->fs_info->first_logical_byte = (u64)-1; 9417 spin_unlock(&root->fs_info->block_group_cache_lock); 9418 9419 down_write(&block_group->space_info->groups_sem); 9420 /* 9421 * we must use list_del_init so people can check to see if they 9422 * are still on the list after taking the semaphore 9423 */ 9424 list_del_init(&block_group->list); 9425 if (list_empty(&block_group->space_info->block_groups[index])) { 9426 kobj = block_group->space_info->block_group_kobjs[index]; 9427 block_group->space_info->block_group_kobjs[index] = NULL; 9428 clear_avail_alloc_bits(root->fs_info, block_group->flags); 9429 } 9430 up_write(&block_group->space_info->groups_sem); 9431 if (kobj) { 9432 kobject_del(kobj); 9433 kobject_put(kobj); 9434 } 9435 9436 if (block_group->has_caching_ctl) 9437 caching_ctl = get_caching_control(block_group); 9438 if (block_group->cached == BTRFS_CACHE_STARTED) 9439 wait_block_group_cache_done(block_group); 9440 if (block_group->has_caching_ctl) { 9441 down_write(&root->fs_info->commit_root_sem); 9442 if (!caching_ctl) { 9443 struct btrfs_caching_control *ctl; 9444 9445 list_for_each_entry(ctl, 9446 &root->fs_info->caching_block_groups, list) 9447 if (ctl->block_group == block_group) { 9448 caching_ctl = ctl; 9449 atomic_inc(&caching_ctl->count); 
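/* One extra ref for us; see the two put_caching_control() calls below. */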
9450 break;
9451 }
9452 }
9453 if (caching_ctl)
9454 list_del_init(&caching_ctl->list);
9455 up_write(&root->fs_info->commit_root_sem);
9456 if (caching_ctl) {
9457 /* Once for the caching bgs list and once for us. */
9458 put_caching_control(caching_ctl);
9459 put_caching_control(caching_ctl);
9460 }
9461 }
9462
9463 btrfs_remove_free_space_cache(block_group);
9464
9465 spin_lock(&block_group->space_info->lock);
9466 list_del_init(&block_group->ro_list);
9467 block_group->space_info->total_bytes -= block_group->key.offset;
9468 block_group->space_info->bytes_readonly -= block_group->key.offset;
9469 block_group->space_info->disk_total -= block_group->key.offset * factor;
9470 spin_unlock(&block_group->space_info->lock);
9471
9472 memcpy(&key, &block_group->key, sizeof(key));
9473
9474 lock_chunks(root);
9475 if (!list_empty(&em->list)) {
9476 /* We're in the transaction->pending_chunks list. */
9477 free_extent_map(em);
9478 }
9479 spin_lock(&block_group->lock);
9480 block_group->removed = 1;
9481 /*
9482 * At this point trimming can't start on this block group, because we
9483 * removed the block group from the fs_info->block_group_cache_tree
9484 * so no one can find it anymore, and even if someone already got this
9485 * block group before we removed it from the rbtree, they have already
9486 * incremented block_group->trimming - if they didn't, they won't find
9487 * any free space entries because we already removed them all when we
9488 * called btrfs_remove_free_space_cache().
9489 *
9490 * And we must not remove the extent map from the fs_info->mapping_tree,
9491 * so that the same logical address range and physical device space
9492 * ranges are not reused for a new block group. This is because our
9493 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9494 * completely transactionless, so while it is trimming a range the
9495 * currently running transaction might finish and a new one start,
9496 * allowing for new block groups to be created that can reuse the same
9497 * physical device locations unless we take this special care.
9498 */
9499 remove_em = (atomic_read(&block_group->trimming) == 0);
9500 /*
9501 * Make sure a trimmer task always sees the em in the pinned_chunks list
9502 * if it sees block_group->removed == 1 (needs to lock block_group->lock
9503 * before checking block_group->removed).
9504 */
9505 if (!remove_em) {
9506 /*
9507 * Our em might be in trans->transaction->pending_chunks which
9508 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9509 * and so is the fs_info->pinned_chunks list.
9510 *
9511 * So at this point we must be holding the chunk_mutex to avoid
9512 * any races with chunk allocation (more specifically at
9513 * volumes.c:contains_pending_extent()), to ensure it always
9514 * sees the em, either in the pending_chunks list or in the
9515 * pinned_chunks list.
9516 */
9517 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9518 }
9519 spin_unlock(&block_group->lock);
9520
9521 if (remove_em) {
9522 struct extent_map_tree *em_tree;
9523
9524 em_tree = &root->fs_info->mapping_tree.map_tree;
9525 write_lock(&em_tree->lock);
9526 /*
9527 * The em might be in the pending_chunks list, so make sure the
9528 * chunk mutex is locked, since remove_extent_mapping() will
9529 * delete us from that list.
9530 */ 9531 remove_extent_mapping(em_tree, em); 9532 write_unlock(&em_tree->lock); 9533 /* once for the tree */ 9534 free_extent_map(em); 9535 } 9536 9537 unlock_chunks(root); 9538 9539 btrfs_put_block_group(block_group); 9540 btrfs_put_block_group(block_group); 9541 9542 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 9543 if (ret > 0) 9544 ret = -EIO; 9545 if (ret < 0) 9546 goto out; 9547 9548 ret = btrfs_del_item(trans, root, path); 9549 out: 9550 btrfs_free_path(path); 9551 return ret; 9552 } 9553 9554 /* 9555 * Process the unused_bgs list and remove any that don't have any allocated 9556 * space inside of them. 9557 */ 9558 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) 9559 { 9560 struct btrfs_block_group_cache *block_group; 9561 struct btrfs_space_info *space_info; 9562 struct btrfs_root *root = fs_info->extent_root; 9563 struct btrfs_trans_handle *trans; 9564 int ret = 0; 9565 9566 if (!fs_info->open) 9567 return; 9568 9569 spin_lock(&fs_info->unused_bgs_lock); 9570 while (!list_empty(&fs_info->unused_bgs)) { 9571 u64 start, end; 9572 9573 block_group = list_first_entry(&fs_info->unused_bgs, 9574 struct btrfs_block_group_cache, 9575 bg_list); 9576 space_info = block_group->space_info; 9577 list_del_init(&block_group->bg_list); 9578 if (ret || btrfs_mixed_space_info(space_info)) { 9579 btrfs_put_block_group(block_group); 9580 continue; 9581 } 9582 spin_unlock(&fs_info->unused_bgs_lock); 9583 9584 /* Don't want to race with allocators so take the groups_sem */ 9585 down_write(&space_info->groups_sem); 9586 spin_lock(&block_group->lock); 9587 if (block_group->reserved || 9588 btrfs_block_group_used(&block_group->item) || 9589 block_group->ro) { 9590 /* 9591 * We want to bail if we made new allocations or have 9592 * outstanding allocations in this block group. We do 9593 * the ro check in case balance is currently acting on 9594 * this block group. 9595 */ 9596 spin_unlock(&block_group->lock); 9597 up_write(&space_info->groups_sem); 9598 goto next; 9599 } 9600 spin_unlock(&block_group->lock); 9601 9602 /* We don't want to force the issue, only flip if it's ok. */ 9603 ret = set_block_group_ro(block_group, 0); 9604 up_write(&space_info->groups_sem); 9605 if (ret < 0) { 9606 ret = 0; 9607 goto next; 9608 } 9609 9610 /* 9611 * Want to do this before we do anything else so we can recover 9612 * properly if we fail to join the transaction. 9613 */ 9614 trans = btrfs_join_transaction(root); 9615 if (IS_ERR(trans)) { 9616 btrfs_set_block_group_rw(root, block_group); 9617 ret = PTR_ERR(trans); 9618 goto next; 9619 } 9620 9621 /* 9622 * We could have pending pinned extents for this block group, 9623 * just delete them, we don't care about them anymore. 9624 */ 9625 start = block_group->key.objectid; 9626 end = start + block_group->key.offset - 1; 9627 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, 9628 EXTENT_DIRTY, GFP_NOFS); 9629 if (ret) { 9630 btrfs_set_block_group_rw(root, block_group); 9631 goto end_trans; 9632 } 9633 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, 9634 EXTENT_DIRTY, GFP_NOFS); 9635 if (ret) { 9636 btrfs_set_block_group_rw(root, block_group); 9637 goto end_trans; 9638 } 9639 9640 /* Reset pinned so btrfs_put_block_group doesn't complain */ 9641 block_group->pinned = 0; 9642 9643 /* 9644 * Btrfs_remove_chunk will abort the transaction if things go 9645 * horribly wrong. 
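* If it fails without aborting, the error is kept in 'ret' and the
* check at the top of the loop skips any further deletions.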
/*
 * Process the unused_bgs list and remove any block groups that don't have
 * any allocated space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (!fs_info->open)
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		u64 start, end;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		space_info = block_group->space_info;
		list_del_init(&block_group->bg_list);
		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		/* Don't want to race with allocators, so take the groups_sem. */
		down_write(&space_info->groups_sem);
		spin_lock(&block_group->lock);
		if (block_group->reserved ||
		    btrfs_block_group_used(&block_group->item) ||
		    block_group->ro) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group.  We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = set_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * We want to do this before anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_set_block_group_rw(root, block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		start = block_group->key.objectid;
		end = start + block_group->key.offset - 1;
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
					EXTENT_DIRTY, GFP_NOFS);
		if (ret) {
			btrfs_set_block_group_rw(root, block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
					EXTENT_DIRTY, GFP_NOFS);
		if (ret) {
			btrfs_set_block_group_rw(root, block_group);
			goto end_trans;
		}

		/* Reset pinned so btrfs_put_block_group() doesn't complain. */
		block_group->pinned = 0;

		/*
		 * btrfs_remove_chunk() will abort the transaction if things
		 * go horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, root,
					 block_group->key.objectid);
end_trans:
		btrfs_end_transaction(trans, root);
next:
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end, false);
}

int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * Try to trim the whole FS space; the first block group may start at
	 * a non-zero offset.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}
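/*
 * Illustrative caller sketch (assumed, mirroring the fitrim ioctl path
 * rather than copied from it): user space passes a struct fstrim_range;
 * clamping range.len to the FS size makes btrfs_trim_fs() above take the
 * whole-filesystem lookup path, and on return range.len holds the number of
 * bytes actually trimmed:
 *
 *	struct fstrim_range range = { .start = 0, .minlen = 0 };
 *
 *	range.len = btrfs_super_total_bytes(fs_info->super_copy);
 *	ret = btrfs_trim_fs(root, &range);
 *	if (!ret)
 *		trimmed_bytes = range.len;
 */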
/*
 * btrfs_{start,end}_write_no_snapshoting() are similar to
 * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
 * data into the page cache through nocow before the subvolume is snapshotted
 * and then flushing that data to disk only after the snapshot was created,
 * and to prevent operations that would make the snapshot inconsistent while
 * snapshotting is ongoing (writes followed by expanding truncates, for
 * example).
 */
void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure the counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
{
	if (atomic_read(&root->will_be_snapshoted))
		return 0;

	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure the counter is updated before we check for snapshot
	 * creation.
	 */
	smp_mb();
	if (atomic_read(&root->will_be_snapshoted)) {
		btrfs_end_write_no_snapshoting(root);
		return 0;
	}
	return 1;
}
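/*
 * Illustrative usage sketch (assumed, not an actual caller from this file):
 * a nocow writer brackets its page cache writes so that a concurrent
 * snapshot either waits for it to finish or forces it to fall back to cow:
 *
 *	if (!btrfs_start_write_no_snapshoting(root))
 *		return -EAGAIN;	 (snapshot pending, fall back to cow)
 *	... write the data into the page cache through nocow ...
 *	btrfs_end_write_no_snapshoting(root);
 */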