/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		WARN_ON(cache->reserved_pinned > 0);
		kfree(cache);
	}
}
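
/*
 * Usage sketch (illustrative): block groups are reference counted, and
 * the lookup helpers below take a reference on the returned group, so
 * callers pair each successful lookup with a put:
 *
 *	cache = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (cache) {
 *		... use the block group ...
 *		btrfs_put_block_group(cache);
 *	}
 */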

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
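
/*
 * Example (illustrative): with two 1GB block groups starting at 0 and at
 * 1GB, searching for bytenr 512MB with contains == 1 returns the group at
 * 0 (the group containing that byte), while contains == 0 returns the
 * group starting at 1GB (the first group starting at or after that byte).
 */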

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	exclude_super_stripes(extent_root, block_group);
	spin_lock(&block_group->space_info->lock);
	block_group->space_info->bytes_readonly += block_group->bytes_super;
	spin_unlock(&block_group->space_info->lock);

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	btrfs_put_block_group(block_group);

	return 0;
}
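
/*
 * A short summary of the caching state machine driven by the kthread
 * above and by cache_block_group() below: a block group starts out
 * BTRFS_CACHE_NO, moves to BTRFS_CACHE_STARTED once either the free
 * space cache load or the caching kthread begins, and ends at
 * BTRFS_CACHE_FINISHED, at which point block_group_cache_done()
 * returns true and allocators may trust the free space lists.
 */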

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	/*
	 * We can't do the read from on-disk cache during a commit since we
	 * need to have the normal tree locking.  Also if we are currently
	 * trying to allocate blocks for the tree root we can't do the fast
	 * caching since we likely hold important locks.
	 */
	if (!trans->transaction->in_commit &&
	    (root && root != root->fs_info->tree_root)) {
		spin_lock(&cache->lock);
		if (cache->cached != BTRFS_CACHE_NO) {
			spin_unlock(&cache->lock);
			return 0;
		}
		cache->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&cache->lock);

		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			cache->cached = BTRFS_CACHE_NO;
		}
		spin_unlock(&cache->lock);
		if (ret == 1)
			return 0;
	}

	if (load_cache_only)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);
	btrfs_get_block_group(cache);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}
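
/*
 * Worked example (illustrative): div_factor(1024, 9) computes
 * 1024 * 9 / 10 = 921 (do_div truncates), i.e. roughly 90% of the
 * input.  div_factor_fine() does the same at percent granularity,
 * e.g. div_factor_fine(1024, 75) = 768.
 */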

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of extent.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs were run.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
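
/*
 * Usage sketch (illustrative): a caller deciding how to COW a tree block
 * can ask for the effective ref count and flags, with all queued delayed
 * ref updates folded in:
 *
 *	u64 refs, flags;
 *	ret = btrfs_lookup_extent_info(trans, root, buf->start,
 *				       buf->len, &refs, &flags);
 *	if (ret == 0 && refs > 1) {
 *		... the block is shared; the back ref rules below apply ...
 *	}
 */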

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */
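
/*
 * Key composition example (illustrative, with made-up byte numbers): a
 * file extent at bytenr 136708096 referenced by inode 257 at file offset
 * 0 in the fs tree (root 5) gets an implicit back ref keyed as
 *
 *	(136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *	 hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent shared via a relocated leaf at bytenr P would
 * instead carry (136708096, BTRFS_SHARED_DATA_REF_KEY, P).
 */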

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
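
/*
 * Note (illustrative): the hash above is not collision free.  Different
 * (root, objectid, offset) tuples can map to the same key offset, which
 * is why insert_extent_data_ref() below probes forward with key.offset++
 * until it finds a matching ref or an empty slot.
 */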

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref,
							refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref,
							refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref.  if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
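
/*
 * Caller pattern (illustrative), mirroring insert_inline_extent_backref()
 * below: a return of 0 means an inline ref exists and can be updated in
 * place, -ENOENT means a new inline ref can be inserted at *ref_ret, and
 * -EAGAIN means the extent item is too crowded and the caller must fall
 * back to a separate keyed back ref item.
 */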

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}
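
/*
 * Note (illustrative): blkdev_issue_discard() takes 512-byte sectors,
 * hence the ">> 9" conversions in btrfs_issue_discard() above.  The
 * logical range is mapped through btrfs_map_block() first so that every
 * physical stripe backing it is trimmed.
 */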

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
btrfs_alloc_path(); 1942 if (!path) 1943 return -ENOMEM; 1944 1945 key.objectid = node->bytenr; 1946 key.type = BTRFS_EXTENT_ITEM_KEY; 1947 key.offset = node->num_bytes; 1948 1949 path->reada = 1; 1950 path->leave_spinning = 1; 1951 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, 1952 path, 0, 1); 1953 if (ret < 0) { 1954 err = ret; 1955 goto out; 1956 } 1957 if (ret > 0) { 1958 err = -EIO; 1959 goto out; 1960 } 1961 1962 leaf = path->nodes[0]; 1963 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1964 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 1965 if (item_size < sizeof(*ei)) { 1966 ret = convert_extent_item_v0(trans, root->fs_info->extent_root, 1967 path, (u64)-1, 0); 1968 if (ret < 0) { 1969 err = ret; 1970 goto out; 1971 } 1972 leaf = path->nodes[0]; 1973 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1974 } 1975 #endif 1976 BUG_ON(item_size < sizeof(*ei)); 1977 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1978 __run_delayed_extent_op(extent_op, leaf, ei); 1979 1980 btrfs_mark_buffer_dirty(leaf); 1981 out: 1982 btrfs_free_path(path); 1983 return err; 1984 } 1985 1986 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, 1987 struct btrfs_root *root, 1988 struct btrfs_delayed_ref_node *node, 1989 struct btrfs_delayed_extent_op *extent_op, 1990 int insert_reserved) 1991 { 1992 int ret = 0; 1993 struct btrfs_delayed_tree_ref *ref; 1994 struct btrfs_key ins; 1995 u64 parent = 0; 1996 u64 ref_root = 0; 1997 1998 ins.objectid = node->bytenr; 1999 ins.offset = node->num_bytes; 2000 ins.type = BTRFS_EXTENT_ITEM_KEY; 2001 2002 ref = btrfs_delayed_node_to_tree_ref(node); 2003 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) 2004 parent = ref->parent; 2005 else 2006 ref_root = ref->root; 2007 2008 BUG_ON(node->ref_mod != 1); 2009 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { 2010 BUG_ON(!extent_op || !extent_op->update_flags || 2011 !extent_op->update_key); 2012 ret = alloc_reserved_tree_block(trans, root, 2013 parent, ref_root, 2014 extent_op->flags_to_set, 2015 &extent_op->key, 2016 ref->level, &ins); 2017 } else if (node->action == BTRFS_ADD_DELAYED_REF) { 2018 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr, 2019 node->num_bytes, parent, ref_root, 2020 ref->level, 0, 1, extent_op); 2021 } else if (node->action == BTRFS_DROP_DELAYED_REF) { 2022 ret = __btrfs_free_extent(trans, root, node->bytenr, 2023 node->num_bytes, parent, ref_root, 2024 ref->level, 0, 1, extent_op); 2025 } else { 2026 BUG(); 2027 } 2028 return ret; 2029 } 2030 2031 /* helper function to actually process a single delayed ref entry */ 2032 static int run_one_delayed_ref(struct btrfs_trans_handle *trans, 2033 struct btrfs_root *root, 2034 struct btrfs_delayed_ref_node *node, 2035 struct btrfs_delayed_extent_op *extent_op, 2036 int insert_reserved) 2037 { 2038 int ret; 2039 if (btrfs_delayed_ref_is_head(node)) { 2040 struct btrfs_delayed_ref_head *head; 2041 /* 2042 * we've hit the end of the chain and we were supposed 2043 * to insert this extent into the tree. 
But it got 2044 * deleted before we ever needed to insert it, so all 2045 * we have to do is clean up the accounting 2046 */ 2047 BUG_ON(extent_op); 2048 head = btrfs_delayed_node_to_head(node); 2049 if (insert_reserved) { 2050 btrfs_pin_extent(root, node->bytenr, 2051 node->num_bytes, 1); 2052 if (head->is_data) { 2053 ret = btrfs_del_csums(trans, root, 2054 node->bytenr, 2055 node->num_bytes); 2056 BUG_ON(ret); 2057 } 2058 } 2059 mutex_unlock(&head->mutex); 2060 return 0; 2061 } 2062 2063 if (node->type == BTRFS_TREE_BLOCK_REF_KEY || 2064 node->type == BTRFS_SHARED_BLOCK_REF_KEY) 2065 ret = run_delayed_tree_ref(trans, root, node, extent_op, 2066 insert_reserved); 2067 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || 2068 node->type == BTRFS_SHARED_DATA_REF_KEY) 2069 ret = run_delayed_data_ref(trans, root, node, extent_op, 2070 insert_reserved); 2071 else 2072 BUG(); 2073 return ret; 2074 } 2075 2076 static noinline struct btrfs_delayed_ref_node * 2077 select_delayed_ref(struct btrfs_delayed_ref_head *head) 2078 { 2079 struct rb_node *node; 2080 struct btrfs_delayed_ref_node *ref; 2081 int action = BTRFS_ADD_DELAYED_REF; 2082 again: 2083 /* 2084 * select delayed ref of type BTRFS_ADD_DELAYED_REF first. 2085 * this prevents the ref count from going down to zero while 2086 * there are still pending delayed refs. 2087 */ 2088 node = rb_prev(&head->node.rb_node); 2089 while (1) { 2090 if (!node) 2091 break; 2092 ref = rb_entry(node, struct btrfs_delayed_ref_node, 2093 rb_node); 2094 if (ref->bytenr != head->node.bytenr) 2095 break; 2096 if (ref->action == action) 2097 return ref; 2098 node = rb_prev(node); 2099 } 2100 if (action == BTRFS_ADD_DELAYED_REF) { 2101 action = BTRFS_DROP_DELAYED_REF; 2102 goto again; 2103 } 2104 return NULL; 2105 } 2106 2107 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, 2108 struct btrfs_root *root, 2109 struct list_head *cluster) 2110 { 2111 struct btrfs_delayed_ref_root *delayed_refs; 2112 struct btrfs_delayed_ref_node *ref; 2113 struct btrfs_delayed_ref_head *locked_ref = NULL; 2114 struct btrfs_delayed_extent_op *extent_op; 2115 int ret; 2116 int count = 0; 2117 int must_insert_reserved = 0; 2118 2119 delayed_refs = &trans->transaction->delayed_refs; 2120 while (1) { 2121 if (!locked_ref) { 2122 /* pick a new head ref from the cluster list */ 2123 if (list_empty(cluster)) 2124 break; 2125 2126 locked_ref = list_entry(cluster->next, 2127 struct btrfs_delayed_ref_head, cluster); 2128 2129 /* grab the lock that says we are going to process 2130 * all the refs for this head */ 2131 ret = btrfs_delayed_ref_lock(trans, locked_ref); 2132 2133 /* 2134 * we may have dropped the spin lock to get the head 2135 * mutex lock, and that might have given someone else 2136 * time to free the head. If that's true, it has been 2137 * removed from our list and we can move on. 2138 */ 2139 if (ret == -EAGAIN) { 2140 locked_ref = NULL; 2141 count++; 2142 continue; 2143 } 2144 } 2145 2146 /* 2147 * record the must insert reserved flag before we 2148 * drop the spin lock.
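* The flag marks a freshly allocated extent whose extent item still has to be inserted: the ref that runs with it set either does the insertion (alloc_reserved_file_extent and friends) or, if all refs cancelled out, just pins the space in run_one_delayed_ref.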
2149 */ 2150 must_insert_reserved = locked_ref->must_insert_reserved; 2151 locked_ref->must_insert_reserved = 0; 2152 2153 extent_op = locked_ref->extent_op; 2154 locked_ref->extent_op = NULL; 2155 2156 /* 2157 * locked_ref is the head node, so we have to go one 2158 * node back for any delayed ref updates 2159 */ 2160 ref = select_delayed_ref(locked_ref); 2161 if (!ref) { 2162 /* All delayed refs have been processed; go ahead 2163 * and send the head node to run_one_delayed_ref, 2164 * so that any accounting fixes can happen 2165 */ 2166 ref = &locked_ref->node; 2167 2168 if (extent_op && must_insert_reserved) { 2169 kfree(extent_op); 2170 extent_op = NULL; 2171 } 2172 2173 if (extent_op) { 2174 spin_unlock(&delayed_refs->lock); 2175 2176 ret = run_delayed_extent_op(trans, root, 2177 ref, extent_op); 2178 BUG_ON(ret); 2179 kfree(extent_op); 2180 2181 cond_resched(); 2182 spin_lock(&delayed_refs->lock); 2183 continue; 2184 } 2185 2186 list_del_init(&locked_ref->cluster); 2187 locked_ref = NULL; 2188 } 2189 2190 ref->in_tree = 0; 2191 rb_erase(&ref->rb_node, &delayed_refs->root); 2192 delayed_refs->num_entries--; 2193 2194 spin_unlock(&delayed_refs->lock); 2195 2196 ret = run_one_delayed_ref(trans, root, ref, extent_op, 2197 must_insert_reserved); 2198 BUG_ON(ret); 2199 2200 btrfs_put_delayed_ref(ref); 2201 kfree(extent_op); 2202 count++; 2203 2204 cond_resched(); 2205 spin_lock(&delayed_refs->lock); 2206 } 2207 return count; 2208 } 2209 2210 /* 2211 * this starts processing the delayed reference count updates and 2212 * extent insertions we have queued up so far. count can be 2213 * 0, which means to process everything in the tree at the start 2214 * of the run (but not newly added entries), or it can be some target 2215 * number you'd like to process. 2216 */ 2217 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2218 struct btrfs_root *root, unsigned long count) 2219 { 2220 struct rb_node *node; 2221 struct btrfs_delayed_ref_root *delayed_refs; 2222 struct btrfs_delayed_ref_node *ref; 2223 struct list_head cluster; 2224 int ret; 2225 int run_all = count == (unsigned long)-1; 2226 int run_most = 0; 2227 2228 if (root == root->fs_info->extent_root) 2229 root = root->fs_info->tree_root; 2230 2231 delayed_refs = &trans->transaction->delayed_refs; 2232 INIT_LIST_HEAD(&cluster); 2233 again: 2234 spin_lock(&delayed_refs->lock); 2235 if (count == 0) { 2236 count = delayed_refs->num_entries * 2; 2237 run_most = 1; 2238 } 2239 while (1) { 2240 if (!(run_all || run_most) && 2241 delayed_refs->num_heads_ready < 64) 2242 break; 2243 2244 /* 2245 * go find something we can process in the rbtree.
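Ref heads are keyed by bytenr, so a cluster is simply a run of neighbouring heads.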
We start at 2246 * the beginning of the tree, and then build a cluster 2247 * of refs to process starting at the first one we are able to 2248 * lock 2249 */ 2250 ret = btrfs_find_ref_cluster(trans, &cluster, 2251 delayed_refs->run_delayed_start); 2252 if (ret) 2253 break; 2254 2255 ret = run_clustered_refs(trans, root, &cluster); 2256 BUG_ON(ret < 0); 2257 2258 count -= min_t(unsigned long, ret, count); 2259 2260 if (count == 0) 2261 break; 2262 } 2263 2264 if (run_all) { 2265 node = rb_first(&delayed_refs->root); 2266 if (!node) 2267 goto out; 2268 count = (unsigned long)-1; 2269 2270 while (node) { 2271 ref = rb_entry(node, struct btrfs_delayed_ref_node, 2272 rb_node); 2273 if (btrfs_delayed_ref_is_head(ref)) { 2274 struct btrfs_delayed_ref_head *head; 2275 2276 head = btrfs_delayed_node_to_head(ref); 2277 atomic_inc(&ref->refs); 2278 2279 spin_unlock(&delayed_refs->lock); 2280 mutex_lock(&head->mutex); 2281 mutex_unlock(&head->mutex); 2282 2283 btrfs_put_delayed_ref(ref); 2284 cond_resched(); 2285 goto again; 2286 } 2287 node = rb_next(node); 2288 } 2289 spin_unlock(&delayed_refs->lock); 2290 schedule_timeout(1); 2291 goto again; 2292 } 2293 out: 2294 spin_unlock(&delayed_refs->lock); 2295 return 0; 2296 } 2297 2298 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2299 struct btrfs_root *root, 2300 u64 bytenr, u64 num_bytes, u64 flags, 2301 int is_data) 2302 { 2303 struct btrfs_delayed_extent_op *extent_op; 2304 int ret; 2305 2306 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); 2307 if (!extent_op) 2308 return -ENOMEM; 2309 2310 extent_op->flags_to_set = flags; 2311 extent_op->update_flags = 1; 2312 extent_op->update_key = 0; 2313 extent_op->is_data = is_data ? 1 : 0; 2314 2315 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op); 2316 if (ret) 2317 kfree(extent_op); 2318 return ret; 2319 } 2320 2321 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, 2322 struct btrfs_root *root, 2323 struct btrfs_path *path, 2324 u64 objectid, u64 offset, u64 bytenr) 2325 { 2326 struct btrfs_delayed_ref_head *head; 2327 struct btrfs_delayed_ref_node *ref; 2328 struct btrfs_delayed_data_ref *data_ref; 2329 struct btrfs_delayed_ref_root *delayed_refs; 2330 struct rb_node *node; 2331 int ret = 0; 2332 2333 ret = -ENOENT; 2334 delayed_refs = &trans->transaction->delayed_refs; 2335 spin_lock(&delayed_refs->lock); 2336 head = btrfs_find_delayed_ref_head(trans, bytenr); 2337 if (!head) 2338 goto out; 2339 2340 if (!mutex_trylock(&head->mutex)) { 2341 atomic_inc(&head->node.refs); 2342 spin_unlock(&delayed_refs->lock); 2343 2344 btrfs_release_path(root->fs_info->extent_root, path); 2345 2346 mutex_lock(&head->mutex); 2347 mutex_unlock(&head->mutex); 2348 btrfs_put_delayed_ref(&head->node); 2349 return -EAGAIN; 2350 } 2351 2352 node = rb_prev(&head->node.rb_node); 2353 if (!node) 2354 goto out_unlock; 2355 2356 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 2357 2358 if (ref->bytenr != bytenr) 2359 goto out_unlock; 2360 2361 ret = 1; 2362 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) 2363 goto out_unlock; 2364 2365 data_ref = btrfs_delayed_node_to_data_ref(ref); 2366 2367 node = rb_prev(node); 2368 if (node) { 2369 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 2370 if (ref->bytenr == bytenr) 2371 goto out_unlock; 2372 } 2373 2374 if (data_ref->root != root->root_key.objectid || 2375 data_ref->objectid != objectid || data_ref->offset != offset) 2376 goto out_unlock; 2377 2378 ret = 0; 2379 out_unlock: 2380 
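/* drop the head mutex we took with mutex_trylock() above */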
mutex_unlock(&head->mutex); 2381 out: 2382 spin_unlock(&delayed_refs->lock); 2383 return ret; 2384 } 2385 2386 static noinline int check_committed_ref(struct btrfs_trans_handle *trans, 2387 struct btrfs_root *root, 2388 struct btrfs_path *path, 2389 u64 objectid, u64 offset, u64 bytenr) 2390 { 2391 struct btrfs_root *extent_root = root->fs_info->extent_root; 2392 struct extent_buffer *leaf; 2393 struct btrfs_extent_data_ref *ref; 2394 struct btrfs_extent_inline_ref *iref; 2395 struct btrfs_extent_item *ei; 2396 struct btrfs_key key; 2397 u32 item_size; 2398 int ret; 2399 2400 key.objectid = bytenr; 2401 key.offset = (u64)-1; 2402 key.type = BTRFS_EXTENT_ITEM_KEY; 2403 2404 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2405 if (ret < 0) 2406 goto out; 2407 BUG_ON(ret == 0); 2408 2409 ret = -ENOENT; 2410 if (path->slots[0] == 0) 2411 goto out; 2412 2413 path->slots[0]--; 2414 leaf = path->nodes[0]; 2415 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2416 2417 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) 2418 goto out; 2419 2420 ret = 1; 2421 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 2422 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 2423 if (item_size < sizeof(*ei)) { 2424 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0)); 2425 goto out; 2426 } 2427 #endif 2428 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 2429 2430 if (item_size != sizeof(*ei) + 2431 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) 2432 goto out; 2433 2434 if (btrfs_extent_generation(leaf, ei) <= 2435 btrfs_root_last_snapshot(&root->root_item)) 2436 goto out; 2437 2438 iref = (struct btrfs_extent_inline_ref *)(ei + 1); 2439 if (btrfs_extent_inline_ref_type(leaf, iref) != 2440 BTRFS_EXTENT_DATA_REF_KEY) 2441 goto out; 2442 2443 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 2444 if (btrfs_extent_refs(leaf, ei) != 2445 btrfs_extent_data_ref_count(leaf, ref) || 2446 btrfs_extent_data_ref_root(leaf, ref) != 2447 root->root_key.objectid || 2448 btrfs_extent_data_ref_objectid(leaf, ref) != objectid || 2449 btrfs_extent_data_ref_offset(leaf, ref) != offset) 2450 goto out; 2451 2452 ret = 0; 2453 out: 2454 return ret; 2455 } 2456 2457 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 2458 struct btrfs_root *root, 2459 u64 objectid, u64 offset, u64 bytenr) 2460 { 2461 struct btrfs_path *path; 2462 int ret; 2463 int ret2; 2464 2465 path = btrfs_alloc_path(); 2466 if (!path) 2467 return -ENOENT; 2468 2469 do { 2470 ret = check_committed_ref(trans, root, path, objectid, 2471 offset, bytenr); 2472 if (ret && ret != -ENOENT) 2473 goto out; 2474 2475 ret2 = check_delayed_ref(trans, root, path, objectid, 2476 offset, bytenr); 2477 } while (ret2 == -EAGAIN); 2478 2479 if (ret2 && ret2 != -ENOENT) { 2480 ret = ret2; 2481 goto out; 2482 } 2483 2484 if (ret != -ENOENT || ret2 != -ENOENT) 2485 ret = 0; 2486 out: 2487 btrfs_free_path(path); 2488 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 2489 WARN_ON(ret > 0); 2490 return ret; 2491 } 2492 2493 #if 0 2494 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2495 struct extent_buffer *buf, u32 nr_extents) 2496 { 2497 struct btrfs_key key; 2498 struct btrfs_file_extent_item *fi; 2499 u64 root_gen; 2500 u32 nritems; 2501 int i; 2502 int level; 2503 int ret = 0; 2504 int shared = 0; 2505 2506 if (!root->ref_cows) 2507 return 0; 2508 2509 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 2510 shared = 0; 2511 root_gen = root->root_key.offset; 2512 } 
else { 2513 shared = 1; 2514 root_gen = trans->transid - 1; 2515 } 2516 2517 level = btrfs_header_level(buf); 2518 nritems = btrfs_header_nritems(buf); 2519 2520 if (level == 0) { 2521 struct btrfs_leaf_ref *ref; 2522 struct btrfs_extent_info *info; 2523 2524 ref = btrfs_alloc_leaf_ref(root, nr_extents); 2525 if (!ref) { 2526 ret = -ENOMEM; 2527 goto out; 2528 } 2529 2530 ref->root_gen = root_gen; 2531 ref->bytenr = buf->start; 2532 ref->owner = btrfs_header_owner(buf); 2533 ref->generation = btrfs_header_generation(buf); 2534 ref->nritems = nr_extents; 2535 info = ref->extents; 2536 2537 for (i = 0; nr_extents > 0 && i < nritems; i++) { 2538 u64 disk_bytenr; 2539 btrfs_item_key_to_cpu(buf, &key, i); 2540 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) 2541 continue; 2542 fi = btrfs_item_ptr(buf, i, 2543 struct btrfs_file_extent_item); 2544 if (btrfs_file_extent_type(buf, fi) == 2545 BTRFS_FILE_EXTENT_INLINE) 2546 continue; 2547 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 2548 if (disk_bytenr == 0) 2549 continue; 2550 2551 info->bytenr = disk_bytenr; 2552 info->num_bytes = 2553 btrfs_file_extent_disk_num_bytes(buf, fi); 2554 info->objectid = key.objectid; 2555 info->offset = key.offset; 2556 info++; 2557 } 2558 2559 ret = btrfs_add_leaf_ref(root, ref, shared); 2560 if (ret == -EEXIST && shared) { 2561 struct btrfs_leaf_ref *old; 2562 old = btrfs_lookup_leaf_ref(root, ref->bytenr); 2563 BUG_ON(!old); 2564 btrfs_remove_leaf_ref(root, old); 2565 btrfs_free_leaf_ref(root, old); 2566 ret = btrfs_add_leaf_ref(root, ref, shared); 2567 } 2568 WARN_ON(ret); 2569 btrfs_free_leaf_ref(root, ref); 2570 } 2571 out: 2572 return ret; 2573 } 2574 2575 /* when a block goes through cow, we update the reference counts of 2576 * everything that block points to. The internal pointers of the block 2577 * can be in just about any order, and it is likely to have clusters of 2578 * things that are close together and clusters of things that are not. 2579 * 2580 * To help reduce the seeks that come with updating all of these reference 2581 * counts, sort them by byte number before actual updates are done. 2582 * 2583 * struct refsort is used to match byte number to slot in the btree block. 2584 * we sort based on the byte number and then use the slot to actually 2585 * find the item. 2586 * 2587 * struct refsort is smaller than struct btrfs_item and smaller than 2588 * struct btrfs_key_ptr. Since we're currently limited to the page size 2589 * for a btree block, there's no way for a kmalloc of refsorts for a 2590 * single node to be bigger than a page.
2591 */ 2592 struct refsort { 2593 u64 bytenr; 2594 u32 slot; 2595 }; 2596 2597 /* 2598 * for passing into sort() 2599 */ 2600 static int refsort_cmp(const void *a_void, const void *b_void) 2601 { 2602 const struct refsort *a = a_void; 2603 const struct refsort *b = b_void; 2604 2605 if (a->bytenr < b->bytenr) 2606 return -1; 2607 if (a->bytenr > b->bytenr) 2608 return 1; 2609 return 0; 2610 } 2611 #endif 2612 2613 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, 2614 struct btrfs_root *root, 2615 struct extent_buffer *buf, 2616 int full_backref, int inc) 2617 { 2618 u64 bytenr; 2619 u64 num_bytes; 2620 u64 parent; 2621 u64 ref_root; 2622 u32 nritems; 2623 struct btrfs_key key; 2624 struct btrfs_file_extent_item *fi; 2625 int i; 2626 int level; 2627 int ret = 0; 2628 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *, 2629 u64, u64, u64, u64, u64, u64); 2630 2631 ref_root = btrfs_header_owner(buf); 2632 nritems = btrfs_header_nritems(buf); 2633 level = btrfs_header_level(buf); 2634 2635 if (!root->ref_cows && level == 0) 2636 return 0; 2637 2638 if (inc) 2639 process_func = btrfs_inc_extent_ref; 2640 else 2641 process_func = btrfs_free_extent; 2642 2643 if (full_backref) 2644 parent = buf->start; 2645 else 2646 parent = 0; 2647 2648 for (i = 0; i < nritems; i++) { 2649 if (level == 0) { 2650 btrfs_item_key_to_cpu(buf, &key, i); 2651 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) 2652 continue; 2653 fi = btrfs_item_ptr(buf, i, 2654 struct btrfs_file_extent_item); 2655 if (btrfs_file_extent_type(buf, fi) == 2656 BTRFS_FILE_EXTENT_INLINE) 2657 continue; 2658 bytenr = btrfs_file_extent_disk_bytenr(buf, fi); 2659 if (bytenr == 0) 2660 continue; 2661 2662 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); 2663 key.offset -= btrfs_file_extent_offset(buf, fi); 2664 ret = process_func(trans, root, bytenr, num_bytes, 2665 parent, ref_root, key.objectid, 2666 key.offset); 2667 if (ret) 2668 goto fail; 2669 } else { 2670 bytenr = btrfs_node_blockptr(buf, i); 2671 num_bytes = btrfs_level_size(root, level - 1); 2672 ret = process_func(trans, root, bytenr, num_bytes, 2673 parent, ref_root, level - 1, 0); 2674 if (ret) 2675 goto fail; 2676 } 2677 } 2678 return 0; 2679 fail: 2680 BUG(); 2681 return ret; 2682 } 2683 2684 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2685 struct extent_buffer *buf, int full_backref) 2686 { 2687 return __btrfs_mod_ref(trans, root, buf, full_backref, 1); 2688 } 2689 2690 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2691 struct extent_buffer *buf, int full_backref) 2692 { 2693 return __btrfs_mod_ref(trans, root, buf, full_backref, 0); 2694 } 2695 2696 static int write_one_cache_group(struct btrfs_trans_handle *trans, 2697 struct btrfs_root *root, 2698 struct btrfs_path *path, 2699 struct btrfs_block_group_cache *cache) 2700 { 2701 int ret; 2702 struct btrfs_root *extent_root = root->fs_info->extent_root; 2703 unsigned long bi; 2704 struct extent_buffer *leaf; 2705 2706 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 2707 if (ret < 0) 2708 goto fail; 2709 BUG_ON(ret); 2710 2711 leaf = path->nodes[0]; 2712 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 2713 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); 2714 btrfs_mark_buffer_dirty(leaf); 2715 btrfs_release_path(extent_root, path); 2716 fail: 2717 if (ret) 2718 return ret; 2719 return 0; 2720 2721 } 2722 2723 static struct btrfs_block_group_cache * 2724 next_block_group(struct btrfs_root 
*root, 2725 struct btrfs_block_group_cache *cache) 2726 { 2727 struct rb_node *node; 2728 spin_lock(&root->fs_info->block_group_cache_lock); 2729 node = rb_next(&cache->cache_node); 2730 btrfs_put_block_group(cache); 2731 if (node) { 2732 cache = rb_entry(node, struct btrfs_block_group_cache, 2733 cache_node); 2734 btrfs_get_block_group(cache); 2735 } else 2736 cache = NULL; 2737 spin_unlock(&root->fs_info->block_group_cache_lock); 2738 return cache; 2739 } 2740 2741 static int cache_save_setup(struct btrfs_block_group_cache *block_group, 2742 struct btrfs_trans_handle *trans, 2743 struct btrfs_path *path) 2744 { 2745 struct btrfs_root *root = block_group->fs_info->tree_root; 2746 struct inode *inode = NULL; 2747 u64 alloc_hint = 0; 2748 int dcs = BTRFS_DC_ERROR; 2749 int num_pages = 0; 2750 int retries = 0; 2751 int ret = 0; 2752 2753 /* 2754 * If this block group is smaller than 100MB, don't bother caching the 2755 * block group. 2756 */ 2757 if (block_group->key.offset < (100 * 1024 * 1024)) { 2758 spin_lock(&block_group->lock); 2759 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 2760 spin_unlock(&block_group->lock); 2761 return 0; 2762 } 2763 2764 again: 2765 inode = lookup_free_space_inode(root, block_group, path); 2766 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 2767 ret = PTR_ERR(inode); 2768 btrfs_release_path(root, path); 2769 goto out; 2770 } 2771 2772 if (IS_ERR(inode)) { 2773 BUG_ON(retries); 2774 retries++; 2775 2776 if (block_group->ro) 2777 goto out_free; 2778 2779 ret = create_free_space_inode(root, trans, block_group, path); 2780 if (ret) 2781 goto out_free; 2782 goto again; 2783 } 2784 2785 /* 2786 * We want to set the generation to 0 so that if anything goes wrong 2787 * from here on out we know not to trust this cache when we load up next 2788 * time. 2789 */ 2790 BTRFS_I(inode)->generation = 0; 2791 ret = btrfs_update_inode(trans, root, inode); 2792 WARN_ON(ret); 2793 2794 if (i_size_read(inode) > 0) { 2795 ret = btrfs_truncate_free_space_cache(root, trans, path, 2796 inode); 2797 if (ret) 2798 goto out_put; 2799 } 2800 2801 spin_lock(&block_group->lock); 2802 if (block_group->cached != BTRFS_CACHE_FINISHED) { 2803 /* We're not cached, don't bother trying to write stuff out */ 2804 dcs = BTRFS_DC_WRITTEN; 2805 spin_unlock(&block_group->lock); 2806 goto out_put; 2807 } 2808 spin_unlock(&block_group->lock); 2809 2810 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); 2811 if (!num_pages) 2812 num_pages = 1; 2813 2814 /* 2815 * Just to make absolutely sure we have enough space, we're going to 2816 * preallocate 16 pages worth of space for each block group. In 2817 * practice we ought to use at most 8, but we need extra space so we can 2818 * add our header and have a terminator between the extents and the 2819 * bitmaps.
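* For example, a 10GB block group preallocates 16 * 10 pages, or 640K of cache space with 4K pages.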
2820 */ 2821 num_pages *= 16; 2822 num_pages *= PAGE_CACHE_SIZE; 2823 2824 ret = btrfs_check_data_free_space(inode, num_pages); 2825 if (ret) 2826 goto out_put; 2827 2828 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, 2829 num_pages, num_pages, 2830 &alloc_hint); 2831 if (!ret) 2832 dcs = BTRFS_DC_SETUP; 2833 btrfs_free_reserved_data_space(inode, num_pages); 2834 out_put: 2835 iput(inode); 2836 out_free: 2837 btrfs_release_path(root, path); 2838 out: 2839 spin_lock(&block_group->lock); 2840 block_group->disk_cache_state = dcs; 2841 spin_unlock(&block_group->lock); 2842 2843 return ret; 2844 } 2845 2846 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 2847 struct btrfs_root *root) 2848 { 2849 struct btrfs_block_group_cache *cache; 2850 int err = 0; 2851 struct btrfs_path *path; 2852 u64 last = 0; 2853 2854 path = btrfs_alloc_path(); 2855 if (!path) 2856 return -ENOMEM; 2857 2858 again: 2859 while (1) { 2860 cache = btrfs_lookup_first_block_group(root->fs_info, last); 2861 while (cache) { 2862 if (cache->disk_cache_state == BTRFS_DC_CLEAR) 2863 break; 2864 cache = next_block_group(root, cache); 2865 } 2866 if (!cache) { 2867 if (last == 0) 2868 break; 2869 last = 0; 2870 continue; 2871 } 2872 err = cache_save_setup(cache, trans, path); 2873 last = cache->key.objectid + cache->key.offset; 2874 btrfs_put_block_group(cache); 2875 } 2876 2877 while (1) { 2878 if (last == 0) { 2879 err = btrfs_run_delayed_refs(trans, root, 2880 (unsigned long)-1); 2881 BUG_ON(err); 2882 } 2883 2884 cache = btrfs_lookup_first_block_group(root->fs_info, last); 2885 while (cache) { 2886 if (cache->disk_cache_state == BTRFS_DC_CLEAR) { 2887 btrfs_put_block_group(cache); 2888 goto again; 2889 } 2890 2891 if (cache->dirty) 2892 break; 2893 cache = next_block_group(root, cache); 2894 } 2895 if (!cache) { 2896 if (last == 0) 2897 break; 2898 last = 0; 2899 continue; 2900 } 2901 2902 if (cache->disk_cache_state == BTRFS_DC_SETUP) 2903 cache->disk_cache_state = BTRFS_DC_NEED_WRITE; 2904 cache->dirty = 0; 2905 last = cache->key.objectid + cache->key.offset; 2906 2907 err = write_one_cache_group(trans, root, path, cache); 2908 BUG_ON(err); 2909 btrfs_put_block_group(cache); 2910 } 2911 2912 while (1) { 2913 /* 2914 * I don't think this is needed since we're just marking our 2915 * preallocated extent as written, but just in case it can't 2916 * hurt. 2917 */ 2918 if (last == 0) { 2919 err = btrfs_run_delayed_refs(trans, root, 2920 (unsigned long)-1); 2921 BUG_ON(err); 2922 } 2923 2924 cache = btrfs_lookup_first_block_group(root->fs_info, last); 2925 while (cache) { 2926 /* 2927 * Really this shouldn't happen, but it could if we 2928 * couldn't write the entire preallocated extent and 2929 * splitting the extent resulted in a new block. 2930 */ 2931 if (cache->dirty) { 2932 btrfs_put_block_group(cache); 2933 goto again; 2934 } 2935 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) 2936 break; 2937 cache = next_block_group(root, cache); 2938 } 2939 if (!cache) { 2940 if (last == 0) 2941 break; 2942 last = 0; 2943 continue; 2944 } 2945 2946 btrfs_write_out_cache(root, trans, cache, path); 2947 2948 /* 2949 * If we didn't have an error then the cache state is still 2950 * NEED_WRITE, so we can set it to WRITTEN. 
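* (on failure the write-out path is expected to have set the state to BTRFS_DC_ERROR instead, so we won't mark a bad cache as written)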
2951 */ 2952 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) 2953 cache->disk_cache_state = BTRFS_DC_WRITTEN; 2954 last = cache->key.objectid + cache->key.offset; 2955 btrfs_put_block_group(cache); 2956 } 2957 2958 btrfs_free_path(path); 2959 return 0; 2960 } 2961 2962 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) 2963 { 2964 struct btrfs_block_group_cache *block_group; 2965 int readonly = 0; 2966 2967 block_group = btrfs_lookup_block_group(root->fs_info, bytenr); 2968 if (!block_group || block_group->ro) 2969 readonly = 1; 2970 if (block_group) 2971 btrfs_put_block_group(block_group); 2972 return readonly; 2973 } 2974 2975 static int update_space_info(struct btrfs_fs_info *info, u64 flags, 2976 u64 total_bytes, u64 bytes_used, 2977 struct btrfs_space_info **space_info) 2978 { 2979 struct btrfs_space_info *found; 2980 int i; 2981 int factor; 2982 2983 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | 2984 BTRFS_BLOCK_GROUP_RAID10)) 2985 factor = 2; 2986 else 2987 factor = 1; 2988 2989 found = __find_space_info(info, flags); 2990 if (found) { 2991 spin_lock(&found->lock); 2992 found->total_bytes += total_bytes; 2993 found->disk_total += total_bytes * factor; 2994 found->bytes_used += bytes_used; 2995 found->disk_used += bytes_used * factor; 2996 found->full = 0; 2997 spin_unlock(&found->lock); 2998 *space_info = found; 2999 return 0; 3000 } 3001 found = kzalloc(sizeof(*found), GFP_NOFS); 3002 if (!found) 3003 return -ENOMEM; 3004 3005 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) 3006 INIT_LIST_HEAD(&found->block_groups[i]); 3007 init_rwsem(&found->groups_sem); 3008 spin_lock_init(&found->lock); 3009 found->flags = flags & (BTRFS_BLOCK_GROUP_DATA | 3010 BTRFS_BLOCK_GROUP_SYSTEM | 3011 BTRFS_BLOCK_GROUP_METADATA); 3012 found->total_bytes = total_bytes; 3013 found->disk_total = total_bytes * factor; 3014 found->bytes_used = bytes_used; 3015 found->disk_used = bytes_used * factor; 3016 found->bytes_pinned = 0; 3017 found->bytes_reserved = 0; 3018 found->bytes_readonly = 0; 3019 found->bytes_may_use = 0; 3020 found->full = 0; 3021 found->force_alloc = 0; 3022 *space_info = found; 3023 list_add_rcu(&found->list, &info->space_info); 3024 atomic_set(&found->caching_threads, 0); 3025 return 0; 3026 } 3027 3028 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 3029 { 3030 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 | 3031 BTRFS_BLOCK_GROUP_RAID1 | 3032 BTRFS_BLOCK_GROUP_RAID10 | 3033 BTRFS_BLOCK_GROUP_DUP); 3034 if (extra_flags) { 3035 if (flags & BTRFS_BLOCK_GROUP_DATA) 3036 fs_info->avail_data_alloc_bits |= extra_flags; 3037 if (flags & BTRFS_BLOCK_GROUP_METADATA) 3038 fs_info->avail_metadata_alloc_bits |= extra_flags; 3039 if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 3040 fs_info->avail_system_alloc_bits |= extra_flags; 3041 } 3042 } 3043 3044 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 3045 { 3046 /* 3047 * we add in the count of missing devices because we want 3048 * to make sure that any RAID levels on a degraded FS 3049 * continue to be honored. 
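* For example, a two-device RAID1 filesystem with one device missing still computes num_devices == 2, so the RAID1 bit survives below instead of being silently dropped.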
3050 */ 3051 u64 num_devices = root->fs_info->fs_devices->rw_devices + 3052 root->fs_info->fs_devices->missing_devices; 3053 3054 if (num_devices == 1) 3055 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0); 3056 if (num_devices < 4) 3057 flags &= ~BTRFS_BLOCK_GROUP_RAID10; 3058 3059 if ((flags & BTRFS_BLOCK_GROUP_DUP) && 3060 (flags & (BTRFS_BLOCK_GROUP_RAID1 | 3061 BTRFS_BLOCK_GROUP_RAID10))) { 3062 flags &= ~BTRFS_BLOCK_GROUP_DUP; 3063 } 3064 3065 if ((flags & BTRFS_BLOCK_GROUP_RAID1) && 3066 (flags & BTRFS_BLOCK_GROUP_RAID10)) { 3067 flags &= ~BTRFS_BLOCK_GROUP_RAID1; 3068 } 3069 3070 if ((flags & BTRFS_BLOCK_GROUP_RAID0) && 3071 ((flags & BTRFS_BLOCK_GROUP_RAID1) | 3072 (flags & BTRFS_BLOCK_GROUP_RAID10) | 3073 (flags & BTRFS_BLOCK_GROUP_DUP))) 3074 flags &= ~BTRFS_BLOCK_GROUP_RAID0; 3075 return flags; 3076 } 3077 3078 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags) 3079 { 3080 if (flags & BTRFS_BLOCK_GROUP_DATA) 3081 flags |= root->fs_info->avail_data_alloc_bits & 3082 root->fs_info->data_alloc_profile; 3083 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) 3084 flags |= root->fs_info->avail_system_alloc_bits & 3085 root->fs_info->system_alloc_profile; 3086 else if (flags & BTRFS_BLOCK_GROUP_METADATA) 3087 flags |= root->fs_info->avail_metadata_alloc_bits & 3088 root->fs_info->metadata_alloc_profile; 3089 return btrfs_reduce_alloc_profile(root, flags); 3090 } 3091 3092 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) 3093 { 3094 u64 flags; 3095 3096 if (data) 3097 flags = BTRFS_BLOCK_GROUP_DATA; 3098 else if (root == root->fs_info->chunk_root) 3099 flags = BTRFS_BLOCK_GROUP_SYSTEM; 3100 else 3101 flags = BTRFS_BLOCK_GROUP_METADATA; 3102 3103 return get_alloc_profile(root, flags); 3104 } 3105 3106 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode) 3107 { 3108 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info, 3109 BTRFS_BLOCK_GROUP_DATA); 3110 } 3111 3112 /* 3113 * This will check the space that the inode allocates from to make sure we have 3114 * enough space for bytes. 3115 */ 3116 int btrfs_check_data_free_space(struct inode *inode, u64 bytes) 3117 { 3118 struct btrfs_space_info *data_sinfo; 3119 struct btrfs_root *root = BTRFS_I(inode)->root; 3120 u64 used; 3121 int ret = 0, committed = 0, alloc_chunk = 1; 3122 3123 /* make sure bytes are sectorsize aligned */ 3124 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 3125 3126 if (root == root->fs_info->tree_root) { 3127 alloc_chunk = 0; 3128 committed = 1; 3129 } 3130 3131 data_sinfo = BTRFS_I(inode)->space_info; 3132 if (!data_sinfo) 3133 goto alloc; 3134 3135 again: 3136 /* make sure we have enough space to handle the data first */ 3137 spin_lock(&data_sinfo->lock); 3138 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved + 3139 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly + 3140 data_sinfo->bytes_may_use; 3141 3142 if (used + bytes > data_sinfo->total_bytes) { 3143 struct btrfs_trans_handle *trans; 3144 3145 /* 3146 * if we don't have enough free bytes in this space then we need 3147 * to alloc a new chunk. 
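* (the request below is padded by 2MB so that a burst of small writes doesn't allocate a new chunk for every call)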
3148 */ 3149 if (!data_sinfo->full && alloc_chunk) { 3150 u64 alloc_target; 3151 3152 data_sinfo->force_alloc = 1; 3153 spin_unlock(&data_sinfo->lock); 3154 alloc: 3155 alloc_target = btrfs_get_alloc_profile(root, 1); 3156 trans = btrfs_join_transaction(root, 1); 3157 if (IS_ERR(trans)) 3158 return PTR_ERR(trans); 3159 3160 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 3161 bytes + 2 * 1024 * 1024, 3162 alloc_target, 0); 3163 btrfs_end_transaction(trans, root); 3164 if (ret < 0) { 3165 if (ret != -ENOSPC) 3166 return ret; 3167 else 3168 goto commit_trans; 3169 } 3170 3171 if (!data_sinfo) { 3172 btrfs_set_inode_space_info(root, inode); 3173 data_sinfo = BTRFS_I(inode)->space_info; 3174 } 3175 goto again; 3176 } 3177 spin_unlock(&data_sinfo->lock); 3178 3179 /* commit the current transaction and try again */ 3180 commit_trans: 3181 if (!committed && !root->fs_info->open_ioctl_trans) { 3182 committed = 1; 3183 trans = btrfs_join_transaction(root, 1); 3184 if (IS_ERR(trans)) 3185 return PTR_ERR(trans); 3186 ret = btrfs_commit_transaction(trans, root); 3187 if (ret) 3188 return ret; 3189 goto again; 3190 } 3191 3192 #if 0 /* I hope we never need this code again, just in case */ 3193 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, " 3194 "%llu bytes_reserved, " "%llu bytes_pinned, " 3195 "%llu bytes_readonly, %llu may use %llu total\n", 3196 (unsigned long long)bytes, 3197 (unsigned long long)data_sinfo->bytes_used, 3198 (unsigned long long)data_sinfo->bytes_reserved, 3199 (unsigned long long)data_sinfo->bytes_pinned, 3200 (unsigned long long)data_sinfo->bytes_readonly, 3201 (unsigned long long)data_sinfo->bytes_may_use, 3202 (unsigned long long)data_sinfo->total_bytes); 3203 #endif 3204 return -ENOSPC; 3205 } 3206 data_sinfo->bytes_may_use += bytes; 3207 BTRFS_I(inode)->reserved_bytes += bytes; 3208 spin_unlock(&data_sinfo->lock); 3209 3210 return 0; 3211 } 3212 3213 /* 3214 * called when we are clearing a delalloc extent from the 3215 * inode's io_tree, or when there was an error for whatever reason 3216 * after calling btrfs_check_data_free_space 3217 */ 3218 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes) 3219 { 3220 struct btrfs_root *root = BTRFS_I(inode)->root; 3221 struct btrfs_space_info *data_sinfo; 3222 3223 /* make sure bytes are sectorsize aligned */ 3224 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 3225 3226 data_sinfo = BTRFS_I(inode)->space_info; 3227 spin_lock(&data_sinfo->lock); 3228 data_sinfo->bytes_may_use -= bytes; 3229 BTRFS_I(inode)->reserved_bytes -= bytes; 3230 spin_unlock(&data_sinfo->lock); 3231 } 3232 3233 static void force_metadata_allocation(struct btrfs_fs_info *info) 3234 { 3235 struct list_head *head = &info->space_info; 3236 struct btrfs_space_info *found; 3237 3238 rcu_read_lock(); 3239 list_for_each_entry_rcu(found, head, list) { 3240 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3241 found->force_alloc = 1; 3242 } 3243 rcu_read_unlock(); 3244 } 3245 3246 static int should_alloc_chunk(struct btrfs_root *root, 3247 struct btrfs_space_info *sinfo, u64 alloc_bytes) 3248 { 3249 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; 3250 u64 thresh; 3251 3252 if (sinfo->bytes_used + sinfo->bytes_reserved + 3253 alloc_bytes + 256 * 1024 * 1024 < num_bytes) 3254 return 0; 3255 3256 if (sinfo->bytes_used + sinfo->bytes_reserved + 3257 alloc_bytes < div_factor(num_bytes, 8)) 3258 return 0; 3259 3260 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); 3261 thresh = max_t(u64, 256 * 1024 *
1024, div_factor_fine(thresh, 5)); 3262 3263 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) 3264 return 0; 3265 3266 return 1; 3267 } 3268 3269 static int do_chunk_alloc(struct btrfs_trans_handle *trans, 3270 struct btrfs_root *extent_root, u64 alloc_bytes, 3271 u64 flags, int force) 3272 { 3273 struct btrfs_space_info *space_info; 3274 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3275 int ret = 0; 3276 3277 mutex_lock(&fs_info->chunk_mutex); 3278 3279 flags = btrfs_reduce_alloc_profile(extent_root, flags); 3280 3281 space_info = __find_space_info(extent_root->fs_info, flags); 3282 if (!space_info) { 3283 ret = update_space_info(extent_root->fs_info, flags, 3284 0, 0, &space_info); 3285 BUG_ON(ret); 3286 } 3287 BUG_ON(!space_info); 3288 3289 spin_lock(&space_info->lock); 3290 if (space_info->force_alloc) 3291 force = 1; 3292 if (space_info->full) { 3293 spin_unlock(&space_info->lock); 3294 goto out; 3295 } 3296 3297 if (!force && !should_alloc_chunk(extent_root, space_info, 3298 alloc_bytes)) { 3299 spin_unlock(&space_info->lock); 3300 goto out; 3301 } 3302 spin_unlock(&space_info->lock); 3303 3304 /* 3305 * If we have mixed data/metadata chunks we want to make sure we keep 3306 * allocating mixed chunks instead of individual chunks. 3307 */ 3308 if (btrfs_mixed_space_info(space_info)) 3309 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); 3310 3311 /* 3312 * if we're doing a data chunk, go ahead and make sure that 3313 * we keep a reasonable number of metadata chunks allocated in the 3314 * FS as well. 3315 */ 3316 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { 3317 fs_info->data_chunk_allocations++; 3318 if (!(fs_info->data_chunk_allocations % 3319 fs_info->metadata_ratio)) 3320 force_metadata_allocation(fs_info); 3321 } 3322 3323 ret = btrfs_alloc_chunk(trans, extent_root, flags); 3324 spin_lock(&space_info->lock); 3325 if (ret) 3326 space_info->full = 1; 3327 else 3328 ret = 1; 3329 space_info->force_alloc = 0; 3330 spin_unlock(&space_info->lock); 3331 out: 3332 mutex_unlock(&extent_root->fs_info->chunk_mutex); 3333 return ret; 3334 } 3335 3336 /* 3337 * shrink metadata reservation for delalloc 3338 */ 3339 static int shrink_delalloc(struct btrfs_trans_handle *trans, 3340 struct btrfs_root *root, u64 to_reclaim, int sync) 3341 { 3342 struct btrfs_block_rsv *block_rsv; 3343 struct btrfs_space_info *space_info; 3344 u64 reserved; 3345 u64 max_reclaim; 3346 u64 reclaimed = 0; 3347 int pause = 1; 3348 int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; 3349 3350 block_rsv = &root->fs_info->delalloc_block_rsv; 3351 space_info = block_rsv->space_info; 3352 3353 smp_mb(); 3354 reserved = space_info->bytes_reserved; 3355 3356 if (reserved == 0) 3357 return 0; 3358 3359 max_reclaim = min(reserved, to_reclaim); 3360 3361 while (1) { 3362 /* have the flusher threads jump in and do some IO */ 3363 smp_mb(); 3364 nr_pages = min_t(unsigned long, nr_pages, 3365 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT); 3366 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); 3367 3368 spin_lock(&space_info->lock); 3369 if (reserved > space_info->bytes_reserved) 3370 reclaimed += reserved - space_info->bytes_reserved; 3371 reserved = space_info->bytes_reserved; 3372 spin_unlock(&space_info->lock); 3373 3374 if (reserved == 0 || reclaimed >= max_reclaim) 3375 break; 3376 3377 if (trans && trans->transaction->blocked) 3378 return -EAGAIN; 3379 3380 __set_current_state(TASK_INTERRUPTIBLE); 3381 schedule_timeout(pause); 3382 pause <<= 
1; 3383 if (pause > HZ / 10) 3384 pause = HZ / 10; 3385 3386 } 3387 return reclaimed >= to_reclaim; 3388 } 3389 3390 /* 3391 * Retries tells us how many times we've called reserve_metadata_bytes. The 3392 * idea is if this is the first call (retries == 0) then we will add to our 3393 * reserved count if we can't make the allocation in order to hold our place 3394 * while we go and try and free up space. That way for retries > 1 we don't try 3395 * and add space, we just check to see if the amount of unused space is >= the 3396 * total space, meaning that our reservation is valid. 3397 * 3398 * However if we don't intend to retry this reservation, pass -1 as retries so 3399 * that it short circuits this logic. 3400 */ 3401 static int reserve_metadata_bytes(struct btrfs_trans_handle *trans, 3402 struct btrfs_root *root, 3403 struct btrfs_block_rsv *block_rsv, 3404 u64 orig_bytes, int flush) 3405 { 3406 struct btrfs_space_info *space_info = block_rsv->space_info; 3407 u64 unused; 3408 u64 num_bytes = orig_bytes; 3409 int retries = 0; 3410 int ret = 0; 3411 bool reserved = false; 3412 bool committed = false; 3413 3414 again: 3415 ret = -ENOSPC; 3416 if (reserved) 3417 num_bytes = 0; 3418 3419 spin_lock(&space_info->lock); 3420 unused = space_info->bytes_used + space_info->bytes_reserved + 3421 space_info->bytes_pinned + space_info->bytes_readonly + 3422 space_info->bytes_may_use; 3423 3424 /* 3425 * The idea here is that if we've not already over-reserved the block group 3426 * then we can go ahead and save our reservation first and then start 3427 * flushing if we need to. Otherwise if we've already overcommitted 3428 * let's start flushing stuff first and then come back and try to make 3429 * our reservation. 3430 */ 3431 if (unused <= space_info->total_bytes) { 3432 unused = space_info->total_bytes - unused; 3433 if (unused >= num_bytes) { 3434 if (!reserved) 3435 space_info->bytes_reserved += orig_bytes; 3436 ret = 0; 3437 } else { 3438 /* 3439 * Ok set num_bytes to orig_bytes since we aren't 3440 * overcommitted; this way we only try and reclaim what 3441 * we need. 3442 */ 3443 num_bytes = orig_bytes; 3444 } 3445 } else { 3446 /* 3447 * Ok we're over committed, set num_bytes to the overcommitted 3448 * amount plus the amount of bytes that we need for this 3449 * reservation. 3450 */ 3451 num_bytes = unused - space_info->total_bytes + 3452 (orig_bytes * (retries + 1)); 3453 } 3454 3455 /* 3456 * Couldn't make our reservation, save our place so while we're trying 3457 * to reclaim space we can actually use it instead of somebody else 3458 * stealing it from us. 3459 */ 3460 if (ret && !reserved) { 3461 space_info->bytes_reserved += orig_bytes; 3462 reserved = true; 3463 } 3464 3465 spin_unlock(&space_info->lock); 3466 3467 if (!ret) 3468 return 0; 3469 3470 if (!flush) 3471 goto out; 3472 3473 /* 3474 * We do synchronous shrinking since we don't actually unreserve 3475 * metadata until after the IO is completed. 3476 */ 3477 ret = shrink_delalloc(trans, root, num_bytes, 1); 3478 if (ret > 0) 3479 return 0; 3480 else if (ret < 0) 3481 goto out; 3482 3483 /* 3484 * So if we were overcommitted it's possible that somebody else flushed 3485 * out enough space and we simply didn't have enough space to reclaim, 3486 * so go back around and try again. 3487 */ 3488 if (retries < 2) { 3489 retries++; 3490 goto again; 3491 } 3492 3493 spin_lock(&space_info->lock); 3494 /* 3495 * Not enough space to be reclaimed, don't bother committing the 3496 * transaction.
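* Committing helps only because pinned bytes return to the space_info at commit time; if even bytes_pinned can't cover the request, ENOSPC is final.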
3497 */ 3498 if (space_info->bytes_pinned < orig_bytes) 3499 ret = -ENOSPC; 3500 spin_unlock(&space_info->lock); 3501 if (ret) 3502 goto out; 3503 3504 ret = -EAGAIN; 3505 if (trans || committed) 3506 goto out; 3507 3508 ret = -ENOSPC; 3509 trans = btrfs_join_transaction(root, 1); 3510 if (IS_ERR(trans)) 3511 goto out; 3512 ret = btrfs_commit_transaction(trans, root); 3513 if (!ret) { 3514 trans = NULL; 3515 committed = true; 3516 goto again; 3517 } 3518 3519 out: 3520 if (reserved) { 3521 spin_lock(&space_info->lock); 3522 space_info->bytes_reserved -= orig_bytes; 3523 spin_unlock(&space_info->lock); 3524 } 3525 3526 return ret; 3527 } 3528 3529 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans, 3530 struct btrfs_root *root) 3531 { 3532 struct btrfs_block_rsv *block_rsv; 3533 if (root->ref_cows) 3534 block_rsv = trans->block_rsv; 3535 else 3536 block_rsv = root->block_rsv; 3537 3538 if (!block_rsv) 3539 block_rsv = &root->fs_info->empty_block_rsv; 3540 3541 return block_rsv; 3542 } 3543 3544 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, 3545 u64 num_bytes) 3546 { 3547 int ret = -ENOSPC; 3548 spin_lock(&block_rsv->lock); 3549 if (block_rsv->reserved >= num_bytes) { 3550 block_rsv->reserved -= num_bytes; 3551 if (block_rsv->reserved < block_rsv->size) 3552 block_rsv->full = 0; 3553 ret = 0; 3554 } 3555 spin_unlock(&block_rsv->lock); 3556 return ret; 3557 } 3558 3559 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, 3560 u64 num_bytes, int update_size) 3561 { 3562 spin_lock(&block_rsv->lock); 3563 block_rsv->reserved += num_bytes; 3564 if (update_size) 3565 block_rsv->size += num_bytes; 3566 else if (block_rsv->reserved >= block_rsv->size) 3567 block_rsv->full = 1; 3568 spin_unlock(&block_rsv->lock); 3569 } 3570 3571 void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, 3572 struct btrfs_block_rsv *dest, u64 num_bytes) 3573 { 3574 struct btrfs_space_info *space_info = block_rsv->space_info; 3575 3576 spin_lock(&block_rsv->lock); 3577 if (num_bytes == (u64)-1) 3578 num_bytes = block_rsv->size; 3579 block_rsv->size -= num_bytes; 3580 if (block_rsv->reserved >= block_rsv->size) { 3581 num_bytes = block_rsv->reserved - block_rsv->size; 3582 block_rsv->reserved = block_rsv->size; 3583 block_rsv->full = 1; 3584 } else { 3585 num_bytes = 0; 3586 } 3587 spin_unlock(&block_rsv->lock); 3588 3589 if (num_bytes > 0) { 3590 if (dest) { 3591 block_rsv_add_bytes(dest, num_bytes, 0); 3592 } else { 3593 spin_lock(&space_info->lock); 3594 space_info->bytes_reserved -= num_bytes; 3595 spin_unlock(&space_info->lock); 3596 } 3597 } 3598 } 3599 3600 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src, 3601 struct btrfs_block_rsv *dst, u64 num_bytes) 3602 { 3603 int ret; 3604 3605 ret = block_rsv_use_bytes(src, num_bytes); 3606 if (ret) 3607 return ret; 3608 3609 block_rsv_add_bytes(dst, num_bytes, 1); 3610 return 0; 3611 } 3612 3613 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv) 3614 { 3615 memset(rsv, 0, sizeof(*rsv)); 3616 spin_lock_init(&rsv->lock); 3617 atomic_set(&rsv->usage, 1); 3618 rsv->priority = 6; 3619 INIT_LIST_HEAD(&rsv->list); 3620 } 3621 3622 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) 3623 { 3624 struct btrfs_block_rsv *block_rsv; 3625 struct btrfs_fs_info *fs_info = root->fs_info; 3626 3627 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); 3628 if (!block_rsv) 3629 return NULL; 3630 3631 btrfs_init_block_rsv(block_rsv); 3632 block_rsv->space_info = __find_space_info(fs_info, 
3633 BTRFS_BLOCK_GROUP_METADATA); 3634 return block_rsv; 3635 } 3636 3637 void btrfs_free_block_rsv(struct btrfs_root *root, 3638 struct btrfs_block_rsv *rsv) 3639 { 3640 if (rsv && atomic_dec_and_test(&rsv->usage)) { 3641 btrfs_block_rsv_release(root, rsv, (u64)-1); 3642 if (!rsv->durable) 3643 kfree(rsv); 3644 } 3645 } 3646 3647 /* 3648 * make the block_rsv struct be able to capture freed space. 3649 * the captured space will be re-added to the block_rsv struct 3650 * after transaction commit 3651 */ 3652 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info, 3653 struct btrfs_block_rsv *block_rsv) 3654 { 3655 block_rsv->durable = 1; 3656 mutex_lock(&fs_info->durable_block_rsv_mutex); 3657 list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list); 3658 mutex_unlock(&fs_info->durable_block_rsv_mutex); 3659 } 3660 3661 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, 3662 struct btrfs_root *root, 3663 struct btrfs_block_rsv *block_rsv, 3664 u64 num_bytes) 3665 { 3666 int ret; 3667 3668 if (num_bytes == 0) 3669 return 0; 3670 3671 ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1); 3672 if (!ret) { 3673 block_rsv_add_bytes(block_rsv, num_bytes, 1); 3674 return 0; 3675 } 3676 3677 return ret; 3678 } 3679 3680 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, 3681 struct btrfs_root *root, 3682 struct btrfs_block_rsv *block_rsv, 3683 u64 min_reserved, int min_factor) 3684 { 3685 u64 num_bytes = 0; 3686 int commit_trans = 0; 3687 int ret = -ENOSPC; 3688 3689 if (!block_rsv) 3690 return 0; 3691 3692 spin_lock(&block_rsv->lock); 3693 if (min_factor > 0) 3694 num_bytes = div_factor(block_rsv->size, min_factor); 3695 if (min_reserved > num_bytes) 3696 num_bytes = min_reserved; 3697 3698 if (block_rsv->reserved >= num_bytes) { 3699 ret = 0; 3700 } else { 3701 num_bytes -= block_rsv->reserved; 3702 if (block_rsv->durable && 3703 block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes) 3704 commit_trans = 1; 3705 } 3706 spin_unlock(&block_rsv->lock); 3707 if (!ret) 3708 return 0; 3709 3710 if (block_rsv->refill_used) { 3711 ret = reserve_metadata_bytes(trans, root, block_rsv, 3712 num_bytes, 0); 3713 if (!ret) { 3714 block_rsv_add_bytes(block_rsv, num_bytes, 0); 3715 return 0; 3716 } 3717 } 3718 3719 if (commit_trans) { 3720 if (trans) 3721 return -EAGAIN; 3722 3723 trans = btrfs_join_transaction(root, 1); 3724 BUG_ON(IS_ERR(trans)); 3725 ret = btrfs_commit_transaction(trans, root); 3726 return 0; 3727 } 3728 3729 return -ENOSPC; 3730 } 3731 3732 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 3733 struct btrfs_block_rsv *dst_rsv, 3734 u64 num_bytes) 3735 { 3736 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 3737 } 3738 3739 void btrfs_block_rsv_release(struct btrfs_root *root, 3740 struct btrfs_block_rsv *block_rsv, 3741 u64 num_bytes) 3742 { 3743 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 3744 if (global_rsv->full || global_rsv == block_rsv || 3745 block_rsv->space_info != global_rsv->space_info) 3746 global_rsv = NULL; 3747 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes); 3748 } 3749 3750 /* 3751 * helper to calculate size of global block reservation.
3752 * the desired value is the sum of space used by extent tree, 3753 * checksum tree and root tree 3754 */ 3755 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) 3756 { 3757 struct btrfs_space_info *sinfo; 3758 u64 num_bytes; 3759 u64 meta_used; 3760 u64 data_used; 3761 int csum_size = btrfs_super_csum_size(&fs_info->super_copy); 3762 #if 0 3763 /* 3764 * per tree used space accounting can be inaccurate, so we 3765 * can't rely on it. 3766 */ 3767 spin_lock(&fs_info->extent_root->accounting_lock); 3768 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item); 3769 spin_unlock(&fs_info->extent_root->accounting_lock); 3770 3771 spin_lock(&fs_info->csum_root->accounting_lock); 3772 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item); 3773 spin_unlock(&fs_info->csum_root->accounting_lock); 3774 3775 spin_lock(&fs_info->tree_root->accounting_lock); 3776 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item); 3777 spin_unlock(&fs_info->tree_root->accounting_lock); 3778 #endif 3779 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); 3780 spin_lock(&sinfo->lock); 3781 data_used = sinfo->bytes_used; 3782 spin_unlock(&sinfo->lock); 3783 3784 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 3785 spin_lock(&sinfo->lock); 3786 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) 3787 data_used = 0; 3788 meta_used = sinfo->bytes_used; 3789 spin_unlock(&sinfo->lock); 3790 3791 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) * 3792 csum_size * 2; 3793 num_bytes += div64_u64(data_used + meta_used, 50); 3794 3795 if (num_bytes * 3 > meta_used) 3796 num_bytes = div64_u64(meta_used, 3); 3797 3798 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); 3799 } 3800 3801 static void update_global_block_rsv(struct btrfs_fs_info *fs_info) 3802 { 3803 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; 3804 struct btrfs_space_info *sinfo = block_rsv->space_info; 3805 u64 num_bytes; 3806 3807 num_bytes = calc_global_metadata_size(fs_info); 3808 3809 spin_lock(&block_rsv->lock); 3810 spin_lock(&sinfo->lock); 3811 3812 block_rsv->size = num_bytes; 3813 3814 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + 3815 sinfo->bytes_reserved + sinfo->bytes_readonly + 3816 sinfo->bytes_may_use; 3817 3818 if (sinfo->total_bytes > num_bytes) { 3819 num_bytes = sinfo->total_bytes - num_bytes; 3820 block_rsv->reserved += num_bytes; 3821 sinfo->bytes_reserved += num_bytes; 3822 } 3823 3824 if (block_rsv->reserved >= block_rsv->size) { 3825 num_bytes = block_rsv->reserved - block_rsv->size; 3826 sinfo->bytes_reserved -= num_bytes; 3827 block_rsv->reserved = block_rsv->size; 3828 block_rsv->full = 1; 3829 } 3830 #if 0 3831 printk(KERN_INFO"global block rsv size %llu reserved %llu\n", 3832 block_rsv->size, block_rsv->reserved); 3833 #endif 3834 spin_unlock(&sinfo->lock); 3835 spin_unlock(&block_rsv->lock); 3836 } 3837 3838 static void init_global_block_rsv(struct btrfs_fs_info *fs_info) 3839 { 3840 struct btrfs_space_info *space_info; 3841 3842 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); 3843 fs_info->chunk_block_rsv.space_info = space_info; 3844 fs_info->chunk_block_rsv.priority = 10; 3845 3846 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 3847 fs_info->global_block_rsv.space_info = space_info; 3848 fs_info->global_block_rsv.priority = 10; 3849 fs_info->global_block_rsv.refill_used = 1; 3850 fs_info->delalloc_block_rsv.space_info = space_info; 3851 fs_info->trans_block_rsv.space_info = space_info; 3852
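/* empty_block_rsv deliberately stays at size zero; get_block_rsv() hands it out when a root has no reservation of its own */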
fs_info->empty_block_rsv.space_info = space_info; 3853 fs_info->empty_block_rsv.priority = 10; 3854 3855 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv; 3856 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; 3857 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; 3858 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; 3859 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; 3860 3861 btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv); 3862 3863 btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv); 3864 3865 update_global_block_rsv(fs_info); 3866 } 3867 3868 static void release_global_block_rsv(struct btrfs_fs_info *fs_info) 3869 { 3870 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1); 3871 WARN_ON(fs_info->delalloc_block_rsv.size > 0); 3872 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0); 3873 WARN_ON(fs_info->trans_block_rsv.size > 0); 3874 WARN_ON(fs_info->trans_block_rsv.reserved > 0); 3875 WARN_ON(fs_info->chunk_block_rsv.size > 0); 3876 WARN_ON(fs_info->chunk_block_rsv.reserved > 0); 3877 } 3878 3879 static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) 3880 { 3881 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * 3882 3 * num_items; 3883 } 3884 3885 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, 3886 struct btrfs_root *root, 3887 int num_items) 3888 { 3889 u64 num_bytes; 3890 int ret; 3891 3892 if (num_items == 0 || root->fs_info->chunk_root == root) 3893 return 0; 3894 3895 num_bytes = calc_trans_metadata_size(root, num_items); 3896 ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, 3897 num_bytes); 3898 if (!ret) { 3899 trans->bytes_reserved += num_bytes; 3900 trans->block_rsv = &root->fs_info->trans_block_rsv; 3901 } 3902 return ret; 3903 } 3904 3905 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, 3906 struct btrfs_root *root) 3907 { 3908 if (!trans->bytes_reserved) 3909 return; 3910 3911 BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv); 3912 btrfs_block_rsv_release(root, trans->block_rsv, 3913 trans->bytes_reserved); 3914 trans->bytes_reserved = 0; 3915 } 3916 3917 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, 3918 struct inode *inode) 3919 { 3920 struct btrfs_root *root = BTRFS_I(inode)->root; 3921 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); 3922 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; 3923 3924 /* 3925 * one for deleting orphan item, one for updating inode and 3926 * two for calling btrfs_truncate_inode_items. 3927 * 3928 * btrfs_truncate_inode_items is a delete operation, it frees 3929 * more space than it uses in most cases. So two units of 3930 * metadata space should be enough for calling it many times. 3931 * If all of the metadata space is used, we can commit 3932 * transaction and use space it freed. 
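* With 4K leaves/nodes and BTRFS_MAX_LEVEL == 8, those four items come to (4096 + 7 * 4096) * 3 * 4 = 384K from calc_trans_metadata_size() below.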
3933 */ 3934 u64 num_bytes = calc_trans_metadata_size(root, 4); 3935 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 3936 } 3937 3938 void btrfs_orphan_release_metadata(struct inode *inode) 3939 { 3940 struct btrfs_root *root = BTRFS_I(inode)->root; 3941 u64 num_bytes = calc_trans_metadata_size(root, 4); 3942 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); 3943 } 3944 3945 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, 3946 struct btrfs_pending_snapshot *pending) 3947 { 3948 struct btrfs_root *root = pending->root; 3949 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); 3950 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv; 3951 /* 3952 * two for root back/forward refs, two for directory entries 3953 * and one for root of the snapshot. 3954 */ 3955 u64 num_bytes = calc_trans_metadata_size(root, 5); 3956 dst_rsv->space_info = src_rsv->space_info; 3957 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 3958 } 3959 3960 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes) 3961 { 3962 return num_bytes >> 3; 3963 } 3964 3965 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) 3966 { 3967 struct btrfs_root *root = BTRFS_I(inode)->root; 3968 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; 3969 u64 to_reserve; 3970 int nr_extents; 3971 int ret; 3972 3973 if (btrfs_transaction_in_commit(root->fs_info)) 3974 schedule_timeout(1); 3975 3976 num_bytes = ALIGN(num_bytes, root->sectorsize); 3977 3978 spin_lock(&BTRFS_I(inode)->accounting_lock); 3979 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; 3980 if (nr_extents > BTRFS_I(inode)->reserved_extents) { 3981 nr_extents -= BTRFS_I(inode)->reserved_extents; 3982 to_reserve = calc_trans_metadata_size(root, nr_extents); 3983 } else { 3984 nr_extents = 0; 3985 to_reserve = 0; 3986 } 3987 spin_unlock(&BTRFS_I(inode)->accounting_lock); 3988 3989 to_reserve += calc_csum_metadata_size(inode, num_bytes); 3990 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); 3991 if (ret) 3992 return ret; 3993 3994 spin_lock(&BTRFS_I(inode)->accounting_lock); 3995 BTRFS_I(inode)->reserved_extents += nr_extents; 3996 atomic_inc(&BTRFS_I(inode)->outstanding_extents); 3997 spin_unlock(&BTRFS_I(inode)->accounting_lock); 3998 3999 block_rsv_add_bytes(block_rsv, to_reserve, 1); 4000 4001 if (block_rsv->size > 512 * 1024 * 1024) 4002 shrink_delalloc(NULL, root, to_reserve, 0); 4003 4004 return 0; 4005 } 4006 4007 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) 4008 { 4009 struct btrfs_root *root = BTRFS_I(inode)->root; 4010 u64 to_free; 4011 int nr_extents; 4012 4013 num_bytes = ALIGN(num_bytes, root->sectorsize); 4014 atomic_dec(&BTRFS_I(inode)->outstanding_extents); 4015 4016 spin_lock(&BTRFS_I(inode)->accounting_lock); 4017 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); 4018 if (nr_extents < BTRFS_I(inode)->reserved_extents) { 4019 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents; 4020 BTRFS_I(inode)->reserved_extents -= nr_extents; 4021 } else { 4022 nr_extents = 0; 4023 } 4024 spin_unlock(&BTRFS_I(inode)->accounting_lock); 4025 4026 to_free = calc_csum_metadata_size(inode, num_bytes); 4027 if (nr_extents > 0) 4028 to_free += calc_trans_metadata_size(root, nr_extents); 4029 4030 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, 4031 to_free); 4032 } 4033 4034 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes) 4035 { 4036 int
ret; 4037 4038 ret = btrfs_check_data_free_space(inode, num_bytes); 4039 if (ret) 4040 return ret; 4041 4042 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes); 4043 if (ret) { 4044 btrfs_free_reserved_data_space(inode, num_bytes); 4045 return ret; 4046 } 4047 4048 return 0; 4049 } 4050 4051 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes) 4052 { 4053 btrfs_delalloc_release_metadata(inode, num_bytes); 4054 btrfs_free_reserved_data_space(inode, num_bytes); 4055 } 4056 4057 static int update_block_group(struct btrfs_trans_handle *trans, 4058 struct btrfs_root *root, 4059 u64 bytenr, u64 num_bytes, int alloc) 4060 { 4061 struct btrfs_block_group_cache *cache = NULL; 4062 struct btrfs_fs_info *info = root->fs_info; 4063 u64 total = num_bytes; 4064 u64 old_val; 4065 u64 byte_in_group; 4066 int factor; 4067 4068 /* block accounting for super block */ 4069 spin_lock(&info->delalloc_lock); 4070 old_val = btrfs_super_bytes_used(&info->super_copy); 4071 if (alloc) 4072 old_val += num_bytes; 4073 else 4074 old_val -= num_bytes; 4075 btrfs_set_super_bytes_used(&info->super_copy, old_val); 4076 spin_unlock(&info->delalloc_lock); 4077 4078 while (total) { 4079 cache = btrfs_lookup_block_group(info, bytenr); 4080 if (!cache) 4081 return -1; 4082 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | 4083 BTRFS_BLOCK_GROUP_RAID1 | 4084 BTRFS_BLOCK_GROUP_RAID10)) 4085 factor = 2; 4086 else 4087 factor = 1; 4088 /* 4089 * If this block group has free space cache written out, we 4090 * need to make sure to load it if we are removing space. This 4091 * is because we need the unpinning stage to actually add the 4092 * space back to the block group, otherwise we will leak space. 4093 */ 4094 if (!alloc && cache->cached == BTRFS_CACHE_NO) 4095 cache_block_group(cache, trans, NULL, 1); 4096 4097 byte_in_group = bytenr - cache->key.objectid; 4098 WARN_ON(byte_in_group > cache->key.offset); 4099 4100 spin_lock(&cache->space_info->lock); 4101 spin_lock(&cache->lock); 4102 4103 if (btrfs_super_cache_generation(&info->super_copy) != 0 && 4104 cache->disk_cache_state < BTRFS_DC_CLEAR) 4105 cache->disk_cache_state = BTRFS_DC_CLEAR; 4106 4107 cache->dirty = 1; 4108 old_val = btrfs_block_group_used(&cache->item); 4109 num_bytes = min(total, cache->key.offset - byte_in_group); 4110 if (alloc) { 4111 old_val += num_bytes; 4112 btrfs_set_block_group_used(&cache->item, old_val); 4113 cache->reserved -= num_bytes; 4114 cache->space_info->bytes_reserved -= num_bytes; 4115 cache->space_info->bytes_used += num_bytes; 4116 cache->space_info->disk_used += num_bytes * factor; 4117 spin_unlock(&cache->lock); 4118 spin_unlock(&cache->space_info->lock); 4119 } else { 4120 old_val -= num_bytes; 4121 btrfs_set_block_group_used(&cache->item, old_val); 4122 cache->pinned += num_bytes; 4123 cache->space_info->bytes_pinned += num_bytes; 4124 cache->space_info->bytes_used -= num_bytes; 4125 cache->space_info->disk_used -= num_bytes * factor; 4126 spin_unlock(&cache->lock); 4127 spin_unlock(&cache->space_info->lock); 4128 4129 set_extent_dirty(info->pinned_extents, 4130 bytenr, bytenr + num_bytes - 1, 4131 GFP_NOFS | __GFP_NOFAIL); 4132 } 4133 btrfs_put_block_group(cache); 4134 total -= num_bytes; 4135 bytenr += num_bytes; 4136 } 4137 return 0; 4138 } 4139 4140 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) 4141 { 4142 struct btrfs_block_group_cache *cache; 4143 u64 bytenr; 4144 4145 cache = btrfs_lookup_first_block_group(root->fs_info, search_start); 4146 if (!cache) 4147 return 0; 4148 4149 bytenr = 
cache->key.objectid; 4150 btrfs_put_block_group(cache); 4151 4152 return bytenr; 4153 } 4154 4155 static int pin_down_extent(struct btrfs_root *root, 4156 struct btrfs_block_group_cache *cache, 4157 u64 bytenr, u64 num_bytes, int reserved) 4158 { 4159 spin_lock(&cache->space_info->lock); 4160 spin_lock(&cache->lock); 4161 cache->pinned += num_bytes; 4162 cache->space_info->bytes_pinned += num_bytes; 4163 if (reserved) { 4164 cache->reserved -= num_bytes; 4165 cache->space_info->bytes_reserved -= num_bytes; 4166 } 4167 spin_unlock(&cache->lock); 4168 spin_unlock(&cache->space_info->lock); 4169 4170 set_extent_dirty(root->fs_info->pinned_extents, bytenr, 4171 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); 4172 return 0; 4173 } 4174 4175 /* 4176 * this function must be called within transaction 4177 */ 4178 int btrfs_pin_extent(struct btrfs_root *root, 4179 u64 bytenr, u64 num_bytes, int reserved) 4180 { 4181 struct btrfs_block_group_cache *cache; 4182 4183 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 4184 BUG_ON(!cache); 4185 4186 pin_down_extent(root, cache, bytenr, num_bytes, reserved); 4187 4188 btrfs_put_block_group(cache); 4189 return 0; 4190 } 4191 4192 /* 4193 * update size of reserved extents. this function may return -EAGAIN 4194 * if 'reserve' is true or 'sinfo' is false. 4195 */ 4196 static int update_reserved_bytes(struct btrfs_block_group_cache *cache, 4197 u64 num_bytes, int reserve, int sinfo) 4198 { 4199 int ret = 0; 4200 if (sinfo) { 4201 struct btrfs_space_info *space_info = cache->space_info; 4202 spin_lock(&space_info->lock); 4203 spin_lock(&cache->lock); 4204 if (reserve) { 4205 if (cache->ro) { 4206 ret = -EAGAIN; 4207 } else { 4208 cache->reserved += num_bytes; 4209 space_info->bytes_reserved += num_bytes; 4210 } 4211 } else { 4212 if (cache->ro) 4213 space_info->bytes_readonly += num_bytes; 4214 cache->reserved -= num_bytes; 4215 space_info->bytes_reserved -= num_bytes; 4216 } 4217 spin_unlock(&cache->lock); 4218 spin_unlock(&space_info->lock); 4219 } else { 4220 spin_lock(&cache->lock); 4221 if (cache->ro) { 4222 ret = -EAGAIN; 4223 } else { 4224 if (reserve) 4225 cache->reserved += num_bytes; 4226 else 4227 cache->reserved -= num_bytes; 4228 } 4229 spin_unlock(&cache->lock); 4230 } 4231 return ret; 4232 } 4233 4234 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 4235 struct btrfs_root *root) 4236 { 4237 struct btrfs_fs_info *fs_info = root->fs_info; 4238 struct btrfs_caching_control *next; 4239 struct btrfs_caching_control *caching_ctl; 4240 struct btrfs_block_group_cache *cache; 4241 4242 down_write(&fs_info->extent_commit_sem); 4243 4244 list_for_each_entry_safe(caching_ctl, next, 4245 &fs_info->caching_block_groups, list) { 4246 cache = caching_ctl->block_group; 4247 if (block_group_cache_done(cache)) { 4248 cache->last_byte_to_unpin = (u64)-1; 4249 list_del_init(&caching_ctl->list); 4250 put_caching_control(caching_ctl); 4251 } else { 4252 cache->last_byte_to_unpin = caching_ctl->progress; 4253 } 4254 } 4255 4256 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 4257 fs_info->pinned_extents = &fs_info->freed_extents[1]; 4258 else 4259 fs_info->pinned_extents = &fs_info->freed_extents[0]; 4260 4261 up_write(&fs_info->extent_commit_sem); 4262 4263 update_global_block_rsv(fs_info); 4264 return 0; 4265 } 4266 4267 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 4268 { 4269 struct btrfs_fs_info *fs_info = root->fs_info; 4270 struct btrfs_block_group_cache *cache = NULL; 4271 u64 len; 4272 4273 while 
(start <= end) { 4274 if (!cache || 4275 start >= cache->key.objectid + cache->key.offset) { 4276 if (cache) 4277 btrfs_put_block_group(cache); 4278 cache = btrfs_lookup_block_group(fs_info, start); 4279 BUG_ON(!cache); 4280 } 4281 4282 len = cache->key.objectid + cache->key.offset - start; 4283 len = min(len, end + 1 - start); 4284 4285 if (start < cache->last_byte_to_unpin) { 4286 len = min(len, cache->last_byte_to_unpin - start); 4287 btrfs_add_free_space(cache, start, len); 4288 } 4289 4290 start += len; 4291 4292 spin_lock(&cache->space_info->lock); 4293 spin_lock(&cache->lock); 4294 cache->pinned -= len; 4295 cache->space_info->bytes_pinned -= len; 4296 if (cache->ro) { 4297 cache->space_info->bytes_readonly += len; 4298 } else if (cache->reserved_pinned > 0) { 4299 len = min(len, cache->reserved_pinned); 4300 cache->reserved_pinned -= len; 4301 cache->space_info->bytes_reserved += len; 4302 } 4303 spin_unlock(&cache->lock); 4304 spin_unlock(&cache->space_info->lock); 4305 } 4306 4307 if (cache) 4308 btrfs_put_block_group(cache); 4309 return 0; 4310 } 4311 4312 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 4313 struct btrfs_root *root) 4314 { 4315 struct btrfs_fs_info *fs_info = root->fs_info; 4316 struct extent_io_tree *unpin; 4317 struct btrfs_block_rsv *block_rsv; 4318 struct btrfs_block_rsv *next_rsv; 4319 u64 start; 4320 u64 end; 4321 int idx; 4322 int ret; 4323 4324 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 4325 unpin = &fs_info->freed_extents[1]; 4326 else 4327 unpin = &fs_info->freed_extents[0]; 4328 4329 while (1) { 4330 ret = find_first_extent_bit(unpin, 0, &start, &end, 4331 EXTENT_DIRTY); 4332 if (ret) 4333 break; 4334 4335 ret = btrfs_discard_extent(root, start, end + 1 - start); 4336 4337 clear_extent_dirty(unpin, start, end, GFP_NOFS); 4338 unpin_extent_range(root, start, end); 4339 cond_resched(); 4340 } 4341 4342 mutex_lock(&fs_info->durable_block_rsv_mutex); 4343 list_for_each_entry_safe(block_rsv, next_rsv, 4344 &fs_info->durable_block_rsv_list, list) { 4345 4346 idx = trans->transid & 0x1; 4347 if (block_rsv->freed[idx] > 0) { 4348 block_rsv_add_bytes(block_rsv, 4349 block_rsv->freed[idx], 0); 4350 block_rsv->freed[idx] = 0; 4351 } 4352 if (atomic_read(&block_rsv->usage) == 0) { 4353 btrfs_block_rsv_release(root, block_rsv, (u64)-1); 4354 4355 if (block_rsv->freed[0] == 0 && 4356 block_rsv->freed[1] == 0) { 4357 list_del_init(&block_rsv->list); 4358 kfree(block_rsv); 4359 } 4360 } else { 4361 btrfs_block_rsv_release(root, block_rsv, 0); 4362 } 4363 } 4364 mutex_unlock(&fs_info->durable_block_rsv_mutex); 4365 4366 return 0; 4367 } 4368 4369 static int __btrfs_free_extent(struct btrfs_trans_handle *trans, 4370 struct btrfs_root *root, 4371 u64 bytenr, u64 num_bytes, u64 parent, 4372 u64 root_objectid, u64 owner_objectid, 4373 u64 owner_offset, int refs_to_drop, 4374 struct btrfs_delayed_extent_op *extent_op) 4375 { 4376 struct btrfs_key key; 4377 struct btrfs_path *path; 4378 struct btrfs_fs_info *info = root->fs_info; 4379 struct btrfs_root *extent_root = info->extent_root; 4380 struct extent_buffer *leaf; 4381 struct btrfs_extent_item *ei; 4382 struct btrfs_extent_inline_ref *iref; 4383 int ret; 4384 int is_data; 4385 int extent_slot = 0; 4386 int found_extent = 0; 4387 int num_to_del = 1; 4388 u32 item_size; 4389 u64 refs; 4390 4391 path = btrfs_alloc_path(); 4392 if (!path) 4393 return -ENOMEM; 4394 4395 path->reada = 1; 4396 path->leave_spinning = 1; 4397 4398 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; 4399 
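	/* owners below BTRFS_FIRST_FREE_OBJECTID (256) are tree roots, so this is a tree block; tree block backrefs carry no count field and are always dropped one at a time, hence the check below */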
BUG_ON(!is_data && refs_to_drop != 1); 4400 4401 ret = lookup_extent_backref(trans, extent_root, path, &iref, 4402 bytenr, num_bytes, parent, 4403 root_objectid, owner_objectid, 4404 owner_offset); 4405 if (ret == 0) { 4406 extent_slot = path->slots[0]; 4407 while (extent_slot >= 0) { 4408 btrfs_item_key_to_cpu(path->nodes[0], &key, 4409 extent_slot); 4410 if (key.objectid != bytenr) 4411 break; 4412 if (key.type == BTRFS_EXTENT_ITEM_KEY && 4413 key.offset == num_bytes) { 4414 found_extent = 1; 4415 break; 4416 } 4417 if (path->slots[0] - extent_slot > 5) 4418 break; 4419 extent_slot--; 4420 } 4421 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 4422 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot); 4423 if (found_extent && item_size < sizeof(*ei)) 4424 found_extent = 0; 4425 #endif 4426 if (!found_extent) { 4427 BUG_ON(iref); 4428 ret = remove_extent_backref(trans, extent_root, path, 4429 NULL, refs_to_drop, 4430 is_data); 4431 BUG_ON(ret); 4432 btrfs_release_path(extent_root, path); 4433 path->leave_spinning = 1; 4434 4435 key.objectid = bytenr; 4436 key.type = BTRFS_EXTENT_ITEM_KEY; 4437 key.offset = num_bytes; 4438 4439 ret = btrfs_search_slot(trans, extent_root, 4440 &key, path, -1, 1); 4441 if (ret) { 4442 printk(KERN_ERR "umm, got %d back from search" 4443 ", was looking for %llu\n", ret, 4444 (unsigned long long)bytenr); 4445 btrfs_print_leaf(extent_root, path->nodes[0]); 4446 } 4447 BUG_ON(ret); 4448 extent_slot = path->slots[0]; 4449 } 4450 } else { 4451 btrfs_print_leaf(extent_root, path->nodes[0]); 4452 WARN_ON(1); 4453 printk(KERN_ERR "btrfs unable to find ref byte nr %llu " 4454 "parent %llu root %llu owner %llu offset %llu\n", 4455 (unsigned long long)bytenr, 4456 (unsigned long long)parent, 4457 (unsigned long long)root_objectid, 4458 (unsigned long long)owner_objectid, 4459 (unsigned long long)owner_offset); 4460 } 4461 4462 leaf = path->nodes[0]; 4463 item_size = btrfs_item_size_nr(leaf, extent_slot); 4464 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 4465 if (item_size < sizeof(*ei)) { 4466 BUG_ON(found_extent || extent_slot != path->slots[0]); 4467 ret = convert_extent_item_v0(trans, extent_root, path, 4468 owner_objectid, 0); 4469 BUG_ON(ret < 0); 4470 4471 btrfs_release_path(extent_root, path); 4472 path->leave_spinning = 1; 4473 4474 key.objectid = bytenr; 4475 key.type = BTRFS_EXTENT_ITEM_KEY; 4476 key.offset = num_bytes; 4477 4478 ret = btrfs_search_slot(trans, extent_root, &key, path, 4479 -1, 1); 4480 if (ret) { 4481 printk(KERN_ERR "umm, got %d back from search" 4482 ", was looking for %llu\n", ret, 4483 (unsigned long long)bytenr); 4484 btrfs_print_leaf(extent_root, path->nodes[0]); 4485 } 4486 BUG_ON(ret); 4487 extent_slot = path->slots[0]; 4488 leaf = path->nodes[0]; 4489 item_size = btrfs_item_size_nr(leaf, extent_slot); 4490 } 4491 #endif 4492 BUG_ON(item_size < sizeof(*ei)); 4493 ei = btrfs_item_ptr(leaf, extent_slot, 4494 struct btrfs_extent_item); 4495 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) { 4496 struct btrfs_tree_block_info *bi; 4497 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi)); 4498 bi = (struct btrfs_tree_block_info *)(ei + 1); 4499 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); 4500 } 4501 4502 refs = btrfs_extent_refs(leaf, ei); 4503 BUG_ON(refs < refs_to_drop); 4504 refs -= refs_to_drop; 4505 4506 if (refs > 0) { 4507 if (extent_op) 4508 __run_delayed_extent_op(extent_op, leaf, ei); 4509 /* 4510 * In the case of inline back ref, reference count will 4511 * be updated by remove_extent_backref 4512 */ 4513 if (iref) { 4514 
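			/* an inline ref lives inside the extent item itself, so the extent item must have been located */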
BUG_ON(!found_extent); 4515 } else { 4516 btrfs_set_extent_refs(leaf, ei, refs); 4517 btrfs_mark_buffer_dirty(leaf); 4518 } 4519 if (found_extent) { 4520 ret = remove_extent_backref(trans, extent_root, path, 4521 iref, refs_to_drop, 4522 is_data); 4523 BUG_ON(ret); 4524 } 4525 } else { 4526 if (found_extent) { 4527 BUG_ON(is_data && refs_to_drop != 4528 extent_data_ref_count(root, path, iref)); 4529 if (iref) { 4530 BUG_ON(path->slots[0] != extent_slot); 4531 } else { 4532 BUG_ON(path->slots[0] != extent_slot + 1); 4533 path->slots[0] = extent_slot; 4534 num_to_del = 2; 4535 } 4536 } 4537 4538 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], 4539 num_to_del); 4540 BUG_ON(ret); 4541 btrfs_release_path(extent_root, path); 4542 4543 if (is_data) { 4544 ret = btrfs_del_csums(trans, root, bytenr, num_bytes); 4545 BUG_ON(ret); 4546 } else { 4547 invalidate_mapping_pages(info->btree_inode->i_mapping, 4548 bytenr >> PAGE_CACHE_SHIFT, 4549 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT); 4550 } 4551 4552 ret = update_block_group(trans, root, bytenr, num_bytes, 0); 4553 BUG_ON(ret); 4554 } 4555 btrfs_free_path(path); 4556 return ret; 4557 } 4558 4559 /* 4560 * when we free a block, it is possible (and likely) that we free the last 4561 * delayed ref for that extent as well. This searches the delayed ref tree for 4562 * a given extent, and if there are no other delayed refs to be processed, it 4563 * removes it from the tree. 4564 */ 4565 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, 4566 struct btrfs_root *root, u64 bytenr) 4567 { 4568 struct btrfs_delayed_ref_head *head; 4569 struct btrfs_delayed_ref_root *delayed_refs; 4570 struct btrfs_delayed_ref_node *ref; 4571 struct rb_node *node; 4572 int ret = 0; 4573 4574 delayed_refs = &trans->transaction->delayed_refs; 4575 spin_lock(&delayed_refs->lock); 4576 head = btrfs_find_delayed_ref_head(trans, bytenr); 4577 if (!head) 4578 goto out; 4579 4580 node = rb_prev(&head->node.rb_node); 4581 if (!node) 4582 goto out; 4583 4584 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 4585 4586 /* there are still entries for this ref, we can't drop it */ 4587 if (ref->bytenr == bytenr) 4588 goto out; 4589 4590 if (head->extent_op) { 4591 if (!head->must_insert_reserved) 4592 goto out; 4593 kfree(head->extent_op); 4594 head->extent_op = NULL; 4595 } 4596 4597 /* 4598 * waiting for the lock here would deadlock. If someone else has it 4599 * locked they are already in the process of dropping it anyway 4600 */ 4601 if (!mutex_trylock(&head->mutex)) 4602 goto out; 4603 4604 /* 4605 * at this point we have a head with no other entries. Go 4606 * ahead and process it. 4607 */ 4608 head->node.in_tree = 0; 4609 rb_erase(&head->node.rb_node, &delayed_refs->root); 4610 4611 delayed_refs->num_entries--; 4612 4613 /* 4614 * we don't take a ref on the node because we're removing it from the 4615 * tree, so we just steal the ref the tree was holding.
4616 */ 4617 delayed_refs->num_heads--; 4618 if (list_empty(&head->cluster)) 4619 delayed_refs->num_heads_ready--; 4620 4621 list_del_init(&head->cluster); 4622 spin_unlock(&delayed_refs->lock); 4623 4624 BUG_ON(head->extent_op); 4625 if (head->must_insert_reserved) 4626 ret = 1; 4627 4628 mutex_unlock(&head->mutex); 4629 btrfs_put_delayed_ref(&head->node); 4630 return ret; 4631 out: 4632 spin_unlock(&delayed_refs->lock); 4633 return 0; 4634 } 4635 4636 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 4637 struct btrfs_root *root, 4638 struct extent_buffer *buf, 4639 u64 parent, int last_ref) 4640 { 4641 struct btrfs_block_rsv *block_rsv; 4642 struct btrfs_block_group_cache *cache = NULL; 4643 int ret; 4644 4645 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 4646 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len, 4647 parent, root->root_key.objectid, 4648 btrfs_header_level(buf), 4649 BTRFS_DROP_DELAYED_REF, NULL); 4650 BUG_ON(ret); 4651 } 4652 4653 if (!last_ref) 4654 return; 4655 4656 block_rsv = get_block_rsv(trans, root); 4657 cache = btrfs_lookup_block_group(root->fs_info, buf->start); 4658 if (block_rsv->space_info != cache->space_info) 4659 goto out; 4660 4661 if (btrfs_header_generation(buf) == trans->transid) { 4662 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 4663 ret = check_ref_cleanup(trans, root, buf->start); 4664 if (!ret) 4665 goto pin; 4666 } 4667 4668 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { 4669 pin_down_extent(root, cache, buf->start, buf->len, 1); 4670 goto pin; 4671 } 4672 4673 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 4674 4675 btrfs_add_free_space(cache, buf->start, buf->len); 4676 ret = update_reserved_bytes(cache, buf->len, 0, 0); 4677 if (ret == -EAGAIN) { 4678 /* block group became read-only */ 4679 update_reserved_bytes(cache, buf->len, 0, 1); 4680 goto out; 4681 } 4682 4683 ret = 1; 4684 spin_lock(&block_rsv->lock); 4685 if (block_rsv->reserved < block_rsv->size) { 4686 block_rsv->reserved += buf->len; 4687 ret = 0; 4688 } 4689 spin_unlock(&block_rsv->lock); 4690 4691 if (ret) { 4692 spin_lock(&cache->space_info->lock); 4693 cache->space_info->bytes_reserved -= buf->len; 4694 spin_unlock(&cache->space_info->lock); 4695 } 4696 goto out; 4697 } 4698 pin: 4699 if (block_rsv->durable && !cache->ro) { 4700 ret = 0; 4701 spin_lock(&cache->lock); 4702 if (!cache->ro) { 4703 cache->reserved_pinned += buf->len; 4704 ret = 1; 4705 } 4706 spin_unlock(&cache->lock); 4707 4708 if (ret) { 4709 spin_lock(&block_rsv->lock); 4710 block_rsv->freed[trans->transid & 0x1] += buf->len; 4711 spin_unlock(&block_rsv->lock); 4712 } 4713 } 4714 out: 4715 btrfs_put_block_group(cache); 4716 } 4717 4718 int btrfs_free_extent(struct btrfs_trans_handle *trans, 4719 struct btrfs_root *root, 4720 u64 bytenr, u64 num_bytes, u64 parent, 4721 u64 root_objectid, u64 owner, u64 offset) 4722 { 4723 int ret; 4724 4725 /* 4726 * tree log blocks never actually go into the extent allocation 4727 * tree, just update pinning info and exit early. 
4728 */ 4729 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) { 4730 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID); 4731 /* unlocks the pinned mutex */ 4732 btrfs_pin_extent(root, bytenr, num_bytes, 1); 4733 ret = 0; 4734 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) { 4735 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes, 4736 parent, root_objectid, (int)owner, 4737 BTRFS_DROP_DELAYED_REF, NULL); 4738 BUG_ON(ret); 4739 } else { 4740 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes, 4741 parent, root_objectid, owner, 4742 offset, BTRFS_DROP_DELAYED_REF, NULL); 4743 BUG_ON(ret); 4744 } 4745 return ret; 4746 } 4747 4748 static u64 stripe_align(struct btrfs_root *root, u64 val) 4749 { 4750 u64 mask = ((u64)root->stripesize - 1); 4751 u64 ret = (val + mask) & ~mask; 4752 return ret; 4753 } 4754 4755 /* 4756 * when we wait for progress in the block group caching, it's because 4757 * our allocation attempt failed at least once. So, we must sleep 4758 * and let some progress happen before we try again. 4759 * 4760 * This function will sleep at least once waiting for new free space to 4761 * show up, and then it will check the block group free space numbers 4762 * for our min num_bytes. Another option is to have it go ahead 4763 * and look in the rbtree for a free extent of a given size, but this 4764 * is a good start. 4765 */ 4766 static noinline int 4767 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, 4768 u64 num_bytes) 4769 { 4770 struct btrfs_caching_control *caching_ctl; 4771 DEFINE_WAIT(wait); 4772 4773 caching_ctl = get_caching_control(cache); 4774 if (!caching_ctl) 4775 return 0; 4776 4777 wait_event(caching_ctl->wait, block_group_cache_done(cache) || 4778 (cache->free_space >= num_bytes)); 4779 4780 put_caching_control(caching_ctl); 4781 return 0; 4782 } 4783 4784 static noinline int 4785 wait_block_group_cache_done(struct btrfs_block_group_cache *cache) 4786 { 4787 struct btrfs_caching_control *caching_ctl; 4788 DEFINE_WAIT(wait); 4789 4790 caching_ctl = get_caching_control(cache); 4791 if (!caching_ctl) 4792 return 0; 4793 4794 wait_event(caching_ctl->wait, block_group_cache_done(cache)); 4795 4796 put_caching_control(caching_ctl); 4797 return 0; 4798 } 4799 4800 static int get_block_group_index(struct btrfs_block_group_cache *cache) 4801 { 4802 int index; 4803 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10) 4804 index = 0; 4805 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1) 4806 index = 1; 4807 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP) 4808 index = 2; 4809 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0) 4810 index = 3; 4811 else 4812 index = 4; 4813 return index; 4814 } 4815 4816 enum btrfs_loop_type { 4817 LOOP_FIND_IDEAL = 0, 4818 LOOP_CACHING_NOWAIT = 1, 4819 LOOP_CACHING_WAIT = 2, 4820 LOOP_ALLOC_CHUNK = 3, 4821 LOOP_NO_EMPTY_SIZE = 4, 4822 }; 4823 4824 /* 4825 * walks the btree of allocated extents and finds a hole of a given size. 4826 * The key ins is changed to record the hole: 4827 * ins->objectid == block start 4828 * ins->flags = BTRFS_EXTENT_ITEM_KEY 4829 * ins->offset == number of bytes 4830 * Any available blocks before search_start are skipped.
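 * For example, a successful 16K allocation placed at logical byte 134217728 would come back with ins->objectid == 134217728 and ins->offset == 16384 (illustrative numbers only).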
4831 */ 4832 static noinline int find_free_extent(struct btrfs_trans_handle *trans, 4833 struct btrfs_root *orig_root, 4834 u64 num_bytes, u64 empty_size, 4835 u64 search_start, u64 search_end, 4836 u64 hint_byte, struct btrfs_key *ins, 4837 int data) 4838 { 4839 int ret = 0; 4840 struct btrfs_root *root = orig_root->fs_info->extent_root; 4841 struct btrfs_free_cluster *last_ptr = NULL; 4842 struct btrfs_block_group_cache *block_group = NULL; 4843 int empty_cluster = 2 * 1024 * 1024; 4844 int allowed_chunk_alloc = 0; 4845 int done_chunk_alloc = 0; 4846 struct btrfs_space_info *space_info; 4847 int last_ptr_loop = 0; 4848 int loop = 0; 4849 int index = 0; 4850 bool found_uncached_bg = false; 4851 bool failed_cluster_refill = false; 4852 bool failed_alloc = false; 4853 bool use_cluster = true; 4854 u64 ideal_cache_percent = 0; 4855 u64 ideal_cache_offset = 0; 4856 4857 WARN_ON(num_bytes < root->sectorsize); 4858 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); 4859 ins->objectid = 0; 4860 ins->offset = 0; 4861 4862 space_info = __find_space_info(root->fs_info, data); 4863 if (!space_info) { 4864 printk(KERN_ERR "No space info for %d\n", data); 4865 return -ENOSPC; 4866 } 4867 4868 /* 4869 * If the space info is for both data and metadata it means we have a 4870 * small filesystem and we can't use the clustering stuff. 4871 */ 4872 if (btrfs_mixed_space_info(space_info)) 4873 use_cluster = false; 4874 4875 if (orig_root->ref_cows || empty_size) 4876 allowed_chunk_alloc = 1; 4877 4878 if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) { 4879 last_ptr = &root->fs_info->meta_alloc_cluster; 4880 if (!btrfs_test_opt(root, SSD)) 4881 empty_cluster = 64 * 1024; 4882 } 4883 4884 if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster && 4885 btrfs_test_opt(root, SSD)) { 4886 last_ptr = &root->fs_info->data_alloc_cluster; 4887 } 4888 4889 if (last_ptr) { 4890 spin_lock(&last_ptr->lock); 4891 if (last_ptr->block_group) 4892 hint_byte = last_ptr->window_start; 4893 spin_unlock(&last_ptr->lock); 4894 } 4895 4896 search_start = max(search_start, first_logical_byte(root, 0)); 4897 search_start = max(search_start, hint_byte); 4898 4899 if (!last_ptr) 4900 empty_cluster = 0; 4901 4902 if (search_start == hint_byte) { 4903 ideal_cache: 4904 block_group = btrfs_lookup_block_group(root->fs_info, 4905 search_start); 4906 /* 4907 * we don't want to use the block group if it doesn't match our 4908 * allocation bits, or if it's not cached. 4909 * 4910 * However if we are re-searching with an ideal block group 4911 * picked out then we don't care that the block group is cached.
4912 */ 4913 if (block_group && block_group_bits(block_group, data) && 4914 (block_group->cached != BTRFS_CACHE_NO || 4915 search_start == ideal_cache_offset)) { 4916 down_read(&space_info->groups_sem); 4917 if (list_empty(&block_group->list) || 4918 block_group->ro) { 4919 /* 4920 * someone is removing this block group, 4921 * we can't jump into the have_block_group 4922 * target because our list pointers are not 4923 * valid 4924 */ 4925 btrfs_put_block_group(block_group); 4926 up_read(&space_info->groups_sem); 4927 } else { 4928 index = get_block_group_index(block_group); 4929 goto have_block_group; 4930 } 4931 } else if (block_group) { 4932 btrfs_put_block_group(block_group); 4933 } 4934 } 4935 search: 4936 down_read(&space_info->groups_sem); 4937 list_for_each_entry(block_group, &space_info->block_groups[index], 4938 list) { 4939 u64 offset; 4940 int cached; 4941 4942 btrfs_get_block_group(block_group); 4943 search_start = block_group->key.objectid; 4944 4945 /* 4946 * this can happen if we end up cycling through all the 4947 * raid types, but we want to make sure we only allocate 4948 * for the proper type. 4949 */ 4950 if (!block_group_bits(block_group, data)) { 4951 u64 extra = BTRFS_BLOCK_GROUP_DUP | 4952 BTRFS_BLOCK_GROUP_RAID1 | 4953 BTRFS_BLOCK_GROUP_RAID10; 4954 4955 /* 4956 * if they asked for extra copies and this block group 4957 * doesn't provide them, bail. This does allow us to 4958 * fill raid0 from raid1. 4959 */ 4960 if ((data & extra) && !(block_group->flags & extra)) 4961 goto loop; 4962 } 4963 4964 have_block_group: 4965 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { 4966 u64 free_percent; 4967 4968 ret = cache_block_group(block_group, trans, 4969 orig_root, 1); 4970 if (block_group->cached == BTRFS_CACHE_FINISHED) 4971 goto have_block_group; 4972 4973 free_percent = btrfs_block_group_used(&block_group->item); 4974 free_percent *= 100; 4975 free_percent = div64_u64(free_percent, 4976 block_group->key.offset); 4977 free_percent = 100 - free_percent; 4978 if (free_percent > ideal_cache_percent && 4979 likely(!block_group->ro)) { 4980 ideal_cache_offset = block_group->key.objectid; 4981 ideal_cache_percent = free_percent; 4982 } 4983 4984 /* 4985 * We only want to start kthread caching if we are at 4986 * the point where we will wait for caching to make 4987 * progress, or if our ideal search is over and we've 4988 * found somebody to start caching. 4989 */ 4990 if (loop > LOOP_CACHING_NOWAIT || 4991 (loop > LOOP_FIND_IDEAL && 4992 atomic_read(&space_info->caching_threads) < 2)) { 4993 ret = cache_block_group(block_group, trans, 4994 orig_root, 0); 4995 BUG_ON(ret); 4996 } 4997 found_uncached_bg = true; 4998 4999 /* 5000 * If loop is set for cached only, try the next block 5001 * group. 
5002 */ 5003 if (loop == LOOP_FIND_IDEAL) 5004 goto loop; 5005 } 5006 5007 cached = block_group_cache_done(block_group); 5008 if (unlikely(!cached)) 5009 found_uncached_bg = true; 5010 5011 if (unlikely(block_group->ro)) 5012 goto loop; 5013 5014 /* 5015 * Ok we want to try and use the cluster allocator, so let's look 5016 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will 5017 * have tried the cluster allocator plenty of times at this 5018 * point and not have found anything, so we are likely way too 5019 * fragmented for the clustering stuff to find anything, so let's 5020 * just skip it and let the allocator find whatever block it can 5021 * find 5022 */ 5023 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) { 5024 /* 5025 * the refill lock keeps out other 5026 * people trying to start a new cluster 5027 */ 5028 spin_lock(&last_ptr->refill_lock); 5029 if (last_ptr->block_group && 5030 (last_ptr->block_group->ro || 5031 !block_group_bits(last_ptr->block_group, data))) { 5032 offset = 0; 5033 goto refill_cluster; 5034 } 5035 5036 offset = btrfs_alloc_from_cluster(block_group, last_ptr, 5037 num_bytes, search_start); 5038 if (offset) { 5039 /* we have a block, we're done */ 5040 spin_unlock(&last_ptr->refill_lock); 5041 goto checks; 5042 } 5043 5044 spin_lock(&last_ptr->lock); 5045 /* 5046 * whoops, this cluster doesn't actually point to 5047 * this block group. Get a ref on the block 5048 * group it does point to and try again 5049 */ 5050 if (!last_ptr_loop && last_ptr->block_group && 5051 last_ptr->block_group != block_group) { 5052 5053 btrfs_put_block_group(block_group); 5054 block_group = last_ptr->block_group; 5055 btrfs_get_block_group(block_group); 5056 spin_unlock(&last_ptr->lock); 5057 spin_unlock(&last_ptr->refill_lock); 5058 5059 last_ptr_loop = 1; 5060 search_start = block_group->key.objectid; 5061 /* 5062 * we know this block group is properly 5063 * in the list because 5064 * btrfs_remove_block_group drops the 5065 * cluster before it removes the block 5066 * group from the list 5067 */ 5068 goto have_block_group; 5069 } 5070 spin_unlock(&last_ptr->lock); 5071 refill_cluster: 5072 /* 5073 * this cluster didn't work out, free it and 5074 * start over 5075 */ 5076 btrfs_return_cluster_to_free_space(NULL, last_ptr); 5077 5078 last_ptr_loop = 0; 5079 5080 /* allocate a cluster in this block group */ 5081 ret = btrfs_find_space_cluster(trans, root, 5082 block_group, last_ptr, 5083 offset, num_bytes, 5084 empty_cluster + empty_size); 5085 if (ret == 0) { 5086 /* 5087 * now pull our allocation out of this 5088 * cluster 5089 */ 5090 offset = btrfs_alloc_from_cluster(block_group, 5091 last_ptr, num_bytes, 5092 search_start); 5093 if (offset) { 5094 /* we found one, proceed */ 5095 spin_unlock(&last_ptr->refill_lock); 5096 goto checks; 5097 } 5098 } else if (!cached && loop > LOOP_CACHING_NOWAIT 5099 && !failed_cluster_refill) { 5100 spin_unlock(&last_ptr->refill_lock); 5101 5102 failed_cluster_refill = true; 5103 wait_block_group_cache_progress(block_group, 5104 num_bytes + empty_cluster + empty_size); 5105 goto have_block_group; 5106 } 5107 5108 /* 5109 * at this point we either didn't find a cluster 5110 * or we weren't able to allocate a block from our 5111 * cluster.
Free the cluster we've been trying 5112 * to use, and go to the next block group 5113 */ 5114 btrfs_return_cluster_to_free_space(NULL, last_ptr); 5115 spin_unlock(&last_ptr->refill_lock); 5116 goto loop; 5117 } 5118 5119 offset = btrfs_find_space_for_alloc(block_group, search_start, 5120 num_bytes, empty_size); 5121 /* 5122 * If we didn't find a chunk, and we haven't failed on this 5123 * block group before, and this block group is in the middle of 5124 * caching and we are ok with waiting, then go ahead and wait 5125 * for progress to be made, and set failed_alloc to true. 5126 * 5127 * If failed_alloc is true then we've already waited on this 5128 * block group once and should move on to the next block group. 5129 */ 5130 if (!offset && !failed_alloc && !cached && 5131 loop > LOOP_CACHING_NOWAIT) { 5132 wait_block_group_cache_progress(block_group, 5133 num_bytes + empty_size); 5134 failed_alloc = true; 5135 goto have_block_group; 5136 } else if (!offset) { 5137 goto loop; 5138 } 5139 checks: 5140 search_start = stripe_align(root, offset); 5141 /* move on to the next group */ 5142 if (search_start + num_bytes >= search_end) { 5143 btrfs_add_free_space(block_group, offset, num_bytes); 5144 goto loop; 5145 } 5146 5147 /* move on to the next group */ 5148 if (search_start + num_bytes > 5149 block_group->key.objectid + block_group->key.offset) { 5150 btrfs_add_free_space(block_group, offset, num_bytes); 5151 goto loop; 5152 } 5153 5154 ins->objectid = search_start; 5155 ins->offset = num_bytes; 5156 5157 if (offset < search_start) 5158 btrfs_add_free_space(block_group, offset, 5159 search_start - offset); 5160 BUG_ON(offset > search_start); 5161 5162 ret = update_reserved_bytes(block_group, num_bytes, 1, 5163 (data & BTRFS_BLOCK_GROUP_DATA)); 5164 if (ret == -EAGAIN) { 5165 btrfs_add_free_space(block_group, offset, num_bytes); 5166 goto loop; 5167 } 5168 5169 /* we are all good, let's return */ 5170 ins->objectid = search_start; 5171 ins->offset = num_bytes; 5172 5173 if (offset < search_start) 5174 btrfs_add_free_space(block_group, offset, 5175 search_start - offset); 5176 BUG_ON(offset > search_start); 5177 break; 5178 loop: 5179 failed_cluster_refill = false; 5180 failed_alloc = false; 5181 BUG_ON(index != get_block_group_index(block_group)); 5182 btrfs_put_block_group(block_group); 5183 } 5184 up_read(&space_info->groups_sem); 5185 5186 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES) 5187 goto search; 5188 5189 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait 5190 * for them to make caching progress. Also 5191 * determine the best possible bg to cache 5192 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking 5193 * caching kthreads as we move along 5194 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching 5195 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again 5196 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 5197 * again 5198 */ 5199 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && 5200 (found_uncached_bg || empty_size || empty_cluster || 5201 allowed_chunk_alloc)) { 5202 index = 0; 5203 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { 5204 found_uncached_bg = false; 5205 loop++; 5206 if (!ideal_cache_percent && 5207 atomic_read(&space_info->caching_threads)) 5208 goto search; 5209 5210 /* 5211 * 1 of the following 2 things has happened so far 5212 * 5213 * 1) We found an ideal block group for caching that 5214 * is mostly full and will cache quickly, so we might 5215 * as well wait for it.
5216 * 5217 * 2) We searched for cached only and we didn't find 5218 * anything, and we didn't start any caching kthreads 5219 * either, so chances are we will loop through and 5220 * start a couple caching kthreads, and then come back 5221 * around and just wait for them. This will be slower 5222 * because we will have 2 caching kthreads reading at 5223 * the same time when we could have just started one 5224 * and waited for it to get far enough to give us an 5225 * allocation, so go ahead and go to the wait caching 5226 * loop. 5227 */ 5228 loop = LOOP_CACHING_WAIT; 5229 search_start = ideal_cache_offset; 5230 ideal_cache_percent = 0; 5231 goto ideal_cache; 5232 } else if (loop == LOOP_FIND_IDEAL) { 5233 /* 5234 * Didn't find an uncached bg, wait on anything we find 5235 * next. 5236 */ 5237 loop = LOOP_CACHING_WAIT; 5238 goto search; 5239 } 5240 5241 if (loop < LOOP_CACHING_WAIT) { 5242 loop++; 5243 goto search; 5244 } 5245 5246 if (loop == LOOP_ALLOC_CHUNK) { 5247 empty_size = 0; 5248 empty_cluster = 0; 5249 } 5250 5251 if (allowed_chunk_alloc) { 5252 ret = do_chunk_alloc(trans, root, num_bytes + 5253 2 * 1024 * 1024, data, 1); 5254 allowed_chunk_alloc = 0; 5255 done_chunk_alloc = 1; 5256 } else if (!done_chunk_alloc) { 5257 space_info->force_alloc = 1; 5258 } 5259 5260 if (loop < LOOP_NO_EMPTY_SIZE) { 5261 loop++; 5262 goto search; 5263 } 5264 ret = -ENOSPC; 5265 } else if (!ins->objectid) { 5266 ret = -ENOSPC; 5267 } 5268 5269 /* we found what we needed */ 5270 if (ins->objectid) { 5271 if (!(data & BTRFS_BLOCK_GROUP_DATA)) 5272 trans->block_group = block_group->key.objectid; 5273 5274 btrfs_put_block_group(block_group); 5275 ret = 0; 5276 } 5277 5278 return ret; 5279 } 5280 5281 static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 5282 int dump_block_groups) 5283 { 5284 struct btrfs_block_group_cache *cache; 5285 int index = 0; 5286 5287 spin_lock(&info->lock); 5288 printk(KERN_INFO "space_info has %llu free, is %sfull\n", 5289 (unsigned long long)(info->total_bytes - info->bytes_used - 5290 info->bytes_pinned - info->bytes_reserved - 5291 info->bytes_readonly), 5292 (info->full) ?
"" : "not "); 5293 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, " 5294 "reserved=%llu, may_use=%llu, readonly=%llu\n", 5295 (unsigned long long)info->total_bytes, 5296 (unsigned long long)info->bytes_used, 5297 (unsigned long long)info->bytes_pinned, 5298 (unsigned long long)info->bytes_reserved, 5299 (unsigned long long)info->bytes_may_use, 5300 (unsigned long long)info->bytes_readonly); 5301 spin_unlock(&info->lock); 5302 5303 if (!dump_block_groups) 5304 return; 5305 5306 down_read(&info->groups_sem); 5307 again: 5308 list_for_each_entry(cache, &info->block_groups[index], list) { 5309 spin_lock(&cache->lock); 5310 printk(KERN_INFO "block group %llu has %llu bytes, %llu used " 5311 "%llu pinned %llu reserved\n", 5312 (unsigned long long)cache->key.objectid, 5313 (unsigned long long)cache->key.offset, 5314 (unsigned long long)btrfs_block_group_used(&cache->item), 5315 (unsigned long long)cache->pinned, 5316 (unsigned long long)cache->reserved); 5317 btrfs_dump_free_space(cache, bytes); 5318 spin_unlock(&cache->lock); 5319 } 5320 if (++index < BTRFS_NR_RAID_TYPES) 5321 goto again; 5322 up_read(&info->groups_sem); 5323 } 5324 5325 int btrfs_reserve_extent(struct btrfs_trans_handle *trans, 5326 struct btrfs_root *root, 5327 u64 num_bytes, u64 min_alloc_size, 5328 u64 empty_size, u64 hint_byte, 5329 u64 search_end, struct btrfs_key *ins, 5330 u64 data) 5331 { 5332 int ret; 5333 u64 search_start = 0; 5334 5335 data = btrfs_get_alloc_profile(root, data); 5336 again: 5337 /* 5338 * the only place that sets empty_size is btrfs_realloc_node, which 5339 * is not called recursively on allocations 5340 */ 5341 if (empty_size || root->ref_cows) 5342 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 5343 num_bytes + 2 * 1024 * 1024, data, 0); 5344 5345 WARN_ON(num_bytes < root->sectorsize); 5346 ret = find_free_extent(trans, root, num_bytes, empty_size, 5347 search_start, search_end, hint_byte, 5348 ins, data); 5349 5350 if (ret == -ENOSPC && num_bytes > min_alloc_size) { 5351 num_bytes = num_bytes >> 1; 5352 num_bytes = num_bytes & ~(root->sectorsize - 1); 5353 num_bytes = max(num_bytes, min_alloc_size); 5354 do_chunk_alloc(trans, root->fs_info->extent_root, 5355 num_bytes, data, 1); 5356 goto again; 5357 } 5358 if (ret == -ENOSPC) { 5359 struct btrfs_space_info *sinfo; 5360 5361 sinfo = __find_space_info(root->fs_info, data); 5362 printk(KERN_ERR "btrfs allocation failed flags %llu, " 5363 "wanted %llu\n", (unsigned long long)data, 5364 (unsigned long long)num_bytes); 5365 dump_space_info(sinfo, num_bytes, 1); 5366 } 5367 5368 return ret; 5369 } 5370 5371 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) 5372 { 5373 struct btrfs_block_group_cache *cache; 5374 int ret = 0; 5375 5376 cache = btrfs_lookup_block_group(root->fs_info, start); 5377 if (!cache) { 5378 printk(KERN_ERR "Unable to find block group for %llu\n", 5379 (unsigned long long)start); 5380 return -ENOSPC; 5381 } 5382 5383 ret = btrfs_discard_extent(root, start, len); 5384 5385 btrfs_add_free_space(cache, start, len); 5386 update_reserved_bytes(cache, len, 0, 1); 5387 btrfs_put_block_group(cache); 5388 5389 return ret; 5390 } 5391 5392 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 5393 struct btrfs_root *root, 5394 u64 parent, u64 root_objectid, 5395 u64 flags, u64 owner, u64 offset, 5396 struct btrfs_key *ins, int ref_mod) 5397 { 5398 int ret; 5399 struct btrfs_fs_info *fs_info = root->fs_info; 5400 struct btrfs_extent_item *extent_item; 5401 struct 
btrfs_extent_inline_ref *iref; 5402 struct btrfs_path *path; 5403 struct extent_buffer *leaf; 5404 int type; 5405 u32 size; 5406 5407 if (parent > 0) 5408 type = BTRFS_SHARED_DATA_REF_KEY; 5409 else 5410 type = BTRFS_EXTENT_DATA_REF_KEY; 5411 5412 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); 5413 5414 path = btrfs_alloc_path(); 5415 BUG_ON(!path); 5416 5417 path->leave_spinning = 1; 5418 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 5419 ins, size); 5420 BUG_ON(ret); 5421 5422 leaf = path->nodes[0]; 5423 extent_item = btrfs_item_ptr(leaf, path->slots[0], 5424 struct btrfs_extent_item); 5425 btrfs_set_extent_refs(leaf, extent_item, ref_mod); 5426 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 5427 btrfs_set_extent_flags(leaf, extent_item, 5428 flags | BTRFS_EXTENT_FLAG_DATA); 5429 5430 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); 5431 btrfs_set_extent_inline_ref_type(leaf, iref, type); 5432 if (parent > 0) { 5433 struct btrfs_shared_data_ref *ref; 5434 ref = (struct btrfs_shared_data_ref *)(iref + 1); 5435 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 5436 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); 5437 } else { 5438 struct btrfs_extent_data_ref *ref; 5439 ref = (struct btrfs_extent_data_ref *)(&iref->offset); 5440 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); 5441 btrfs_set_extent_data_ref_objectid(leaf, ref, owner); 5442 btrfs_set_extent_data_ref_offset(leaf, ref, offset); 5443 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); 5444 } 5445 5446 btrfs_mark_buffer_dirty(path->nodes[0]); 5447 btrfs_free_path(path); 5448 5449 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); 5450 if (ret) { 5451 printk(KERN_ERR "btrfs update block group failed for %llu " 5452 "%llu\n", (unsigned long long)ins->objectid, 5453 (unsigned long long)ins->offset); 5454 BUG(); 5455 } 5456 return ret; 5457 } 5458 5459 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, 5460 struct btrfs_root *root, 5461 u64 parent, u64 root_objectid, 5462 u64 flags, struct btrfs_disk_key *key, 5463 int level, struct btrfs_key *ins) 5464 { 5465 int ret; 5466 struct btrfs_fs_info *fs_info = root->fs_info; 5467 struct btrfs_extent_item *extent_item; 5468 struct btrfs_tree_block_info *block_info; 5469 struct btrfs_extent_inline_ref *iref; 5470 struct btrfs_path *path; 5471 struct extent_buffer *leaf; 5472 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref); 5473 5474 path = btrfs_alloc_path(); 5475 BUG_ON(!path); 5476 5477 path->leave_spinning = 1; 5478 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 5479 ins, size); 5480 BUG_ON(ret); 5481 5482 leaf = path->nodes[0]; 5483 extent_item = btrfs_item_ptr(leaf, path->slots[0], 5484 struct btrfs_extent_item); 5485 btrfs_set_extent_refs(leaf, extent_item, 1); 5486 btrfs_set_extent_generation(leaf, extent_item, trans->transid); 5487 btrfs_set_extent_flags(leaf, extent_item, 5488 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); 5489 block_info = (struct btrfs_tree_block_info *)(extent_item + 1); 5490 5491 btrfs_set_tree_block_key(leaf, block_info, key); 5492 btrfs_set_tree_block_level(leaf, block_info, level); 5493 5494 iref = (struct btrfs_extent_inline_ref *)(block_info + 1); 5495 if (parent > 0) { 5496 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); 5497 btrfs_set_extent_inline_ref_type(leaf, iref, 5498 BTRFS_SHARED_BLOCK_REF_KEY); 5499 btrfs_set_extent_inline_ref_offset(leaf, iref, parent); 5500 } else { 5501 
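		/* not shared: use a keyed backref that points at the owning tree root */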
btrfs_set_extent_inline_ref_type(leaf, iref, 5502 BTRFS_TREE_BLOCK_REF_KEY); 5503 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); 5504 } 5505 5506 btrfs_mark_buffer_dirty(leaf); 5507 btrfs_free_path(path); 5508 5509 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); 5510 if (ret) { 5511 printk(KERN_ERR "btrfs update block group failed for %llu " 5512 "%llu\n", (unsigned long long)ins->objectid, 5513 (unsigned long long)ins->offset); 5514 BUG(); 5515 } 5516 return ret; 5517 } 5518 5519 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 5520 struct btrfs_root *root, 5521 u64 root_objectid, u64 owner, 5522 u64 offset, struct btrfs_key *ins) 5523 { 5524 int ret; 5525 5526 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID); 5527 5528 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset, 5529 0, root_objectid, owner, offset, 5530 BTRFS_ADD_DELAYED_EXTENT, NULL); 5531 return ret; 5532 } 5533 5534 /* 5535 * this is used by the tree logging recovery code. It records that 5536 * an extent has been allocated and makes sure to clear the free 5537 * space cache bits as well 5538 */ 5539 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 5540 struct btrfs_root *root, 5541 u64 root_objectid, u64 owner, u64 offset, 5542 struct btrfs_key *ins) 5543 { 5544 int ret; 5545 struct btrfs_block_group_cache *block_group; 5546 struct btrfs_caching_control *caching_ctl; 5547 u64 start = ins->objectid; 5548 u64 num_bytes = ins->offset; 5549 5550 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); 5551 cache_block_group(block_group, trans, NULL, 0); 5552 caching_ctl = get_caching_control(block_group); 5553 5554 if (!caching_ctl) { 5555 BUG_ON(!block_group_cache_done(block_group)); 5556 ret = btrfs_remove_free_space(block_group, start, num_bytes); 5557 BUG_ON(ret); 5558 } else { 5559 mutex_lock(&caching_ctl->mutex); 5560 5561 if (start >= caching_ctl->progress) { 5562 ret = add_excluded_extent(root, start, num_bytes); 5563 BUG_ON(ret); 5564 } else if (start + num_bytes <= caching_ctl->progress) { 5565 ret = btrfs_remove_free_space(block_group, 5566 start, num_bytes); 5567 BUG_ON(ret); 5568 } else { 5569 num_bytes = caching_ctl->progress - start; 5570 ret = btrfs_remove_free_space(block_group, 5571 start, num_bytes); 5572 BUG_ON(ret); 5573 5574 start = caching_ctl->progress; 5575 num_bytes = ins->objectid + ins->offset - 5576 caching_ctl->progress; 5577 ret = add_excluded_extent(root, start, num_bytes); 5578 BUG_ON(ret); 5579 } 5580 5581 mutex_unlock(&caching_ctl->mutex); 5582 put_caching_control(caching_ctl); 5583 } 5584 5585 ret = update_reserved_bytes(block_group, ins->offset, 1, 1); 5586 BUG_ON(ret); 5587 btrfs_put_block_group(block_group); 5588 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 5589 0, owner, offset, ins, 1); 5590 return ret; 5591 } 5592 5593 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, 5594 struct btrfs_root *root, 5595 u64 bytenr, u32 blocksize, 5596 int level) 5597 { 5598 struct extent_buffer *buf; 5599 5600 buf = btrfs_find_create_tree_block(root, bytenr, blocksize); 5601 if (!buf) 5602 return ERR_PTR(-ENOMEM); 5603 btrfs_set_header_generation(buf, trans->transid); 5604 btrfs_set_buffer_lockdep_class(buf, level); 5605 btrfs_tree_lock(buf); 5606 clean_tree_block(trans, root, buf); 5607 5608 btrfs_set_lock_blocking(buf); 5609 btrfs_set_buffer_uptodate(buf); 5610 5611 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 5612 /* 5613 * we allow two 
log transactions at a time, use a different 5614 * EXTENT bit to differentiate dirty pages. 5615 */ 5616 if (root->log_transid % 2 == 0) 5617 set_extent_dirty(&root->dirty_log_pages, buf->start, 5618 buf->start + buf->len - 1, GFP_NOFS); 5619 else 5620 set_extent_new(&root->dirty_log_pages, buf->start, 5621 buf->start + buf->len - 1, GFP_NOFS); 5622 } else { 5623 set_extent_dirty(&trans->transaction->dirty_pages, buf->start, 5624 buf->start + buf->len - 1, GFP_NOFS); 5625 } 5626 trans->blocks_used++; 5627 /* this returns a buffer locked for blocking */ 5628 return buf; 5629 } 5630 5631 static struct btrfs_block_rsv * 5632 use_block_rsv(struct btrfs_trans_handle *trans, 5633 struct btrfs_root *root, u32 blocksize) 5634 { 5635 struct btrfs_block_rsv *block_rsv; 5636 int ret; 5637 5638 block_rsv = get_block_rsv(trans, root); 5639 5640 if (block_rsv->size == 0) { 5641 ret = reserve_metadata_bytes(trans, root, block_rsv, 5642 blocksize, 0); 5643 if (ret) 5644 return ERR_PTR(ret); 5645 return block_rsv; 5646 } 5647 5648 ret = block_rsv_use_bytes(block_rsv, blocksize); 5649 if (!ret) 5650 return block_rsv; 5651 5652 return ERR_PTR(-ENOSPC); 5653 } 5654 5655 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize) 5656 { 5657 block_rsv_add_bytes(block_rsv, blocksize, 0); 5658 block_rsv_release_bytes(block_rsv, NULL, 0); 5659 } 5660 5661 /* 5662 * finds a free extent and does all the dirty work required for allocation. 5663 * returns the key for the extent through ins, and a tree buffer for 5664 * the first block of the extent through buf. 5665 * 5666 * returns the tree buffer or NULL. 5667 */ 5668 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, 5669 struct btrfs_root *root, u32 blocksize, 5670 u64 parent, u64 root_objectid, 5671 struct btrfs_disk_key *key, int level, 5672 u64 hint, u64 empty_size) 5673 { 5674 struct btrfs_key ins; 5675 struct btrfs_block_rsv *block_rsv; 5676 struct extent_buffer *buf; 5677 u64 flags = 0; 5678 int ret; 5679 5680 5681 block_rsv = use_block_rsv(trans, root, blocksize); 5682 if (IS_ERR(block_rsv)) 5683 return ERR_CAST(block_rsv); 5684 5685 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize, 5686 empty_size, hint, (u64)-1, &ins, 0); 5687 if (ret) { 5688 unuse_block_rsv(block_rsv, blocksize); 5689 return ERR_PTR(ret); 5690 } 5691 5692 buf = btrfs_init_new_buffer(trans, root, ins.objectid, 5693 blocksize, level); 5694 BUG_ON(IS_ERR(buf)); 5695 5696 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 5697 if (parent == 0) 5698 parent = ins.objectid; 5699 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 5700 } else 5701 BUG_ON(parent > 0); 5702 5703 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 5704 struct btrfs_delayed_extent_op *extent_op; 5705 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); 5706 BUG_ON(!extent_op); 5707 if (key) 5708 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 5709 else 5710 memset(&extent_op->key, 0, sizeof(extent_op->key)); 5711 extent_op->flags_to_set = flags; 5712 extent_op->update_key = 1; 5713 extent_op->update_flags = 1; 5714 extent_op->is_data = 0; 5715 5716 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid, 5717 ins.offset, parent, root_objectid, 5718 level, BTRFS_ADD_DELAYED_EXTENT, 5719 extent_op); 5720 BUG_ON(ret); 5721 } 5722 return buf; 5723 } 5724 5725 struct walk_control { 5726 u64 refs[BTRFS_MAX_LEVEL]; 5727 u64 flags[BTRFS_MAX_LEVEL]; 5728 struct btrfs_key update_progress; 5729 int stage; 5730 int level; 5731 int shared_level; 5732 int update_ref; 5733 int keep_locks;
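	/* sliding readahead window used by reada_walk_down: reada_slot remembers where the last pass stopped, reada_count how many node pointers to prefetch next time */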
5734 int reada_slot; 5735 int reada_count; 5736 }; 5737 5738 #define DROP_REFERENCE 1 5739 #define UPDATE_BACKREF 2 5740 5741 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, 5742 struct btrfs_root *root, 5743 struct walk_control *wc, 5744 struct btrfs_path *path) 5745 { 5746 u64 bytenr; 5747 u64 generation; 5748 u64 refs; 5749 u64 flags; 5750 u32 nritems; 5751 u32 blocksize; 5752 struct btrfs_key key; 5753 struct extent_buffer *eb; 5754 int ret; 5755 int slot; 5756 int nread = 0; 5757 5758 if (path->slots[wc->level] < wc->reada_slot) { 5759 wc->reada_count = wc->reada_count * 2 / 3; 5760 wc->reada_count = max(wc->reada_count, 2); 5761 } else { 5762 wc->reada_count = wc->reada_count * 3 / 2; 5763 wc->reada_count = min_t(int, wc->reada_count, 5764 BTRFS_NODEPTRS_PER_BLOCK(root)); 5765 } 5766 5767 eb = path->nodes[wc->level]; 5768 nritems = btrfs_header_nritems(eb); 5769 blocksize = btrfs_level_size(root, wc->level - 1); 5770 5771 for (slot = path->slots[wc->level]; slot < nritems; slot++) { 5772 if (nread >= wc->reada_count) 5773 break; 5774 5775 cond_resched(); 5776 bytenr = btrfs_node_blockptr(eb, slot); 5777 generation = btrfs_node_ptr_generation(eb, slot); 5778 5779 if (slot == path->slots[wc->level]) 5780 goto reada; 5781 5782 if (wc->stage == UPDATE_BACKREF && 5783 generation <= root->root_key.offset) 5784 continue; 5785 5786 /* We don't lock the tree block, it's OK to be racy here */ 5787 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, 5788 &refs, &flags); 5789 BUG_ON(ret); 5790 BUG_ON(refs == 0); 5791 5792 if (wc->stage == DROP_REFERENCE) { 5793 if (refs == 1) 5794 goto reada; 5795 5796 if (wc->level == 1 && 5797 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5798 continue; 5799 if (!wc->update_ref || 5800 generation <= root->root_key.offset) 5801 continue; 5802 btrfs_node_key_to_cpu(eb, &key, slot); 5803 ret = btrfs_comp_cpu_keys(&key, 5804 &wc->update_progress); 5805 if (ret < 0) 5806 continue; 5807 } else { 5808 if (wc->level == 1 && 5809 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5810 continue; 5811 } 5812 reada: 5813 ret = readahead_tree_block(root, bytenr, blocksize, 5814 generation); 5815 if (ret) 5816 break; 5817 nread++; 5818 } 5819 wc->reada_slot = slot; 5820 } 5821 5822 /* 5823 * helper to process tree block while walking down the tree. 5824 * 5825 * when wc->stage == UPDATE_BACKREF, this function updates 5826 * back refs for pointers in the block. 5827 * 5828 * NOTE: return value 1 means we should stop walking down. 5829 */ 5830 static noinline int walk_down_proc(struct btrfs_trans_handle *trans, 5831 struct btrfs_root *root, 5832 struct btrfs_path *path, 5833 struct walk_control *wc, int lookup_info) 5834 { 5835 int level = wc->level; 5836 struct extent_buffer *eb = path->nodes[level]; 5837 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; 5838 int ret; 5839 5840 if (wc->stage == UPDATE_BACKREF && 5841 btrfs_header_owner(eb) != root->root_key.objectid) 5842 return 1; 5843 5844 /* 5845 * when reference count of tree block is 1, it won't increase 5846 * again. once full backref flag is set, we never clear it.
5847 */ 5848 if (lookup_info && 5849 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || 5850 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { 5851 BUG_ON(!path->locks[level]); 5852 ret = btrfs_lookup_extent_info(trans, root, 5853 eb->start, eb->len, 5854 &wc->refs[level], 5855 &wc->flags[level]); 5856 BUG_ON(ret); 5857 BUG_ON(wc->refs[level] == 0); 5858 } 5859 5860 if (wc->stage == DROP_REFERENCE) { 5861 if (wc->refs[level] > 1) 5862 return 1; 5863 5864 if (path->locks[level] && !wc->keep_locks) { 5865 btrfs_tree_unlock(eb); 5866 path->locks[level] = 0; 5867 } 5868 return 0; 5869 } 5870 5871 /* wc->stage == UPDATE_BACKREF */ 5872 if (!(wc->flags[level] & flag)) { 5873 BUG_ON(!path->locks[level]); 5874 ret = btrfs_inc_ref(trans, root, eb, 1); 5875 BUG_ON(ret); 5876 ret = btrfs_dec_ref(trans, root, eb, 0); 5877 BUG_ON(ret); 5878 ret = btrfs_set_disk_extent_flags(trans, root, eb->start, 5879 eb->len, flag, 0); 5880 BUG_ON(ret); 5881 wc->flags[level] |= flag; 5882 } 5883 5884 /* 5885 * the block is shared by multiple trees, so it's not good to 5886 * keep the tree lock 5887 */ 5888 if (path->locks[level] && level > 0) { 5889 btrfs_tree_unlock(eb); 5890 path->locks[level] = 0; 5891 } 5892 return 0; 5893 } 5894 5895 /* 5896 * helper to process a tree block pointer. 5897 * 5898 * when wc->stage == DROP_REFERENCE, this function checks 5899 * the reference count of the block pointed to. if the block 5900 * is shared and we need to update back refs for the subtree 5901 * rooted at the block, this function changes wc->stage to 5902 * UPDATE_BACKREF. if the block is shared and there is no 5903 * need to update back refs, this function drops the reference 5904 * to the block. 5905 * 5906 * NOTE: return value 1 means we should stop walking down. 5907 */ 5908 static noinline int do_walk_down(struct btrfs_trans_handle *trans, 5909 struct btrfs_root *root, 5910 struct btrfs_path *path, 5911 struct walk_control *wc, int *lookup_info) 5912 { 5913 u64 bytenr; 5914 u64 generation; 5915 u64 parent; 5916 u32 blocksize; 5917 struct btrfs_key key; 5918 struct extent_buffer *next; 5919 int level = wc->level; 5920 int reada = 0; 5921 int ret = 0; 5922 5923 generation = btrfs_node_ptr_generation(path->nodes[level], 5924 path->slots[level]); 5925 /* 5926 * if the lower level block was created before the snapshot 5927 * was created, we know there is no need to update back refs 5928 * for the subtree 5929 */ 5930 if (wc->stage == UPDATE_BACKREF && 5931 generation <= root->root_key.offset) { 5932 *lookup_info = 1; 5933 return 1; 5934 } 5935 5936 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); 5937 blocksize = btrfs_level_size(root, level - 1); 5938 5939 next = btrfs_find_tree_block(root, bytenr, blocksize); 5940 if (!next) { 5941 next = btrfs_find_create_tree_block(root, bytenr, blocksize); 5942 if (!next) 5943 return -ENOMEM; 5944 reada = 1; 5945 } 5946 btrfs_tree_lock(next); 5947 btrfs_set_lock_blocking(next); 5948 5949 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, 5950 &wc->refs[level - 1], 5951 &wc->flags[level - 1]); 5952 BUG_ON(ret); 5953 BUG_ON(wc->refs[level - 1] == 0); 5954 *lookup_info = 0; 5955 5956 if (wc->stage == DROP_REFERENCE) { 5957 if (wc->refs[level - 1] > 1) { 5958 if (level == 1 && 5959 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5960 goto skip; 5961 5962 if (!wc->update_ref || 5963 generation <= root->root_key.offset) 5964 goto skip; 5965 5966 btrfs_node_key_to_cpu(path->nodes[level], &key, 5967 path->slots[level]); 5968 ret =
btrfs_comp_cpu_keys(&key, &wc->update_progress); 5969 if (ret < 0) 5970 goto skip; 5971 5972 wc->stage = UPDATE_BACKREF; 5973 wc->shared_level = level - 1; 5974 } 5975 } else { 5976 if (level == 1 && 5977 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) 5978 goto skip; 5979 } 5980 5981 if (!btrfs_buffer_uptodate(next, generation)) { 5982 btrfs_tree_unlock(next); 5983 free_extent_buffer(next); 5984 next = NULL; 5985 *lookup_info = 1; 5986 } 5987 5988 if (!next) { 5989 if (reada && level == 1) 5990 reada_walk_down(trans, root, wc, path); 5991 next = read_tree_block(root, bytenr, blocksize, generation); 5992 btrfs_tree_lock(next); 5993 btrfs_set_lock_blocking(next); 5994 } 5995 5996 level--; 5997 BUG_ON(level != btrfs_header_level(next)); 5998 path->nodes[level] = next; 5999 path->slots[level] = 0; 6000 path->locks[level] = 1; 6001 wc->level = level; 6002 if (wc->level == 1) 6003 wc->reada_slot = 0; 6004 return 0; 6005 skip: 6006 wc->refs[level - 1] = 0; 6007 wc->flags[level - 1] = 0; 6008 if (wc->stage == DROP_REFERENCE) { 6009 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 6010 parent = path->nodes[level]->start; 6011 } else { 6012 BUG_ON(root->root_key.objectid != 6013 btrfs_header_owner(path->nodes[level])); 6014 parent = 0; 6015 } 6016 6017 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent, 6018 root->root_key.objectid, level - 1, 0); 6019 BUG_ON(ret); 6020 } 6021 btrfs_tree_unlock(next); 6022 free_extent_buffer(next); 6023 *lookup_info = 1; 6024 return 1; 6025 } 6026 6027 /* 6028 * helper to process a tree block while walking up the tree. 6029 * 6030 * when wc->stage == DROP_REFERENCE, this function drops 6031 * the reference count on the block. 6032 * 6033 * when wc->stage == UPDATE_BACKREF, this function changes 6034 * wc->stage back to DROP_REFERENCE if we changed wc->stage 6035 * to UPDATE_BACKREF previously while processing the block. 6036 * 6037 * NOTE: return value 1 means we should stop walking up. 6038 */ 6039 static noinline int walk_up_proc(struct btrfs_trans_handle *trans, 6040 struct btrfs_root *root, 6041 struct btrfs_path *path, 6042 struct walk_control *wc) 6043 { 6044 int ret; 6045 int level = wc->level; 6046 struct extent_buffer *eb = path->nodes[level]; 6047 u64 parent = 0; 6048 6049 if (wc->stage == UPDATE_BACKREF) { 6050 BUG_ON(wc->shared_level < level); 6051 if (level < wc->shared_level) 6052 goto out; 6053 6054 ret = find_next_key(path, level + 1, &wc->update_progress); 6055 if (ret > 0) 6056 wc->update_ref = 0; 6057 6058 wc->stage = DROP_REFERENCE; 6059 wc->shared_level = -1; 6060 path->slots[level] = 0; 6061 6062 /* 6063 * check reference count again if the block isn't locked. 6064 * we should start walking down the tree again if reference 6065 * count is one.
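 * otherwise another task may have dropped references while the lock was not held, so the block is re-locked and btrfs_lookup_extent_info() is called again below.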
6066 */ 6067 if (!path->locks[level]) { 6068 BUG_ON(level == 0); 6069 btrfs_tree_lock(eb); 6070 btrfs_set_lock_blocking(eb); 6071 path->locks[level] = 1; 6072 6073 ret = btrfs_lookup_extent_info(trans, root, 6074 eb->start, eb->len, 6075 &wc->refs[level], 6076 &wc->flags[level]); 6077 BUG_ON(ret); 6078 BUG_ON(wc->refs[level] == 0); 6079 if (wc->refs[level] == 1) { 6080 btrfs_tree_unlock(eb); 6081 path->locks[level] = 0; 6082 return 1; 6083 } 6084 } 6085 } 6086 6087 /* wc->stage == DROP_REFERENCE */ 6088 BUG_ON(wc->refs[level] > 1 && !path->locks[level]); 6089 6090 if (wc->refs[level] == 1) { 6091 if (level == 0) { 6092 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 6093 ret = btrfs_dec_ref(trans, root, eb, 1); 6094 else 6095 ret = btrfs_dec_ref(trans, root, eb, 0); 6096 BUG_ON(ret); 6097 } 6098 /* make block locked assertion in clean_tree_block happy */ 6099 if (!path->locks[level] && 6100 btrfs_header_generation(eb) == trans->transid) { 6101 btrfs_tree_lock(eb); 6102 btrfs_set_lock_blocking(eb); 6103 path->locks[level] = 1; 6104 } 6105 clean_tree_block(trans, root, eb); 6106 } 6107 6108 if (eb == root->node) { 6109 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 6110 parent = eb->start; 6111 else 6112 BUG_ON(root->root_key.objectid != 6113 btrfs_header_owner(eb)); 6114 } else { 6115 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) 6116 parent = path->nodes[level + 1]->start; 6117 else 6118 BUG_ON(root->root_key.objectid != 6119 btrfs_header_owner(path->nodes[level + 1])); 6120 } 6121 6122 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); 6123 out: 6124 wc->refs[level] = 0; 6125 wc->flags[level] = 0; 6126 return 0; 6127 } 6128 6129 static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 6130 struct btrfs_root *root, 6131 struct btrfs_path *path, 6132 struct walk_control *wc) 6133 { 6134 int level = wc->level; 6135 int lookup_info = 1; 6136 int ret; 6137 6138 while (level >= 0) { 6139 ret = walk_down_proc(trans, root, path, wc, lookup_info); 6140 if (ret > 0) 6141 break; 6142 6143 if (level == 0) 6144 break; 6145 6146 if (path->slots[level] >= 6147 btrfs_header_nritems(path->nodes[level])) 6148 break; 6149 6150 ret = do_walk_down(trans, root, path, wc, &lookup_info); 6151 if (ret > 0) { 6152 path->slots[level]++; 6153 continue; 6154 } else if (ret < 0) 6155 return ret; 6156 level = wc->level; 6157 } 6158 return 0; 6159 } 6160 6161 static noinline int walk_up_tree(struct btrfs_trans_handle *trans, 6162 struct btrfs_root *root, 6163 struct btrfs_path *path, 6164 struct walk_control *wc, int max_level) 6165 { 6166 int level = wc->level; 6167 int ret; 6168 6169 path->slots[level] = btrfs_header_nritems(path->nodes[level]); 6170 while (level < max_level && path->nodes[level]) { 6171 wc->level = level; 6172 if (path->slots[level] + 1 < 6173 btrfs_header_nritems(path->nodes[level])) { 6174 path->slots[level]++; 6175 return 0; 6176 } else { 6177 ret = walk_up_proc(trans, root, path, wc); 6178 if (ret > 0) 6179 return 0; 6180 6181 if (path->locks[level]) { 6182 btrfs_tree_unlock(path->nodes[level]); 6183 path->locks[level] = 0; 6184 } 6185 free_extent_buffer(path->nodes[level]); 6186 path->nodes[level] = NULL; 6187 level++; 6188 } 6189 } 6190 return 1; 6191 } 6192 6193 /* 6194 * drop a subvolume tree. 6195 * 6196 * this function traverses the tree freeing any blocks that are only 6197 * referenced by the tree. 6198 * 6199 * when a shared tree block is found, this function decreases its 6200 * reference count by one.
if update_ref is true, this function 6201 * also makes sure backrefs for the shared block and all lower level 6202 * blocks are properly updated. 6203 */ 6204 int btrfs_drop_snapshot(struct btrfs_root *root, 6205 struct btrfs_block_rsv *block_rsv, int update_ref) 6206 { 6207 struct btrfs_path *path; 6208 struct btrfs_trans_handle *trans; 6209 struct btrfs_root *tree_root = root->fs_info->tree_root; 6210 struct btrfs_root_item *root_item = &root->root_item; 6211 struct walk_control *wc; 6212 struct btrfs_key key; 6213 int err = 0; 6214 int ret; 6215 int level; 6216 6217 path = btrfs_alloc_path(); 6218 BUG_ON(!path); 6219 6220 wc = kzalloc(sizeof(*wc), GFP_NOFS); 6221 BUG_ON(!wc); 6222 6223 trans = btrfs_start_transaction(tree_root, 0); 6224 if (block_rsv) 6225 trans->block_rsv = block_rsv; 6226 6227 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 6228 level = btrfs_header_level(root->node); 6229 path->nodes[level] = btrfs_lock_root_node(root); 6230 btrfs_set_lock_blocking(path->nodes[level]); 6231 path->slots[level] = 0; 6232 path->locks[level] = 1; 6233 memset(&wc->update_progress, 0, 6234 sizeof(wc->update_progress)); 6235 } else { 6236 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); 6237 memcpy(&wc->update_progress, &key, 6238 sizeof(wc->update_progress)); 6239 6240 level = root_item->drop_level; 6241 BUG_ON(level == 0); 6242 path->lowest_level = level; 6243 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6244 path->lowest_level = 0; 6245 if (ret < 0) { 6246 err = ret; 6247 goto out; 6248 } 6249 WARN_ON(ret > 0); 6250 6251 /* 6252 * unlock our path, this is safe because only this 6253 * function is allowed to delete this snapshot 6254 */ 6255 btrfs_unlock_up_safe(path, 0); 6256 6257 level = btrfs_header_level(root->node); 6258 while (1) { 6259 btrfs_tree_lock(path->nodes[level]); 6260 btrfs_set_lock_blocking(path->nodes[level]); 6261 6262 ret = btrfs_lookup_extent_info(trans, root, 6263 path->nodes[level]->start, 6264 path->nodes[level]->len, 6265 &wc->refs[level], 6266 &wc->flags[level]); 6267 BUG_ON(ret); 6268 BUG_ON(wc->refs[level] == 0); 6269 6270 if (level == root_item->drop_level) 6271 break; 6272 6273 btrfs_tree_unlock(path->nodes[level]); 6274 WARN_ON(wc->refs[level] != 1); 6275 level--; 6276 } 6277 } 6278 6279 wc->level = level; 6280 wc->shared_level = -1; 6281 wc->stage = DROP_REFERENCE; 6282 wc->update_ref = update_ref; 6283 wc->keep_locks = 0; 6284 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); 6285 6286 while (1) { 6287 ret = walk_down_tree(trans, root, path, wc); 6288 if (ret < 0) { 6289 err = ret; 6290 break; 6291 } 6292 6293 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); 6294 if (ret < 0) { 6295 err = ret; 6296 break; 6297 } 6298 6299 if (ret > 0) { 6300 BUG_ON(wc->stage != DROP_REFERENCE); 6301 break; 6302 } 6303 6304 if (wc->stage == DROP_REFERENCE) { 6305 level = wc->level; 6306 btrfs_node_key(path->nodes[level], 6307 &root_item->drop_progress, 6308 path->slots[level]); 6309 root_item->drop_level = level; 6310 } 6311 6312 BUG_ON(wc->level == 0); 6313 if (btrfs_should_end_transaction(trans, tree_root)) { 6314 ret = btrfs_update_root(trans, tree_root, 6315 &root->root_key, 6316 root_item); 6317 BUG_ON(ret); 6318 6319 btrfs_end_transaction_throttle(trans, tree_root); 6320 trans = btrfs_start_transaction(tree_root, 0); 6321 if (block_rsv) 6322 trans->block_rsv = block_rsv; 6323 } 6324 } 6325 btrfs_release_path(root, path); 6326 BUG_ON(err); 6327 6328 ret = btrfs_del_root(trans, tree_root, &root->root_key); 6329 BUG_ON(ret); 6330
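	/* the root item is gone; for anything but a reloc tree, also drop the orphan item that recorded the half-deleted root */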
6331 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 6332 ret = btrfs_find_last_root(tree_root, root->root_key.objectid, 6333 NULL, NULL); 6334 BUG_ON(ret < 0); 6335 if (ret > 0) { 6336 /* if we fail to delete the orphan item this time 6337 * around, it'll get picked up the next time. 6338 * 6339 * The most common failure here is just -ENOENT. 6340 */ 6341 btrfs_del_orphan_item(trans, tree_root, 6342 root->root_key.objectid); 6343 } 6344 } 6345 6346 if (root->in_radix) { 6347 btrfs_free_fs_root(tree_root->fs_info, root); 6348 } else { 6349 free_extent_buffer(root->node); 6350 free_extent_buffer(root->commit_root); 6351 kfree(root); 6352 } 6353 out: 6354 btrfs_end_transaction_throttle(trans, tree_root); 6355 kfree(wc); 6356 btrfs_free_path(path); 6357 return err; 6358 } 6359 6360 /* 6361 * drop subtree rooted at tree block 'node'. 6362 * 6363 * NOTE: this function will unlock and release tree block 'node' 6364 */ 6365 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 6366 struct btrfs_root *root, 6367 struct extent_buffer *node, 6368 struct extent_buffer *parent) 6369 { 6370 struct btrfs_path *path; 6371 struct walk_control *wc; 6372 int level; 6373 int parent_level; 6374 int ret = 0; 6375 int wret; 6376 6377 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 6378 6379 path = btrfs_alloc_path(); 6380 BUG_ON(!path); 6381 6382 wc = kzalloc(sizeof(*wc), GFP_NOFS); 6383 BUG_ON(!wc); 6384 6385 btrfs_assert_tree_locked(parent); 6386 parent_level = btrfs_header_level(parent); 6387 extent_buffer_get(parent); 6388 path->nodes[parent_level] = parent; 6389 path->slots[parent_level] = btrfs_header_nritems(parent); 6390 6391 btrfs_assert_tree_locked(node); 6392 level = btrfs_header_level(node); 6393 path->nodes[level] = node; 6394 path->slots[level] = 0; 6395 path->locks[level] = 1; 6396 6397 wc->refs[parent_level] = 1; 6398 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; 6399 wc->level = level; 6400 wc->shared_level = -1; 6401 wc->stage = DROP_REFERENCE; 6402 wc->update_ref = 0; 6403 wc->keep_locks = 1; 6404 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); 6405 6406 while (1) { 6407 wret = walk_down_tree(trans, root, path, wc); 6408 if (wret < 0) { 6409 ret = wret; 6410 break; 6411 } 6412 6413 wret = walk_up_tree(trans, root, path, wc, parent_level); 6414 if (wret < 0) 6415 ret = wret; 6416 if (wret != 0) 6417 break; 6418 } 6419 6420 kfree(wc); 6421 btrfs_free_path(path); 6422 return ret; 6423 } 6424 6425 #if 0 6426 static unsigned long calc_ra(unsigned long start, unsigned long last, 6427 unsigned long nr) 6428 { 6429 return min(last, start + nr - 1); 6430 } 6431 6432 static noinline int relocate_inode_pages(struct inode *inode, u64 start, 6433 u64 len) 6434 { 6435 u64 page_start; 6436 u64 page_end; 6437 unsigned long first_index; 6438 unsigned long last_index; 6439 unsigned long i; 6440 struct page *page; 6441 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 6442 struct file_ra_state *ra; 6443 struct btrfs_ordered_extent *ordered; 6444 unsigned int total_read = 0; 6445 unsigned int total_dirty = 0; 6446 int ret = 0; 6447 6448 ra = kzalloc(sizeof(*ra), GFP_NOFS); 6449 6450 mutex_lock(&inode->i_mutex); 6451 first_index = start >> PAGE_CACHE_SHIFT; 6452 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; 6453 6454 /* make sure the dirty trick played by the caller works */ 6455 ret = invalidate_inode_pages2_range(inode->i_mapping, 6456 first_index, last_index); 6457 if (ret) 6458 goto out_unlock; 6459 6460 file_ra_state_init(ra, inode->i_mapping); 6461
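	/* read every page in the range, then redirty it as delalloc so the data is written out again at its new location */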
6462 for (i = first_index ; i <= last_index; i++) { 6463 if (total_read % ra->ra_pages == 0) { 6464 btrfs_force_ra(inode->i_mapping, ra, NULL, i, 6465 calc_ra(i, last_index, ra->ra_pages)); 6466 } 6467 total_read++; 6468 again: 6469 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode)) 6470 BUG_ON(1); 6471 page = grab_cache_page(inode->i_mapping, i); 6472 if (!page) { 6473 ret = -ENOMEM; 6474 goto out_unlock; 6475 } 6476 if (!PageUptodate(page)) { 6477 btrfs_readpage(NULL, page); 6478 lock_page(page); 6479 if (!PageUptodate(page)) { 6480 unlock_page(page); 6481 page_cache_release(page); 6482 ret = -EIO; 6483 goto out_unlock; 6484 } 6485 } 6486 wait_on_page_writeback(page); 6487 6488 page_start = (u64)page->index << PAGE_CACHE_SHIFT; 6489 page_end = page_start + PAGE_CACHE_SIZE - 1; 6490 lock_extent(io_tree, page_start, page_end, GFP_NOFS); 6491 6492 ordered = btrfs_lookup_ordered_extent(inode, page_start); 6493 if (ordered) { 6494 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 6495 unlock_page(page); 6496 page_cache_release(page); 6497 btrfs_start_ordered_extent(inode, ordered, 1); 6498 btrfs_put_ordered_extent(ordered); 6499 goto again; 6500 } 6501 set_page_extent_mapped(page); 6502 6503 if (i == first_index) 6504 set_extent_bits(io_tree, page_start, page_end, 6505 EXTENT_BOUNDARY, GFP_NOFS); 6506 btrfs_set_extent_delalloc(inode, page_start, page_end); 6507 6508 set_page_dirty(page); 6509 total_dirty++; 6510 6511 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 6512 unlock_page(page); 6513 page_cache_release(page); 6514 } 6515 6516 out_unlock: 6517 kfree(ra); 6518 mutex_unlock(&inode->i_mutex); 6519 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty); 6520 return ret; 6521 } 6522 6523 static noinline int relocate_data_extent(struct inode *reloc_inode, 6524 struct btrfs_key *extent_key, 6525 u64 offset) 6526 { 6527 struct btrfs_root *root = BTRFS_I(reloc_inode)->root; 6528 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree; 6529 struct extent_map *em; 6530 u64 start = extent_key->objectid - offset; 6531 u64 end = start + extent_key->offset - 1; 6532 6533 em = alloc_extent_map(GFP_NOFS); 6534 BUG_ON(!em || IS_ERR(em)); 6535 6536 em->start = start; 6537 em->len = extent_key->offset; 6538 em->block_len = extent_key->offset; 6539 em->block_start = extent_key->objectid; 6540 em->bdev = root->fs_info->fs_devices->latest_bdev; 6541 set_bit(EXTENT_FLAG_PINNED, &em->flags); 6542 6543 /* setup extent map to cheat btrfs_readpage */ 6544 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); 6545 while (1) { 6546 int ret; 6547 write_lock(&em_tree->lock); 6548 ret = add_extent_mapping(em_tree, em); 6549 write_unlock(&em_tree->lock); 6550 if (ret != -EEXIST) { 6551 free_extent_map(em); 6552 break; 6553 } 6554 btrfs_drop_extent_cache(reloc_inode, start, end, 0); 6555 } 6556 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); 6557 6558 return relocate_inode_pages(reloc_inode, start, extent_key->offset); 6559 } 6560 6561 struct btrfs_ref_path { 6562 u64 extent_start; 6563 u64 nodes[BTRFS_MAX_LEVEL]; 6564 u64 root_objectid; 6565 u64 root_generation; 6566 u64 owner_objectid; 6567 u32 num_refs; 6568 int lowest_level; 6569 int current_level; 6570 int shared_level; 6571 6572 struct btrfs_key node_keys[BTRFS_MAX_LEVEL]; 6573 u64 new_nodes[BTRFS_MAX_LEVEL]; 6574 }; 6575 6576 struct disk_extent { 6577 u64 ram_bytes; 6578 u64 disk_bytenr; 6579 u64 disk_num_bytes; 6580 u64 offset; 6581 u64 num_bytes; 6582 u8 compression; 6583 u8 
encryption; 6584 u16 other_encoding; 6585 }; 6586 6587 static int is_cowonly_root(u64 root_objectid) 6588 { 6589 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID || 6590 root_objectid == BTRFS_EXTENT_TREE_OBJECTID || 6591 root_objectid == BTRFS_CHUNK_TREE_OBJECTID || 6592 root_objectid == BTRFS_DEV_TREE_OBJECTID || 6593 root_objectid == BTRFS_TREE_LOG_OBJECTID || 6594 root_objectid == BTRFS_CSUM_TREE_OBJECTID) 6595 return 1; 6596 return 0; 6597 } 6598 6599 static noinline int __next_ref_path(struct btrfs_trans_handle *trans, 6600 struct btrfs_root *extent_root, 6601 struct btrfs_ref_path *ref_path, 6602 int first_time) 6603 { 6604 struct extent_buffer *leaf; 6605 struct btrfs_path *path; 6606 struct btrfs_extent_ref *ref; 6607 struct btrfs_key key; 6608 struct btrfs_key found_key; 6609 u64 bytenr; 6610 u32 nritems; 6611 int level; 6612 int ret = 1; 6613 6614 path = btrfs_alloc_path(); 6615 if (!path) 6616 return -ENOMEM; 6617 6618 if (first_time) { 6619 ref_path->lowest_level = -1; 6620 ref_path->current_level = -1; 6621 ref_path->shared_level = -1; 6622 goto walk_up; 6623 } 6624 walk_down: 6625 level = ref_path->current_level - 1; 6626 while (level >= -1) { 6627 u64 parent; 6628 if (level < ref_path->lowest_level) 6629 break; 6630 6631 if (level >= 0) 6632 bytenr = ref_path->nodes[level]; 6633 else 6634 bytenr = ref_path->extent_start; 6635 BUG_ON(bytenr == 0); 6636 6637 parent = ref_path->nodes[level + 1]; 6638 ref_path->nodes[level + 1] = 0; 6639 ref_path->current_level = level; 6640 BUG_ON(parent == 0); 6641 6642 key.objectid = bytenr; 6643 key.offset = parent + 1; 6644 key.type = BTRFS_EXTENT_REF_KEY; 6645 6646 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); 6647 if (ret < 0) 6648 goto out; 6649 BUG_ON(ret == 0); 6650 6651 leaf = path->nodes[0]; 6652 nritems = btrfs_header_nritems(leaf); 6653 if (path->slots[0] >= nritems) { 6654 ret = btrfs_next_leaf(extent_root, path); 6655 if (ret < 0) 6656 goto out; 6657 if (ret > 0) 6658 goto next; 6659 leaf = path->nodes[0]; 6660 } 6661 6662 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6663 if (found_key.objectid == bytenr && 6664 found_key.type == BTRFS_EXTENT_REF_KEY) { 6665 if (level < ref_path->shared_level) 6666 ref_path->shared_level = level; 6667 goto found; 6668 } 6669 next: 6670 level--; 6671 btrfs_release_path(extent_root, path); 6672 cond_resched(); 6673 } 6674 /* reached lowest level */ 6675 ret = 1; 6676 goto out; 6677 walk_up: 6678 level = ref_path->current_level; 6679 while (level < BTRFS_MAX_LEVEL - 1) { 6680 u64 ref_objectid; 6681 6682 if (level >= 0) 6683 bytenr = ref_path->nodes[level]; 6684 else 6685 bytenr = ref_path->extent_start; 6686 6687 BUG_ON(bytenr == 0); 6688 6689 key.objectid = bytenr; 6690 key.offset = 0; 6691 key.type = BTRFS_EXTENT_REF_KEY; 6692 6693 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); 6694 if (ret < 0) 6695 goto out; 6696 6697 leaf = path->nodes[0]; 6698 nritems = btrfs_header_nritems(leaf); 6699 if (path->slots[0] >= nritems) { 6700 ret = btrfs_next_leaf(extent_root, path); 6701 if (ret < 0) 6702 goto out; 6703 if (ret > 0) { 6704 /* the extent was freed by someone */ 6705 if (ref_path->lowest_level == level) 6706 goto out; 6707 btrfs_release_path(extent_root, path); 6708 goto walk_down; 6709 } 6710 leaf = path->nodes[0]; 6711 } 6712 6713 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6714 if (found_key.objectid != bytenr || 6715 found_key.type != BTRFS_EXTENT_REF_KEY) { 6716 /* the extent was freed by someone */ 6717 if (ref_path->lowest_level == 
level) { 6718 ret = 1; 6719 goto out; 6720 } 6721 btrfs_release_path(extent_root, path); 6722 goto walk_down; 6723 } 6724 found: 6725 ref = btrfs_item_ptr(leaf, path->slots[0], 6726 struct btrfs_extent_ref); 6727 ref_objectid = btrfs_ref_objectid(leaf, ref); 6728 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) { 6729 if (first_time) { 6730 level = (int)ref_objectid; 6731 BUG_ON(level >= BTRFS_MAX_LEVEL); 6732 ref_path->lowest_level = level; 6733 ref_path->current_level = level; 6734 ref_path->nodes[level] = bytenr; 6735 } else { 6736 WARN_ON(ref_objectid != level); 6737 } 6738 } else { 6739 WARN_ON(level != -1); 6740 } 6741 first_time = 0; 6742 6743 if (ref_path->lowest_level == level) { 6744 ref_path->owner_objectid = ref_objectid; 6745 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref); 6746 } 6747 6748 /* 6749 * the block is tree root or the block isn't in reference 6750 * counted tree. 6751 */ 6752 if (found_key.objectid == found_key.offset || 6753 is_cowonly_root(btrfs_ref_root(leaf, ref))) { 6754 ref_path->root_objectid = btrfs_ref_root(leaf, ref); 6755 ref_path->root_generation = 6756 btrfs_ref_generation(leaf, ref); 6757 if (level < 0) { 6758 /* special reference from the tree log */ 6759 ref_path->nodes[0] = found_key.offset; 6760 ref_path->current_level = 0; 6761 } 6762 ret = 0; 6763 goto out; 6764 } 6765 6766 level++; 6767 BUG_ON(ref_path->nodes[level] != 0); 6768 ref_path->nodes[level] = found_key.offset; 6769 ref_path->current_level = level; 6770 6771 /* 6772 * the reference was created in the running transaction, 6773 * no need to continue walking up. 6774 */ 6775 if (btrfs_ref_generation(leaf, ref) == trans->transid) { 6776 ref_path->root_objectid = btrfs_ref_root(leaf, ref); 6777 ref_path->root_generation = 6778 btrfs_ref_generation(leaf, ref); 6779 ret = 0; 6780 goto out; 6781 } 6782 6783 btrfs_release_path(extent_root, path); 6784 cond_resched(); 6785 } 6786 /* reached max tree level, but no tree root found. 
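 * every reference path is expected to end at a tree root, so getting here indicates a logic error.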
*/ 6787 BUG(); 6788 out: 6789 btrfs_free_path(path); 6790 return ret; 6791 } 6792 6793 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans, 6794 struct btrfs_root *extent_root, 6795 struct btrfs_ref_path *ref_path, 6796 u64 extent_start) 6797 { 6798 memset(ref_path, 0, sizeof(*ref_path)); 6799 ref_path->extent_start = extent_start; 6800 6801 return __next_ref_path(trans, extent_root, ref_path, 1); 6802 } 6803 6804 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans, 6805 struct btrfs_root *extent_root, 6806 struct btrfs_ref_path *ref_path) 6807 { 6808 return __next_ref_path(trans, extent_root, ref_path, 0); 6809 } 6810 6811 static noinline int get_new_locations(struct inode *reloc_inode, 6812 struct btrfs_key *extent_key, 6813 u64 offset, int no_fragment, 6814 struct disk_extent **extents, 6815 int *nr_extents) 6816 { 6817 struct btrfs_root *root = BTRFS_I(reloc_inode)->root; 6818 struct btrfs_path *path; 6819 struct btrfs_file_extent_item *fi; 6820 struct extent_buffer *leaf; 6821 struct disk_extent *exts = *extents; 6822 struct btrfs_key found_key; 6823 u64 cur_pos; 6824 u64 last_byte; 6825 u32 nritems; 6826 int nr = 0; 6827 int max = *nr_extents; 6828 int ret; 6829 6830 WARN_ON(!no_fragment && *extents); 6831 if (!exts) { 6832 max = 1; 6833 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS); 6834 if (!exts) 6835 return -ENOMEM; 6836 } 6837 6838 path = btrfs_alloc_path(); 6839 BUG_ON(!path); 6840 6841 cur_pos = extent_key->objectid - offset; 6842 last_byte = extent_key->objectid + extent_key->offset; 6843 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, 6844 cur_pos, 0); 6845 if (ret < 0) 6846 goto out; 6847 if (ret > 0) { 6848 ret = -ENOENT; 6849 goto out; 6850 } 6851 6852 while (1) { 6853 leaf = path->nodes[0]; 6854 nritems = btrfs_header_nritems(leaf); 6855 if (path->slots[0] >= nritems) { 6856 ret = btrfs_next_leaf(root, path); 6857 if (ret < 0) 6858 goto out; 6859 if (ret > 0) 6860 break; 6861 leaf = path->nodes[0]; 6862 } 6863 6864 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6865 if (found_key.offset != cur_pos || 6866 found_key.type != BTRFS_EXTENT_DATA_KEY || 6867 found_key.objectid != reloc_inode->i_ino) 6868 break; 6869 6870 fi = btrfs_item_ptr(leaf, path->slots[0], 6871 struct btrfs_file_extent_item); 6872 if (btrfs_file_extent_type(leaf, fi) != 6873 BTRFS_FILE_EXTENT_REG || 6874 btrfs_file_extent_disk_bytenr(leaf, fi) == 0) 6875 break; 6876 6877 if (nr == max) { 6878 struct disk_extent *old = exts; 6879 max *= 2; 6880 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); 6881 memcpy(exts, old, sizeof(*exts) * nr); 6882 if (old != *extents) 6883 kfree(old); 6884 } 6885 6886 exts[nr].disk_bytenr = 6887 btrfs_file_extent_disk_bytenr(leaf, fi); 6888 exts[nr].disk_num_bytes = 6889 btrfs_file_extent_disk_num_bytes(leaf, fi); 6890 exts[nr].offset = btrfs_file_extent_offset(leaf, fi); 6891 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi); 6892 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 6893 exts[nr].compression = btrfs_file_extent_compression(leaf, fi); 6894 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); 6895 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, 6896 fi); 6897 BUG_ON(exts[nr].offset > 0); 6898 BUG_ON(exts[nr].compression || exts[nr].encryption); 6899 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); 6900 6901 cur_pos += exts[nr].num_bytes; 6902 nr++; 6903 6904 if (cur_pos + offset >= last_byte) 6905 break; 6906 6907 if (no_fragment) { 6908 ret = 1; 6909 goto out; 
6910 } 6911 path->slots[0]++; 6912 } 6913 6914 BUG_ON(cur_pos + offset > last_byte); 6915 if (cur_pos + offset < last_byte) { 6916 ret = -ENOENT; 6917 goto out; 6918 } 6919 ret = 0; 6920 out: 6921 btrfs_free_path(path); 6922 if (ret) { 6923 if (exts != *extents) 6924 kfree(exts); 6925 } else { 6926 *extents = exts; 6927 *nr_extents = nr; 6928 } 6929 return ret; 6930 } 6931 6932 static noinline int replace_one_extent(struct btrfs_trans_handle *trans, 6933 struct btrfs_root *root, 6934 struct btrfs_path *path, 6935 struct btrfs_key *extent_key, 6936 struct btrfs_key *leaf_key, 6937 struct btrfs_ref_path *ref_path, 6938 struct disk_extent *new_extents, 6939 int nr_extents) 6940 { 6941 struct extent_buffer *leaf; 6942 struct btrfs_file_extent_item *fi; 6943 struct inode *inode = NULL; 6944 struct btrfs_key key; 6945 u64 lock_start = 0; 6946 u64 lock_end = 0; 6947 u64 num_bytes; 6948 u64 ext_offset; 6949 u64 search_end = (u64)-1; 6950 u32 nritems; 6951 int nr_scaned = 0; 6952 int extent_locked = 0; 6953 int extent_type; 6954 int ret; 6955 6956 memcpy(&key, leaf_key, sizeof(key)); 6957 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { 6958 if (key.objectid < ref_path->owner_objectid || 6959 (key.objectid == ref_path->owner_objectid && 6960 key.type < BTRFS_EXTENT_DATA_KEY)) { 6961 key.objectid = ref_path->owner_objectid; 6962 key.type = BTRFS_EXTENT_DATA_KEY; 6963 key.offset = 0; 6964 } 6965 } 6966 6967 while (1) { 6968 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 6969 if (ret < 0) 6970 goto out; 6971 6972 leaf = path->nodes[0]; 6973 nritems = btrfs_header_nritems(leaf); 6974 next: 6975 if (extent_locked && ret > 0) { 6976 /* 6977 * the file extent item was modified by someone 6978 * before the extent got locked. 6979 */ 6980 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, 6981 lock_end, GFP_NOFS); 6982 extent_locked = 0; 6983 } 6984 6985 if (path->slots[0] >= nritems) { 6986 if (++nr_scaned > 2) 6987 break; 6988 6989 BUG_ON(extent_locked); 6990 ret = btrfs_next_leaf(root, path); 6991 if (ret < 0) 6992 goto out; 6993 if (ret > 0) 6994 break; 6995 leaf = path->nodes[0]; 6996 nritems = btrfs_header_nritems(leaf); 6997 } 6998 6999 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 7000 7001 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { 7002 if ((key.objectid > ref_path->owner_objectid) || 7003 (key.objectid == ref_path->owner_objectid && 7004 key.type > BTRFS_EXTENT_DATA_KEY) || 7005 key.offset >= search_end) 7006 break; 7007 } 7008 7009 if (inode && key.objectid != inode->i_ino) { 7010 BUG_ON(extent_locked); 7011 btrfs_release_path(root, path); 7012 mutex_unlock(&inode->i_mutex); 7013 iput(inode); 7014 inode = NULL; 7015 continue; 7016 } 7017 7018 if (key.type != BTRFS_EXTENT_DATA_KEY) { 7019 path->slots[0]++; 7020 ret = 1; 7021 goto next; 7022 } 7023 fi = btrfs_item_ptr(leaf, path->slots[0], 7024 struct btrfs_file_extent_item); 7025 extent_type = btrfs_file_extent_type(leaf, fi); 7026 if ((extent_type != BTRFS_FILE_EXTENT_REG && 7027 extent_type != BTRFS_FILE_EXTENT_PREALLOC) || 7028 (btrfs_file_extent_disk_bytenr(leaf, fi) != 7029 extent_key->objectid)) { 7030 path->slots[0]++; 7031 ret = 1; 7032 goto next; 7033 } 7034 7035 num_bytes = btrfs_file_extent_num_bytes(leaf, fi); 7036 ext_offset = btrfs_file_extent_offset(leaf, fi); 7037 7038 if (search_end == (u64)-1) { 7039 search_end = key.offset - ext_offset + 7040 btrfs_file_extent_ram_bytes(leaf, fi); 7041 } 7042 7043 if (!extent_locked) { 7044 lock_start = key.offset; 7045 lock_end = lock_start + num_bytes 
- 1; 7046 } else { 7047 if (lock_start > key.offset || 7048 lock_end + 1 < key.offset + num_bytes) { 7049 unlock_extent(&BTRFS_I(inode)->io_tree, 7050 lock_start, lock_end, GFP_NOFS); 7051 extent_locked = 0; 7052 } 7053 } 7054 7055 if (!inode) { 7056 btrfs_release_path(root, path); 7057 7058 inode = btrfs_iget_locked(root->fs_info->sb, 7059 key.objectid, root); 7060 if (inode->i_state & I_NEW) { 7061 BTRFS_I(inode)->root = root; 7062 BTRFS_I(inode)->location.objectid = 7063 key.objectid; 7064 BTRFS_I(inode)->location.type = 7065 BTRFS_INODE_ITEM_KEY; 7066 BTRFS_I(inode)->location.offset = 0; 7067 btrfs_read_locked_inode(inode); 7068 unlock_new_inode(inode); 7069 } 7070 /* 7071 * some code calls btrfs_commit_transaction while 7072 * holding the i_mutex, so we can't use mutex_lock 7073 * here. 7074 */ 7075 if (is_bad_inode(inode) || 7076 !mutex_trylock(&inode->i_mutex)) { 7077 iput(inode); 7078 inode = NULL; 7079 key.offset = (u64)-1; 7080 goto skip; 7081 } 7082 } 7083 7084 if (!extent_locked) { 7085 struct btrfs_ordered_extent *ordered; 7086 7087 btrfs_release_path(root, path); 7088 7089 lock_extent(&BTRFS_I(inode)->io_tree, lock_start, 7090 lock_end, GFP_NOFS); 7091 ordered = btrfs_lookup_first_ordered_extent(inode, 7092 lock_end); 7093 if (ordered && 7094 ordered->file_offset <= lock_end && 7095 ordered->file_offset + ordered->len > lock_start) { 7096 unlock_extent(&BTRFS_I(inode)->io_tree, 7097 lock_start, lock_end, GFP_NOFS); 7098 btrfs_start_ordered_extent(inode, ordered, 1); 7099 btrfs_put_ordered_extent(ordered); 7100 key.offset += num_bytes; 7101 goto skip; 7102 } 7103 if (ordered) 7104 btrfs_put_ordered_extent(ordered); 7105 7106 extent_locked = 1; 7107 continue; 7108 } 7109 7110 if (nr_extents == 1) { 7111 /* update extent pointer in place */ 7112 btrfs_set_file_extent_disk_bytenr(leaf, fi, 7113 new_extents[0].disk_bytenr); 7114 btrfs_set_file_extent_disk_num_bytes(leaf, fi, 7115 new_extents[0].disk_num_bytes); 7116 btrfs_mark_buffer_dirty(leaf); 7117 7118 btrfs_drop_extent_cache(inode, key.offset, 7119 key.offset + num_bytes - 1, 0); 7120 7121 ret = btrfs_inc_extent_ref(trans, root, 7122 new_extents[0].disk_bytenr, 7123 new_extents[0].disk_num_bytes, 7124 leaf->start, 7125 root->root_key.objectid, 7126 trans->transid, 7127 key.objectid); 7128 BUG_ON(ret); 7129 7130 ret = btrfs_free_extent(trans, root, 7131 extent_key->objectid, 7132 extent_key->offset, 7133 leaf->start, 7134 btrfs_header_owner(leaf), 7135 btrfs_header_generation(leaf), 7136 key.objectid, 0); 7137 BUG_ON(ret); 7138 7139 btrfs_release_path(root, path); 7140 key.offset += num_bytes; 7141 } else { 7142 BUG_ON(1); 7143 #if 0 7144 u64 alloc_hint; 7145 u64 extent_len; 7146 int i; 7147 /* 7148 * drop the old extent pointer first, then insert the 7149 * new pointers one by one 7150 */ 7151 btrfs_release_path(root, path); 7152 ret = btrfs_drop_extents(trans, root, inode, key.offset, 7153 key.offset + num_bytes, 7154 key.offset, &alloc_hint); 7155 BUG_ON(ret); 7156 7157 for (i = 0; i < nr_extents; i++) { 7158 if (ext_offset >= new_extents[i].num_bytes) { 7159 ext_offset -= new_extents[i].num_bytes; 7160 continue; 7161 } 7162 extent_len = min(new_extents[i].num_bytes - 7163 ext_offset, num_bytes); 7164 7165 ret = btrfs_insert_empty_item(trans, root, 7166 path, &key, 7167 sizeof(*fi)); 7168 BUG_ON(ret); 7169 7170 leaf = path->nodes[0]; 7171 fi = btrfs_item_ptr(leaf, path->slots[0], 7172 struct btrfs_file_extent_item); 7173 btrfs_set_file_extent_generation(leaf, fi, 7174 trans->transid); 7175 btrfs_set_file_extent_type(leaf,
fi, 7176 BTRFS_FILE_EXTENT_REG); 7177 btrfs_set_file_extent_disk_bytenr(leaf, fi, 7178 new_extents[i].disk_bytenr); 7179 btrfs_set_file_extent_disk_num_bytes(leaf, fi, 7180 new_extents[i].disk_num_bytes); 7181 btrfs_set_file_extent_ram_bytes(leaf, fi, 7182 new_extents[i].ram_bytes); 7183 7184 btrfs_set_file_extent_compression(leaf, fi, 7185 new_extents[i].compression); 7186 btrfs_set_file_extent_encryption(leaf, fi, 7187 new_extents[i].encryption); 7188 btrfs_set_file_extent_other_encoding(leaf, fi, 7189 new_extents[i].other_encoding); 7190 7191 btrfs_set_file_extent_num_bytes(leaf, fi, 7192 extent_len); 7193 ext_offset += new_extents[i].offset; 7194 btrfs_set_file_extent_offset(leaf, fi, 7195 ext_offset); 7196 btrfs_mark_buffer_dirty(leaf); 7197 7198 btrfs_drop_extent_cache(inode, key.offset, 7199 key.offset + extent_len - 1, 0); 7200 7201 ret = btrfs_inc_extent_ref(trans, root, 7202 new_extents[i].disk_bytenr, 7203 new_extents[i].disk_num_bytes, 7204 leaf->start, 7205 root->root_key.objectid, 7206 trans->transid, key.objectid); 7207 BUG_ON(ret); 7208 btrfs_release_path(root, path); 7209 7210 inode_add_bytes(inode, extent_len); 7211 7212 ext_offset = 0; 7213 num_bytes -= extent_len; 7214 key.offset += extent_len; 7215 7216 if (num_bytes == 0) 7217 break; 7218 } 7219 BUG_ON(i >= nr_extents); 7220 #endif 7221 } 7222 7223 if (extent_locked) { 7224 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, 7225 lock_end, GFP_NOFS); 7226 extent_locked = 0; 7227 } 7228 skip: 7229 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS && 7230 key.offset >= search_end) 7231 break; 7232 7233 cond_resched(); 7234 } 7235 ret = 0; 7236 out: 7237 btrfs_release_path(root, path); 7238 if (inode) { 7239 mutex_unlock(&inode->i_mutex); 7240 if (extent_locked) { 7241 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, 7242 lock_end, GFP_NOFS); 7243 } 7244 iput(inode); 7245 } 7246 return ret; 7247 } 7248 7249 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans, 7250 struct btrfs_root *root, 7251 struct extent_buffer *buf, u64 orig_start) 7252 { 7253 int level; 7254 int ret; 7255 7256 BUG_ON(btrfs_header_generation(buf) != trans->transid); 7257 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 7258 7259 level = btrfs_header_level(buf); 7260 if (level == 0) { 7261 struct btrfs_leaf_ref *ref; 7262 struct btrfs_leaf_ref *orig_ref; 7263 7264 orig_ref = btrfs_lookup_leaf_ref(root, orig_start); 7265 if (!orig_ref) 7266 return -ENOENT; 7267 7268 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems); 7269 if (!ref) { 7270 btrfs_free_leaf_ref(root, orig_ref); 7271 return -ENOMEM; 7272 } 7273 7274 ref->nritems = orig_ref->nritems; 7275 memcpy(ref->extents, orig_ref->extents, 7276 sizeof(ref->extents[0]) * ref->nritems); 7277 7278 btrfs_free_leaf_ref(root, orig_ref); 7279 7280 ref->root_gen = trans->transid; 7281 ref->bytenr = buf->start; 7282 ref->owner = btrfs_header_owner(buf); 7283 ref->generation = btrfs_header_generation(buf); 7284 7285 ret = btrfs_add_leaf_ref(root, ref, 0); 7286 WARN_ON(ret); 7287 btrfs_free_leaf_ref(root, ref); 7288 } 7289 return 0; 7290 } 7291 7292 static noinline int invalidate_extent_cache(struct btrfs_root *root, 7293 struct extent_buffer *leaf, 7294 struct btrfs_block_group_cache *group, 7295 struct btrfs_root *target_root) 7296 { 7297 struct btrfs_key key; 7298 struct inode *inode = NULL; 7299 struct btrfs_file_extent_item *fi; 7300 struct extent_state *cached_state = NULL; 7301 u64 num_bytes; 7302 u64 skip_objectid = 0; 7303 u32 nritems; 7304 u32 i; 7305 7306 nritems = 
btrfs_header_nritems(leaf); 7307 for (i = 0; i < nritems; i++) { 7308 btrfs_item_key_to_cpu(leaf, &key, i); 7309 if (key.objectid == skip_objectid || 7310 key.type != BTRFS_EXTENT_DATA_KEY) 7311 continue; 7312 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 7313 if (btrfs_file_extent_type(leaf, fi) == 7314 BTRFS_FILE_EXTENT_INLINE) 7315 continue; 7316 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) 7317 continue; 7318 if (!inode || inode->i_ino != key.objectid) { 7319 iput(inode); 7320 inode = btrfs_ilookup(target_root->fs_info->sb, 7321 key.objectid, target_root, 1); 7322 } 7323 if (!inode) { 7324 skip_objectid = key.objectid; 7325 continue; 7326 } 7327 num_bytes = btrfs_file_extent_num_bytes(leaf, fi); 7328 7329 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset, 7330 key.offset + num_bytes - 1, 0, &cached_state, 7331 GFP_NOFS); 7332 btrfs_drop_extent_cache(inode, key.offset, 7333 key.offset + num_bytes - 1, 1); 7334 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset, 7335 key.offset + num_bytes - 1, &cached_state, 7336 GFP_NOFS); 7337 cond_resched(); 7338 } 7339 iput(inode); 7340 return 0; 7341 } 7342 7343 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, 7344 struct btrfs_root *root, 7345 struct extent_buffer *leaf, 7346 struct btrfs_block_group_cache *group, 7347 struct inode *reloc_inode) 7348 { 7349 struct btrfs_key key; 7350 struct btrfs_key extent_key; 7351 struct btrfs_file_extent_item *fi; 7352 struct btrfs_leaf_ref *ref; 7353 struct disk_extent *new_extent; 7354 u64 bytenr; 7355 u64 num_bytes; 7356 u32 nritems; 7357 u32 i; 7358 int ext_index; 7359 int nr_extent; 7360 int ret; 7361 7362 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); 7363 BUG_ON(!new_extent); 7364 7365 ref = btrfs_lookup_leaf_ref(root, leaf->start); 7366 BUG_ON(!ref); 7367 7368 ext_index = -1; 7369 nritems = btrfs_header_nritems(leaf); 7370 for (i = 0; i < nritems; i++) { 7371 btrfs_item_key_to_cpu(leaf, &key, i); 7372 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) 7373 continue; 7374 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); 7375 if (btrfs_file_extent_type(leaf, fi) == 7376 BTRFS_FILE_EXTENT_INLINE) 7377 continue; 7378 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 7379 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 7380 if (bytenr == 0) 7381 continue; 7382 7383 ext_index++; 7384 if (bytenr >= group->key.objectid + group->key.offset || 7385 bytenr + num_bytes <= group->key.objectid) 7386 continue; 7387 7388 extent_key.objectid = bytenr; 7389 extent_key.offset = num_bytes; 7390 extent_key.type = BTRFS_EXTENT_ITEM_KEY; 7391 nr_extent = 1; 7392 ret = get_new_locations(reloc_inode, &extent_key, 7393 group->key.objectid, 1, 7394 &new_extent, &nr_extent); 7395 if (ret > 0) 7396 continue; 7397 BUG_ON(ret < 0); 7398 7399 BUG_ON(ref->extents[ext_index].bytenr != bytenr); 7400 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes); 7401 ref->extents[ext_index].bytenr = new_extent->disk_bytenr; 7402 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; 7403 7404 btrfs_set_file_extent_disk_bytenr(leaf, fi, 7405 new_extent->disk_bytenr); 7406 btrfs_set_file_extent_disk_num_bytes(leaf, fi, 7407 new_extent->disk_num_bytes); 7408 btrfs_mark_buffer_dirty(leaf); 7409 7410 ret = btrfs_inc_extent_ref(trans, root, 7411 new_extent->disk_bytenr, 7412 new_extent->disk_num_bytes, 7413 leaf->start, 7414 root->root_key.objectid, 7415 trans->transid, key.objectid); 7416 BUG_ON(ret); 7417 7418 ret = btrfs_free_extent(trans, root, 7419 
bytenr, num_bytes, leaf->start, 7420 btrfs_header_owner(leaf), 7421 btrfs_header_generation(leaf), 7422 key.objectid, 0); 7423 BUG_ON(ret); 7424 cond_resched(); 7425 } 7426 kfree(new_extent); 7427 BUG_ON(ext_index + 1 != ref->nritems); 7428 btrfs_free_leaf_ref(root, ref); 7429 return 0; 7430 } 7431 7432 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans, 7433 struct btrfs_root *root) 7434 { 7435 struct btrfs_root *reloc_root; 7436 int ret; 7437 7438 if (root->reloc_root) { 7439 reloc_root = root->reloc_root; 7440 root->reloc_root = NULL; 7441 list_add(&reloc_root->dead_list, 7442 &root->fs_info->dead_reloc_roots); 7443 7444 btrfs_set_root_bytenr(&reloc_root->root_item, 7445 reloc_root->node->start); 7446 btrfs_set_root_level(&root->root_item, 7447 btrfs_header_level(reloc_root->node)); 7448 memset(&reloc_root->root_item.drop_progress, 0, 7449 sizeof(struct btrfs_disk_key)); 7450 reloc_root->root_item.drop_level = 0; 7451 7452 ret = btrfs_update_root(trans, root->fs_info->tree_root, 7453 &reloc_root->root_key, 7454 &reloc_root->root_item); 7455 BUG_ON(ret); 7456 } 7457 return 0; 7458 } 7459 7460 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) 7461 { 7462 struct btrfs_trans_handle *trans; 7463 struct btrfs_root *reloc_root; 7464 struct btrfs_root *prev_root = NULL; 7465 struct list_head dead_roots; 7466 int ret; 7467 unsigned long nr; 7468 7469 INIT_LIST_HEAD(&dead_roots); 7470 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots); 7471 7472 while (!list_empty(&dead_roots)) { 7473 reloc_root = list_entry(dead_roots.prev, 7474 struct btrfs_root, dead_list); 7475 list_del_init(&reloc_root->dead_list); 7476 7477 BUG_ON(reloc_root->commit_root != NULL); 7478 while (1) { 7479 trans = btrfs_join_transaction(root, 1); 7480 BUG_ON(!trans); 7481 7482 mutex_lock(&root->fs_info->drop_mutex); 7483 ret = btrfs_drop_snapshot(trans, reloc_root); 7484 if (ret != -EAGAIN) 7485 break; 7486 mutex_unlock(&root->fs_info->drop_mutex); 7487 7488 nr = trans->blocks_used; 7489 ret = btrfs_end_transaction(trans, root); 7490 BUG_ON(ret); 7491 btrfs_btree_balance_dirty(root, nr); 7492 } 7493 7494 free_extent_buffer(reloc_root->node); 7495 7496 ret = btrfs_del_root(trans, root->fs_info->tree_root, 7497 &reloc_root->root_key); 7498 BUG_ON(ret); 7499 mutex_unlock(&root->fs_info->drop_mutex); 7500 7501 nr = trans->blocks_used; 7502 ret = btrfs_end_transaction(trans, root); 7503 BUG_ON(ret); 7504 btrfs_btree_balance_dirty(root, nr); 7505 7506 kfree(prev_root); 7507 prev_root = reloc_root; 7508 } 7509 if (prev_root) { 7510 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0); 7511 kfree(prev_root); 7512 } 7513 return 0; 7514 } 7515 7516 int btrfs_add_dead_reloc_root(struct btrfs_root *root) 7517 { 7518 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots); 7519 return 0; 7520 } 7521 7522 int btrfs_cleanup_reloc_trees(struct btrfs_root *root) 7523 { 7524 struct btrfs_root *reloc_root; 7525 struct btrfs_trans_handle *trans; 7526 struct btrfs_key location; 7527 int found; 7528 int ret; 7529 7530 mutex_lock(&root->fs_info->tree_reloc_mutex); 7531 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL); 7532 BUG_ON(ret); 7533 found = !list_empty(&root->fs_info->dead_reloc_roots); 7534 mutex_unlock(&root->fs_info->tree_reloc_mutex); 7535 7536 if (found) { 7537 trans = btrfs_start_transaction(root, 1); 7538 BUG_ON(!trans); 7539 ret = btrfs_commit_transaction(trans, root); 7540 BUG_ON(ret); 7541 } 7542 7543 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID; 7544 location.offset = (u64)-1; 7545 
location.type = BTRFS_ROOT_ITEM_KEY; 7546 7547 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); 7548 BUG_ON(!reloc_root); 7549 btrfs_orphan_cleanup(reloc_root); 7550 return 0; 7551 } 7552 7553 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, 7554 struct btrfs_root *root) 7555 { 7556 struct btrfs_root *reloc_root; 7557 struct extent_buffer *eb; 7558 struct btrfs_root_item *root_item; 7559 struct btrfs_key root_key; 7560 int ret; 7561 7562 BUG_ON(!root->ref_cows); 7563 if (root->reloc_root) 7564 return 0; 7565 7566 root_item = kmalloc(sizeof(*root_item), GFP_NOFS); 7567 BUG_ON(!root_item); 7568 7569 ret = btrfs_copy_root(trans, root, root->commit_root, 7570 &eb, BTRFS_TREE_RELOC_OBJECTID); 7571 BUG_ON(ret); 7572 7573 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; 7574 root_key.offset = root->root_key.objectid; 7575 root_key.type = BTRFS_ROOT_ITEM_KEY; 7576 7577 memcpy(root_item, &root->root_item, sizeof(*root_item)); 7578 btrfs_set_root_refs(root_item, 0); 7579 btrfs_set_root_bytenr(root_item, eb->start); 7580 btrfs_set_root_level(root_item, btrfs_header_level(eb)); 7581 btrfs_set_root_generation(root_item, trans->transid); 7582 7583 btrfs_tree_unlock(eb); 7584 free_extent_buffer(eb); 7585 7586 ret = btrfs_insert_root(trans, root->fs_info->tree_root, 7587 &root_key, root_item); 7588 BUG_ON(ret); 7589 kfree(root_item); 7590 7591 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, 7592 &root_key); 7593 BUG_ON(!reloc_root); 7594 reloc_root->last_trans = trans->transid; 7595 reloc_root->commit_root = NULL; 7596 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; 7597 7598 root->reloc_root = reloc_root; 7599 return 0; 7600 } 7601 7602 /* 7603 * Core function of space balance. 7604 * 7605 * The idea is to use reloc trees to relocate tree blocks in reference 7606 * counted roots. There is one reloc tree for each subvol, and all 7607 * reloc trees share the same root key objectid. Reloc trees are snapshots 7608 * of the latest committed roots of subvols (root->commit_root). 7609 * 7610 * To relocate a tree block referenced by a subvol, there are two steps: 7611 * COW the block through the subvol's reloc tree, then update the block pointer 7612 * in the subvol to point to the new block. Since all reloc trees share 7613 * the same root key objectid, doing special handling for tree blocks owned 7614 * by them is easy. Once a tree block has been COWed in one reloc tree, 7615 * we can use the resulting new block directly when the same block is 7616 * required to COW again through other reloc trees. This way, relocated 7617 * tree blocks are shared between reloc trees, so they are also shared 7618 * between subvols.
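 *
 * relocate_one_extent() below drives this: on pass 0 data extents are copied to their new location, on pass 1 references are updated through the reloc trees via relocate_one_path(), and remaining references are handled by the replace_one_extent() fallback.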
7619 */ 7620 static noinline int relocate_one_path(struct btrfs_trans_handle *trans, 7621 struct btrfs_root *root, 7622 struct btrfs_path *path, 7623 struct btrfs_key *first_key, 7624 struct btrfs_ref_path *ref_path, 7625 struct btrfs_block_group_cache *group, 7626 struct inode *reloc_inode) 7627 { 7628 struct btrfs_root *reloc_root; 7629 struct extent_buffer *eb = NULL; 7630 struct btrfs_key *keys; 7631 u64 *nodes; 7632 int level; 7633 int shared_level; 7634 int lowest_level = 0; 7635 int ret; 7636 7637 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) 7638 lowest_level = ref_path->owner_objectid; 7639 7640 if (!root->ref_cows) { 7641 path->lowest_level = lowest_level; 7642 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1); 7643 BUG_ON(ret < 0); 7644 path->lowest_level = 0; 7645 btrfs_release_path(root, path); 7646 return 0; 7647 } 7648 7649 mutex_lock(&root->fs_info->tree_reloc_mutex); 7650 ret = init_reloc_tree(trans, root); 7651 BUG_ON(ret); 7652 reloc_root = root->reloc_root; 7653 7654 shared_level = ref_path->shared_level; 7655 ref_path->shared_level = BTRFS_MAX_LEVEL - 1; 7656 7657 keys = ref_path->node_keys; 7658 nodes = ref_path->new_nodes; 7659 memset(&keys[shared_level + 1], 0, 7660 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1)); 7661 memset(&nodes[shared_level + 1], 0, 7662 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1)); 7663 7664 if (nodes[lowest_level] == 0) { 7665 path->lowest_level = lowest_level; 7666 ret = btrfs_search_slot(trans, reloc_root, first_key, path, 7667 0, 1); 7668 BUG_ON(ret); 7669 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) { 7670 eb = path->nodes[level]; 7671 if (!eb || eb == reloc_root->node) 7672 break; 7673 nodes[level] = eb->start; 7674 if (level == 0) 7675 btrfs_item_key_to_cpu(eb, &keys[level], 0); 7676 else 7677 btrfs_node_key_to_cpu(eb, &keys[level], 0); 7678 } 7679 if (nodes[0] && 7680 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { 7681 eb = path->nodes[0]; 7682 ret = replace_extents_in_leaf(trans, reloc_root, eb, 7683 group, reloc_inode); 7684 BUG_ON(ret); 7685 } 7686 btrfs_release_path(reloc_root, path); 7687 } else { 7688 ret = btrfs_merge_path(trans, reloc_root, keys, nodes, 7689 lowest_level); 7690 BUG_ON(ret); 7691 } 7692 7693 /* 7694 * replace tree blocks in the fs tree with tree blocks in 7695 * the reloc tree. 
7696 */ 7697 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level); 7698 BUG_ON(ret < 0); 7699 7700 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { 7701 ret = btrfs_search_slot(trans, reloc_root, first_key, path, 7702 0, 0); 7703 BUG_ON(ret); 7704 extent_buffer_get(path->nodes[0]); 7705 eb = path->nodes[0]; 7706 btrfs_release_path(reloc_root, path); 7707 ret = invalidate_extent_cache(reloc_root, eb, group, root); 7708 BUG_ON(ret); 7709 free_extent_buffer(eb); 7710 } 7711 7712 mutex_unlock(&root->fs_info->tree_reloc_mutex); 7713 path->lowest_level = 0; 7714 return 0; 7715 } 7716 7717 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans, 7718 struct btrfs_root *root, 7719 struct btrfs_path *path, 7720 struct btrfs_key *first_key, 7721 struct btrfs_ref_path *ref_path) 7722 { 7723 int ret; 7724 7725 ret = relocate_one_path(trans, root, path, first_key, 7726 ref_path, NULL, NULL); 7727 BUG_ON(ret); 7728 7729 return 0; 7730 } 7731 7732 static noinline int del_extent_zero(struct btrfs_trans_handle *trans, 7733 struct btrfs_root *extent_root, 7734 struct btrfs_path *path, 7735 struct btrfs_key *extent_key) 7736 { 7737 int ret; 7738 7739 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1); 7740 if (ret) 7741 goto out; 7742 ret = btrfs_del_item(trans, extent_root, path); 7743 out: 7744 btrfs_release_path(extent_root, path); 7745 return ret; 7746 } 7747 7748 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info, 7749 struct btrfs_ref_path *ref_path) 7750 { 7751 struct btrfs_key root_key; 7752 7753 root_key.objectid = ref_path->root_objectid; 7754 root_key.type = BTRFS_ROOT_ITEM_KEY; 7755 if (is_cowonly_root(ref_path->root_objectid)) 7756 root_key.offset = 0; 7757 else 7758 root_key.offset = (u64)-1; 7759 7760 return btrfs_read_fs_root_no_name(fs_info, &root_key); 7761 } 7762 7763 static noinline int relocate_one_extent(struct btrfs_root *extent_root, 7764 struct btrfs_path *path, 7765 struct btrfs_key *extent_key, 7766 struct btrfs_block_group_cache *group, 7767 struct inode *reloc_inode, int pass) 7768 { 7769 struct btrfs_trans_handle *trans; 7770 struct btrfs_root *found_root; 7771 struct btrfs_ref_path *ref_path = NULL; 7772 struct disk_extent *new_extents = NULL; 7773 int nr_extents = 0; 7774 int loops; 7775 int ret; 7776 int level; 7777 struct btrfs_key first_key; 7778 u64 prev_block = 0; 7779 7780 7781 trans = btrfs_start_transaction(extent_root, 1); 7782 BUG_ON(!trans); 7783 7784 if (extent_key->objectid == 0) { 7785 ret = del_extent_zero(trans, extent_root, path, extent_key); 7786 goto out; 7787 } 7788 7789 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS); 7790 if (!ref_path) { 7791 ret = -ENOMEM; 7792 goto out; 7793 } 7794 7795 for (loops = 0; ; loops++) { 7796 if (loops == 0) { 7797 ret = btrfs_first_ref_path(trans, extent_root, ref_path, 7798 extent_key->objectid); 7799 } else { 7800 ret = btrfs_next_ref_path(trans, extent_root, ref_path); 7801 } 7802 if (ret < 0) 7803 goto out; 7804 if (ret > 0) 7805 break; 7806 7807 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID || 7808 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID) 7809 continue; 7810 7811 found_root = read_ref_root(extent_root->fs_info, ref_path); 7812 BUG_ON(!found_root); 7813 /* 7814 * for reference counted tree, only process reference paths 7815 * rooted at the latest committed root. 
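 * (a reloc tree is a snapshot of the latest committed root, see init_reloc_tree() above)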
7816 */ 7817 if (found_root->ref_cows && 7818 ref_path->root_generation != found_root->root_key.offset) 7819 continue; 7820 7821 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { 7822 if (pass == 0) { 7823 /* 7824 * copy data extents to new locations 7825 */ 7826 u64 group_start = group->key.objectid; 7827 ret = relocate_data_extent(reloc_inode, 7828 extent_key, 7829 group_start); 7830 if (ret < 0) 7831 goto out; 7832 break; 7833 } 7834 level = 0; 7835 } else { 7836 level = ref_path->owner_objectid; 7837 } 7838 7839 if (prev_block != ref_path->nodes[level]) { 7840 struct extent_buffer *eb; 7841 u64 block_start = ref_path->nodes[level]; 7842 u64 block_size = btrfs_level_size(found_root, level); 7843 7844 eb = read_tree_block(found_root, block_start, 7845 block_size, 0); 7846 btrfs_tree_lock(eb); 7847 BUG_ON(level != btrfs_header_level(eb)); 7848 7849 if (level == 0) 7850 btrfs_item_key_to_cpu(eb, &first_key, 0); 7851 else 7852 btrfs_node_key_to_cpu(eb, &first_key, 0); 7853 7854 btrfs_tree_unlock(eb); 7855 free_extent_buffer(eb); 7856 prev_block = block_start; 7857 } 7858 7859 mutex_lock(&extent_root->fs_info->trans_mutex); 7860 btrfs_record_root_in_trans(found_root); 7861 mutex_unlock(&extent_root->fs_info->trans_mutex); 7862 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { 7863 /* 7864 * try to update data extent references while 7865 * keeping metadata shared between snapshots. 7866 */ 7867 if (pass == 1) { 7868 ret = relocate_one_path(trans, found_root, 7869 path, &first_key, ref_path, 7870 group, reloc_inode); 7871 if (ret < 0) 7872 goto out; 7873 continue; 7874 } 7875 /* 7876 * use fallback method to process the remaining 7877 * references. 7878 */ 7879 if (!new_extents) { 7880 u64 group_start = group->key.objectid; 7881 new_extents = kmalloc(sizeof(*new_extents), 7882 GFP_NOFS); 7883 nr_extents = 1; 7884 ret = get_new_locations(reloc_inode, 7885 extent_key, 7886 group_start, 1, 7887 &new_extents, 7888 &nr_extents); 7889 if (ret) 7890 goto out; 7891 } 7892 ret = replace_one_extent(trans, found_root, 7893 path, extent_key, 7894 &first_key, ref_path, 7895 new_extents, nr_extents); 7896 } else { 7897 ret = relocate_tree_block(trans, found_root, path, 7898 &first_key, ref_path); 7899 } 7900 if (ret < 0) 7901 goto out; 7902 } 7903 ret = 0; 7904 out: 7905 btrfs_end_transaction(trans, extent_root); 7906 kfree(new_extents); 7907 kfree(ref_path); 7908 return ret; 7909 } 7910 #endif 7911 7912 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) 7913 { 7914 u64 num_devices; 7915 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 | 7916 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10; 7917 7918 /* 7919 * we add in the count of missing devices because we want 7920 * to make sure that any RAID levels on a degraded FS 7921 * continue to be honored. 
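 *
 * e.g. a two device RAID1 filesystem with one device missing still counts num_devices == 2 here, so the RAID1 profile is kept rather than being reduced to DUP by the single device case below.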
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
}

static int set_block_group_ro(struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	if (cache->ro)
		return 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly +
	    cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		sinfo->bytes_reserved += cache->reserved_pinned;
		cache->reserved_pinned = 0;
		cache->ro = 1;
		ret = 0;
	}

	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}

int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(IS_ERR(trans));

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags)
		do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);

	ret = set_block_group_ro(cache);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}
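
/*
 * Usage sketch (hypothetical caller; the real callers live in the
 * relocation and device-removal paths): flip the group read-only so
 * no new allocations land in it, move its extents, then drop the
 * group or flip it back to read-write on failure:
 *
 *	ret = btrfs_set_block_group_ro(root, cache);
 *	if (ret)
 *		return ret;
 *	ret = relocate_extents_in_group(cache);    (placeholder step)
 *	if (ret)
 *		btrfs_set_block_group_rw(root, cache);
 */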

/*
 * helper to account the unused space of all the readonly block groups
 * in the list. takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	list_for_each_entry(block_group, groups_list, list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			      factor;

		spin_unlock(&block_group->lock);
	}

	return free_bytes;
}

/*
 * helper to account the unused space of all the readonly block groups
 * in the space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	int i;
	u64 free_bytes = 0;

	spin_lock(&sinfo->lock);

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		if (!list_empty(&sinfo->block_groups[i]))
			free_bytes += __btrfs_get_ro_block_group_free_space(
						&sinfo->block_groups[i]);

	spin_unlock(&sinfo->lock);

	return free_bytes;
}
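
/*
 * Worked example (illustrative numbers): a 1GiB RAID1 block group with
 * 600MiB used has 400MiB of unused logical space; since each byte is
 * written twice, the helpers above count (1GiB - 600MiB) * 2 = 800MiB
 * of raw disk space as reclaimable.
 */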

int btrfs_set_block_group_rw(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return 0;
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group,
 * 0 if it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	/* no bytes used, we're good */
	if (!btrfs_block_group_used(&block_group->item))
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good.
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     btrfs_block_group_used(&block_group->item) <
	     space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  However, if we
	 * were marked as full, then we know there aren't enough chunks, and we
	 * can just return.
	 */
	ret = -1;
	if (full)
		goto out;

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 min_free = btrfs_block_group_used(&block_group->item);
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free) {
			ret = find_free_dev_extent(NULL, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				break;
			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
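
/*
 * Worked example for the space check in btrfs_can_relocate()
 * (illustrative numbers): with total_bytes = 10GiB, 6GiB already
 * accounted as used + reserved + pinned + readonly, and a candidate
 * block group holding 3GiB of used extents, 6GiB + 3GiB < 10GiB, so
 * the extents fit into the remaining groups and relocation can
 * proceed without allocating a new chunk.
 */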

static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/*
	 * now that all the block groups are freed, go through and free all
	 * the space_info structs.  This is only called during the final
	 * stages of unmount, and so we know nobody is using them.  We call
	 * synchronize_rcu() once before we start, just to be on the safe
	 * side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (space_info->bytes_pinned > 0 ||
		    space_info->bytes_reserved > 0) {
			WARN_ON(1);
			dump_space_info(space_info, 0, 0);
		}
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
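
/*
 * __link_block_group() files a group on its space_info's per-profile
 * list.  The index comes from get_block_group_index() earlier in this
 * file, which (at the time of writing) maps raid10, raid1, dup, raid0
 * and single to indexes 0 through 4; the bare indexes used near the
 * end of btrfs_read_block_groups() below depend on that ordering.
 */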
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}

int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
	if (cache_gen != 0 &&
	    btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;
	if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
		printk(KERN_INFO "btrfs: disk space caching is enabled\n");

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		spin_lock_init(&cache->tree_lock);
		cache->fs_info = info;
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		if (need_clear)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		/*
		 * we only want to have 32k of ram per block group for keeping
		 * track of free space, and if we pass 1/2 of that we want to
		 * start converting things over to using bitmaps
		 */
		cache->extents_thresh = ((1024 * 32) / 2) /
					sizeof(struct btrfs_free_space);

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			exclude_super_stripes(root, cache);
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			exclude_super_stripes(root, cache);
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
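		/*
		 * indexes 3 and 4 are the raid0 and single lists (see
		 * get_block_group_index()); those profiles keep only a
		 * single copy, so force their groups read-only.
		 */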
		list_for_each_entry(cache, &space_info->block_groups[3], list)
			set_block_group_ro(cache);
		list_for_each_entry(cache, &space_info->block_groups[4], list)
			set_block_group_ro(cache);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;

	/*
	 * we only want to have 32k of ram per block group for keeping track
	 * of free space, and if we pass 1/2 of that we want to start
	 * converting things over to using bitmaps
	 */
	cache->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	exclude_super_stripes(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
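
/*
 * Removing a block group: the caller is expected to have already made
 * the group read-only and relocated its extents.  We detach the group
 * from any allocation clusters, drop its free space cache inode and
 * item, unlink it from the in-memory cache tree and its space_info,
 * fix up the space accounting, and finally delete the block group
 * item from the extent tree.
 */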
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	inode = lookup_free_space_inode(root, block_group, path);
	if (!IS_ERR(inode)) {
		btrfs_orphan_add(trans, inode);
		clear_nlink(inode);
		/* One for the block group's ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(tree_root, path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(tree_root, path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);

	/* once for our lookup ref, once for the block group cache's ref */
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes);
}