/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"

#define BTRFS_ROOT_TRANS_TAG 0

static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
loop:
	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		goto loop;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.
		 * smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}
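
/*
 * Illustrative caller pattern for the transaction handle API above.  This is
 * a sketch only, not lifted from a real caller: the helper name is
 * hypothetical and error handling is trimmed.  btrfs_join_transaction() works
 * the same way but reserves no metadata space and does not wait for a blocked
 * commit.
 */
#if 0	/* usage sketch, never compiled */
static int example_update_under_transaction(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	/* reserve space for one item's worth of metadata changes */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify tree items here while holding the handle ... */

	return btrfs_end_transaction(trans, root);
}
#endif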

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_block_rsv *rsv = trans->block_rsv;
	int updates;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	/*
	 * We need to do this in case we're deleting csums so the global block
	 * rsv gets used instead of the csum block rsv.
	 */
	trans->block_rsv = NULL;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	trans->block_rsv = rsv;

	return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
				   GFP_NOFS);
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT)) {
		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.
 * This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.
 * This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.
 * This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	rsv = trans->block_rsv;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
						  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
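	/*
	 * the root item for the snapshot is in the tree of tree roots now,
	 * so the new root can be looked up like any other fs root
	 */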
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	trans->block_rsv = rsv;
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}

/*
 * btrfs_transaction state sequence:
 * in_commit = 0, blocked = 0  (initial)
 * in_commit = 1, blocked = 1
 * blocked = 0
 * commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		ret = btrfs_run_delayed_items(trans, root);
		BUG_ON(ret);

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);
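
	/*
	 * the commit is done; drop the transaction from the trans_list and
	 * release both references taken in join_transaction()
	 */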
	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1);
	}
	return 0;
}