/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"

#define BTRFS_ROOT_TRANS_TAG 0

static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
loop:
	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		goto loop;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	cur_trans->delayed_refs.seq = 1;
	init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);
	INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.
		 * smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}
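
/*
 * Summary of how the transaction types below affect start_transaction()
 * (none of them wait while log replay is running):
 *
 * TRANS_START         reserves metadata space for num_items items and waits
 *                     for a blocked transaction, unless an ioctl started
 *                     transaction is open
 * TRANS_JOIN          hops into the running transaction without reserving
 *                     space and without waiting
 * TRANS_JOIN_NOLOCK   like TRANS_JOIN, but also ignores trans_no_join
 * TRANS_USERSPACE     used for ioctl started transactions, always waits for
 *                     a blocked transaction to unblock first
 */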
enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      (u64)(unsigned long)h,
					      num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}
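
/*
 * A minimal sketch of the usual caller pattern (illustrative only, not
 * lifted from any particular caller): reserve space for the items you
 * intend to touch, make the modifications, then end the handle so the
 * transaction is free to commit.
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	...modify the trees...
 *	btrfs_end_transaction(trans, root);
 */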

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_block_rsv *rsv = trans->block_rsv;
	int updates;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	/*
	 * We need to do this in case we're deleting csums so the global block
	 * rsv gets used instead of the csum block rsv.
	 */
	trans->block_rsv = NULL;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	trans->block_rsv = rsv;

	return should_end_transaction(trans, root);
}
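
/*
 * Callers that do a lot of work under one handle typically poll
 * btrfs_should_end_transaction() and, when it returns 1, drop the handle
 * and start a fresh one so a pending commit can make progress.  A rough,
 * illustrative sketch (error handling omitted):
 *
 *	if (btrfs_should_end_transaction(trans, root)) {
 *		btrfs_end_transaction(trans, root);
 *		trans = btrfs_start_transaction(root, 1);
 *	}
 */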

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here and end up
			 * having to call end_transaction on ourselves again,
			 * so inc our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
				   GFP_NOFS);
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT)) {
		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
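
/*
 * Note how the two helpers above cooperate: btrfs_write_marked_extents()
 * converts the @mark bit to EXTENT_NEED_WAIT as it starts writeback, and
 * btrfs_wait_marked_extents() then waits on (and clears) exactly those
 * EXTENT_NEED_WAIT ranges, so we only ever wait on ranges that actually had
 * writeback started on them.
 */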

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}
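
/*
 * Note that update_cowonly_root() deliberately skips switch_commit_root()
 * for the extent root; commit_cowonly_roots() switches it last, under
 * extent_commit_sem, once all the other cow-only roots have been updated.
 */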

/*
 * dead roots are old snapshots that need to be deleted.  This adds the
 * given root to the list of dead roots that need to be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.
 * This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	rsv = trans->block_rsv;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
						  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	if (ret) {
		pending->error = -EEXIST;
		dput(parent);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	trans->block_rsv = rsv;
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
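
/*
 * Note that a successful btrfs_commit_transaction_async() call ends @trans
 * on the caller's behalf; the actual commit runs from a worker on a freshly
 * joined handle.
 */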
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
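/*
 * Each step in that sequence has a matching waiter elsewhere in this file:
 * wait_current_trans_commit_start() waits on transaction_blocked_wait for
 * in_commit, wait_current_trans() waits on transaction_wait for blocked to
 * drop back to zero, and wait_for_commit() waits on commit_wait for
 * commit_done.
 */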
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		ret = btrfs_run_delayed_items(trans, root);
		BUG_ON(ret);

		/*
		 * rename doesn't use btrfs_join_transaction, so once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_fs_roots() and commit_cowonly_roots() below are
	 * responsible for getting the various roots consistent with each
	 * other.  Every pointer in the tree of tree roots has to point
	 * to the most up to date root for every subvolume and other tree.
	 * So, we have to keep the tree logging code from jumping in and
	 * changing any of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we unblock the transaction
	 * and let new writers in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);
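
	/*
	 * we're done: take the transaction off the global list and drop the
	 * two references from join_transaction() (see the use_count comment
	 * there) -- one for this handle and one that kept the transaction
	 * alive until the commit finished
	 */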
	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1, 0);
	}
	return 0;
}