/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"

#define BTRFS_ROOT_TRANS_TAG 0

static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
loop:
	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		goto loop;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
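	 * That second reference is dropped via put_transaction() at the
	 * end of btrfs_commit_transaction().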
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	cur_trans->delayed_refs.seq = 1;
	init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);
	INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take
		 * the lock.
		 * smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      (u64)h, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_block_rsv *rsv = trans->block_rsv;
	int updates;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	/*
	 * We need to do this in case we're deleting csums so the global block
	 * rsv gets used instead of the csum block rsv.
	 */
	trans->block_rsv = NULL;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	trans->block_rsv = rsv;

	return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle,
				   int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, GFP_NOFS);
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT)) {
		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT,
				  GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.
 * This is used to make sure all of those extents are on disk for
 * transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					&trans->transaction->dirty_pages,
					EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.
 * This adds the given root to the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs tree roots that were changed in this transaction
 * and write them back to the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.
 * This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	rsv = trans->block_rsv;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
						  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	trans->block_rsv = rsv;
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
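 *
 * The caller's handle is ended here and the commit itself is driven from a
 * delayed work item (do_async_commit below).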
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}

/*
 * btrfs_transaction state sequence:
 * in_commit = 0, blocked = 0  (initial)
 * in_commit = 1, blocked = 1
 * blocked = 0
 * commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
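	 * Once delayed_refs.flushing is set, btrfs_should_end_transaction()
	 * returns 1, so the other writers wrap up and release their handles.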
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		ret = btrfs_run_delayed_items(trans, root);
		BUG_ON(ret);

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
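	 * Setting trans_no_join below keeps join_transaction() from attaching
	 * new handles to this transaction until the commit finishes.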
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, so it is now
	 * safe to free the root of the tree of log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

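	/*
	 * the commit is done; take this transaction off the fs_info list and
	 * drop this handle's reference along with the one taken when the
	 * transaction was created
	 */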
	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1, 0);
	}
	return 0;
}