// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/timekeeping.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "dir-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"

static struct kmem_cache *btrfs_trans_handle_cachep;

/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling all
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_PREP]]
 * |
 * | If there are simultaneous calls to btrfs_commit_transaction(), one will
 * | win the race and the rest will wait for the winner to commit the
 * | transaction.
 * |
 * | The winner will wait for the previous running transaction to completely
 * | finish if there is one.
 * |
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for current transaction to be committed by others.
 * |   Other btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying extent tree) to creating pending
 * | snapshots, running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |                                                Transaction N+1
 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update     |
 * | super blocks.                                  |
 * |                                                |
 * | At this stage, new transaction is allowed to   |
 * | start.                                         |
 * | All new start_transaction() calls will be      |
 * | attached to transid N+1.                       |
 * |                                                |
 * | To next stage:                                 |
 * |  Until all tree blocks and super blocks are    |
 * |  written to block devices                      |
 * V                                                |
 * Transaction N [[TRANS_STATE_COMPLETED]]          V
 * All tree blocks and super blocks are written.    Transaction N+1
 * This transaction is finished and all its         [[TRANS_STATE_COMMIT_START]]
 * data structures will be cleaned up.              | Life goes on
 */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_PREP]	= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_SUPER_COMMITTED]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};
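/*
 * Illustrative note (a sketch of how the table is used, not new logic):
 * join_transaction() consults this table as
 *
 *	btrfs_blocked_trans_types[cur_trans->state] & type
 *
 * and returns -EBUSY on a match. So once a commit reaches
 * TRANS_STATE_COMMIT_START, a plain btrfs_start_transaction() (__TRANS_START)
 * gets -EBUSY, waits, and retries against transaction N+1, while
 * btrfs_join_transaction() (__TRANS_JOIN) may still attach until
 * TRANS_STATE_COMMIT_DOING.
 */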
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.dirty_extent_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}

static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING);

	down_write(&fs_info->commit_root_sem);

	if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
		fs_info->last_reloc_trans = trans->transid;

	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);

	up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}
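/*
 * Clarifying note (an assumption based on the TRANS_EXTWRITERS definition in
 * transaction.h): TRANS_EXTWRITERS covers the handle types that can dirty new
 * data, so for example a __TRANS_JOIN handle bumps num_writers but leaves
 * num_extwriters alone, while a __TRANS_START handle bumps both.
 */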
/*
 * To be called after doing the chunk btree updates right after allocating a new
 * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a
 * chunk after all chunk btree updates and after finishing the second phase of
 * chunk allocation (btrfs_create_pending_block_groups()) in case some block
 * group had its chunk item insertion delayed to the second phase.
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved, NULL);
	trans->chunk_bytes_reserved = 0;
}

/*
 * Either allocate a new transaction or hop into the existing one.
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (BTRFS_FS_ERROR(fs_info)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
	 * current transaction, and commit it. If there is no transaction, just
	 * return ENOENT.
	 */
	if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL.
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * Someone started a transaction after we unlocked. Make sure
		 * to redo the checks above.
		 */
		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		kfree(cur_trans);
		goto loop;
	} else if (BTRFS_FS_ERROR(fs_info)) {
		spin_unlock(&fs_info->trans_lock);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->pending_ordered, 0);
	init_waitqueue_head(&cur_trans->pending_wait);
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * Although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			    IO_TREE_TRANS_DIRTY_PAGES);
	extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
			    IO_TREE_FS_PINNED_EXTENTS);
	btrfs_set_fs_generation(fs_info, fs_info->generation + 1);
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
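/*
 * Illustrative summary of the above (not a callable example): with no
 * running transaction,
 *
 *	btrfs_attach_transaction(root)       returns ERR_PTR(-ENOENT)
 *	btrfs_join_transaction_nostart(root) returns ERR_PTR(-ENOENT)
 *
 * while btrfs_join_transaction() or btrfs_start_transaction() would create
 * and join a fresh transaction instead.
 */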
/*
 * This does all the record keeping required to make sure that a shareable root
 * is properly recorded in a given transaction. This is required to make sure
 * the old root from before we joined the transaction is deleted when the
 * transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * See below for IN_TRANS_SETUP usage rules.
		 * We have the reloc mutex held now, so there
		 * is only one writer in this function.
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/*
		 * Make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update.
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/*
		 * This is pretty tricky. We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c. The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP. When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock. smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below.
		 */
		ret = btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return ret;
}
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list. */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time. */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/*
	 * See record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers.
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	ret = record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return ret;
}
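/*
 * Illustrative pairing of the barriers above (a sketch, not new logic): the
 * writer in record_root_in_trans() does
 *
 *	set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
 *	smp_wmb();
 *	root->last_trans = trans->transid;
 *
 * so a reader in btrfs_record_root_in_trans() that issues smp_rmb() and then
 * sees last_trans == transid with IN_TRANS_SETUP clear can safely take the
 * fast path without the reloc_mutex.
 */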
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!TRANS_ABORTED(trans));
}

/*
 * Wait for commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}

static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return 0;

	if (type == TRANS_START)
		return 1;

	return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
					enum btrfs_reserve_flush_enum flush,
					u64 num_bytes,
					u64 *delayed_refs_bytes)
{
	struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
	u64 bytes = num_bytes + *delayed_refs_bytes;
	int ret;

	/*
	 * We want to reserve all the bytes we may need all at once, so we only
	 * do 1 enospc flushing cycle per transaction start.
	 */
	ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);

	/*
	 * If we are an emergency flush, which can steal from the global block
	 * reserve, then attempt to not reserve space for the delayed refs, as
	 * we will consume space for them from the global block reserve.
	 */
	if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
		bytes -= *delayed_refs_bytes;
		*delayed_refs_bytes = 0;
		ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
	}

	return ret;
}
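/*
 * Illustrative only: for a btrfs_start_transaction(root, 2) caller, the
 * reservation made through the helper above is effectively
 *
 *	num_bytes          = btrfs_calc_insert_metadata_size(fs_info, 2);
 *	delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, 2);
 *	btrfs_reserve_trans_metadata(fs_info, flush, num_bytes,
 *				     &delayed_refs_bytes);
 *
 * i.e. one flushing cycle covering both the btree items and the delayed refs
 * they will generate (see start_transaction() below).
 */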
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	u64 delayed_refs_bytes = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		qgroup_reserved = num_items * fs_info->nodesize;
		/*
		 * Use prealloc for now, as there might be a currently running
		 * transaction that could free this reserved space prematurely
		 * by committing.
		 */
		ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
							 enforce_qgroups, false);
		if (ret)
			return ERR_PTR(ret);

		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		/*
		 * If we plan to insert/update/delete "num_items" from a btree,
		 * we will also generate delayed refs for extent buffers in the
		 * respective btree paths, so reserve space for the delayed refs
		 * that will be generated by the caller as it modifies btrees.
		 * Try to reserve them to avoid excessive use of the global
		 * block reserve.
		 */
		delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items);

		/*
		 * Do the reservation for the relocation root creation.
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes,
						   &delayed_refs_bytes);
		if (ret)
			goto reserve_fail;

		btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true);

		if (trans_rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !btrfs_block_rsv_full(delayed_refs_rsv)) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space. We still want these guys to refill the
		 * delayed block_rsv so just add 1 item's worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref. We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	INIT_LIST_HEAD(&h->new_bgs);
	btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = trans_rsv;
		h->bytes_reserved = num_bytes;
		if (delayed_refs_bytes > 0) {
			trace_btrfs_space_reservation(fs_info,
						      "local_delayed_refs_rsv",
						      h->transid,
						      delayed_refs_bytes, 1);
			h->delayed_refs_bytes_reserved = delayed_refs_bytes;
			btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true);
			delayed_refs_bytes = 0;
		}
		h->reloc_reserved = reloc_reserved;
	}

	/*
	 * Now that we have found a transaction to be a part of, convert the
	 * qgroup reservation from prealloc to pertrans. A different transaction
	 * can't race in and free our pertrans out from under us.
	 */
	if (qgroup_reserved)
		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
	 * ALLOC_FORCE the first run through, and then we won't allocate for
	 * anybody else who races in later. We don't care about the return
	 * value here.
	 */
	if (do_chunk_alloc && num_bytes) {
		u64 flags = h->block_rsv->space_info->flags;

		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
	 * call btrfs_join_transaction() while we're also starting a
	 * transaction.
	 *
	 * Thus it needs to be called after current->journal_info is
	 * initialized, or we can deadlock.
	 */
	ret = btrfs_record_root_in_trans(h, root);
	if (ret) {
		/*
		 * The transaction handle is fully initialized and linked with
		 * other structures so it needs to be ended in case of errors,
		 * not just freed.
		 */
		btrfs_end_transaction(h);
		return ERR_PTR(ret);
	}

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
	if (delayed_refs_bytes)
		btrfs_space_info_free_bytes_may_use(fs_info, trans_rsv->space_info,
						    delayed_refs_bytes);
reserve_fail:
	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
	return ERR_PTR(ret);
}
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED.
 * This is similar to btrfs_attach_transaction() but it allows the join to
 * happen if the transaction commit already started but it's not yet in the
 * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING).
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Catch the running transaction.
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you need to be sure there is
 * no inactive transaction in the fs when -ENOENT is returned, you should
 * invoke btrfs_attach_transaction_barrier().
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Catch the running transaction.
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT)) {
		int ret;

		ret = btrfs_wait_for_commit(root->fs_info, 0);
		if (ret)
			return ERR_PTR(ret);
	}

	return trans;
}
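/*
 * Illustrative caller pattern (a sketch, not code from this file): a
 * sync-style caller that wants "commit whatever is running, if anything"
 * would typically do
 *
 *	trans = btrfs_attach_transaction_barrier(root);
 *	if (IS_ERR(trans)) {
 *		if (PTR_ERR(trans) == -ENOENT)
 *			return 0;	// nothing running, nothing to commit
 *		return PTR_ERR(trans);
 *	}
 *	return btrfs_commit_transaction(trans);
 */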
/*
 * Wait for a transaction commit to reach at least the given state.
 */
static noinline void wait_for_commit(struct btrfs_transaction *commit,
				     const enum btrfs_trans_state min_state)
{
	struct btrfs_fs_info *fs_info = commit->fs_info;
	u64 transid = commit->transid;
	bool put = false;

	/*
	 * At the moment this function is called with min_state either being
	 * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED.
	 */
	if (min_state == TRANS_STATE_COMPLETED)
		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
	else
		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);

	while (1) {
		wait_event(commit->commit_wait, commit->state >= min_state);
		if (put)
			btrfs_put_transaction(commit);

		if (min_state < TRANS_STATE_COMPLETED)
			break;

		/*
		 * A transaction isn't really completed until all of the
		 * previous transactions are completed, but with fsync we can
		 * end up with SUPER_COMMITTED transactions before a COMPLETED
		 * transaction. Wait for those.
		 */

		spin_lock(&fs_info->trans_lock);
		commit = list_first_entry_or_null(&fs_info->trans_list,
						  struct btrfs_transaction,
						  list);
		if (!commit || commit->transid > transid) {
			spin_unlock(&fs_info->trans_lock);
			break;
		}
		refcount_inc(&commit->use_count);
		put = true;
		spin_unlock(&fs_info->trans_lock);
	}
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= btrfs_get_last_trans_committed(fs_info))
			goto out;

		/* Find the specified transaction. */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction.
		 */
		if (!cur_trans) {
			if (transid > btrfs_get_last_trans_committed(fs_info))
				ret = -EINVAL;
			goto out;
		}
	} else {
		/* Find the newest transaction that is committing or committed. */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		if (!cur_trans)
			goto out;	/* Nothing committing or committed. */
	}

	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
	ret = cur_trans->aborted;
	btrfs_put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}

bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
		return true;

	if (btrfs_check_space_for_delayed_refs(trans->fs_info))
		return true;

	return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50);
}
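/*
 * Illustrative caller pattern (a sketch, not code from this file):
 * long-running operations typically poll the helper above between batches of
 * work, ending and restarting the handle so a pending commit can make
 * progress:
 *
 *	while (have_more_work) {
 *		do_one_batch(trans);
 *		if (btrfs_should_end_transaction(trans)) {
 *			btrfs_end_transaction(trans);
 *			trans = btrfs_start_transaction(root, num_items);
 *			if (IS_ERR(trans))
 *				return PTR_ERR(trans);
 *		}
 *	}
 */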
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		ASSERT(!trans->delayed_refs_bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved) {
		ASSERT(!trans->delayed_refs_bytes_reserved);
		return;
	}

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
	trans->bytes_reserved = 0;

	if (!trans->delayed_refs_bytes_reserved)
		return;

	trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv",
				      trans->transid,
				      trans->delayed_refs_bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, &trans->delayed_rsv,
				trans->delayed_refs_bytes_reserved, NULL);
	trans->delayed_refs_bytes_reserved = 0;
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int err = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);

	btrfs_lockdep_release(info, btrfs_trans_num_extwriters);
	btrfs_lockdep_release(info, btrfs_trans_num_writers);

	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) {
		wake_up_process(info->transaction_kthread);
		if (TRANS_ABORTED(trans))
			err = trans->aborted;
		else
			err = -EROFS;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}
/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are sent to disk but does not wait on them.
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (find_first_extent_bit(dirty_pages, start, &start, &end,
				     mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leaves for which
		 * writeback hasn't finished yet (and without errors).
		 * We clean up any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return werr;
}
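/*
 * Clarifying note (based on usage elsewhere in this file): the "two
 * extent_io trees" mentioned above are cur_trans->dirty_pages for regular
 * transaction commits and root->dirty_log_pages for log commits. Both paths
 * funnel through btrfs_write_marked_extents() and then wait, via
 * __btrfs_wait_marked_extents() below, on the EXTENT_NEED_WAIT bit set here.
 */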
/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit. We wait
 * on all the pages and clear them from the dirty pages state tree.
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (find_first_extent_bit(dirty_pages, start, &start, &end,
				     EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
			      struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int err;

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int err;

	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_NEW) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !err)
		err = -EIO;
	return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}
/*
 * This is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * Update all the cowonly tree roots on disk.
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;

		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		list_add_tail(&root->dirty_list,
			      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	/* Now flush any delayed refs generated by updating all of the roots. */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret)
		return ret;

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;

		/*
		 * We're writing the dirty block groups, which could generate
		 * delayed refs, which could generate more dirty block groups,
		 * so we want to keep this flushing in this loop to make sure
		 * everything gets run.
		 */
		ret = btrfs_run_delayed_refs(trans, U64_MAX);
		if (ret)
			return ret;
	}
	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	/* Update the dev-replace pointer once everything is committed. */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}

/*
 * If we had a pending drop we need to see if there are any others left in our
 * dead roots list, and if not clear our bit and wake any waiters.
 */
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	/*
	 * We put the drop in progress roots at the front of the list, so if the
	 * first entry doesn't have UNFINISHED_DROP set we can wake everybody
	 * up.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&fs_info->dead_roots)) {
		struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
							   struct btrfs_root,
							   root_list);
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
			spin_unlock(&fs_info->trans_lock);
			return;
		}
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_wake_unfinished_drop(fs_info);
}

/*
 * Dead roots are old snapshots that need to be deleted. This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted.
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list)) {
		btrfs_grab_root(root);

		/* We want to process the partially complete drops first. */
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
			list_add(&root->root_list, &fs_info->dead_roots);
		else
			list_add_tail(&root->root_list, &fs_info->dead_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
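/*
 * Context note (an assumption about surrounding code, not logic from this
 * file): entries queued on fs_info->dead_roots are later picked up by the
 * cleaner kthread, which drops one snapshot at a time, roughly
 *
 *	root = pick_first_dead_root(fs_info);	// hypothetical helper
 *	btrfs_drop_snapshot(root, 0, 0);
 *
 * which is why partially dropped roots are queued at the head above: they
 * resume before fresh deletions start.
 */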
/*
 * Update each subvolume root and its relocation root, if it exists, in the tree
 * of tree roots. Also free log roots if they exist.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			int ret2;

			/*
			 * At this point we can neither have tasks logging inodes
			 * from a root nor trying to commit a log tree.
			 */
			ASSERT(atomic_read(&root->log_writers) == 0);
			ASSERT(atomic_read(&root->log_commit[0]) == 0);
			ASSERT(atomic_read(&root->log_commit[1]) == 0);

			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			ret2 = btrfs_update_reloc_root(trans, root);
			if (ret2)
				return ret2;

			/* See comments in should_cow_block(). */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					      &trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			ret2 = btrfs_update_root(trans, fs_info->tree_root,
						 &root->root_key,
						 &root->root_item);
			if (ret2)
				return ret2;
			spin_lock(&fs_info->fs_roots_radix_lock);
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return 0;
}
/*
 * Do all special snapshot related qgroup dirty hack.
 *
 * Will do all needed qgroup inherit and dirty hack like switching commit
 * roots inside one transaction and writing all btrees to disk, to make
 * qgroups work.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not enabled. If
	 * this check races with the ioctl, rescan will kick in anyway.
	 */
	if (!btrfs_qgroup_full_accounting(fs_info))
		return 0;

	/*
	 * Ensure dirty @src will be committed. Otherwise, after the coming
	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
	 * recorded root will never be updated again, causing an outdated root
	 * item.
	 */
	ret = record_root_in_trans(trans, src, 1);
	if (ret)
		return ret;

	/*
	 * btrfs_qgroup_inherit relies on a consistent view of the usage for the
	 * src root, so we must run the delayed refs here.
	 *
	 * However this isn't particularly fool proof, because there's no
	 * synchronization keeping us from changing the tree after this point
	 * before we do the qgroup_inherit, or even from making changes while
	 * we're doing the qgroup_inherit. But that's a problem for the future,
	 * for now flush the delayed refs to narrow the race window where the
	 * qgroup counters could end up wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now qgroups are all updated, we can inherit them to the new qgroups. */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   parent->root_key.objectid, inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all subvolume and extent trees, to ensure they have a
	 *    valid commit_root for the accounting of the later
	 *    insert_dir_item()
	 * 2) write all btree blocks onto disk, to make sure later btree
	 *    modifications will be COWed; otherwise commit_root could be
	 *    populated and cause wrong qgroup numbers.
	 * In this simplified commit, we don't really care about other trees
	 * like chunk and root tree, as they won't affect qgroups.
	 * And we don't write the super block, to avoid a half committed state.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	/*
	 * Force the parent root to be updated, as we recorded it before so its
	 * last_trans == cur_transid.
	 * Otherwise it won't be committed again onto disk after the later
	 * insert_dir_item().
	 */
	if (!ret)
		ret = record_root_in_trans(trans, parent, 1);
	return ret;
}
/*
 * New snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation.
 *
 * Note:
 * If an error which may affect the commitment of the current transaction
 * happens, we should return the error number. If an error only affects
 * the creation of the pending snapshots, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode = pending->dir;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec64 cur_time;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	unsigned int nofs_flags;
	struct fscrypt_name fname;

	ASSERT(pending->path);
	path = pending->path;

	ASSERT(pending->root_item);
	new_root_item = pending->root_item;

	/*
	 * We're inside a transaction and must make sure that any potential
	 * allocations with GFP_KERNEL in fscrypt won't recurse back to the
	 * filesystem.
	 */
	nofs_flags = memalloc_nofs_save();
	pending->error = fscrypt_setup_filename(parent_inode,
						&pending->dentry->d_name, 0,
						&fname);
	memalloc_nofs_restore(nofs_flags);
	if (pending->error)
		goto free_pending;

	pending->error = btrfs_get_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto free_fname;

	/*
	 * Make qgroup skip the new snapshot's qgroupid, as it is
	 * accounted for by the later btrfs_qgroup_inherit().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(fs_info,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid,
				      trans->bytes_reserved, 1);
	parent_root = BTRFS_I(parent_inode)->root;
	ret = record_root_in_trans(trans, parent_root, 0);
	if (ret)
		goto fail;
	cur_time = current_time(parent_inode);

	/*
	 * Insert the directory item.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/* Check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(BTRFS_I(parent_inode)),
					 &fname.disk_name, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_release_path(path);

	ret = btrfs_create_qgroup(trans, objectid);
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Pull in the delayed directory update and the delayed inode item;
	 * otherwise we corrupt the FS during snapshot creation.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = record_root_in_trans(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
				     trans->transid);
	generate_random_guid(new_root_item->uuid);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
	       BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
	ret = record_root_in_trans(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item, trans->transid);
	generate_random_guid(new_root_item->uuid);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
	       BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
			      BTRFS_NESTING_COW);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* Clean up in any case. */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/* See comments in should_cow_block(). */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* Record when the snapshot was created in key.offset. */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/* Insert root back/forward references. */
	ret = btrfs_add_root_ref(trans, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(BTRFS_I(parent_inode)), index,
				 &fname.disk_name);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		pending->snap = NULL;
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Do special qgroup accounting for the snapshot, as we have applied a
	 * qgroup hack to make snapshot creation fast. To cooperate with that
	 * hack we do the hack again here, otherwise the snapshot would be
	 * greatly slowed down by a subtree qgroup rescan.
	 */
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL)
		ret = qgroup_account_snapshot(trans, root, parent_root,
					      pending->inherit, objectid);
	else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		ret = btrfs_qgroup_inherit(trans, root->root_key.objectid, objectid,
					   parent_root->root_key.objectid, pending->inherit);
	if (ret < 0)
		goto fail;
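
	/*
	 * Finally link the snapshot into its parent directory: add the dir
	 * item, bump the parent inode's size and times, and record the
	 * snapshot's UUIDs in the UUID tree.
	 */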
	ret = btrfs_insert_dir_item(trans, &fname.disk_name,
				    BTRFS_I(parent_inode), &key, BTRFS_FT_DIR,
				    index);
	/* We have checked the name at the beginning, so these are impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
						  fname.disk_name.len * 2);
	inode_set_mtime_to_ts(parent_inode,
			      inode_set_ctime_current(parent_inode));
	ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			goto fail;
		}
	}

fail:
	pending->error = ret;
dir_item_existed:
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
free_fname:
	fscrypt_free_filename(&fname);
free_pending:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;

	return ret;
}

/*
 * Create all the snapshots we've scheduled for creation.
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
{
	struct btrfs_pending_snapshot *pending, *next;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret = 0;

	list_for_each_entry_safe(pending, next, head, list) {
		list_del(&pending->list);
		ret = create_pending_snapshot(trans, pending);
		if (ret)
			break;
	}
	return ret;
}
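
/*
 * Copy the current location, generation and level of the chunk and tree
 * roots into the in-memory super block copy, along with the space cache and
 * uuid tree generations when applicable, so the next super block write
 * points at the trees committed by this transaction.
 */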
static void update_super_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = fs_info->super_copy;

	root_item = &fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		super->cache_generation = root_item->generation;
	else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
		super->cache_generation = 0;
	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
		super->uuid_tree_generation = root_item->generation;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	struct btrfs_transaction *trans;
	int ret = 0;

	spin_lock(&info->trans_lock);
	trans = info->running_transaction;
	if (trans)
		ret = is_transaction_blocked(trans);
	spin_unlock(&info->trans_lock);
	return ret;
}

void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans;

	/* Kick the transaction kthread. */
	set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
	wake_up_process(fs_info->transaction_kthread);

	/* Take a reference on the transaction. */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Wait for the current transaction commit to start and block
	 * subsequent transaction joins.
	 */
	btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
	wait_event(fs_info->transaction_blocked_wait,
		   cur_trans->state >= TRANS_STATE_COMMIT_START ||
		   TRANS_ABORTED(cur_trans));
	btrfs_put_transaction(cur_trans);
}
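
/*
 * Tear down a transaction whose commit failed: mark it aborted, wait for
 * the remaining writers to detach, unhook it from the list of transactions
 * and release its resources.
 */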
static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(refcount_read(&trans->use_count) > 1);

	btrfs_abort_transaction(trans, err);

	spin_lock(&fs_info->trans_lock);

	/*
	 * If the transaction is removed from the list, it means this
	 * transaction has been committed successfully, so it is impossible
	 * to call the cleanup function.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	if (cur_trans == fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The thread has already released the lockdep map as a reader
		 * in btrfs_commit_transaction().
		 */
		btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&fs_info->trans_lock);
	}

	/*
	 * Now that we know no one else is still using the transaction we can
	 * remove the transaction from the list of transactions. This avoids
	 * the transaction kthread from cleaning up the transaction while some
	 * other task is still using it, which could result in a use-after-free
	 * on things like log trees, as it forces the transaction kthread to
	 * wait for this transaction to be cleaned up by us.
	 */
	list_del_init(&cur_trans->list);

	spin_unlock(&fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, fs_info);

	spin_lock(&fs_info->trans_lock);
	if (cur_trans == fs_info->running_transaction)
		fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	/*
	 * If relocation is running, we can't cancel scrub because that will
	 * result in a deadlock. Before relocating a block group, relocation
	 * pauses scrub, then starts and commits a transaction before unpausing
	 * scrub. If the transaction commit is being done by the relocation
	 * task or triggered by another task and the relocation task is waiting
	 * for the commit, and we end up here due to an error in the commit
	 * path, then calling btrfs_scrub_cancel() will deadlock, as we are
	 * asking for scrub to stop while having it asked to be paused higher
	 * above in the relocation code.
	 */
	if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
		btrfs_scrub_cancel(fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * Release the reserved delayed ref space of all pending block groups of the
 * transaction and remove them from the list.
 */
static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *block_group, *tmp;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
		list_del_init(&block_group->bg_list);
	}
}

static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	/*
	 * We use try_to_writeback_inodes_sb() here because if we used
	 * btrfs_start_delalloc_roots() we would deadlock with fs freeze.
	 * We are currently holding the fs freeze lock; if we do an async
	 * flush we'll do btrfs_join_transaction() and deadlock because we
	 * need to wait for the fs freeze lock. Using the direct flushing we
	 * benefit from already being in a transaction and our
	 * join_transaction doesn't have to re-take the fs freeze lock.
	 *
	 * Note that try_to_writeback_inodes_sb() will only trigger writeback
	 * if it can read lock sb->s_umount. It will always be able to lock it,
	 * except when the filesystem is being unmounted or being frozen, but
	 * in those cases sync_filesystem() is called, which results in calling
	 * writeback_inodes_sb() while holding a write lock on sb->s_umount.
	 * Note that we don't call writeback_inodes_sb() directly, because it
	 * will emit a warning if sb->s_umount is not locked.
	 */
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
		try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
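
/*
 * Both helpers above only take effect when the filesystem is mounted with
 * -o flushoncommit: in that case a transaction commit first starts writeback
 * of all delalloc data and later waits for all ordered extents to complete.
 */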

/*
 * Add a pending snapshot associated with the given transaction handle to the
 * respective transaction. This must be called after the transaction commit
 * started and while holding fs_info->trans_lock.
 * This serves to guarantee a caller of btrfs_commit_transaction() that it can
 * safely free the pending snapshot pointer in case btrfs_commit_transaction()
 * returns an error.
 */
static void add_pending_snapshot(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (!trans->pending_snapshot)
		return;

	lockdep_assert_held(&trans->fs_info->trans_lock);
	ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP);

	list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
}

static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval)
{
	fs_info->commit_stats.commit_count++;
	fs_info->commit_stats.last_commit_dur = interval;
	fs_info->commit_stats.max_commit_dur =
			max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
	fs_info->commit_stats.total_commit_dur += interval;
}
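
/*
 * A minimal usage sketch for the commit path below (hypothetical caller,
 * error handling elided):
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	// ... modify trees through trans ...
 *	ret = btrfs_commit_transaction(trans);
 *
 * Callers that don't need the transaction to be durable right away can pair
 * btrfs_start_transaction() with btrfs_end_transaction() instead and leave
 * the actual commit to the transaction kthread.
 */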
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;
	ktime_t start_time;
	ktime_t interval;

	ASSERT(refcount_read(&trans->use_count) == 1);
	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);

	clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);

	/* Stop the commit early if ->aborted is set. */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto lockdep_trans_commit_start_release;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	/*
	 * We only want one transaction commit doing the flushing so we do not
	 * waste a bunch of time on lock contention on the extent root node.
	 */
	if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
			      &cur_trans->delayed_refs.flags)) {
		/*
		 * Make a pass through all the delayed refs we have so far.
		 * Any running threads may add more while we are here.
		 */
		ret = btrfs_run_delayed_refs(trans, 0);
		if (ret)
			goto lockdep_trans_commit_start_release;
	}

	btrfs_create_pending_block_groups(trans);

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/*
		 * This mutex is also taken before trying to set block groups
		 * read-only. We need to make sure that nobody has set a block
		 * group read-only after extents from that block group have
		 * been allocated for cache files. btrfs_set_block_group_ro()
		 * will wait for the transaction to commit if it finds
		 * BTRFS_TRANS_DIRTY_BG_RUN set.
		 *
		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
		 * only one process starts all the block group IO. It wouldn't
		 * hurt to have more than one go through, but there's no real
		 * advantage to it either.
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&fs_info->ro_block_group_mutex);

		if (run_it) {
			ret = btrfs_start_dirty_block_groups(trans);
			if (ret)
				goto lockdep_trans_commit_start_release;
		}
	}

	spin_lock(&fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;

		add_pending_snapshot(trans);

		spin_unlock(&fs_info->trans_lock);
		refcount_inc(&cur_trans->use_count);

		if (trans->in_fsync)
			want_state = TRANS_STATE_SUPER_COMMITTED;

		btrfs_trans_state_lockdep_release(fs_info,
						  BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
		ret = btrfs_end_transaction(trans);
		wait_for_commit(cur_trans, want_state);

		if (TRANS_ABORTED(cur_trans))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_PREP;
	wake_up(&fs_info->transaction_blocked_wait);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);

	if (cur_trans->list.prev != &fs_info->trans_list) {
		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;

		if (trans->in_fsync)
			want_state = TRANS_STATE_SUPER_COMMITTED;

		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state < want_state) {
			refcount_inc(&prev_trans->use_count);
			spin_unlock(&fs_info->trans_lock);

			wait_for_commit(prev_trans, want_state);

			ret = READ_ONCE(prev_trans->aborted);

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto lockdep_release;
			spin_lock(&fs_info->trans_lock);
		}
	} else {
		/*
		 * The previous transaction was aborted and was already removed
		 * from the list of transactions at fs_info->trans_list. So we
		 * abort to prevent writing a new superblock that reflects a
		 * corrupt state (pointing to trees with unwritten nodes/leafs).
		 */
		if (BTRFS_FS_ERROR(fs_info)) {
			spin_unlock(&fs_info->trans_lock);
			ret = -EROFS;
			goto lockdep_release;
		}
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Get the time spent on the work done by the commit thread and not
	 * the time spent waiting on a previous commit.
	 */
	start_time = ktime_get_ns();

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(fs_info);
	if (ret)
		goto lockdep_release;

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto lockdep_release;

	/*
	 * The thread has started/joined the transaction thus it holds the
	 * lockdep map as a reader. It has to release it before acquiring the
	 * lockdep map as a writer.
	 */
	btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
	btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters);
	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* Some pending items might have been added after the previous flush. */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		goto cleanup_transaction;
	}

	btrfs_wait_delalloc_flush(fs_info);

	/*
	 * Wait for all ordered extents started by a fast fsync that joined this
	 * transaction. Otherwise if this transaction commits before the ordered
	 * extents complete we lose logged data after a power failure.
	 */
	btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered);
	wait_event(cur_trans->pending_wait,
		   atomic_read(&cur_trans->pending_ordered) == 0);

	btrfs_scrub_pause(fs_info);
	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&fs_info->trans_lock);
	add_pending_snapshot(trans);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&fs_info->trans_lock);

	/*
	 * The thread has started/joined the transaction thus it holds the
	 * lockdep map as a reader. It has to release it before acquiring the
	 * lockdep map as a writer.
	 */
	btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
	btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * Make lockdep happy by acquiring the state locks after
	 * btrfs_trans_num_writers is released. If we acquired the state locks
	 * before releasing the btrfs_trans_num_writers lock then lockdep would
	 * complain because we did not follow the reverse order unlocking rule.
	 */
	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);

	/*
	 * We've started the commit, clear the flag in case we were triggered to
	 * do an async commit but somebody else started before the transaction
	 * kthread could do the work.
	 */
	clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);

	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
		goto scrub_continue;
	}
	/*
	 * The reloc mutex makes sure that we stop the balancing code from
	 * coming in and moving extents around in the middle of the commit.
	 */
	mutex_lock(&fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will deal with
	 * them in create_pending_snapshot(), which is the core function of the
	 * snapshot creation.
	 */
	ret = create_pending_snapshots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * We insert the dir indexes of the snapshots and update the inodes of
	 * the snapshots' parents after the snapshot creation, so there are
	 * some delayed items which are not dealt with. Now deal with them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto unlock_reloc;

	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret)
		goto unlock_reloc;

	/*
	 * Make sure none of the code above managed to slip in a delayed item.
	 */
	btrfs_assert_delayed_root_empty(fs_info);

	WARN_ON(cur_trans != trans->transaction);
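
	/*
	 * Update the root item of every fs/subvolume root modified in this
	 * transaction in the tree of tree roots, so the on-disk root items
	 * point at the new tree blocks.
	 */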
	ret = commit_fs_roots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * commit_fs_roots() gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots.
	 */
	btrfs_free_log_root_tree(trans, fs_info);

	/*
	 * Since the fs roots are all committed, we can get quite accurate
	 * new_roots. So let's do the quota accounting.
	 */
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto unlock_reloc;

	ret = commit_cowonly_roots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto unlock_reloc;
	}

	cur_trans = fs_info->running_transaction;

	btrfs_set_root_node(&fs_info->tree_root->root_item,
			    fs_info->tree_root->node);
	list_add_tail(&fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&fs_info->chunk_root->root_item,
			    fs_info->chunk_root->node);
	list_add_tail(&fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_set_root_node(&fs_info->block_group_root->root_item,
				    fs_info->block_group_root->node);
		list_add_tail(&fs_info->block_group_root->dirty_list,
			      &cur_trans->switch_commits);
	}

	switch_commit_roots(trans);

	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(fs_info);

	btrfs_set_super_log_root(fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_copy));

	btrfs_commit_device_sizes(cur_trans);

	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);

	btrfs_trans_release_chunk_metadata(trans);

	/*
	 * Before changing the transaction state to TRANS_STATE_UNBLOCKED and
	 * setting fs_info->running_transaction to NULL, lock tree_log_mutex to
	 * make sure that before we commit our superblock, no other task can
	 * start a new transaction and commit a log tree before we commit our
	 * superblock. Anyone trying to commit a log tree locks this mutex
	 * before writing its superblock.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);
	mutex_unlock(&fs_info->reloc_mutex);

	wake_up(&fs_info->transaction_wait);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
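
	/*
	 * From this point on new transactions are allowed to start; the rest
	 * of this function only writes back this transaction's tree blocks
	 * and super blocks.
	 */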

	/* If the features have changed, wake up the cleaner to update sysfs. */
	if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) &&
	    fs_info->cleaner_kthread)
		wake_up_process(fs_info->cleaner_kthread);

	ret = btrfs_write_and_wait_transaction(trans);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Error while writing out transaction");
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_all_supers(fs_info, 0);
	/*
	 * The super is written; we can safely allow the tree-loggers
	 * to go about their business.
	 */
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret)
		goto scrub_continue;

	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
	wake_up(&cur_trans->commit_wait);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);

	btrfs_finish_extent_commit(trans);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(fs_info);

	btrfs_set_last_trans_committed(fs_info, cur_trans->transid);
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);

	trace_btrfs_transaction_commit(fs_info);

	interval = ktime_get_ns() - start_time;

	btrfs_scrub_continue(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	update_commit_stats(fs_info, interval);

	return ret;

unlock_reloc:
	mutex_unlock(&fs_info->reloc_mutex);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
scrub_continue:
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
	btrfs_scrub_continue(fs_info);
cleanup_transaction:
	btrfs_trans_release_metadata(trans);
	btrfs_cleanup_pending_block_groups(trans);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, ret);

	return ret;

lockdep_release:
	btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
	btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
	goto cleanup_transaction;

lockdep_trans_commit_start_release:
	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
	btrfs_end_transaction(trans);
	return ret;
}
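
/*
 * Drop one dead root (a deleted snapshot or subvolume) per call. This is
 * driven by the cleaner thread; see the return convention below.
 */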
/*
 * Return < 0 on error,
 *          0 if there are no more dead roots at the time of the call,
 *          1 if there are more to be processed, call me again.
 *
 * The return value indicates there are certainly more snapshots to delete,
 * but if there comes a new one during processing, it may return 0. We don't
 * mind, because btrfs_commit_super() will poke the cleaner thread and it
 * will process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, 1, 0);

	btrfs_put_root(root);
	return (ret < 0) ? 0 : 1;
}

/*
 * We only mark the transaction aborted and then set the file system
 * read-only. This will prevent new transactions from starting or trying
 * to join this one.
 *
 * This means that error recovery at the call site is limited to freeing
 * any local memory allocations and passing the error code up without
 * further cleanup. The transaction should complete as it normally would
 * in the call path but will return -EIO.
 *
 * We'll complete the cleanup in btrfs_end_transaction() and
 * btrfs_commit_transaction().
 */
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
				      const char *function,
				      unsigned int line, int error, bool first_hit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	WRITE_ONCE(trans->aborted, error);
	WRITE_ONCE(trans->transaction->aborted, error);
	if (first_hit && error == -ENOSPC)
		btrfs_dump_space_info_for_trans_abort(fs_info);
	/* Wake up anybody who may be waiting on this transaction. */
	wake_up(&fs_info->transaction_wait);
	wake_up(&fs_info->transaction_blocked_wait);
	__btrfs_handle_fs_error(fs_info, function, line, error, NULL);
}

int __init btrfs_transaction_init(void)
{
	btrfs_trans_handle_cachep = KMEM_CACHE(btrfs_trans_handle, SLAB_TEMPORARY);
	if (!btrfs_trans_handle_cachep)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_transaction_exit(void)
{
	kmem_cache_destroy(btrfs_trans_handle_cachep);
}