1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/fs.h> 7 #include <linux/slab.h> 8 #include <linux/sched.h> 9 #include <linux/sched/mm.h> 10 #include <linux/writeback.h> 11 #include <linux/pagemap.h> 12 #include <linux/blkdev.h> 13 #include <linux/uuid.h> 14 #include <linux/timekeeping.h> 15 #include "misc.h" 16 #include "ctree.h" 17 #include "disk-io.h" 18 #include "transaction.h" 19 #include "locking.h" 20 #include "tree-log.h" 21 #include "volumes.h" 22 #include "dev-replace.h" 23 #include "qgroup.h" 24 #include "block-group.h" 25 #include "space-info.h" 26 #include "zoned.h" 27 #include "fs.h" 28 #include "accessors.h" 29 #include "extent-tree.h" 30 #include "root-tree.h" 31 #include "defrag.h" 32 #include "dir-item.h" 33 #include "uuid-tree.h" 34 #include "ioctl.h" 35 #include "relocation.h" 36 #include "scrub.h" 37 38 static struct kmem_cache *btrfs_trans_handle_cachep; 39 40 #define BTRFS_ROOT_TRANS_TAG 0 41 42 /* 43 * Transaction states and transitions 44 * 45 * No running transaction (fs tree blocks are not modified) 46 * | 47 * | To next stage: 48 * | Call start_transaction() variants. Except btrfs_join_transaction_nostart(). 49 * V 50 * Transaction N [[TRANS_STATE_RUNNING]] 51 * | 52 * | New trans handles can be attached to transaction N by calling all 53 * | start_transaction() variants. 54 * | 55 * | To next stage: 56 * | Call btrfs_commit_transaction() on any trans handle attached to 57 * | transaction N 58 * V 59 * Transaction N [[TRANS_STATE_COMMIT_START]] 60 * | 61 * | Will wait for previous running transaction to completely finish if there 62 * | is one 63 * | 64 * | Then one of the following happes: 65 * | - Wait for all other trans handle holders to release. 66 * | The btrfs_commit_transaction() caller will do the commit work. 67 * | - Wait for current transaction to be committed by others. 68 * | Other btrfs_commit_transaction() caller will do the commit work. 69 * | 70 * | At this stage, only btrfs_join_transaction*() variants can attach 71 * | to this running transaction. 72 * | All other variants will wait for current one to finish and attach to 73 * | transaction N+1. 74 * | 75 * | To next stage: 76 * | Caller is chosen to commit transaction N, and all other trans handle 77 * | haven been released. 78 * V 79 * Transaction N [[TRANS_STATE_COMMIT_DOING]] 80 * | 81 * | The heavy lifting transaction work is started. 82 * | From running delayed refs (modifying extent tree) to creating pending 83 * | snapshots, running qgroups. 84 * | In short, modify supporting trees to reflect modifications of subvolume 85 * | trees. 86 * | 87 * | At this stage, all start_transaction() calls will wait for this 88 * | transaction to finish and attach to transaction N+1. 89 * | 90 * | To next stage: 91 * | Until all supporting trees are updated. 92 * V 93 * Transaction N [[TRANS_STATE_UNBLOCKED]] 94 * | Transaction N+1 95 * | All needed trees are modified, thus we only [[TRANS_STATE_RUNNING]] 96 * | need to write them back to disk and update | 97 * | super blocks. | 98 * | | 99 * | At this stage, new transaction is allowed to | 100 * | start. | 101 * | All new start_transaction() calls will be | 102 * | attached to transid N+1. | 103 * | | 104 * | To next stage: | 105 * | Until all tree blocks are super blocks are | 106 * | written to block devices | 107 * V | 108 * Transaction N [[TRANS_STATE_COMPLETED]] V 109 * All tree blocks and super blocks are written. Transaction N+1 110 * This transaction is finished and all its [[TRANS_STATE_COMMIT_START]] 111 * data structures will be cleaned up. | Life goes on 112 */ 113 static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { 114 [TRANS_STATE_RUNNING] = 0U, 115 [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), 116 [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | 117 __TRANS_ATTACH | 118 __TRANS_JOIN | 119 __TRANS_JOIN_NOSTART), 120 [TRANS_STATE_UNBLOCKED] = (__TRANS_START | 121 __TRANS_ATTACH | 122 __TRANS_JOIN | 123 __TRANS_JOIN_NOLOCK | 124 __TRANS_JOIN_NOSTART), 125 [TRANS_STATE_SUPER_COMMITTED] = (__TRANS_START | 126 __TRANS_ATTACH | 127 __TRANS_JOIN | 128 __TRANS_JOIN_NOLOCK | 129 __TRANS_JOIN_NOSTART), 130 [TRANS_STATE_COMPLETED] = (__TRANS_START | 131 __TRANS_ATTACH | 132 __TRANS_JOIN | 133 __TRANS_JOIN_NOLOCK | 134 __TRANS_JOIN_NOSTART), 135 }; 136 137 void btrfs_put_transaction(struct btrfs_transaction *transaction) 138 { 139 WARN_ON(refcount_read(&transaction->use_count) == 0); 140 if (refcount_dec_and_test(&transaction->use_count)) { 141 BUG_ON(!list_empty(&transaction->list)); 142 WARN_ON(!RB_EMPTY_ROOT( 143 &transaction->delayed_refs.href_root.rb_root)); 144 WARN_ON(!RB_EMPTY_ROOT( 145 &transaction->delayed_refs.dirty_extent_root)); 146 if (transaction->delayed_refs.pending_csums) 147 btrfs_err(transaction->fs_info, 148 "pending csums is %llu", 149 transaction->delayed_refs.pending_csums); 150 /* 151 * If any block groups are found in ->deleted_bgs then it's 152 * because the transaction was aborted and a commit did not 153 * happen (things failed before writing the new superblock 154 * and calling btrfs_finish_extent_commit()), so we can not 155 * discard the physical locations of the block groups. 156 */ 157 while (!list_empty(&transaction->deleted_bgs)) { 158 struct btrfs_block_group *cache; 159 160 cache = list_first_entry(&transaction->deleted_bgs, 161 struct btrfs_block_group, 162 bg_list); 163 list_del_init(&cache->bg_list); 164 btrfs_unfreeze_block_group(cache); 165 btrfs_put_block_group(cache); 166 } 167 WARN_ON(!list_empty(&transaction->dev_update_list)); 168 kfree(transaction); 169 } 170 } 171 172 static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) 173 { 174 struct btrfs_transaction *cur_trans = trans->transaction; 175 struct btrfs_fs_info *fs_info = trans->fs_info; 176 struct btrfs_root *root, *tmp; 177 178 /* 179 * At this point no one can be using this transaction to modify any tree 180 * and no one can start another transaction to modify any tree either. 181 */ 182 ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING); 183 184 down_write(&fs_info->commit_root_sem); 185 186 if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) 187 fs_info->last_reloc_trans = trans->transid; 188 189 list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits, 190 dirty_list) { 191 list_del_init(&root->dirty_list); 192 free_extent_buffer(root->commit_root); 193 root->commit_root = btrfs_root_node(root); 194 extent_io_tree_release(&root->dirty_log_pages); 195 btrfs_qgroup_clean_swapped_blocks(root); 196 } 197 198 /* We can free old roots now. */ 199 spin_lock(&cur_trans->dropped_roots_lock); 200 while (!list_empty(&cur_trans->dropped_roots)) { 201 root = list_first_entry(&cur_trans->dropped_roots, 202 struct btrfs_root, root_list); 203 list_del_init(&root->root_list); 204 spin_unlock(&cur_trans->dropped_roots_lock); 205 btrfs_free_log(trans, root); 206 btrfs_drop_and_free_fs_root(fs_info, root); 207 spin_lock(&cur_trans->dropped_roots_lock); 208 } 209 spin_unlock(&cur_trans->dropped_roots_lock); 210 211 up_write(&fs_info->commit_root_sem); 212 } 213 214 static inline void extwriter_counter_inc(struct btrfs_transaction *trans, 215 unsigned int type) 216 { 217 if (type & TRANS_EXTWRITERS) 218 atomic_inc(&trans->num_extwriters); 219 } 220 221 static inline void extwriter_counter_dec(struct btrfs_transaction *trans, 222 unsigned int type) 223 { 224 if (type & TRANS_EXTWRITERS) 225 atomic_dec(&trans->num_extwriters); 226 } 227 228 static inline void extwriter_counter_init(struct btrfs_transaction *trans, 229 unsigned int type) 230 { 231 atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0)); 232 } 233 234 static inline int extwriter_counter_read(struct btrfs_transaction *trans) 235 { 236 return atomic_read(&trans->num_extwriters); 237 } 238 239 /* 240 * To be called after doing the chunk btree updates right after allocating a new 241 * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a 242 * chunk after all chunk btree updates and after finishing the second phase of 243 * chunk allocation (btrfs_create_pending_block_groups()) in case some block 244 * group had its chunk item insertion delayed to the second phase. 245 */ 246 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) 247 { 248 struct btrfs_fs_info *fs_info = trans->fs_info; 249 250 if (!trans->chunk_bytes_reserved) 251 return; 252 253 btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv, 254 trans->chunk_bytes_reserved, NULL); 255 trans->chunk_bytes_reserved = 0; 256 } 257 258 /* 259 * either allocate a new transaction or hop into the existing one 260 */ 261 static noinline int join_transaction(struct btrfs_fs_info *fs_info, 262 unsigned int type) 263 { 264 struct btrfs_transaction *cur_trans; 265 266 spin_lock(&fs_info->trans_lock); 267 loop: 268 /* The file system has been taken offline. No new transactions. */ 269 if (BTRFS_FS_ERROR(fs_info)) { 270 spin_unlock(&fs_info->trans_lock); 271 return -EROFS; 272 } 273 274 cur_trans = fs_info->running_transaction; 275 if (cur_trans) { 276 if (TRANS_ABORTED(cur_trans)) { 277 spin_unlock(&fs_info->trans_lock); 278 return cur_trans->aborted; 279 } 280 if (btrfs_blocked_trans_types[cur_trans->state] & type) { 281 spin_unlock(&fs_info->trans_lock); 282 return -EBUSY; 283 } 284 refcount_inc(&cur_trans->use_count); 285 atomic_inc(&cur_trans->num_writers); 286 extwriter_counter_inc(cur_trans, type); 287 spin_unlock(&fs_info->trans_lock); 288 btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers); 289 btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters); 290 return 0; 291 } 292 spin_unlock(&fs_info->trans_lock); 293 294 /* 295 * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the 296 * current transaction, and commit it. If there is no transaction, just 297 * return ENOENT. 298 */ 299 if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART) 300 return -ENOENT; 301 302 /* 303 * JOIN_NOLOCK only happens during the transaction commit, so 304 * it is impossible that ->running_transaction is NULL 305 */ 306 BUG_ON(type == TRANS_JOIN_NOLOCK); 307 308 cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS); 309 if (!cur_trans) 310 return -ENOMEM; 311 312 btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers); 313 btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters); 314 315 spin_lock(&fs_info->trans_lock); 316 if (fs_info->running_transaction) { 317 /* 318 * someone started a transaction after we unlocked. Make sure 319 * to redo the checks above 320 */ 321 btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); 322 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); 323 kfree(cur_trans); 324 goto loop; 325 } else if (BTRFS_FS_ERROR(fs_info)) { 326 spin_unlock(&fs_info->trans_lock); 327 btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); 328 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); 329 kfree(cur_trans); 330 return -EROFS; 331 } 332 333 cur_trans->fs_info = fs_info; 334 atomic_set(&cur_trans->pending_ordered, 0); 335 init_waitqueue_head(&cur_trans->pending_wait); 336 atomic_set(&cur_trans->num_writers, 1); 337 extwriter_counter_init(cur_trans, type); 338 init_waitqueue_head(&cur_trans->writer_wait); 339 init_waitqueue_head(&cur_trans->commit_wait); 340 cur_trans->state = TRANS_STATE_RUNNING; 341 /* 342 * One for this trans handle, one so it will live on until we 343 * commit the transaction. 344 */ 345 refcount_set(&cur_trans->use_count, 2); 346 cur_trans->flags = 0; 347 cur_trans->start_time = ktime_get_seconds(); 348 349 memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs)); 350 351 cur_trans->delayed_refs.href_root = RB_ROOT_CACHED; 352 cur_trans->delayed_refs.dirty_extent_root = RB_ROOT; 353 atomic_set(&cur_trans->delayed_refs.num_entries, 0); 354 355 /* 356 * although the tree mod log is per file system and not per transaction, 357 * the log must never go across transaction boundaries. 358 */ 359 smp_mb(); 360 if (!list_empty(&fs_info->tree_mod_seq_list)) 361 WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n"); 362 if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) 363 WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n"); 364 atomic64_set(&fs_info->tree_mod_seq, 0); 365 366 spin_lock_init(&cur_trans->delayed_refs.lock); 367 368 INIT_LIST_HEAD(&cur_trans->pending_snapshots); 369 INIT_LIST_HEAD(&cur_trans->dev_update_list); 370 INIT_LIST_HEAD(&cur_trans->switch_commits); 371 INIT_LIST_HEAD(&cur_trans->dirty_bgs); 372 INIT_LIST_HEAD(&cur_trans->io_bgs); 373 INIT_LIST_HEAD(&cur_trans->dropped_roots); 374 mutex_init(&cur_trans->cache_write_mutex); 375 spin_lock_init(&cur_trans->dirty_bgs_lock); 376 INIT_LIST_HEAD(&cur_trans->deleted_bgs); 377 spin_lock_init(&cur_trans->dropped_roots_lock); 378 list_add_tail(&cur_trans->list, &fs_info->trans_list); 379 extent_io_tree_init(fs_info, &cur_trans->dirty_pages, 380 IO_TREE_TRANS_DIRTY_PAGES); 381 extent_io_tree_init(fs_info, &cur_trans->pinned_extents, 382 IO_TREE_FS_PINNED_EXTENTS); 383 fs_info->generation++; 384 cur_trans->transid = fs_info->generation; 385 fs_info->running_transaction = cur_trans; 386 cur_trans->aborted = 0; 387 spin_unlock(&fs_info->trans_lock); 388 389 return 0; 390 } 391 392 /* 393 * This does all the record keeping required to make sure that a shareable root 394 * is properly recorded in a given transaction. This is required to make sure 395 * the old root from before we joined the transaction is deleted when the 396 * transaction commits. 397 */ 398 static int record_root_in_trans(struct btrfs_trans_handle *trans, 399 struct btrfs_root *root, 400 int force) 401 { 402 struct btrfs_fs_info *fs_info = root->fs_info; 403 int ret = 0; 404 405 if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && 406 root->last_trans < trans->transid) || force) { 407 WARN_ON(!force && root->commit_root != root->node); 408 409 /* 410 * see below for IN_TRANS_SETUP usage rules 411 * we have the reloc mutex held now, so there 412 * is only one writer in this function 413 */ 414 set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state); 415 416 /* make sure readers find IN_TRANS_SETUP before 417 * they find our root->last_trans update 418 */ 419 smp_wmb(); 420 421 spin_lock(&fs_info->fs_roots_radix_lock); 422 if (root->last_trans == trans->transid && !force) { 423 spin_unlock(&fs_info->fs_roots_radix_lock); 424 return 0; 425 } 426 radix_tree_tag_set(&fs_info->fs_roots_radix, 427 (unsigned long)root->root_key.objectid, 428 BTRFS_ROOT_TRANS_TAG); 429 spin_unlock(&fs_info->fs_roots_radix_lock); 430 root->last_trans = trans->transid; 431 432 /* this is pretty tricky. We don't want to 433 * take the relocation lock in btrfs_record_root_in_trans 434 * unless we're really doing the first setup for this root in 435 * this transaction. 436 * 437 * Normally we'd use root->last_trans as a flag to decide 438 * if we want to take the expensive mutex. 439 * 440 * But, we have to set root->last_trans before we 441 * init the relocation root, otherwise, we trip over warnings 442 * in ctree.c. The solution used here is to flag ourselves 443 * with root IN_TRANS_SETUP. When this is 1, we're still 444 * fixing up the reloc trees and everyone must wait. 445 * 446 * When this is zero, they can trust root->last_trans and fly 447 * through btrfs_record_root_in_trans without having to take the 448 * lock. smp_wmb() makes sure that all the writes above are 449 * done before we pop in the zero below 450 */ 451 ret = btrfs_init_reloc_root(trans, root); 452 smp_mb__before_atomic(); 453 clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state); 454 } 455 return ret; 456 } 457 458 459 void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, 460 struct btrfs_root *root) 461 { 462 struct btrfs_fs_info *fs_info = root->fs_info; 463 struct btrfs_transaction *cur_trans = trans->transaction; 464 465 /* Add ourselves to the transaction dropped list */ 466 spin_lock(&cur_trans->dropped_roots_lock); 467 list_add_tail(&root->root_list, &cur_trans->dropped_roots); 468 spin_unlock(&cur_trans->dropped_roots_lock); 469 470 /* Make sure we don't try to update the root at commit time */ 471 spin_lock(&fs_info->fs_roots_radix_lock); 472 radix_tree_tag_clear(&fs_info->fs_roots_radix, 473 (unsigned long)root->root_key.objectid, 474 BTRFS_ROOT_TRANS_TAG); 475 spin_unlock(&fs_info->fs_roots_radix_lock); 476 } 477 478 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, 479 struct btrfs_root *root) 480 { 481 struct btrfs_fs_info *fs_info = root->fs_info; 482 int ret; 483 484 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) 485 return 0; 486 487 /* 488 * see record_root_in_trans for comments about IN_TRANS_SETUP usage 489 * and barriers 490 */ 491 smp_rmb(); 492 if (root->last_trans == trans->transid && 493 !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state)) 494 return 0; 495 496 mutex_lock(&fs_info->reloc_mutex); 497 ret = record_root_in_trans(trans, root, 0); 498 mutex_unlock(&fs_info->reloc_mutex); 499 500 return ret; 501 } 502 503 static inline int is_transaction_blocked(struct btrfs_transaction *trans) 504 { 505 return (trans->state >= TRANS_STATE_COMMIT_START && 506 trans->state < TRANS_STATE_UNBLOCKED && 507 !TRANS_ABORTED(trans)); 508 } 509 510 /* wait for commit against the current transaction to become unblocked 511 * when this is done, it is safe to start a new transaction, but the current 512 * transaction might not be fully on disk. 513 */ 514 static void wait_current_trans(struct btrfs_fs_info *fs_info) 515 { 516 struct btrfs_transaction *cur_trans; 517 518 spin_lock(&fs_info->trans_lock); 519 cur_trans = fs_info->running_transaction; 520 if (cur_trans && is_transaction_blocked(cur_trans)) { 521 refcount_inc(&cur_trans->use_count); 522 spin_unlock(&fs_info->trans_lock); 523 524 btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); 525 wait_event(fs_info->transaction_wait, 526 cur_trans->state >= TRANS_STATE_UNBLOCKED || 527 TRANS_ABORTED(cur_trans)); 528 btrfs_put_transaction(cur_trans); 529 } else { 530 spin_unlock(&fs_info->trans_lock); 531 } 532 } 533 534 static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type) 535 { 536 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 537 return 0; 538 539 if (type == TRANS_START) 540 return 1; 541 542 return 0; 543 } 544 545 static inline bool need_reserve_reloc_root(struct btrfs_root *root) 546 { 547 struct btrfs_fs_info *fs_info = root->fs_info; 548 549 if (!fs_info->reloc_ctl || 550 !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || 551 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 552 root->reloc_root) 553 return false; 554 555 return true; 556 } 557 558 static struct btrfs_trans_handle * 559 start_transaction(struct btrfs_root *root, unsigned int num_items, 560 unsigned int type, enum btrfs_reserve_flush_enum flush, 561 bool enforce_qgroups) 562 { 563 struct btrfs_fs_info *fs_info = root->fs_info; 564 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; 565 struct btrfs_trans_handle *h; 566 struct btrfs_transaction *cur_trans; 567 u64 num_bytes = 0; 568 u64 qgroup_reserved = 0; 569 bool reloc_reserved = false; 570 bool do_chunk_alloc = false; 571 int ret; 572 573 if (BTRFS_FS_ERROR(fs_info)) 574 return ERR_PTR(-EROFS); 575 576 if (current->journal_info) { 577 WARN_ON(type & TRANS_EXTWRITERS); 578 h = current->journal_info; 579 refcount_inc(&h->use_count); 580 WARN_ON(refcount_read(&h->use_count) > 2); 581 h->orig_rsv = h->block_rsv; 582 h->block_rsv = NULL; 583 goto got_it; 584 } 585 586 /* 587 * Do the reservation before we join the transaction so we can do all 588 * the appropriate flushing if need be. 589 */ 590 if (num_items && root != fs_info->chunk_root) { 591 struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv; 592 u64 delayed_refs_bytes = 0; 593 594 qgroup_reserved = num_items * fs_info->nodesize; 595 /* 596 * Use prealloc for now, as there might be a currently running 597 * transaction that could free this reserved space prematurely 598 * by committing. 599 */ 600 ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved, 601 enforce_qgroups, false); 602 if (ret) 603 return ERR_PTR(ret); 604 605 /* 606 * We want to reserve all the bytes we may need all at once, so 607 * we only do 1 enospc flushing cycle per transaction start. We 608 * accomplish this by simply assuming we'll do num_items worth 609 * of delayed refs updates in this trans handle, and refill that 610 * amount for whatever is missing in the reserve. 611 */ 612 num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); 613 if (flush == BTRFS_RESERVE_FLUSH_ALL && 614 !btrfs_block_rsv_full(delayed_refs_rsv)) { 615 delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, 616 num_items); 617 num_bytes += delayed_refs_bytes; 618 } 619 620 /* 621 * Do the reservation for the relocation root creation 622 */ 623 if (need_reserve_reloc_root(root)) { 624 num_bytes += fs_info->nodesize; 625 reloc_reserved = true; 626 } 627 628 ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush); 629 if (ret) 630 goto reserve_fail; 631 if (delayed_refs_bytes) { 632 btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv, 633 delayed_refs_bytes); 634 num_bytes -= delayed_refs_bytes; 635 } 636 637 if (rsv->space_info->force_alloc) 638 do_chunk_alloc = true; 639 } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL && 640 !btrfs_block_rsv_full(delayed_refs_rsv)) { 641 /* 642 * Some people call with btrfs_start_transaction(root, 0) 643 * because they can be throttled, but have some other mechanism 644 * for reserving space. We still want these guys to refill the 645 * delayed block_rsv so just add 1 items worth of reservation 646 * here. 647 */ 648 ret = btrfs_delayed_refs_rsv_refill(fs_info, flush); 649 if (ret) 650 goto reserve_fail; 651 } 652 again: 653 h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS); 654 if (!h) { 655 ret = -ENOMEM; 656 goto alloc_fail; 657 } 658 659 /* 660 * If we are JOIN_NOLOCK we're already committing a transaction and 661 * waiting on this guy, so we don't need to do the sb_start_intwrite 662 * because we're already holding a ref. We need this because we could 663 * have raced in and did an fsync() on a file which can kick a commit 664 * and then we deadlock with somebody doing a freeze. 665 * 666 * If we are ATTACH, it means we just want to catch the current 667 * transaction and commit it, so we needn't do sb_start_intwrite(). 668 */ 669 if (type & __TRANS_FREEZABLE) 670 sb_start_intwrite(fs_info->sb); 671 672 if (may_wait_transaction(fs_info, type)) 673 wait_current_trans(fs_info); 674 675 do { 676 ret = join_transaction(fs_info, type); 677 if (ret == -EBUSY) { 678 wait_current_trans(fs_info); 679 if (unlikely(type == TRANS_ATTACH || 680 type == TRANS_JOIN_NOSTART)) 681 ret = -ENOENT; 682 } 683 } while (ret == -EBUSY); 684 685 if (ret < 0) 686 goto join_fail; 687 688 cur_trans = fs_info->running_transaction; 689 690 h->transid = cur_trans->transid; 691 h->transaction = cur_trans; 692 refcount_set(&h->use_count, 1); 693 h->fs_info = root->fs_info; 694 695 h->type = type; 696 INIT_LIST_HEAD(&h->new_bgs); 697 698 smp_mb(); 699 if (cur_trans->state >= TRANS_STATE_COMMIT_START && 700 may_wait_transaction(fs_info, type)) { 701 current->journal_info = h; 702 btrfs_commit_transaction(h); 703 goto again; 704 } 705 706 if (num_bytes) { 707 trace_btrfs_space_reservation(fs_info, "transaction", 708 h->transid, num_bytes, 1); 709 h->block_rsv = &fs_info->trans_block_rsv; 710 h->bytes_reserved = num_bytes; 711 h->reloc_reserved = reloc_reserved; 712 } 713 714 /* 715 * Now that we have found a transaction to be a part of, convert the 716 * qgroup reservation from prealloc to pertrans. A different transaction 717 * can't race in and free our pertrans out from under us. 718 */ 719 if (qgroup_reserved) 720 btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); 721 722 got_it: 723 if (!current->journal_info) 724 current->journal_info = h; 725 726 /* 727 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to 728 * ALLOC_FORCE the first run through, and then we won't allocate for 729 * anybody else who races in later. We don't care about the return 730 * value here. 731 */ 732 if (do_chunk_alloc && num_bytes) { 733 u64 flags = h->block_rsv->space_info->flags; 734 735 btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags), 736 CHUNK_ALLOC_NO_FORCE); 737 } 738 739 /* 740 * btrfs_record_root_in_trans() needs to alloc new extents, and may 741 * call btrfs_join_transaction() while we're also starting a 742 * transaction. 743 * 744 * Thus it need to be called after current->journal_info initialized, 745 * or we can deadlock. 746 */ 747 ret = btrfs_record_root_in_trans(h, root); 748 if (ret) { 749 /* 750 * The transaction handle is fully initialized and linked with 751 * other structures so it needs to be ended in case of errors, 752 * not just freed. 753 */ 754 btrfs_end_transaction(h); 755 return ERR_PTR(ret); 756 } 757 758 return h; 759 760 join_fail: 761 if (type & __TRANS_FREEZABLE) 762 sb_end_intwrite(fs_info->sb); 763 kmem_cache_free(btrfs_trans_handle_cachep, h); 764 alloc_fail: 765 if (num_bytes) 766 btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv, 767 num_bytes, NULL); 768 reserve_fail: 769 btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); 770 return ERR_PTR(ret); 771 } 772 773 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 774 unsigned int num_items) 775 { 776 return start_transaction(root, num_items, TRANS_START, 777 BTRFS_RESERVE_FLUSH_ALL, true); 778 } 779 780 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( 781 struct btrfs_root *root, 782 unsigned int num_items) 783 { 784 return start_transaction(root, num_items, TRANS_START, 785 BTRFS_RESERVE_FLUSH_ALL_STEAL, false); 786 } 787 788 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) 789 { 790 return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH, 791 true); 792 } 793 794 struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root) 795 { 796 return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 797 BTRFS_RESERVE_NO_FLUSH, true); 798 } 799 800 /* 801 * Similar to regular join but it never starts a transaction when none is 802 * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED. 803 * This is similar to btrfs_attach_transaction() but it allows the join to 804 * happen if the transaction commit already started but it's not yet in the 805 * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING). 806 */ 807 struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root) 808 { 809 return start_transaction(root, 0, TRANS_JOIN_NOSTART, 810 BTRFS_RESERVE_NO_FLUSH, true); 811 } 812 813 /* 814 * btrfs_attach_transaction() - catch the running transaction 815 * 816 * It is used when we want to commit the current the transaction, but 817 * don't want to start a new one. 818 * 819 * Note: If this function return -ENOENT, it just means there is no 820 * running transaction. But it is possible that the inactive transaction 821 * is still in the memory, not fully on disk. If you hope there is no 822 * inactive transaction in the fs when -ENOENT is returned, you should 823 * invoke 824 * btrfs_attach_transaction_barrier() 825 */ 826 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root) 827 { 828 return start_transaction(root, 0, TRANS_ATTACH, 829 BTRFS_RESERVE_NO_FLUSH, true); 830 } 831 832 /* 833 * btrfs_attach_transaction_barrier() - catch the running transaction 834 * 835 * It is similar to the above function, the difference is this one 836 * will wait for all the inactive transactions until they fully 837 * complete. 838 */ 839 struct btrfs_trans_handle * 840 btrfs_attach_transaction_barrier(struct btrfs_root *root) 841 { 842 struct btrfs_trans_handle *trans; 843 844 trans = start_transaction(root, 0, TRANS_ATTACH, 845 BTRFS_RESERVE_NO_FLUSH, true); 846 if (trans == ERR_PTR(-ENOENT)) { 847 int ret; 848 849 ret = btrfs_wait_for_commit(root->fs_info, 0); 850 if (ret) 851 return ERR_PTR(ret); 852 } 853 854 return trans; 855 } 856 857 /* Wait for a transaction commit to reach at least the given state. */ 858 static noinline void wait_for_commit(struct btrfs_transaction *commit, 859 const enum btrfs_trans_state min_state) 860 { 861 struct btrfs_fs_info *fs_info = commit->fs_info; 862 u64 transid = commit->transid; 863 bool put = false; 864 865 /* 866 * At the moment this function is called with min_state either being 867 * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED. 868 */ 869 if (min_state == TRANS_STATE_COMPLETED) 870 btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); 871 else 872 btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); 873 874 while (1) { 875 wait_event(commit->commit_wait, commit->state >= min_state); 876 if (put) 877 btrfs_put_transaction(commit); 878 879 if (min_state < TRANS_STATE_COMPLETED) 880 break; 881 882 /* 883 * A transaction isn't really completed until all of the 884 * previous transactions are completed, but with fsync we can 885 * end up with SUPER_COMMITTED transactions before a COMPLETED 886 * transaction. Wait for those. 887 */ 888 889 spin_lock(&fs_info->trans_lock); 890 commit = list_first_entry_or_null(&fs_info->trans_list, 891 struct btrfs_transaction, 892 list); 893 if (!commit || commit->transid > transid) { 894 spin_unlock(&fs_info->trans_lock); 895 break; 896 } 897 refcount_inc(&commit->use_count); 898 put = true; 899 spin_unlock(&fs_info->trans_lock); 900 } 901 } 902 903 int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid) 904 { 905 struct btrfs_transaction *cur_trans = NULL, *t; 906 int ret = 0; 907 908 if (transid) { 909 if (transid <= fs_info->last_trans_committed) 910 goto out; 911 912 /* find specified transaction */ 913 spin_lock(&fs_info->trans_lock); 914 list_for_each_entry(t, &fs_info->trans_list, list) { 915 if (t->transid == transid) { 916 cur_trans = t; 917 refcount_inc(&cur_trans->use_count); 918 ret = 0; 919 break; 920 } 921 if (t->transid > transid) { 922 ret = 0; 923 break; 924 } 925 } 926 spin_unlock(&fs_info->trans_lock); 927 928 /* 929 * The specified transaction doesn't exist, or we 930 * raced with btrfs_commit_transaction 931 */ 932 if (!cur_trans) { 933 if (transid > fs_info->last_trans_committed) 934 ret = -EINVAL; 935 goto out; 936 } 937 } else { 938 /* find newest transaction that is committing | committed */ 939 spin_lock(&fs_info->trans_lock); 940 list_for_each_entry_reverse(t, &fs_info->trans_list, 941 list) { 942 if (t->state >= TRANS_STATE_COMMIT_START) { 943 if (t->state == TRANS_STATE_COMPLETED) 944 break; 945 cur_trans = t; 946 refcount_inc(&cur_trans->use_count); 947 break; 948 } 949 } 950 spin_unlock(&fs_info->trans_lock); 951 if (!cur_trans) 952 goto out; /* nothing committing|committed */ 953 } 954 955 wait_for_commit(cur_trans, TRANS_STATE_COMPLETED); 956 ret = cur_trans->aborted; 957 btrfs_put_transaction(cur_trans); 958 out: 959 return ret; 960 } 961 962 void btrfs_throttle(struct btrfs_fs_info *fs_info) 963 { 964 wait_current_trans(fs_info); 965 } 966 967 bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans) 968 { 969 struct btrfs_transaction *cur_trans = trans->transaction; 970 971 if (cur_trans->state >= TRANS_STATE_COMMIT_START || 972 test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags)) 973 return true; 974 975 if (btrfs_check_space_for_delayed_refs(trans->fs_info)) 976 return true; 977 978 return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50); 979 } 980 981 static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans) 982 983 { 984 struct btrfs_fs_info *fs_info = trans->fs_info; 985 986 if (!trans->block_rsv) { 987 ASSERT(!trans->bytes_reserved); 988 return; 989 } 990 991 if (!trans->bytes_reserved) 992 return; 993 994 ASSERT(trans->block_rsv == &fs_info->trans_block_rsv); 995 trace_btrfs_space_reservation(fs_info, "transaction", 996 trans->transid, trans->bytes_reserved, 0); 997 btrfs_block_rsv_release(fs_info, trans->block_rsv, 998 trans->bytes_reserved, NULL); 999 trans->bytes_reserved = 0; 1000 } 1001 1002 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, 1003 int throttle) 1004 { 1005 struct btrfs_fs_info *info = trans->fs_info; 1006 struct btrfs_transaction *cur_trans = trans->transaction; 1007 int err = 0; 1008 1009 if (refcount_read(&trans->use_count) > 1) { 1010 refcount_dec(&trans->use_count); 1011 trans->block_rsv = trans->orig_rsv; 1012 return 0; 1013 } 1014 1015 btrfs_trans_release_metadata(trans); 1016 trans->block_rsv = NULL; 1017 1018 btrfs_create_pending_block_groups(trans); 1019 1020 btrfs_trans_release_chunk_metadata(trans); 1021 1022 if (trans->type & __TRANS_FREEZABLE) 1023 sb_end_intwrite(info->sb); 1024 1025 WARN_ON(cur_trans != info->running_transaction); 1026 WARN_ON(atomic_read(&cur_trans->num_writers) < 1); 1027 atomic_dec(&cur_trans->num_writers); 1028 extwriter_counter_dec(cur_trans, trans->type); 1029 1030 cond_wake_up(&cur_trans->writer_wait); 1031 1032 btrfs_lockdep_release(info, btrfs_trans_num_extwriters); 1033 btrfs_lockdep_release(info, btrfs_trans_num_writers); 1034 1035 btrfs_put_transaction(cur_trans); 1036 1037 if (current->journal_info == trans) 1038 current->journal_info = NULL; 1039 1040 if (throttle) 1041 btrfs_run_delayed_iputs(info); 1042 1043 if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) { 1044 wake_up_process(info->transaction_kthread); 1045 if (TRANS_ABORTED(trans)) 1046 err = trans->aborted; 1047 else 1048 err = -EROFS; 1049 } 1050 1051 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1052 return err; 1053 } 1054 1055 int btrfs_end_transaction(struct btrfs_trans_handle *trans) 1056 { 1057 return __btrfs_end_transaction(trans, 0); 1058 } 1059 1060 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans) 1061 { 1062 return __btrfs_end_transaction(trans, 1); 1063 } 1064 1065 /* 1066 * when btree blocks are allocated, they have some corresponding bits set for 1067 * them in one of two extent_io trees. This is used to make sure all of 1068 * those extents are sent to disk but does not wait on them 1069 */ 1070 int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, 1071 struct extent_io_tree *dirty_pages, int mark) 1072 { 1073 int err = 0; 1074 int werr = 0; 1075 struct address_space *mapping = fs_info->btree_inode->i_mapping; 1076 struct extent_state *cached_state = NULL; 1077 u64 start = 0; 1078 u64 end; 1079 1080 while (find_first_extent_bit(dirty_pages, start, &start, &end, 1081 mark, &cached_state)) { 1082 bool wait_writeback = false; 1083 1084 err = convert_extent_bit(dirty_pages, start, end, 1085 EXTENT_NEED_WAIT, 1086 mark, &cached_state); 1087 /* 1088 * convert_extent_bit can return -ENOMEM, which is most of the 1089 * time a temporary error. So when it happens, ignore the error 1090 * and wait for writeback of this range to finish - because we 1091 * failed to set the bit EXTENT_NEED_WAIT for the range, a call 1092 * to __btrfs_wait_marked_extents() would not know that 1093 * writeback for this range started and therefore wouldn't 1094 * wait for it to finish - we don't want to commit a 1095 * superblock that points to btree nodes/leafs for which 1096 * writeback hasn't finished yet (and without errors). 1097 * We cleanup any entries left in the io tree when committing 1098 * the transaction (through extent_io_tree_release()). 1099 */ 1100 if (err == -ENOMEM) { 1101 err = 0; 1102 wait_writeback = true; 1103 } 1104 if (!err) 1105 err = filemap_fdatawrite_range(mapping, start, end); 1106 if (err) 1107 werr = err; 1108 else if (wait_writeback) 1109 werr = filemap_fdatawait_range(mapping, start, end); 1110 free_extent_state(cached_state); 1111 cached_state = NULL; 1112 cond_resched(); 1113 start = end + 1; 1114 } 1115 return werr; 1116 } 1117 1118 /* 1119 * when btree blocks are allocated, they have some corresponding bits set for 1120 * them in one of two extent_io trees. This is used to make sure all of 1121 * those extents are on disk for transaction or log commit. We wait 1122 * on all the pages and clear them from the dirty pages state tree 1123 */ 1124 static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info, 1125 struct extent_io_tree *dirty_pages) 1126 { 1127 int err = 0; 1128 int werr = 0; 1129 struct address_space *mapping = fs_info->btree_inode->i_mapping; 1130 struct extent_state *cached_state = NULL; 1131 u64 start = 0; 1132 u64 end; 1133 1134 while (find_first_extent_bit(dirty_pages, start, &start, &end, 1135 EXTENT_NEED_WAIT, &cached_state)) { 1136 /* 1137 * Ignore -ENOMEM errors returned by clear_extent_bit(). 1138 * When committing the transaction, we'll remove any entries 1139 * left in the io tree. For a log commit, we don't remove them 1140 * after committing the log because the tree can be accessed 1141 * concurrently - we do it only at transaction commit time when 1142 * it's safe to do it (through extent_io_tree_release()). 1143 */ 1144 err = clear_extent_bit(dirty_pages, start, end, 1145 EXTENT_NEED_WAIT, &cached_state); 1146 if (err == -ENOMEM) 1147 err = 0; 1148 if (!err) 1149 err = filemap_fdatawait_range(mapping, start, end); 1150 if (err) 1151 werr = err; 1152 free_extent_state(cached_state); 1153 cached_state = NULL; 1154 cond_resched(); 1155 start = end + 1; 1156 } 1157 if (err) 1158 werr = err; 1159 return werr; 1160 } 1161 1162 static int btrfs_wait_extents(struct btrfs_fs_info *fs_info, 1163 struct extent_io_tree *dirty_pages) 1164 { 1165 bool errors = false; 1166 int err; 1167 1168 err = __btrfs_wait_marked_extents(fs_info, dirty_pages); 1169 if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags)) 1170 errors = true; 1171 1172 if (errors && !err) 1173 err = -EIO; 1174 return err; 1175 } 1176 1177 int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark) 1178 { 1179 struct btrfs_fs_info *fs_info = log_root->fs_info; 1180 struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages; 1181 bool errors = false; 1182 int err; 1183 1184 ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 1185 1186 err = __btrfs_wait_marked_extents(fs_info, dirty_pages); 1187 if ((mark & EXTENT_DIRTY) && 1188 test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags)) 1189 errors = true; 1190 1191 if ((mark & EXTENT_NEW) && 1192 test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags)) 1193 errors = true; 1194 1195 if (errors && !err) 1196 err = -EIO; 1197 return err; 1198 } 1199 1200 /* 1201 * When btree blocks are allocated the corresponding extents are marked dirty. 1202 * This function ensures such extents are persisted on disk for transaction or 1203 * log commit. 1204 * 1205 * @trans: transaction whose dirty pages we'd like to write 1206 */ 1207 static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans) 1208 { 1209 int ret; 1210 int ret2; 1211 struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages; 1212 struct btrfs_fs_info *fs_info = trans->fs_info; 1213 struct blk_plug plug; 1214 1215 blk_start_plug(&plug); 1216 ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY); 1217 blk_finish_plug(&plug); 1218 ret2 = btrfs_wait_extents(fs_info, dirty_pages); 1219 1220 extent_io_tree_release(&trans->transaction->dirty_pages); 1221 1222 if (ret) 1223 return ret; 1224 else if (ret2) 1225 return ret2; 1226 else 1227 return 0; 1228 } 1229 1230 /* 1231 * this is used to update the root pointer in the tree of tree roots. 1232 * 1233 * But, in the case of the extent allocation tree, updating the root 1234 * pointer may allocate blocks which may change the root of the extent 1235 * allocation tree. 1236 * 1237 * So, this loops and repeats and makes sure the cowonly root didn't 1238 * change while the root pointer was being updated in the metadata. 1239 */ 1240 static int update_cowonly_root(struct btrfs_trans_handle *trans, 1241 struct btrfs_root *root) 1242 { 1243 int ret; 1244 u64 old_root_bytenr; 1245 u64 old_root_used; 1246 struct btrfs_fs_info *fs_info = root->fs_info; 1247 struct btrfs_root *tree_root = fs_info->tree_root; 1248 1249 old_root_used = btrfs_root_used(&root->root_item); 1250 1251 while (1) { 1252 old_root_bytenr = btrfs_root_bytenr(&root->root_item); 1253 if (old_root_bytenr == root->node->start && 1254 old_root_used == btrfs_root_used(&root->root_item)) 1255 break; 1256 1257 btrfs_set_root_node(&root->root_item, root->node); 1258 ret = btrfs_update_root(trans, tree_root, 1259 &root->root_key, 1260 &root->root_item); 1261 if (ret) 1262 return ret; 1263 1264 old_root_used = btrfs_root_used(&root->root_item); 1265 } 1266 1267 return 0; 1268 } 1269 1270 /* 1271 * update all the cowonly tree roots on disk 1272 * 1273 * The error handling in this function may not be obvious. Any of the 1274 * failures will cause the file system to go offline. We still need 1275 * to clean up the delayed refs. 1276 */ 1277 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans) 1278 { 1279 struct btrfs_fs_info *fs_info = trans->fs_info; 1280 struct list_head *dirty_bgs = &trans->transaction->dirty_bgs; 1281 struct list_head *io_bgs = &trans->transaction->io_bgs; 1282 struct list_head *next; 1283 struct extent_buffer *eb; 1284 int ret; 1285 1286 /* 1287 * At this point no one can be using this transaction to modify any tree 1288 * and no one can start another transaction to modify any tree either. 1289 */ 1290 ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); 1291 1292 eb = btrfs_lock_root_node(fs_info->tree_root); 1293 ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 1294 0, &eb, BTRFS_NESTING_COW); 1295 btrfs_tree_unlock(eb); 1296 free_extent_buffer(eb); 1297 1298 if (ret) 1299 return ret; 1300 1301 ret = btrfs_run_dev_stats(trans); 1302 if (ret) 1303 return ret; 1304 ret = btrfs_run_dev_replace(trans); 1305 if (ret) 1306 return ret; 1307 ret = btrfs_run_qgroups(trans); 1308 if (ret) 1309 return ret; 1310 1311 ret = btrfs_setup_space_cache(trans); 1312 if (ret) 1313 return ret; 1314 1315 again: 1316 while (!list_empty(&fs_info->dirty_cowonly_roots)) { 1317 struct btrfs_root *root; 1318 next = fs_info->dirty_cowonly_roots.next; 1319 list_del_init(next); 1320 root = list_entry(next, struct btrfs_root, dirty_list); 1321 clear_bit(BTRFS_ROOT_DIRTY, &root->state); 1322 1323 list_add_tail(&root->dirty_list, 1324 &trans->transaction->switch_commits); 1325 ret = update_cowonly_root(trans, root); 1326 if (ret) 1327 return ret; 1328 } 1329 1330 /* Now flush any delayed refs generated by updating all of the roots */ 1331 ret = btrfs_run_delayed_refs(trans, (unsigned long)-1); 1332 if (ret) 1333 return ret; 1334 1335 while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) { 1336 ret = btrfs_write_dirty_block_groups(trans); 1337 if (ret) 1338 return ret; 1339 1340 /* 1341 * We're writing the dirty block groups, which could generate 1342 * delayed refs, which could generate more dirty block groups, 1343 * so we want to keep this flushing in this loop to make sure 1344 * everything gets run. 1345 */ 1346 ret = btrfs_run_delayed_refs(trans, (unsigned long)-1); 1347 if (ret) 1348 return ret; 1349 } 1350 1351 if (!list_empty(&fs_info->dirty_cowonly_roots)) 1352 goto again; 1353 1354 /* Update dev-replace pointer once everything is committed */ 1355 fs_info->dev_replace.committed_cursor_left = 1356 fs_info->dev_replace.cursor_left_last_write_of_item; 1357 1358 return 0; 1359 } 1360 1361 /* 1362 * If we had a pending drop we need to see if there are any others left in our 1363 * dead roots list, and if not clear our bit and wake any waiters. 1364 */ 1365 void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info) 1366 { 1367 /* 1368 * We put the drop in progress roots at the front of the list, so if the 1369 * first entry doesn't have UNFINISHED_DROP set we can wake everybody 1370 * up. 1371 */ 1372 spin_lock(&fs_info->trans_lock); 1373 if (!list_empty(&fs_info->dead_roots)) { 1374 struct btrfs_root *root = list_first_entry(&fs_info->dead_roots, 1375 struct btrfs_root, 1376 root_list); 1377 if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) { 1378 spin_unlock(&fs_info->trans_lock); 1379 return; 1380 } 1381 } 1382 spin_unlock(&fs_info->trans_lock); 1383 1384 btrfs_wake_unfinished_drop(fs_info); 1385 } 1386 1387 /* 1388 * dead roots are old snapshots that need to be deleted. This allocates 1389 * a dirty root struct and adds it into the list of dead roots that need to 1390 * be deleted 1391 */ 1392 void btrfs_add_dead_root(struct btrfs_root *root) 1393 { 1394 struct btrfs_fs_info *fs_info = root->fs_info; 1395 1396 spin_lock(&fs_info->trans_lock); 1397 if (list_empty(&root->root_list)) { 1398 btrfs_grab_root(root); 1399 1400 /* We want to process the partially complete drops first. */ 1401 if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) 1402 list_add(&root->root_list, &fs_info->dead_roots); 1403 else 1404 list_add_tail(&root->root_list, &fs_info->dead_roots); 1405 } 1406 spin_unlock(&fs_info->trans_lock); 1407 } 1408 1409 /* 1410 * Update each subvolume root and its relocation root, if it exists, in the tree 1411 * of tree roots. Also free log roots if they exist. 1412 */ 1413 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) 1414 { 1415 struct btrfs_fs_info *fs_info = trans->fs_info; 1416 struct btrfs_root *gang[8]; 1417 int i; 1418 int ret; 1419 1420 /* 1421 * At this point no one can be using this transaction to modify any tree 1422 * and no one can start another transaction to modify any tree either. 1423 */ 1424 ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); 1425 1426 spin_lock(&fs_info->fs_roots_radix_lock); 1427 while (1) { 1428 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, 1429 (void **)gang, 0, 1430 ARRAY_SIZE(gang), 1431 BTRFS_ROOT_TRANS_TAG); 1432 if (ret == 0) 1433 break; 1434 for (i = 0; i < ret; i++) { 1435 struct btrfs_root *root = gang[i]; 1436 int ret2; 1437 1438 /* 1439 * At this point we can neither have tasks logging inodes 1440 * from a root nor trying to commit a log tree. 1441 */ 1442 ASSERT(atomic_read(&root->log_writers) == 0); 1443 ASSERT(atomic_read(&root->log_commit[0]) == 0); 1444 ASSERT(atomic_read(&root->log_commit[1]) == 0); 1445 1446 radix_tree_tag_clear(&fs_info->fs_roots_radix, 1447 (unsigned long)root->root_key.objectid, 1448 BTRFS_ROOT_TRANS_TAG); 1449 spin_unlock(&fs_info->fs_roots_radix_lock); 1450 1451 btrfs_free_log(trans, root); 1452 ret2 = btrfs_update_reloc_root(trans, root); 1453 if (ret2) 1454 return ret2; 1455 1456 /* see comments in should_cow_block() */ 1457 clear_bit(BTRFS_ROOT_FORCE_COW, &root->state); 1458 smp_mb__after_atomic(); 1459 1460 if (root->commit_root != root->node) { 1461 list_add_tail(&root->dirty_list, 1462 &trans->transaction->switch_commits); 1463 btrfs_set_root_node(&root->root_item, 1464 root->node); 1465 } 1466 1467 ret2 = btrfs_update_root(trans, fs_info->tree_root, 1468 &root->root_key, 1469 &root->root_item); 1470 if (ret2) 1471 return ret2; 1472 spin_lock(&fs_info->fs_roots_radix_lock); 1473 btrfs_qgroup_free_meta_all_pertrans(root); 1474 } 1475 } 1476 spin_unlock(&fs_info->fs_roots_radix_lock); 1477 return 0; 1478 } 1479 1480 /* 1481 * defrag a given btree. 1482 * Every leaf in the btree is read and defragged. 1483 */ 1484 int btrfs_defrag_root(struct btrfs_root *root) 1485 { 1486 struct btrfs_fs_info *info = root->fs_info; 1487 struct btrfs_trans_handle *trans; 1488 int ret; 1489 1490 if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state)) 1491 return 0; 1492 1493 while (1) { 1494 trans = btrfs_start_transaction(root, 0); 1495 if (IS_ERR(trans)) { 1496 ret = PTR_ERR(trans); 1497 break; 1498 } 1499 1500 ret = btrfs_defrag_leaves(trans, root); 1501 1502 btrfs_end_transaction(trans); 1503 btrfs_btree_balance_dirty(info); 1504 cond_resched(); 1505 1506 if (btrfs_fs_closing(info) || ret != -EAGAIN) 1507 break; 1508 1509 if (btrfs_defrag_cancelled(info)) { 1510 btrfs_debug(info, "defrag_root cancelled"); 1511 ret = -EAGAIN; 1512 break; 1513 } 1514 } 1515 clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state); 1516 return ret; 1517 } 1518 1519 /* 1520 * Do all special snapshot related qgroup dirty hack. 1521 * 1522 * Will do all needed qgroup inherit and dirty hack like switch commit 1523 * roots inside one transaction and write all btree into disk, to make 1524 * qgroup works. 1525 */ 1526 static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, 1527 struct btrfs_root *src, 1528 struct btrfs_root *parent, 1529 struct btrfs_qgroup_inherit *inherit, 1530 u64 dst_objectid) 1531 { 1532 struct btrfs_fs_info *fs_info = src->fs_info; 1533 int ret; 1534 1535 /* 1536 * Save some performance in the case that qgroups are not 1537 * enabled. If this check races with the ioctl, rescan will 1538 * kick in anyway. 1539 */ 1540 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 1541 return 0; 1542 1543 /* 1544 * Ensure dirty @src will be committed. Or, after coming 1545 * commit_fs_roots() and switch_commit_roots(), any dirty but not 1546 * recorded root will never be updated again, causing an outdated root 1547 * item. 1548 */ 1549 ret = record_root_in_trans(trans, src, 1); 1550 if (ret) 1551 return ret; 1552 1553 /* 1554 * btrfs_qgroup_inherit relies on a consistent view of the usage for the 1555 * src root, so we must run the delayed refs here. 1556 * 1557 * However this isn't particularly fool proof, because there's no 1558 * synchronization keeping us from changing the tree after this point 1559 * before we do the qgroup_inherit, or even from making changes while 1560 * we're doing the qgroup_inherit. But that's a problem for the future, 1561 * for now flush the delayed refs to narrow the race window where the 1562 * qgroup counters could end up wrong. 1563 */ 1564 ret = btrfs_run_delayed_refs(trans, (unsigned long)-1); 1565 if (ret) { 1566 btrfs_abort_transaction(trans, ret); 1567 return ret; 1568 } 1569 1570 ret = commit_fs_roots(trans); 1571 if (ret) 1572 goto out; 1573 ret = btrfs_qgroup_account_extents(trans); 1574 if (ret < 0) 1575 goto out; 1576 1577 /* Now qgroup are all updated, we can inherit it to new qgroups */ 1578 ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid, 1579 inherit); 1580 if (ret < 0) 1581 goto out; 1582 1583 /* 1584 * Now we do a simplified commit transaction, which will: 1585 * 1) commit all subvolume and extent tree 1586 * To ensure all subvolume and extent tree have a valid 1587 * commit_root to accounting later insert_dir_item() 1588 * 2) write all btree blocks onto disk 1589 * This is to make sure later btree modification will be cowed 1590 * Or commit_root can be populated and cause wrong qgroup numbers 1591 * In this simplified commit, we don't really care about other trees 1592 * like chunk and root tree, as they won't affect qgroup. 1593 * And we don't write super to avoid half committed status. 1594 */ 1595 ret = commit_cowonly_roots(trans); 1596 if (ret) 1597 goto out; 1598 switch_commit_roots(trans); 1599 ret = btrfs_write_and_wait_transaction(trans); 1600 if (ret) 1601 btrfs_handle_fs_error(fs_info, ret, 1602 "Error while writing out transaction for qgroup"); 1603 1604 out: 1605 /* 1606 * Force parent root to be updated, as we recorded it before so its 1607 * last_trans == cur_transid. 1608 * Or it won't be committed again onto disk after later 1609 * insert_dir_item() 1610 */ 1611 if (!ret) 1612 ret = record_root_in_trans(trans, parent, 1); 1613 return ret; 1614 } 1615 1616 /* 1617 * new snapshots need to be created at a very specific time in the 1618 * transaction commit. This does the actual creation. 1619 * 1620 * Note: 1621 * If the error which may affect the commitment of the current transaction 1622 * happens, we should return the error number. If the error which just affect 1623 * the creation of the pending snapshots, just return 0. 1624 */ 1625 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, 1626 struct btrfs_pending_snapshot *pending) 1627 { 1628 1629 struct btrfs_fs_info *fs_info = trans->fs_info; 1630 struct btrfs_key key; 1631 struct btrfs_root_item *new_root_item; 1632 struct btrfs_root *tree_root = fs_info->tree_root; 1633 struct btrfs_root *root = pending->root; 1634 struct btrfs_root *parent_root; 1635 struct btrfs_block_rsv *rsv; 1636 struct inode *parent_inode = pending->dir; 1637 struct btrfs_path *path; 1638 struct btrfs_dir_item *dir_item; 1639 struct extent_buffer *tmp; 1640 struct extent_buffer *old; 1641 struct timespec64 cur_time; 1642 int ret = 0; 1643 u64 to_reserve = 0; 1644 u64 index = 0; 1645 u64 objectid; 1646 u64 root_flags; 1647 unsigned int nofs_flags; 1648 struct fscrypt_name fname; 1649 1650 ASSERT(pending->path); 1651 path = pending->path; 1652 1653 ASSERT(pending->root_item); 1654 new_root_item = pending->root_item; 1655 1656 /* 1657 * We're inside a transaction and must make sure that any potential 1658 * allocations with GFP_KERNEL in fscrypt won't recurse back to 1659 * filesystem. 1660 */ 1661 nofs_flags = memalloc_nofs_save(); 1662 pending->error = fscrypt_setup_filename(parent_inode, 1663 &pending->dentry->d_name, 0, 1664 &fname); 1665 memalloc_nofs_restore(nofs_flags); 1666 if (pending->error) 1667 goto free_pending; 1668 1669 pending->error = btrfs_get_free_objectid(tree_root, &objectid); 1670 if (pending->error) 1671 goto free_fname; 1672 1673 /* 1674 * Make qgroup to skip current new snapshot's qgroupid, as it is 1675 * accounted by later btrfs_qgroup_inherit(). 1676 */ 1677 btrfs_set_skip_qgroup(trans, objectid); 1678 1679 btrfs_reloc_pre_snapshot(pending, &to_reserve); 1680 1681 if (to_reserve > 0) { 1682 pending->error = btrfs_block_rsv_add(fs_info, 1683 &pending->block_rsv, 1684 to_reserve, 1685 BTRFS_RESERVE_NO_FLUSH); 1686 if (pending->error) 1687 goto clear_skip_qgroup; 1688 } 1689 1690 key.objectid = objectid; 1691 key.offset = (u64)-1; 1692 key.type = BTRFS_ROOT_ITEM_KEY; 1693 1694 rsv = trans->block_rsv; 1695 trans->block_rsv = &pending->block_rsv; 1696 trans->bytes_reserved = trans->block_rsv->reserved; 1697 trace_btrfs_space_reservation(fs_info, "transaction", 1698 trans->transid, 1699 trans->bytes_reserved, 1); 1700 parent_root = BTRFS_I(parent_inode)->root; 1701 ret = record_root_in_trans(trans, parent_root, 0); 1702 if (ret) 1703 goto fail; 1704 cur_time = current_time(parent_inode); 1705 1706 /* 1707 * insert the directory item 1708 */ 1709 ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index); 1710 if (ret) { 1711 btrfs_abort_transaction(trans, ret); 1712 goto fail; 1713 } 1714 1715 /* check if there is a file/dir which has the same name. */ 1716 dir_item = btrfs_lookup_dir_item(NULL, parent_root, path, 1717 btrfs_ino(BTRFS_I(parent_inode)), 1718 &fname.disk_name, 0); 1719 if (dir_item != NULL && !IS_ERR(dir_item)) { 1720 pending->error = -EEXIST; 1721 goto dir_item_existed; 1722 } else if (IS_ERR(dir_item)) { 1723 ret = PTR_ERR(dir_item); 1724 btrfs_abort_transaction(trans, ret); 1725 goto fail; 1726 } 1727 btrfs_release_path(path); 1728 1729 /* 1730 * pull in the delayed directory update 1731 * and the delayed inode item 1732 * otherwise we corrupt the FS during 1733 * snapshot 1734 */ 1735 ret = btrfs_run_delayed_items(trans); 1736 if (ret) { /* Transaction aborted */ 1737 btrfs_abort_transaction(trans, ret); 1738 goto fail; 1739 } 1740 1741 ret = record_root_in_trans(trans, root, 0); 1742 if (ret) { 1743 btrfs_abort_transaction(trans, ret); 1744 goto fail; 1745 } 1746 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 1747 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); 1748 btrfs_check_and_init_root_item(new_root_item); 1749 1750 root_flags = btrfs_root_flags(new_root_item); 1751 if (pending->readonly) 1752 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY; 1753 else 1754 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY; 1755 btrfs_set_root_flags(new_root_item, root_flags); 1756 1757 btrfs_set_root_generation_v2(new_root_item, 1758 trans->transid); 1759 generate_random_guid(new_root_item->uuid); 1760 memcpy(new_root_item->parent_uuid, root->root_item.uuid, 1761 BTRFS_UUID_SIZE); 1762 if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) { 1763 memset(new_root_item->received_uuid, 0, 1764 sizeof(new_root_item->received_uuid)); 1765 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); 1766 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); 1767 btrfs_set_root_stransid(new_root_item, 0); 1768 btrfs_set_root_rtransid(new_root_item, 0); 1769 } 1770 btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec); 1771 btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec); 1772 btrfs_set_root_otransid(new_root_item, trans->transid); 1773 1774 old = btrfs_lock_root_node(root); 1775 ret = btrfs_cow_block(trans, root, old, NULL, 0, &old, 1776 BTRFS_NESTING_COW); 1777 if (ret) { 1778 btrfs_tree_unlock(old); 1779 free_extent_buffer(old); 1780 btrfs_abort_transaction(trans, ret); 1781 goto fail; 1782 } 1783 1784 ret = btrfs_copy_root(trans, root, old, &tmp, objectid); 1785 /* clean up in any case */ 1786 btrfs_tree_unlock(old); 1787 free_extent_buffer(old); 1788 if (ret) { 1789 btrfs_abort_transaction(trans, ret); 1790 goto fail; 1791 } 1792 /* see comments in should_cow_block() */ 1793 set_bit(BTRFS_ROOT_FORCE_COW, &root->state); 1794 smp_wmb(); 1795 1796 btrfs_set_root_node(new_root_item, tmp); 1797 /* record when the snapshot was created in key.offset */ 1798 key.offset = trans->transid; 1799 ret = btrfs_insert_root(trans, tree_root, &key, new_root_item); 1800 btrfs_tree_unlock(tmp); 1801 free_extent_buffer(tmp); 1802 if (ret) { 1803 btrfs_abort_transaction(trans, ret); 1804 goto fail; 1805 } 1806 1807 /* 1808 * insert root back/forward references 1809 */ 1810 ret = btrfs_add_root_ref(trans, objectid, 1811 parent_root->root_key.objectid, 1812 btrfs_ino(BTRFS_I(parent_inode)), index, 1813 &fname.disk_name); 1814 if (ret) { 1815 btrfs_abort_transaction(trans, ret); 1816 goto fail; 1817 } 1818 1819 key.offset = (u64)-1; 1820 pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev); 1821 if (IS_ERR(pending->snap)) { 1822 ret = PTR_ERR(pending->snap); 1823 pending->snap = NULL; 1824 btrfs_abort_transaction(trans, ret); 1825 goto fail; 1826 } 1827 1828 ret = btrfs_reloc_post_snapshot(trans, pending); 1829 if (ret) { 1830 btrfs_abort_transaction(trans, ret); 1831 goto fail; 1832 } 1833 1834 /* 1835 * Do special qgroup accounting for snapshot, as we do some qgroup 1836 * snapshot hack to do fast snapshot. 1837 * To co-operate with that hack, we do hack again. 1838 * Or snapshot will be greatly slowed down by a subtree qgroup rescan 1839 */ 1840 ret = qgroup_account_snapshot(trans, root, parent_root, 1841 pending->inherit, objectid); 1842 if (ret < 0) 1843 goto fail; 1844 1845 ret = btrfs_insert_dir_item(trans, &fname.disk_name, 1846 BTRFS_I(parent_inode), &key, BTRFS_FT_DIR, 1847 index); 1848 /* We have check then name at the beginning, so it is impossible. */ 1849 BUG_ON(ret == -EEXIST || ret == -EOVERFLOW); 1850 if (ret) { 1851 btrfs_abort_transaction(trans, ret); 1852 goto fail; 1853 } 1854 1855 btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size + 1856 fname.disk_name.len * 2); 1857 parent_inode->i_mtime = inode_set_ctime_current(parent_inode); 1858 ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode)); 1859 if (ret) { 1860 btrfs_abort_transaction(trans, ret); 1861 goto fail; 1862 } 1863 ret = btrfs_uuid_tree_add(trans, new_root_item->uuid, 1864 BTRFS_UUID_KEY_SUBVOL, 1865 objectid); 1866 if (ret) { 1867 btrfs_abort_transaction(trans, ret); 1868 goto fail; 1869 } 1870 if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) { 1871 ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid, 1872 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 1873 objectid); 1874 if (ret && ret != -EEXIST) { 1875 btrfs_abort_transaction(trans, ret); 1876 goto fail; 1877 } 1878 } 1879 1880 fail: 1881 pending->error = ret; 1882 dir_item_existed: 1883 trans->block_rsv = rsv; 1884 trans->bytes_reserved = 0; 1885 clear_skip_qgroup: 1886 btrfs_clear_skip_qgroup(trans); 1887 free_fname: 1888 fscrypt_free_filename(&fname); 1889 free_pending: 1890 kfree(new_root_item); 1891 pending->root_item = NULL; 1892 btrfs_free_path(path); 1893 pending->path = NULL; 1894 1895 return ret; 1896 } 1897 1898 /* 1899 * create all the snapshots we've scheduled for creation 1900 */ 1901 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans) 1902 { 1903 struct btrfs_pending_snapshot *pending, *next; 1904 struct list_head *head = &trans->transaction->pending_snapshots; 1905 int ret = 0; 1906 1907 list_for_each_entry_safe(pending, next, head, list) { 1908 list_del(&pending->list); 1909 ret = create_pending_snapshot(trans, pending); 1910 if (ret) 1911 break; 1912 } 1913 return ret; 1914 } 1915 1916 static void update_super_roots(struct btrfs_fs_info *fs_info) 1917 { 1918 struct btrfs_root_item *root_item; 1919 struct btrfs_super_block *super; 1920 1921 super = fs_info->super_copy; 1922 1923 root_item = &fs_info->chunk_root->root_item; 1924 super->chunk_root = root_item->bytenr; 1925 super->chunk_root_generation = root_item->generation; 1926 super->chunk_root_level = root_item->level; 1927 1928 root_item = &fs_info->tree_root->root_item; 1929 super->root = root_item->bytenr; 1930 super->generation = root_item->generation; 1931 super->root_level = root_item->level; 1932 if (btrfs_test_opt(fs_info, SPACE_CACHE)) 1933 super->cache_generation = root_item->generation; 1934 else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags)) 1935 super->cache_generation = 0; 1936 if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags)) 1937 super->uuid_tree_generation = root_item->generation; 1938 } 1939 1940 int btrfs_transaction_in_commit(struct btrfs_fs_info *info) 1941 { 1942 struct btrfs_transaction *trans; 1943 int ret = 0; 1944 1945 spin_lock(&info->trans_lock); 1946 trans = info->running_transaction; 1947 if (trans) 1948 ret = (trans->state >= TRANS_STATE_COMMIT_START); 1949 spin_unlock(&info->trans_lock); 1950 return ret; 1951 } 1952 1953 int btrfs_transaction_blocked(struct btrfs_fs_info *info) 1954 { 1955 struct btrfs_transaction *trans; 1956 int ret = 0; 1957 1958 spin_lock(&info->trans_lock); 1959 trans = info->running_transaction; 1960 if (trans) 1961 ret = is_transaction_blocked(trans); 1962 spin_unlock(&info->trans_lock); 1963 return ret; 1964 } 1965 1966 void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans) 1967 { 1968 struct btrfs_fs_info *fs_info = trans->fs_info; 1969 struct btrfs_transaction *cur_trans; 1970 1971 /* Kick the transaction kthread. */ 1972 set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags); 1973 wake_up_process(fs_info->transaction_kthread); 1974 1975 /* take transaction reference */ 1976 cur_trans = trans->transaction; 1977 refcount_inc(&cur_trans->use_count); 1978 1979 btrfs_end_transaction(trans); 1980 1981 /* 1982 * Wait for the current transaction commit to start and block 1983 * subsequent transaction joins 1984 */ 1985 btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START); 1986 wait_event(fs_info->transaction_blocked_wait, 1987 cur_trans->state >= TRANS_STATE_COMMIT_START || 1988 TRANS_ABORTED(cur_trans)); 1989 btrfs_put_transaction(cur_trans); 1990 } 1991 1992 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err) 1993 { 1994 struct btrfs_fs_info *fs_info = trans->fs_info; 1995 struct btrfs_transaction *cur_trans = trans->transaction; 1996 1997 WARN_ON(refcount_read(&trans->use_count) > 1); 1998 1999 btrfs_abort_transaction(trans, err); 2000 2001 spin_lock(&fs_info->trans_lock); 2002 2003 /* 2004 * If the transaction is removed from the list, it means this 2005 * transaction has been committed successfully, so it is impossible 2006 * to call the cleanup function. 2007 */ 2008 BUG_ON(list_empty(&cur_trans->list)); 2009 2010 if (cur_trans == fs_info->running_transaction) { 2011 cur_trans->state = TRANS_STATE_COMMIT_DOING; 2012 spin_unlock(&fs_info->trans_lock); 2013 2014 /* 2015 * The thread has already released the lockdep map as reader 2016 * already in btrfs_commit_transaction(). 2017 */ 2018 btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers); 2019 wait_event(cur_trans->writer_wait, 2020 atomic_read(&cur_trans->num_writers) == 1); 2021 2022 spin_lock(&fs_info->trans_lock); 2023 } 2024 2025 /* 2026 * Now that we know no one else is still using the transaction we can 2027 * remove the transaction from the list of transactions. This avoids 2028 * the transaction kthread from cleaning up the transaction while some 2029 * other task is still using it, which could result in a use-after-free 2030 * on things like log trees, as it forces the transaction kthread to 2031 * wait for this transaction to be cleaned up by us. 2032 */ 2033 list_del_init(&cur_trans->list); 2034 2035 spin_unlock(&fs_info->trans_lock); 2036 2037 btrfs_cleanup_one_transaction(trans->transaction, fs_info); 2038 2039 spin_lock(&fs_info->trans_lock); 2040 if (cur_trans == fs_info->running_transaction) 2041 fs_info->running_transaction = NULL; 2042 spin_unlock(&fs_info->trans_lock); 2043 2044 if (trans->type & __TRANS_FREEZABLE) 2045 sb_end_intwrite(fs_info->sb); 2046 btrfs_put_transaction(cur_trans); 2047 btrfs_put_transaction(cur_trans); 2048 2049 trace_btrfs_transaction_commit(fs_info); 2050 2051 if (current->journal_info == trans) 2052 current->journal_info = NULL; 2053 2054 /* 2055 * If relocation is running, we can't cancel scrub because that will 2056 * result in a deadlock. Before relocating a block group, relocation 2057 * pauses scrub, then starts and commits a transaction before unpausing 2058 * scrub. If the transaction commit is being done by the relocation 2059 * task or triggered by another task and the relocation task is waiting 2060 * for the commit, and we end up here due to an error in the commit 2061 * path, then calling btrfs_scrub_cancel() will deadlock, as we are 2062 * asking for scrub to stop while having it asked to be paused higher 2063 * above in relocation code. 2064 */ 2065 if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) 2066 btrfs_scrub_cancel(fs_info); 2067 2068 kmem_cache_free(btrfs_trans_handle_cachep, trans); 2069 } 2070 2071 /* 2072 * Release reserved delayed ref space of all pending block groups of the 2073 * transaction and remove them from the list 2074 */ 2075 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) 2076 { 2077 struct btrfs_fs_info *fs_info = trans->fs_info; 2078 struct btrfs_block_group *block_group, *tmp; 2079 2080 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { 2081 btrfs_delayed_refs_rsv_release(fs_info, 1); 2082 list_del_init(&block_group->bg_list); 2083 } 2084 } 2085 2086 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) 2087 { 2088 /* 2089 * We use try_to_writeback_inodes_sb() here because if we used 2090 * btrfs_start_delalloc_roots we would deadlock with fs freeze. 2091 * Currently are holding the fs freeze lock, if we do an async flush 2092 * we'll do btrfs_join_transaction() and deadlock because we need to 2093 * wait for the fs freeze lock. Using the direct flushing we benefit 2094 * from already being in a transaction and our join_transaction doesn't 2095 * have to re-take the fs freeze lock. 2096 * 2097 * Note that try_to_writeback_inodes_sb() will only trigger writeback 2098 * if it can read lock sb->s_umount. It will always be able to lock it, 2099 * except when the filesystem is being unmounted or being frozen, but in 2100 * those cases sync_filesystem() is called, which results in calling 2101 * writeback_inodes_sb() while holding a write lock on sb->s_umount. 2102 * Note that we don't call writeback_inodes_sb() directly, because it 2103 * will emit a warning if sb->s_umount is not locked. 2104 */ 2105 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) 2106 try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC); 2107 return 0; 2108 } 2109 2110 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) 2111 { 2112 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) 2113 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); 2114 } 2115 2116 /* 2117 * Add a pending snapshot associated with the given transaction handle to the 2118 * respective handle. This must be called after the transaction commit started 2119 * and while holding fs_info->trans_lock. 2120 * This serves to guarantee a caller of btrfs_commit_transaction() that it can 2121 * safely free the pending snapshot pointer in case btrfs_commit_transaction() 2122 * returns an error. 2123 */ 2124 static void add_pending_snapshot(struct btrfs_trans_handle *trans) 2125 { 2126 struct btrfs_transaction *cur_trans = trans->transaction; 2127 2128 if (!trans->pending_snapshot) 2129 return; 2130 2131 lockdep_assert_held(&trans->fs_info->trans_lock); 2132 ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START); 2133 2134 list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots); 2135 } 2136 2137 static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval) 2138 { 2139 fs_info->commit_stats.commit_count++; 2140 fs_info->commit_stats.last_commit_dur = interval; 2141 fs_info->commit_stats.max_commit_dur = 2142 max_t(u64, fs_info->commit_stats.max_commit_dur, interval); 2143 fs_info->commit_stats.total_commit_dur += interval; 2144 } 2145 2146 int btrfs_commit_transaction(struct btrfs_trans_handle *trans) 2147 { 2148 struct btrfs_fs_info *fs_info = trans->fs_info; 2149 struct btrfs_transaction *cur_trans = trans->transaction; 2150 struct btrfs_transaction *prev_trans = NULL; 2151 int ret; 2152 ktime_t start_time; 2153 ktime_t interval; 2154 2155 ASSERT(refcount_read(&trans->use_count) == 1); 2156 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START); 2157 2158 clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags); 2159 2160 /* Stop the commit early if ->aborted is set */ 2161 if (TRANS_ABORTED(cur_trans)) { 2162 ret = cur_trans->aborted; 2163 goto lockdep_trans_commit_start_release; 2164 } 2165 2166 btrfs_trans_release_metadata(trans); 2167 trans->block_rsv = NULL; 2168 2169 /* 2170 * We only want one transaction commit doing the flushing so we do not 2171 * waste a bunch of time on lock contention on the extent root node. 2172 */ 2173 if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING, 2174 &cur_trans->delayed_refs.flags)) { 2175 /* 2176 * Make a pass through all the delayed refs we have so far. 2177 * Any running threads may add more while we are here. 2178 */ 2179 ret = btrfs_run_delayed_refs(trans, 0); 2180 if (ret) 2181 goto lockdep_trans_commit_start_release; 2182 } 2183 2184 btrfs_create_pending_block_groups(trans); 2185 2186 if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) { 2187 int run_it = 0; 2188 2189 /* this mutex is also taken before trying to set 2190 * block groups readonly. We need to make sure 2191 * that nobody has set a block group readonly 2192 * after a extents from that block group have been 2193 * allocated for cache files. btrfs_set_block_group_ro 2194 * will wait for the transaction to commit if it 2195 * finds BTRFS_TRANS_DIRTY_BG_RUN set. 2196 * 2197 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure 2198 * only one process starts all the block group IO. It wouldn't 2199 * hurt to have more than one go through, but there's no 2200 * real advantage to it either. 2201 */ 2202 mutex_lock(&fs_info->ro_block_group_mutex); 2203 if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN, 2204 &cur_trans->flags)) 2205 run_it = 1; 2206 mutex_unlock(&fs_info->ro_block_group_mutex); 2207 2208 if (run_it) { 2209 ret = btrfs_start_dirty_block_groups(trans); 2210 if (ret) 2211 goto lockdep_trans_commit_start_release; 2212 } 2213 } 2214 2215 spin_lock(&fs_info->trans_lock); 2216 if (cur_trans->state >= TRANS_STATE_COMMIT_START) { 2217 enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; 2218 2219 add_pending_snapshot(trans); 2220 2221 spin_unlock(&fs_info->trans_lock); 2222 refcount_inc(&cur_trans->use_count); 2223 2224 if (trans->in_fsync) 2225 want_state = TRANS_STATE_SUPER_COMMITTED; 2226 2227 btrfs_trans_state_lockdep_release(fs_info, 2228 BTRFS_LOCKDEP_TRANS_COMMIT_START); 2229 ret = btrfs_end_transaction(trans); 2230 wait_for_commit(cur_trans, want_state); 2231 2232 if (TRANS_ABORTED(cur_trans)) 2233 ret = cur_trans->aborted; 2234 2235 btrfs_put_transaction(cur_trans); 2236 2237 return ret; 2238 } 2239 2240 cur_trans->state = TRANS_STATE_COMMIT_START; 2241 wake_up(&fs_info->transaction_blocked_wait); 2242 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START); 2243 2244 if (cur_trans->list.prev != &fs_info->trans_list) { 2245 enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; 2246 2247 if (trans->in_fsync) 2248 want_state = TRANS_STATE_SUPER_COMMITTED; 2249 2250 prev_trans = list_entry(cur_trans->list.prev, 2251 struct btrfs_transaction, list); 2252 if (prev_trans->state < want_state) { 2253 refcount_inc(&prev_trans->use_count); 2254 spin_unlock(&fs_info->trans_lock); 2255 2256 wait_for_commit(prev_trans, want_state); 2257 2258 ret = READ_ONCE(prev_trans->aborted); 2259 2260 btrfs_put_transaction(prev_trans); 2261 if (ret) 2262 goto lockdep_release; 2263 } else { 2264 spin_unlock(&fs_info->trans_lock); 2265 } 2266 } else { 2267 spin_unlock(&fs_info->trans_lock); 2268 /* 2269 * The previous transaction was aborted and was already removed 2270 * from the list of transactions at fs_info->trans_list. So we 2271 * abort to prevent writing a new superblock that reflects a 2272 * corrupt state (pointing to trees with unwritten nodes/leafs). 2273 */ 2274 if (BTRFS_FS_ERROR(fs_info)) { 2275 ret = -EROFS; 2276 goto lockdep_release; 2277 } 2278 } 2279 2280 /* 2281 * Get the time spent on the work done by the commit thread and not 2282 * the time spent waiting on a previous commit 2283 */ 2284 start_time = ktime_get_ns(); 2285 2286 extwriter_counter_dec(cur_trans, trans->type); 2287 2288 ret = btrfs_start_delalloc_flush(fs_info); 2289 if (ret) 2290 goto lockdep_release; 2291 2292 ret = btrfs_run_delayed_items(trans); 2293 if (ret) 2294 goto lockdep_release; 2295 2296 /* 2297 * The thread has started/joined the transaction thus it holds the 2298 * lockdep map as a reader. It has to release it before acquiring the 2299 * lockdep map as a writer. 2300 */ 2301 btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); 2302 btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters); 2303 wait_event(cur_trans->writer_wait, 2304 extwriter_counter_read(cur_trans) == 0); 2305 2306 /* some pending stuffs might be added after the previous flush. */ 2307 ret = btrfs_run_delayed_items(trans); 2308 if (ret) { 2309 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); 2310 goto cleanup_transaction; 2311 } 2312 2313 btrfs_wait_delalloc_flush(fs_info); 2314 2315 /* 2316 * Wait for all ordered extents started by a fast fsync that joined this 2317 * transaction. Otherwise if this transaction commits before the ordered 2318 * extents complete we lose logged data after a power failure. 2319 */ 2320 btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered); 2321 wait_event(cur_trans->pending_wait, 2322 atomic_read(&cur_trans->pending_ordered) == 0); 2323 2324 btrfs_scrub_pause(fs_info); 2325 /* 2326 * Ok now we need to make sure to block out any other joins while we 2327 * commit the transaction. We could have started a join before setting 2328 * COMMIT_DOING so make sure to wait for num_writers to == 1 again. 2329 */ 2330 spin_lock(&fs_info->trans_lock); 2331 add_pending_snapshot(trans); 2332 cur_trans->state = TRANS_STATE_COMMIT_DOING; 2333 spin_unlock(&fs_info->trans_lock); 2334 2335 /* 2336 * The thread has started/joined the transaction thus it holds the 2337 * lockdep map as a reader. It has to release it before acquiring the 2338 * lockdep map as a writer. 2339 */ 2340 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); 2341 btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers); 2342 wait_event(cur_trans->writer_wait, 2343 atomic_read(&cur_trans->num_writers) == 1); 2344 2345 /* 2346 * Make lockdep happy by acquiring the state locks after 2347 * btrfs_trans_num_writers is released. If we acquired the state locks 2348 * before releasing the btrfs_trans_num_writers lock then lockdep would 2349 * complain because we did not follow the reverse order unlocking rule. 2350 */ 2351 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); 2352 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); 2353 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); 2354 2355 /* 2356 * We've started the commit, clear the flag in case we were triggered to 2357 * do an async commit but somebody else started before the transaction 2358 * kthread could do the work. 2359 */ 2360 clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags); 2361 2362 if (TRANS_ABORTED(cur_trans)) { 2363 ret = cur_trans->aborted; 2364 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); 2365 goto scrub_continue; 2366 } 2367 /* 2368 * the reloc mutex makes sure that we stop 2369 * the balancing code from coming in and moving 2370 * extents around in the middle of the commit 2371 */ 2372 mutex_lock(&fs_info->reloc_mutex); 2373 2374 /* 2375 * We needn't worry about the delayed items because we will 2376 * deal with them in create_pending_snapshot(), which is the 2377 * core function of the snapshot creation. 2378 */ 2379 ret = create_pending_snapshots(trans); 2380 if (ret) 2381 goto unlock_reloc; 2382 2383 /* 2384 * We insert the dir indexes of the snapshots and update the inode 2385 * of the snapshots' parents after the snapshot creation, so there 2386 * are some delayed items which are not dealt with. Now deal with 2387 * them. 2388 * 2389 * We needn't worry that this operation will corrupt the snapshots, 2390 * because all the tree which are snapshoted will be forced to COW 2391 * the nodes and leaves. 2392 */ 2393 ret = btrfs_run_delayed_items(trans); 2394 if (ret) 2395 goto unlock_reloc; 2396 2397 ret = btrfs_run_delayed_refs(trans, (unsigned long)-1); 2398 if (ret) 2399 goto unlock_reloc; 2400 2401 /* 2402 * make sure none of the code above managed to slip in a 2403 * delayed item 2404 */ 2405 btrfs_assert_delayed_root_empty(fs_info); 2406 2407 WARN_ON(cur_trans != trans->transaction); 2408 2409 ret = commit_fs_roots(trans); 2410 if (ret) 2411 goto unlock_reloc; 2412 2413 /* commit_fs_roots gets rid of all the tree log roots, it is now 2414 * safe to free the root of tree log roots 2415 */ 2416 btrfs_free_log_root_tree(trans, fs_info); 2417 2418 /* 2419 * Since fs roots are all committed, we can get a quite accurate 2420 * new_roots. So let's do quota accounting. 2421 */ 2422 ret = btrfs_qgroup_account_extents(trans); 2423 if (ret < 0) 2424 goto unlock_reloc; 2425 2426 ret = commit_cowonly_roots(trans); 2427 if (ret) 2428 goto unlock_reloc; 2429 2430 /* 2431 * The tasks which save the space cache and inode cache may also 2432 * update ->aborted, check it. 2433 */ 2434 if (TRANS_ABORTED(cur_trans)) { 2435 ret = cur_trans->aborted; 2436 goto unlock_reloc; 2437 } 2438 2439 cur_trans = fs_info->running_transaction; 2440 2441 btrfs_set_root_node(&fs_info->tree_root->root_item, 2442 fs_info->tree_root->node); 2443 list_add_tail(&fs_info->tree_root->dirty_list, 2444 &cur_trans->switch_commits); 2445 2446 btrfs_set_root_node(&fs_info->chunk_root->root_item, 2447 fs_info->chunk_root->node); 2448 list_add_tail(&fs_info->chunk_root->dirty_list, 2449 &cur_trans->switch_commits); 2450 2451 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 2452 btrfs_set_root_node(&fs_info->block_group_root->root_item, 2453 fs_info->block_group_root->node); 2454 list_add_tail(&fs_info->block_group_root->dirty_list, 2455 &cur_trans->switch_commits); 2456 } 2457 2458 switch_commit_roots(trans); 2459 2460 ASSERT(list_empty(&cur_trans->dirty_bgs)); 2461 ASSERT(list_empty(&cur_trans->io_bgs)); 2462 update_super_roots(fs_info); 2463 2464 btrfs_set_super_log_root(fs_info->super_copy, 0); 2465 btrfs_set_super_log_root_level(fs_info->super_copy, 0); 2466 memcpy(fs_info->super_for_commit, fs_info->super_copy, 2467 sizeof(*fs_info->super_copy)); 2468 2469 btrfs_commit_device_sizes(cur_trans); 2470 2471 clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags); 2472 clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags); 2473 2474 btrfs_trans_release_chunk_metadata(trans); 2475 2476 /* 2477 * Before changing the transaction state to TRANS_STATE_UNBLOCKED and 2478 * setting fs_info->running_transaction to NULL, lock tree_log_mutex to 2479 * make sure that before we commit our superblock, no other task can 2480 * start a new transaction and commit a log tree before we commit our 2481 * superblock. Anyone trying to commit a log tree locks this mutex before 2482 * writing its superblock. 2483 */ 2484 mutex_lock(&fs_info->tree_log_mutex); 2485 2486 spin_lock(&fs_info->trans_lock); 2487 cur_trans->state = TRANS_STATE_UNBLOCKED; 2488 fs_info->running_transaction = NULL; 2489 spin_unlock(&fs_info->trans_lock); 2490 mutex_unlock(&fs_info->reloc_mutex); 2491 2492 wake_up(&fs_info->transaction_wait); 2493 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); 2494 2495 /* If we have features changed, wake up the cleaner to update sysfs. */ 2496 if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) && 2497 fs_info->cleaner_kthread) 2498 wake_up_process(fs_info->cleaner_kthread); 2499 2500 ret = btrfs_write_and_wait_transaction(trans); 2501 if (ret) { 2502 btrfs_handle_fs_error(fs_info, ret, 2503 "Error while writing out transaction"); 2504 mutex_unlock(&fs_info->tree_log_mutex); 2505 goto scrub_continue; 2506 } 2507 2508 ret = write_all_supers(fs_info, 0); 2509 /* 2510 * the super is written, we can safely allow the tree-loggers 2511 * to go about their business 2512 */ 2513 mutex_unlock(&fs_info->tree_log_mutex); 2514 if (ret) 2515 goto scrub_continue; 2516 2517 /* 2518 * We needn't acquire the lock here because there is no other task 2519 * which can change it. 2520 */ 2521 cur_trans->state = TRANS_STATE_SUPER_COMMITTED; 2522 wake_up(&cur_trans->commit_wait); 2523 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); 2524 2525 btrfs_finish_extent_commit(trans); 2526 2527 if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags)) 2528 btrfs_clear_space_info_full(fs_info); 2529 2530 fs_info->last_trans_committed = cur_trans->transid; 2531 /* 2532 * We needn't acquire the lock here because there is no other task 2533 * which can change it. 2534 */ 2535 cur_trans->state = TRANS_STATE_COMPLETED; 2536 wake_up(&cur_trans->commit_wait); 2537 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); 2538 2539 spin_lock(&fs_info->trans_lock); 2540 list_del_init(&cur_trans->list); 2541 spin_unlock(&fs_info->trans_lock); 2542 2543 btrfs_put_transaction(cur_trans); 2544 btrfs_put_transaction(cur_trans); 2545 2546 if (trans->type & __TRANS_FREEZABLE) 2547 sb_end_intwrite(fs_info->sb); 2548 2549 trace_btrfs_transaction_commit(fs_info); 2550 2551 interval = ktime_get_ns() - start_time; 2552 2553 btrfs_scrub_continue(fs_info); 2554 2555 if (current->journal_info == trans) 2556 current->journal_info = NULL; 2557 2558 kmem_cache_free(btrfs_trans_handle_cachep, trans); 2559 2560 update_commit_stats(fs_info, interval); 2561 2562 return ret; 2563 2564 unlock_reloc: 2565 mutex_unlock(&fs_info->reloc_mutex); 2566 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); 2567 scrub_continue: 2568 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); 2569 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); 2570 btrfs_scrub_continue(fs_info); 2571 cleanup_transaction: 2572 btrfs_trans_release_metadata(trans); 2573 btrfs_cleanup_pending_block_groups(trans); 2574 btrfs_trans_release_chunk_metadata(trans); 2575 trans->block_rsv = NULL; 2576 btrfs_warn(fs_info, "Skipping commit of aborted transaction."); 2577 if (current->journal_info == trans) 2578 current->journal_info = NULL; 2579 cleanup_transaction(trans, ret); 2580 2581 return ret; 2582 2583 lockdep_release: 2584 btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); 2585 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); 2586 goto cleanup_transaction; 2587 2588 lockdep_trans_commit_start_release: 2589 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START); 2590 btrfs_end_transaction(trans); 2591 return ret; 2592 } 2593 2594 /* 2595 * return < 0 if error 2596 * 0 if there are no more dead_roots at the time of call 2597 * 1 there are more to be processed, call me again 2598 * 2599 * The return value indicates there are certainly more snapshots to delete, but 2600 * if there comes a new one during processing, it may return 0. We don't mind, 2601 * because btrfs_commit_super will poke cleaner thread and it will process it a 2602 * few seconds later. 2603 */ 2604 int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info) 2605 { 2606 struct btrfs_root *root; 2607 int ret; 2608 2609 spin_lock(&fs_info->trans_lock); 2610 if (list_empty(&fs_info->dead_roots)) { 2611 spin_unlock(&fs_info->trans_lock); 2612 return 0; 2613 } 2614 root = list_first_entry(&fs_info->dead_roots, 2615 struct btrfs_root, root_list); 2616 list_del_init(&root->root_list); 2617 spin_unlock(&fs_info->trans_lock); 2618 2619 btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid); 2620 2621 btrfs_kill_all_delayed_nodes(root); 2622 2623 if (btrfs_header_backref_rev(root->node) < 2624 BTRFS_MIXED_BACKREF_REV) 2625 ret = btrfs_drop_snapshot(root, 0, 0); 2626 else 2627 ret = btrfs_drop_snapshot(root, 1, 0); 2628 2629 btrfs_put_root(root); 2630 return (ret < 0) ? 0 : 1; 2631 } 2632 2633 /* 2634 * We only mark the transaction aborted and then set the file system read-only. 2635 * This will prevent new transactions from starting or trying to join this 2636 * one. 2637 * 2638 * This means that error recovery at the call site is limited to freeing 2639 * any local memory allocations and passing the error code up without 2640 * further cleanup. The transaction should complete as it normally would 2641 * in the call path but will return -EIO. 2642 * 2643 * We'll complete the cleanup in btrfs_end_transaction and 2644 * btrfs_commit_transaction. 2645 */ 2646 void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans, 2647 const char *function, 2648 unsigned int line, int errno, bool first_hit) 2649 { 2650 struct btrfs_fs_info *fs_info = trans->fs_info; 2651 2652 WRITE_ONCE(trans->aborted, errno); 2653 WRITE_ONCE(trans->transaction->aborted, errno); 2654 if (first_hit && errno == -ENOSPC) 2655 btrfs_dump_space_info_for_trans_abort(fs_info); 2656 /* Wake up anybody who may be waiting on this transaction */ 2657 wake_up(&fs_info->transaction_wait); 2658 wake_up(&fs_info->transaction_blocked_wait); 2659 __btrfs_handle_fs_error(fs_info, function, line, errno, NULL); 2660 } 2661 2662 int __init btrfs_transaction_init(void) 2663 { 2664 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", 2665 sizeof(struct btrfs_trans_handle), 0, 2666 SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL); 2667 if (!btrfs_trans_handle_cachep) 2668 return -ENOMEM; 2669 return 0; 2670 } 2671 2672 void __cold btrfs_transaction_exit(void) 2673 { 2674 kmem_cache_destroy(btrfs_trans_handle_cachep); 2675 } 2676