// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/timekeeping.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "dir-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "ordered-data.h"
#include "delayed-inode.h"

static struct kmem_cache *btrfs_trans_handle_cachep;

/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call start_transaction() variants. Except btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling all
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_PREP]]
 * |
 * | If there are simultaneous calls to btrfs_commit_transaction() one will win
 * | the race and the rest will wait for the winner to commit the transaction.
 * |
 * | The winner will wait for the previous running transaction to completely
 * | finish if there is one.
 * |
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for current transaction to be committed by others.
 * |   Other btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying extent tree) to creating pending
 * | snapshots, running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |						    Transaction N+1
 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update	    |
 * | super blocks.				    |
 * |						    |
 * | At this stage, new transaction is allowed to   |
 * | start.					    |
 * | All new start_transaction() calls will be	    |
 * | attached to transid N+1.			    |
 * |						    |
 * | To next stage:				    |
 * |  Until all tree blocks and super blocks are    |
 * |  written to block devices			    |
 * V						    |
 * Transaction N [[TRANS_STATE_COMPLETED]]	    V
 * All tree blocks and super blocks are written.    Transaction N+1
 * This transaction is finished and all its	    [[TRANS_STATE_COMMIT_START]]
 * data structures will be cleaned up.		    | Life goes on
 */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_PREP]	= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_SUPER_COMMITTED]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};

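/*
 * Drop a reference on a transaction. On the final put, sanity check that no
 * state is left behind and free the transaction, releasing any block groups
 * that were left on ->deleted_bgs by an aborted commit.
 */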
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!xa_empty(&transaction->delayed_refs.head_refs));
		WARN_ON(!xa_empty(&transaction->delayed_refs.dirty_extents));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			/*
			 * Not strictly necessary to lock, as no other task will be using a
			 * block_group on the deleted_bgs list during a transaction abort.
			 */
			spin_lock(&transaction->fs_info->unused_bgs_lock);
			list_del_init(&cache->bg_list);
			spin_unlock(&transaction->fs_info->unused_bgs_lock);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}

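/*
 * Make the new tree roots of all modified subvolumes the commit roots, and
 * free the roots that were dropped during this transaction. Runs inside the
 * commit critical section, with no concurrent tree modifications possible.
 */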
static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING,
	       "cur_trans->state=%d", cur_trans->state);

	down_write(&fs_info->commit_root_sem);

	if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
		fs_info->last_reloc_trans = trans->transid;

	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		btrfs_extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);

	up_write(&fs_info->commit_root_sem);
}

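/*
 * Counters for trans handles of the TRANS_EXTWRITERS types, i.e. handles
 * that may keep adding dirty data to the transaction. The transaction commit
 * waits for this count to drain before flushing outstanding data.
 */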
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * To be called after doing the chunk btree updates right after allocating a new
 * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a
 * chunk after all chunk btree updates and after finishing the second phase of
 * chunk allocation (btrfs_create_pending_block_groups()) in case some block
 * group had its chunk item insertion delayed to the second phase.
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved, NULL);
	trans->chunk_bytes_reserved = 0;
}

/*
 * Either allocate a new transaction or hop into the existing one.
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (BTRFS_FS_ERROR(fs_info)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			const int abort_error = cur_trans->aborted;

			spin_unlock(&fs_info->trans_lock);
			return abort_error;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
	 * current transaction, and commit it. If there is no transaction, just
	 * return ENOENT.
	 */
	if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL.
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc_obj(*cur_trans, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * Someone started a transaction after we unlocked. Make sure
		 * to redo the checks above.
		 */
		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		kfree(cur_trans);
		goto loop;
	} else if (BTRFS_FS_ERROR(fs_info)) {
		spin_unlock(&fs_info->trans_lock);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->pending_ordered, 0);
	init_waitqueue_head(&cur_trans->pending_wait);
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	xa_init(&cur_trans->delayed_refs.head_refs);
	xa_init(&cur_trans->delayed_refs.dirty_extents);

	/*
	 * Although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	btrfs_extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
				  IO_TREE_TRANS_DIRTY_PAGES);
	btrfs_extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
				  IO_TREE_FS_PINNED_EXTENTS);
	btrfs_set_fs_generation(fs_info, fs_info->generation + 1);
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * This does all the record keeping required to make sure that a shareable root
 * is properly recorded in a given transaction. This is required to make sure
 * the old root from before we joined the transaction is deleted when the
 * transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				bool force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	     btrfs_get_root_last_trans(root) < trans->transid) || force) {
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * See below for IN_TRANS_SETUP usage rules.
		 * We have the reloc mutex held now, so there
		 * is only one writer in this function.
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/*
		 * Make sure readers find IN_TRANS_SETUP before
		 * they find our root->last_trans update.
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (btrfs_get_root_last_trans(root) == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)btrfs_root_id(root),
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		btrfs_set_root_last_trans(root, trans->transid);

		/*
		 * This is pretty tricky. We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c. The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP. When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock. smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below.
		 */
		ret = btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return ret;
}

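/*
 * Queue a root being deleted on the transaction's dropped list so it gets
 * freed at commit time by switch_commit_roots(), and untag it so the commit
 * does not try to update its root item again.
 */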
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list. */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time. */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)btrfs_root_id(root),
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

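/*
 * Lockless fast path around record_root_in_trans(): skip shareable roots
 * that were already recorded in this transaction and are done with the reloc
 * root setup, otherwise take the reloc mutex and do the full record keeping.
 */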
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/*
	 * See record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers.
	 */
	smp_rmb();
	if (btrfs_get_root_last_trans(root) == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	ret = record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return ret;
}

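/*
 * A transaction is "blocked" while it is in the commit critical section
 * (COMMIT_START up to, but not including, UNBLOCKED) and has not been
 * aborted; new work should wait for it rather than pile on.
 */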
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!TRANS_ABORTED(trans));
}

/*
 * Wait for commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info, unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans) &&
	    (btrfs_blocked_trans_types[cur_trans->state] & type)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}

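/*
 * Decide whether a starting handle should block on a committing transaction:
 * only TRANS_START callers wait, and never while tree-log replay is running.
 */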
static bool may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return false;

	if (type == TRANS_START)
		return true;

	return false;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

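/*
 * Reserve the metadata bytes for a new handle plus the bytes for the delayed
 * refs it will generate in one go, so only one enospc flushing cycle happens
 * per transaction start. If that fails with a flush mode that can steal from
 * the global block reserve, retry without the delayed refs portion, which
 * will then be consumed from the global block reserve instead.
 */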
static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
					enum btrfs_reserve_flush_enum flush,
					u64 num_bytes,
					u64 *delayed_refs_bytes)
{
	struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
	u64 bytes = num_bytes + *delayed_refs_bytes;
	int ret;

	/*
	 * We want to reserve all the bytes we may need all at once, so we only
	 * do 1 enospc flushing cycle per transaction start.
	 */
	ret = btrfs_reserve_metadata_bytes(si, bytes, flush);

	/*
	 * If we are an emergency flush, which can steal from the global block
	 * reserve, then attempt to not reserve space for the delayed refs, as
	 * we will consume space for them from the global block reserve.
	 */
	if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
		bytes -= *delayed_refs_bytes;
		*delayed_refs_bytes = 0;
		ret = btrfs_reserve_metadata_bytes(si, bytes, flush);
	}

	return ret;
}

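/*
 * Common implementation behind all the btrfs_start/join/attach_transaction()
 * variants: reserve metadata and qgroup space for @num_items tree
 * modifications, join or create the running transaction according to @type,
 * and hand back a fully initialized trans handle. Nested starts from the
 * same task reuse the handle stashed in current->journal_info.
 */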
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	u64 delayed_refs_bytes = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		qgroup_reserved = num_items * fs_info->nodesize;
		/*
		 * Use prealloc for now, as there might be a currently running
		 * transaction that could free this reserved space prematurely
		 * by committing.
		 */
		ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
							 enforce_qgroups, false);
		if (ret)
			return ERR_PTR(ret);

		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		/*
		 * If we plan to insert/update/delete "num_items" from a btree,
		 * we will also generate delayed refs for extent buffers in the
		 * respective btree paths, so reserve space for the delayed refs
		 * that will be generated by the caller as it modifies btrees.
		 * Try to reserve them to avoid excessive use of the global
		 * block reserve.
		 */
		delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items);

		/*
		 * Do the reservation for the relocation root creation.
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes,
						   &delayed_refs_bytes);
		if (ret)
			goto reserve_fail;

		btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true);

		if (trans_rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !btrfs_block_rsv_full(delayed_refs_rsv)) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space. We still want these guys to refill the
		 * delayed block_rsv so just add 1 items worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref. We need this because we could
	 * have raced in and did an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info, type);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info, type);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	INIT_LIST_HEAD(&h->new_bgs);
	btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELREFS);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = trans_rsv;
		h->bytes_reserved = num_bytes;
		if (delayed_refs_bytes > 0) {
			trace_btrfs_space_reservation(fs_info,
						      "local_delayed_refs_rsv",
						      h->transid,
						      delayed_refs_bytes, 1);
			h->delayed_refs_bytes_reserved = delayed_refs_bytes;
			btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true);
			delayed_refs_bytes = 0;
		}
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
	 * ALLOC_FORCE the first run through, and then we won't allocate for
	 * anybody else who races in later. We don't care about the return
	 * value here.
	 */
	if (do_chunk_alloc && num_bytes) {
		struct btrfs_space_info *space_info = h->block_rsv->space_info;
		u64 flags = space_info->flags;

		btrfs_chunk_alloc(h, space_info, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * btrfs_record_root_in_trans() needs to alloc new extents, and may
	 * call btrfs_join_transaction() while we're also starting a
	 * transaction.
	 *
	 * Thus it needs to be called after current->journal_info is
	 * initialized, or we can deadlock.
	 */
	ret = btrfs_record_root_in_trans(h, root);
	if (ret) {
		/*
		 * The transaction handle is fully initialized and linked with
		 * other structures so it needs to be ended in case of errors,
		 * not just freed.
		 */
		btrfs_end_transaction(h);
		goto reserve_fail;
	}
	/*
	 * Now that we have found a transaction to be a part of, convert the
	 * qgroup reservation from prealloc to pertrans. A different transaction
	 * can't race in and free our pertrans out from under us.
	 */
	if (qgroup_reserved)
		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
	if (delayed_refs_bytes)
		btrfs_space_info_free_bytes_may_use(trans_rsv->space_info, delayed_refs_bytes);
reserve_fail:
	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
	return ERR_PTR(ret);
}

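/*
 * Start a new trans handle with enough metadata space reserved for
 * @num_items btree item insertions/updates/deletions, flushing space as
 * needed to satisfy the reservation.
 */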
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED.
 * This is similar to btrfs_attach_transaction() but it allows the join to
 * happen if the transaction commit already started but it's not yet in the
 * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING).
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Catch the running transaction.
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Catch the running transaction.
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT)) {
		int ret;

		ret = btrfs_wait_for_commit(root->fs_info, 0);
		if (ret)
			return ERR_PTR(ret);
	}

	return trans;
}

/* Wait for a transaction commit to reach at least the given state. */
static noinline void wait_for_commit(struct btrfs_transaction *commit,
				     const enum btrfs_trans_state min_state)
{
	struct btrfs_fs_info *fs_info = commit->fs_info;
	u64 transid = commit->transid;
	bool put = false;

	/*
	 * At the moment this function is called with min_state either being
	 * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED.
	 */
	if (min_state == TRANS_STATE_COMPLETED)
		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
	else
		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);

	while (1) {
		wait_event(commit->commit_wait, commit->state >= min_state);
		if (put)
			btrfs_put_transaction(commit);

		if (min_state < TRANS_STATE_COMPLETED)
			break;

		/*
		 * A transaction isn't really completed until all of the
		 * previous transactions are completed, but with fsync we can
		 * end up with SUPER_COMMITTED transactions before a COMPLETED
		 * transaction. Wait for those.
		 */

		spin_lock(&fs_info->trans_lock);
		commit = list_first_entry_or_null(&fs_info->trans_list,
						  struct btrfs_transaction,
						  list);
		if (!commit || commit->transid > transid) {
			spin_unlock(&fs_info->trans_lock);
			break;
		}
		refcount_inc(&commit->use_count);
		put = true;
		spin_unlock(&fs_info->trans_lock);
	}
}

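/*
 * Wait until the transaction with the given transid has fully completed, or,
 * when @transid is 0, wait for the newest committing transaction. Returns 0
 * if there is nothing to wait for, -EINVAL if @transid refers to a
 * transaction that never existed, or the aborted status of the waited-on
 * transaction.
 */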
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= btrfs_get_last_trans_committed(fs_info))
			return 0;

		/* Find the specified transaction. */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction.
		 */
		if (!cur_trans) {
			if (transid > btrfs_get_last_trans_committed(fs_info))
				ret = -EINVAL;
			return ret;
		}
	} else {
		/* Find the newest transaction that is committing or committed. */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		/* Nothing committing or committed. */
		if (!cur_trans)
			return ret;
	}

	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
	ret = cur_trans->aborted;
	btrfs_put_transaction(cur_trans);

	return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info, TRANS_START);
}

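/*
 * Tell the caller it should end its trans handle soon: a commit has started,
 * delayed refs are being flushed, there is no longer enough space for the
 * pending delayed refs, or the global block reserve is running low.
 */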
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
		return true;

	if (btrfs_check_space_for_delayed_refs(trans->fs_info))
		return true;

	return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50);
}

static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(trans->bytes_reserved == 0,
		       "trans->bytes_reserved=%llu", trans->bytes_reserved);
		ASSERT(trans->delayed_refs_bytes_reserved == 0,
		       "trans->delayed_refs_bytes_reserved=%llu",
		       trans->delayed_refs_bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved) {
		ASSERT(trans->delayed_refs_bytes_reserved == 0,
		       "trans->delayed_refs_bytes_reserved=%llu",
		       trans->delayed_refs_bytes_reserved);
		return;
	}

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
	trans->bytes_reserved = 0;

	if (!trans->delayed_refs_bytes_reserved)
		return;

	trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv",
				      trans->transid,
				      trans->delayed_refs_bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, &trans->delayed_rsv,
				trans->delayed_refs_bytes_reserved, NULL);
	trans->delayed_refs_bytes_reserved = 0;
}

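/*
 * Drop one use of a trans handle. On the last use, release the metadata
 * reservations, create any pending block groups, detach from the running
 * transaction and free the handle. Returns the abort error if the
 * transaction was aborted, -EROFS on a filesystem error, 0 otherwise.
 */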
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);

	btrfs_lockdep_release(info, btrfs_trans_num_extwriters);
	btrfs_lockdep_release(info, btrfs_trans_num_writers);

	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) {
		wake_up_process(info->transaction_kthread);
		if (TRANS_ABORTED(trans))
			ret = trans->aborted;
		else
			ret = -EROFS;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return ret;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}

/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are sent to disk but does not wait on them.
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
					   mark, &cached_state)) {
		bool wait_writeback = false;

		ret = btrfs_convert_extent_bit(dirty_pages, start, end,
					       EXTENT_NEED_WAIT,
					       mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leafs for which
		 * writeback hasn't finished yet (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (ret == -ENOMEM) {
			ret = 0;
			wait_writeback = true;
		}
		if (!ret)
			ret = filemap_fdatawrite_range(mapping, start, end);
		if (!ret && wait_writeback)
			btrfs_btree_wait_writeback_range(fs_info, start, end);
		btrfs_free_extent_state(cached_state);
		if (ret)
			break;
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return ret;
}

/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit. We wait
 * on all the pages and clear them from the dirty pages state tree.
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	int ret = 0;

	while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
					   EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		ret = btrfs_clear_extent_bit(dirty_pages, start, end,
					     EXTENT_NEED_WAIT, &cached_state);
		if (ret == -ENOMEM)
			ret = 0;
		if (!ret)
			btrfs_btree_wait_writeback_range(fs_info, start, end);
		btrfs_free_extent_state(cached_state);
		if (ret)
			break;
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return ret;
}

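/*
 * Wait for writeback of the transaction's marked btree extents and fold any
 * btree write error recorded on the filesystem into the return value.
 */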
static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
			      struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int ret;

	ret = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !ret)
		ret = -EIO;
	return ret;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int ret;

	ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID,
	       "root_id(log_root)=%llu", btrfs_root_id(log_root));

	ret = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY_LOG1) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_DIRTY_LOG2) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !ret)
		ret = -EIO;
	return ret;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	btrfs_extent_io_tree_release(&trans->transaction->dirty_pages);

	if (ret)
		return ret;
	else if (ret2)
		return ret2;
	else
		return 0;
}

/*
 * This is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * Update all the cowonly tree roots on disk.
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct extent_buffer *eb;
	int ret;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING,
	       "trans->transaction->state=%d", trans->transaction->state);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;

		root = list_first_entry(&fs_info->dirty_cowonly_roots,
					struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);
		list_move_tail(&root->dirty_list,
			       &trans->transaction->switch_commits);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	/* Now flush any delayed refs generated by updating all of the roots. */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret)
		return ret;

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;

		/*
		 * We're writing the dirty block groups, which could generate
		 * delayed refs, which could generate more dirty block groups,
		 * so we want to keep this flushing in this loop to make sure
		 * everything gets run.
		 */
		ret = btrfs_run_delayed_refs(trans, U64_MAX);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	/* Update the dev-replace pointer once everything is committed. */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}

/*
 * If we had a pending drop we need to see if there are any others left in our
 * dead roots list, and if not clear our bit and wake any waiters.
 */
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	/*
	 * We put the drop in progress roots at the front of the list, so if the
	 * first entry doesn't have UNFINISHED_DROP set we can wake everybody
	 * up.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&fs_info->dead_roots)) {
		struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
							   struct btrfs_root,
							   root_list);
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
			spin_unlock(&fs_info->trans_lock);
			return;
		}
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_wake_unfinished_drop(fs_info);
}

/*
 * Dead roots are old snapshots that need to be deleted. This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted.
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list)) {
		btrfs_grab_root(root);

		/* We want to process the partially complete drops first. */
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
			list_add(&root->root_list, &fs_info->dead_roots);
		else
			list_add_tail(&root->root_list, &fs_info->dead_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * Update each subvolume root and its relocation root, if it exists, in the tree
 * of tree roots. Also free log roots if they exist.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING,
	       "trans->transaction->state=%d", trans->transaction->state);

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			int ret2;

			/*
			 * At this point we can neither have tasks logging inodes
			 * from a root nor trying to commit a log tree.
			 */
			ASSERT(atomic_read(&root->log_writers) == 0,
			       "atomic_read(&root->log_writers)=%d",
			       atomic_read(&root->log_writers));
			ASSERT(atomic_read(&root->log_commit[0]) == 0,
			       "atomic_read(&root->log_commit[0])=%d",
			       atomic_read(&root->log_commit[0]));
			ASSERT(atomic_read(&root->log_commit[1]) == 0,
			       "atomic_read(&root->log_commit[1])=%d",
			       atomic_read(&root->log_commit[1]));

			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					     (unsigned long)btrfs_root_id(root),
					     BTRFS_ROOT_TRANS_TAG);
			btrfs_qgroup_free_meta_all_pertrans(root);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			ret2 = btrfs_update_reloc_root(trans, root);
			if (unlikely(ret2))
				return ret2;

			/* See comments in should_cow_block(). */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					      &trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			ret2 = btrfs_update_root(trans, fs_info->tree_root,
						 &root->root_key,
						 &root->root_item);
			if (unlikely(ret2))
				return ret2;
			spin_lock(&fs_info->fs_roots_radix_lock);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return 0;
}

/*
 * Do all the special snapshot related qgroup dirty hacks.
 *
 * Will do all needed qgroup inherit and dirty hacks, like switching commit
 * roots inside one transaction and writing all the btrees to disk, to make
 * qgroups work.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/*
	 * Save some performance in the case that qgroups are not enabled. If
	 * this check races with the ioctl, rescan will kick in anyway.
	 */
	if (!btrfs_qgroup_full_accounting(fs_info))
		return 0;

	/*
	 * Ensure dirty @src will be committed. Otherwise, after the coming
	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
	 * recorded root will never be updated again, causing an outdated root
	 * item.
	 */
	ret = record_root_in_trans(trans, src, 1);
	if (ret)
		return ret;

	/*
	 * btrfs_qgroup_inherit relies on a consistent view of the usage for the
	 * src root, so we must run the delayed refs here.
	 *
	 * However this isn't particularly fool proof, because there's no
	 * synchronization keeping us from changing the tree after this point
	 * before we do the qgroup_inherit, or even from making changes while
	 * we're doing the qgroup_inherit. But that's a problem for the future,
	 * for now flush the delayed refs to narrow the race window where the
	 * qgroup counters could end up wrong.
	 */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now qgroups are all updated, we can inherit them to the new qgroups. */
	ret = btrfs_qgroup_inherit(trans, btrfs_root_id(src), dst_objectid,
				   btrfs_root_id(parent), inherit);
	if (ret < 0)
		goto out;

	/*
	 * Now we do a simplified commit transaction, which will:
	 * 1) commit all subvolume and extent trees
	 *    To ensure all subvolume and extent trees have a valid
	 *    commit_root for the later insert_dir_item() to account against.
	 * 2) write all btree blocks onto disk
	 *    This is to make sure later btree modifications will be cowed,
	 *    or the commit_root could be repopulated and cause wrong qgroup
	 *    numbers.
	 * In this simplified commit, we don't really care about other trees
	 * like chunk and root tree, as they won't affect qgroup.
	 * And we don't write super to avoid half committed status.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans);
	ret = btrfs_write_and_wait_transaction(trans);
	if (unlikely(ret))
		btrfs_err(fs_info,
			  "error while writing out transaction during qgroup snapshot accounting: %d", ret);

out:
	/*
	 * Force the parent root to be updated, as we recorded it before so its
	 * last_trans == cur_transid. Otherwise it won't be committed again
	 * onto disk after the later insert_dir_item().
	 */
	if (!ret)
		ret = record_root_in_trans(trans, parent, 1);
	return ret;
}

1641 /*
1642 * New snapshots need to be created at a very specific time in the
1643 * transaction commit. This does the actual creation.
1644 *
1645 * Note:
1646 * If an error happens that may affect the commit of the current
1647 * transaction, return that error number. If an error only affects the
1648 * creation of the pending snapshot, store it in pending->error and return 0.
1649 */
1650 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1651 struct btrfs_pending_snapshot *pending)
1652 {
1653
1654 struct btrfs_fs_info *fs_info = trans->fs_info;
1655 struct btrfs_key key;
1656 struct btrfs_root_item *new_root_item;
1657 struct btrfs_root *tree_root = fs_info->tree_root;
1658 struct btrfs_root *root = pending->root;
1659 struct btrfs_root *parent_root;
1660 struct btrfs_block_rsv *rsv;
1661 struct btrfs_inode *parent_inode = pending->dir;
1662 BTRFS_PATH_AUTO_FREE(path);
1663 struct btrfs_dir_item *dir_item;
1664 struct extent_buffer *tmp;
1665 struct extent_buffer *old;
1666 struct timespec64 cur_time;
1667 int ret = 0;
1668 u64 to_reserve = 0;
1669 u64 index = 0;
1670 u64 objectid;
1671 u64 root_flags;
1672 unsigned int nofs_flags;
1673 struct fscrypt_name fname;
1674
1675 ASSERT(pending->path);
1676 path = pending->path;
1677
1678 ASSERT(pending->root_item);
1679 new_root_item = pending->root_item;
1680
1681 /*
1682 * We're inside a transaction and must make sure that any potential
1683 * allocations with GFP_KERNEL in fscrypt won't recurse back to
1684 * filesystem.
1685 */
1686 nofs_flags = memalloc_nofs_save();
1687 pending->error = fscrypt_setup_filename(&parent_inode->vfs_inode,
1688 &pending->dentry->d_name, 0,
1689 &fname);
1690 memalloc_nofs_restore(nofs_flags);
1691 if (unlikely(pending->error))
1692 goto free_pending;
1693
1694 pending->error = btrfs_get_free_objectid(tree_root, &objectid);
1695 if (unlikely(pending->error))
1696 goto free_fname;
1697
1698 /*
1699 * Make qgroup skip the new snapshot's qgroupid, as it will be
1700 * accounted for by the later btrfs_qgroup_inherit().
1701 */
1702 btrfs_set_skip_qgroup(trans, objectid);
1703
1704 btrfs_reloc_pre_snapshot(pending, &to_reserve);
1705
1706 if (to_reserve > 0) {
1707 pending->error = btrfs_block_rsv_add(fs_info,
1708 &pending->block_rsv,
1709 to_reserve,
1710 BTRFS_RESERVE_NO_FLUSH);
1711 if (unlikely(pending->error))
1712 goto clear_skip_qgroup;
1713 }
1714
1715 rsv = trans->block_rsv;
1716 trans->block_rsv = &pending->block_rsv;
1717 trans->bytes_reserved = trans->block_rsv->reserved;
1718 trace_btrfs_space_reservation(fs_info, "transaction",
1719 trans->transid,
1720 trans->bytes_reserved, 1);
1721 parent_root = parent_inode->root;
1722 ret = record_root_in_trans(trans, parent_root, 0);
1723 if (unlikely(ret))
1724 goto fail;
1725 cur_time = current_time(&parent_inode->vfs_inode);
1726
1727 /*
1728 * insert the directory item
1729 */
1730 ret = btrfs_set_inode_index(parent_inode, &index);
1731 if (unlikely(ret)) {
1732 btrfs_abort_transaction(trans, ret);
1733 goto fail;
1734 }
1735
1736 /* check if there is a file/dir which has the same name. */
1737 dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1738 btrfs_ino(parent_inode),
1739 &fname.disk_name, 0);
1740 if (unlikely(dir_item != NULL && !IS_ERR(dir_item))) {
1741 pending->error = -EEXIST;
1742 goto dir_item_existed;
1743 } else if (IS_ERR(dir_item)) {
1744 ret = PTR_ERR(dir_item);
1745 btrfs_abort_transaction(trans, ret);
1746 goto fail;
1747 }
1748 btrfs_release_path(path);
1749
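/*
 * -EEXIST (the qgroup already exists) is fine, and so is -ENOTCONN when
 * qgroups have meanwhile been disabled; any other error is fatal for
 * the transaction.
 */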
1750 ret = btrfs_create_qgroup(trans, objectid);
1751 if (ret && ret != -EEXIST) {
1752 if (unlikely(ret != -ENOTCONN || btrfs_qgroup_enabled(fs_info))) {
1753 btrfs_abort_transaction(trans, ret);
1754 goto fail;
1755 }
1756 }
1757
1758 /*
1759 * Pull in the delayed directory update and
1760 * the delayed inode item, otherwise we
1761 * would corrupt the FS during snapshot
1762 * creation.
1763 */
1764 ret = btrfs_run_delayed_items(trans);
1765 if (unlikely(ret)) {
1766 btrfs_abort_transaction(trans, ret);
1767 goto fail;
1768 }
1769
1770 ret = record_root_in_trans(trans, root, 0);
1771 if (unlikely(ret)) {
1772 btrfs_abort_transaction(trans, ret);
1773 goto fail;
1774 }
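/*
 * Record the transid of this snapshot in the source root. Tree blocks
 * with a generation at or below last_snapshot may be shared with a
 * snapshot, and need the full backref treatment when they are COWed
 * (see btrfs_block_can_be_shared()).
 */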
1775 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1776 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1777 btrfs_check_and_init_root_item(new_root_item);
1778
1779 root_flags = btrfs_root_flags(new_root_item);
1780 if (pending->readonly)
1781 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1782 else
1783 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1784 btrfs_set_root_flags(new_root_item, root_flags);
1785
1786 btrfs_set_root_generation_v2(new_root_item,
1787 trans->transid);
1788 generate_random_guid(new_root_item->uuid);
1789 memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1790 BTRFS_UUID_SIZE);
1791 if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1792 memset(new_root_item->received_uuid, 0,
1793 sizeof(new_root_item->received_uuid));
1794 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1795 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1796 btrfs_set_root_stransid(new_root_item, 0);
1797 btrfs_set_root_rtransid(new_root_item, 0);
1798 }
1799 btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1800 btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1801 btrfs_set_root_otransid(new_root_item, trans->transid);
1802
1803 old = btrfs_lock_root_node(root);
1804 ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
1805 BTRFS_NESTING_COW);
1806 if (unlikely(ret)) {
1807 btrfs_tree_unlock(old);
1808 free_extent_buffer(old);
1809 btrfs_abort_transaction(trans, ret);
1810 goto fail;
1811 }
1812
1813 ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1814 /* clean up in any case */
1815 btrfs_tree_unlock(old);
1816 free_extent_buffer(old);
1817 if (unlikely(ret)) {
1818 btrfs_abort_transaction(trans, ret);
1819 goto fail;
1820 }
1821 /* see comments in should_cow_block() */
1822 set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1823 smp_mb__after_atomic();
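/*
 * From now on the source root shares all of its tree blocks with the
 * new snapshot, so even blocks COWed earlier in this transaction must
 * be COWed again before modification; BTRFS_ROOT_FORCE_COW enforces
 * that.
 */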
1824
1825 btrfs_set_root_node(new_root_item, tmp);
1826 /* record when the snapshot was created in key.offset */
1827 key.objectid = objectid;
1828 key.type = BTRFS_ROOT_ITEM_KEY;
1829 key.offset = trans->transid;
1830 ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1831 btrfs_tree_unlock(tmp);
1832 free_extent_buffer(tmp);
1833 if (unlikely(ret)) {
1834 btrfs_abort_transaction(trans, ret);
1835 goto fail;
1836 }
1837
1838 /*
1839 * insert root back/forward references
1840 */
1841 ret = btrfs_add_root_ref(trans, objectid,
1842 btrfs_root_id(parent_root),
1843 btrfs_ino(parent_inode), index,
1844 &fname.disk_name);
1845 if (unlikely(ret)) {
1846 btrfs_abort_transaction(trans, ret);
1847 goto fail;
1848 }
1849
1850 key.offset = (u64)-1;
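/*
 * The dir item inserted below will reference the snapshot root with an
 * offset of -1, which always resolves to the most recent root item.
 */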
1851 pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
1852 if (IS_ERR(pending->snap)) {
1853 ret = PTR_ERR(pending->snap);
1854 pending->snap = NULL;
1855 btrfs_abort_transaction(trans, ret);
1856 goto fail;
1857 }
1858
1859 ret = btrfs_reloc_post_snapshot(trans, pending);
1860 if (unlikely(ret)) {
1861 btrfs_abort_transaction(trans, ret);
1862 goto fail;
1863 }
1864
1865 /*
1866 * Do the special qgroup accounting for snapshots, as we take some
1867 * qgroup shortcuts to make snapshot creation fast.
1868 * To cooperate with those shortcuts we need this one too, or the
1869 * snapshot would be greatly slowed down by a subtree qgroup rescan.
1870 */
1871 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL)
1872 ret = qgroup_account_snapshot(trans, root, parent_root,
1873 pending->inherit, objectid);
1874 else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1875 ret = btrfs_qgroup_inherit(trans, btrfs_root_id(root), objectid,
1876 btrfs_root_id(parent_root), pending->inherit);
1877 if (unlikely(ret < 0))
1878 goto fail;
1879
1880 ret = btrfs_insert_dir_item(trans, &fname.disk_name,
1881 parent_inode, &key, BTRFS_FT_DIR,
1882 index);
1883 if (unlikely(ret)) {
1884 btrfs_abort_transaction(trans, ret);
1885 goto fail;
1886 }
1887
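/*
 * A directory's i_size is the sum of the name lengths of its entries;
 * the new name counts twice, once for the dir item and once for the
 * dir index item.
 */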
1888 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
1889 fname.disk_name.len * 2);
1890 inode_set_mtime_to_ts(&parent_inode->vfs_inode,
1891 inode_set_ctime_current(&parent_inode->vfs_inode));
1892 ret = btrfs_update_inode_fallback(trans, parent_inode);
1893 if (unlikely(ret)) {
1894 btrfs_abort_transaction(trans, ret);
1895 goto fail;
1896 }
1897 ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
1898 BTRFS_UUID_KEY_SUBVOL,
1899 objectid);
1900 if (unlikely(ret)) {
1901 btrfs_abort_transaction(trans, ret);
1902 goto fail;
1903 }
1904 if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1905 ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1906 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1907 objectid);
1908 if (unlikely(ret && ret != -EEXIST)) {
1909 btrfs_abort_transaction(trans, ret);
1910 goto fail;
1911 }
1912 }
1913
1914 fail:
1915 pending->error = ret;
1916 dir_item_existed:
1917 trans->block_rsv = rsv;
1918 trans->bytes_reserved = 0;
1919 clear_skip_qgroup:
1920 btrfs_clear_skip_qgroup(trans);
1921 free_fname:
1922 fscrypt_free_filename(&fname);
1923 free_pending:
1924 kfree(new_root_item);
1925 pending->root_item = NULL;
1926 pending->path = NULL;
1927
1928 return ret;
1929 }
1930
1931 /*
1932 * create all the snapshots we've scheduled for creation
1933 */
1934 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1935 {
1936 struct btrfs_pending_snapshot *pending, *next;
1937 struct list_head *head = &trans->transaction->pending_snapshots;
1938 int ret = 0;
1939
1940 list_for_each_entry_safe(pending, next, head, list) {
1941 list_del(&pending->list);
1942 ret = create_pending_snapshot(trans, pending);
1943 if (unlikely(ret))
1944 break;
1945 }
1946 return ret;
1947 }
1948
1949 static void update_super_roots(struct btrfs_fs_info *fs_info)
1950 {
1951 struct btrfs_root_item *root_item;
1952 struct btrfs_super_block *super;
1953
1954 super = fs_info->super_copy;
1955
1956 root_item = &fs_info->chunk_root->root_item;
1957 super->chunk_root = root_item->bytenr;
1958 super->chunk_root_generation = root_item->generation;
1959 super->chunk_root_level = root_item->level;
1960
1961 root_item = &fs_info->tree_root->root_item;
1962 super->root = root_item->bytenr;
1963 super->generation = root_item->generation;
1964 super->root_level = root_item->level;
1965 if (btrfs_test_opt(fs_info, SPACE_CACHE))
1966 super->cache_generation = root_item->generation;
1967 else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
1968 super->cache_generation = 0;
1969 if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1970 super->uuid_tree_generation = root_item->generation;
1971
1972 if (btrfs_fs_incompat(fs_info, REMAP_TREE)) {
1973 root_item = &fs_info->remap_root->root_item;
1974 super->remap_root = root_item->bytenr;
1975 super->remap_root_generation = root_item->generation;
1976 super->remap_root_level = root_item->level;
1977 }
1978 }
1979
1980 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1981 {
1982 struct btrfs_transaction *trans;
1983 int ret = 0;
1984
1985 spin_lock(&info->trans_lock);
1986 trans = info->running_transaction;
1987 if (trans)
1988 ret = is_transaction_blocked(trans);
1989 spin_unlock(&info->trans_lock);
1990 return ret;
1991 }
1992
1993 void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
1994 {
1995 struct btrfs_fs_info *fs_info = trans->fs_info;
1996 struct btrfs_transaction *cur_trans;
1997
1998 /* Kick the transaction kthread. */
1999 set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
2000 wake_up_process(fs_info->transaction_kthread);
2001
2002 /* take transaction reference */
2003 cur_trans = trans->transaction;
2004 refcount_inc(&cur_trans->use_count);
2005
2006 btrfs_end_transaction(trans);
2007
2008 /*
2009 * Wait for the current transaction commit to start and block
2010 * subsequent transaction joins
2011 */
2012 btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2013 wait_event(fs_info->transaction_blocked_wait,
2014 cur_trans->state >= TRANS_STATE_COMMIT_START ||
2015 TRANS_ABORTED(cur_trans));
2016 btrfs_put_transaction(cur_trans);
2017 }
2018
2019 /*
2020 * If there is a running transaction, commit it; if it's already committing,
2021 * wait for its commit to complete. Does not start and commit a new transaction
2022 * if there isn't any running.
2023 */
2024 int btrfs_commit_current_transaction(struct btrfs_root *root)
2025 {
2026 struct btrfs_trans_handle *trans;
2027
2028 trans = btrfs_attach_transaction_barrier(root);
2029 if (IS_ERR(trans)) {
2030 int ret = PTR_ERR(trans);
2031
2032 return (ret == -ENOENT) ? 0 : ret;
2033 }
2034
2035 return btrfs_commit_transaction(trans);
2036 }
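
/*
 * A sketch of a typical caller (illustrative only, not from this file):
 * paths that only need already-started work to be durable can do
 *
 *	ret = btrfs_commit_current_transaction(root);
 *	if (ret)
 *		return ret;
 *
 * relying on the -ENOENT-to-0 mapping above when no transaction is
 * running.
 */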
2037
2038 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
2039 {
2040 struct btrfs_fs_info *fs_info = trans->fs_info;
2041 struct btrfs_transaction *cur_trans = trans->transaction;
2042
2043 WARN_ON(refcount_read(&trans->use_count) > 1);
2044
2045 btrfs_abort_transaction(trans, err);
2046
2047 spin_lock(&fs_info->trans_lock);
2048
2049 /*
2050 * If the transaction has been removed from the list, it means it was
2051 * committed successfully, so it is a bug to end up in this cleanup
2052 * function for it.
2053 */
2054 BUG_ON(list_empty(&cur_trans->list));
2055
2056 if (cur_trans == fs_info->running_transaction) {
2057 cur_trans->state = TRANS_STATE_COMMIT_DOING;
2058 spin_unlock(&fs_info->trans_lock);
2059
2060 /*
2061 * The thread has already released the lockdep map as a reader
2062 * in btrfs_commit_transaction().
2063 */
2064 btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
2065 wait_event(cur_trans->writer_wait,
2066 atomic_read(&cur_trans->num_writers) == 1);
2067
2068 spin_lock(&fs_info->trans_lock);
2069 }
2070
2071 /*
2072 * Now that we know no one else is still using the transaction we can
2073 * remove the transaction from the list of transactions. This avoids
2074 * the transaction kthread from cleaning up the transaction while some
2075 * other task is still using it, which could result in a use-after-free
2076 * on things like log trees, as it forces the transaction kthread to
2077 * wait for this transaction to be cleaned up by us.
2078 */
2079 list_del_init(&cur_trans->list);
2080
2081 spin_unlock(&fs_info->trans_lock);
2082
2083 btrfs_cleanup_one_transaction(trans->transaction);
2084
2085 spin_lock(&fs_info->trans_lock);
2086 if (cur_trans == fs_info->running_transaction)
2087 fs_info->running_transaction = NULL;
2088 spin_unlock(&fs_info->trans_lock);
2089
2090 if (trans->type & __TRANS_FREEZABLE)
2091 sb_end_intwrite(fs_info->sb);
2092 btrfs_put_transaction(cur_trans);
2093 btrfs_put_transaction(cur_trans);
2094
2095 trace_btrfs_transaction_commit(fs_info);
2096
2097 if (current->journal_info == trans)
2098 current->journal_info = NULL;
2099
2100 /*
2101 * If relocation is running, we can't cancel scrub because that will
2102 * result in a deadlock. Before relocating a block group, relocation
2103 * pauses scrub, then starts and commits a transaction before unpausing
2104 * scrub. If the transaction commit is being done by the relocation
2105 * task or triggered by another task and the relocation task is waiting
2106 * for the commit, and we end up here due to an error in the commit
2107 * path, then calling btrfs_scrub_cancel() will deadlock, as we are
2108 * asking for scrub to stop while having it asked to be paused higher
2109 * above in relocation code.
2110 */
2111 if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
2112 btrfs_scrub_cancel(fs_info);
2113
2114 kmem_cache_free(btrfs_trans_handle_cachep, trans);
2115 }
2116
2117 /*
2118 * Release reserved delayed ref space of all pending block groups of the
2119 * transaction and remove them from the list
2120 */
2121 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
2122 {
2123 struct btrfs_fs_info *fs_info = trans->fs_info;
2124 struct btrfs_block_group *block_group, *tmp;
2125
2126 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
2127 btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
2128 /*
2129 * Not strictly necessary to lock, as no other task will be using a
2130 * block_group on the new_bgs list during a transaction abort.
2131 */
2132 spin_lock(&fs_info->unused_bgs_lock);
2133 list_del_init(&block_group->bg_list);
2134 btrfs_put_block_group(block_group);
2135 spin_unlock(&fs_info->unused_bgs_lock);
2136 }
2137 }
2138
2139 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
2140 {
2141 /*
2142 * We use try_to_writeback_inodes_sb() here because if we used
2143 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
2144 * We are currently holding the fs freeze lock; if we did an async flush
2145 * we would call btrfs_join_transaction() and deadlock because we'd need
2146 * to wait for the fs freeze lock. With the direct flushing we benefit
2147 * from already being in a transaction, and our join_transaction doesn't
2148 * have to re-take the fs freeze lock.
2149 *
2150 * Note that try_to_writeback_inodes_sb() will only trigger writeback
2151 * if it can read lock sb->s_umount. It will always be able to lock it,
2152 * except when the filesystem is being unmounted or being frozen, but in
2153 * those cases sync_filesystem() is called, which results in calling
2154 * writeback_inodes_sb() while holding a write lock on sb->s_umount.
2155 * Note that we don't call writeback_inodes_sb() directly, because it
2156 * will emit a warning if sb->s_umount is not locked.
2157 */
2158 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2159 try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
2160 return 0;
2161 }
2162
2163 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
2164 {
2165 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2166 btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
2167 }
2168
2169 /*
2170 * Add the pending snapshot associated with the given transaction handle to
2171 * the transaction's list of pending snapshots. This must be called after the
2172 * transaction commit has started and while holding fs_info->trans_lock.
2173 * This serves to guarantee a caller of btrfs_commit_transaction() that it can
2174 * safely free the pending snapshot pointer in case btrfs_commit_transaction()
2175 * returns an error.
2176 */
2177 static void add_pending_snapshot(struct btrfs_trans_handle *trans)
2178 {
2179 struct btrfs_transaction *cur_trans = trans->transaction;
2180
2181 if (!trans->pending_snapshot)
2182 return;
2183
2184 lockdep_assert_held(&trans->fs_info->trans_lock);
2185 ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP,
2186 "cur_trans->state=%d", cur_trans->state);
2187
2188 list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
2189 }
2190
2191 static void update_commit_stats(struct btrfs_fs_info *fs_info)
2192 {
2193 ktime_t now = ktime_get_ns();
2194 ktime_t interval = now - fs_info->commit_stats.critical_section_start_time;
2195
2196 ASSERT(fs_info->commit_stats.critical_section_start_time);
2197
2198 fs_info->commit_stats.commit_count++;
2199 fs_info->commit_stats.last_commit_dur = interval;
2200 fs_info->commit_stats.max_commit_dur =
2201 max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
2202 fs_info->commit_stats.total_commit_dur += interval;
2203 fs_info->commit_stats.critical_section_start_time = 0;
2204 }
2205
2206 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2207 {
2208 struct btrfs_fs_info *fs_info = trans->fs_info;
2209 struct btrfs_transaction *cur_trans = trans->transaction;
2210 struct btrfs_transaction *prev_trans = NULL;
2211 int ret;
2212
2213 ASSERT(refcount_read(&trans->use_count) == 1,
2214 "refcount_read(&trans->use_count)=%d", refcount_read(&trans->use_count));
2215 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2216
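/*
 * Clear the flag asking for a commit: we are doing it now. The flag is
 * set by paths that want the transaction kthread to commit early.
 */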
2217 clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
2218
2219 /* Stop the commit early if ->aborted is set */
2220 if (TRANS_ABORTED(cur_trans)) {
2221 ret = cur_trans->aborted;
2222 goto lockdep_trans_commit_start_release;
2223 }
2224
2225 btrfs_trans_release_metadata(trans);
2226 trans->block_rsv = NULL;
2227
2228 /*
2229 * We only want one transaction commit doing the flushing so we do not
2230 * waste a bunch of time on lock contention on the extent root node.
2231 */
2232 if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
2233 &cur_trans->delayed_refs.flags)) {
2234 /*
2235 * Make a pass through all the delayed refs we have so far.
2236 * Any running threads may add more while we are here.
2237 */
2238 ret = btrfs_run_delayed_refs(trans, 0);
2239 if (ret)
2240 goto lockdep_trans_commit_start_release;
2241 }
2242
2243 btrfs_create_pending_block_groups(trans);
2244
2245 if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
2246 int run_it = 0;
2247
2248 /*
2249 * This mutex is also taken before trying to set block groups
2250 * read-only. We need to make sure that nobody has set a block
2251 * group read-only after extents from that block group have been
2252 * allocated for cache files. btrfs_set_block_group_ro() will
2253 * wait for the transaction to commit if it finds
2254 * BTRFS_TRANS_DIRTY_BG_RUN set.
2255 *
2256 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2257 * only one process starts all the block group IO. It wouldn't
2258 * hurt to have more than one go through, but there's no
2259 * real advantage to it either.
2260 */
2261 mutex_lock(&fs_info->ro_block_group_mutex);
2262 if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2263 &cur_trans->flags))
2264 run_it = 1;
2265 mutex_unlock(&fs_info->ro_block_group_mutex);
2266
2267 if (run_it) {
2268 ret = btrfs_start_dirty_block_groups(trans);
2269 if (unlikely(ret))
2270 goto lockdep_trans_commit_start_release;
2271 }
2272 }
2273
2274 spin_lock(&fs_info->trans_lock);
2275 if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
2276 enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2277
2278 add_pending_snapshot(trans);
2279
2280 spin_unlock(&fs_info->trans_lock);
2281 refcount_inc(&cur_trans->use_count);
2282
2283 if (trans->in_fsync)
2284 want_state = TRANS_STATE_SUPER_COMMITTED;
2285
2286 btrfs_trans_state_lockdep_release(fs_info,
2287 BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2288 ret = btrfs_end_transaction(trans);
2289 wait_for_commit(cur_trans, want_state);
2290
2291 if (TRANS_ABORTED(cur_trans))
2292 ret = cur_trans->aborted;
2293
2294 btrfs_put_transaction(cur_trans);
2295
2296 return ret;
2297 }
2298
2299 cur_trans->state = TRANS_STATE_COMMIT_PREP;
2300 wake_up(&fs_info->transaction_blocked_wait);
2301 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2302
2303 if (!list_is_first(&cur_trans->list, &fs_info->trans_list)) {
2304 enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2305
2306 if (trans->in_fsync)
2307 want_state = TRANS_STATE_SUPER_COMMITTED;
2308
2309 prev_trans = list_prev_entry(cur_trans, list);
2310 if (prev_trans->state < want_state) {
2311 refcount_inc(&prev_trans->use_count);
2312 spin_unlock(&fs_info->trans_lock);
2313
2314 wait_for_commit(prev_trans, want_state);
2315
2316 ret = READ_ONCE(prev_trans->aborted);
2317
2318 btrfs_put_transaction(prev_trans);
2319 if (unlikely(ret))
2320 goto lockdep_release;
2321 spin_lock(&fs_info->trans_lock);
2322 }
2323 } else {
2324 /*
2325 * The previous transaction was aborted and was already removed
2326 * from the list of transactions at fs_info->trans_list. So we
2327 * abort to prevent writing a new superblock that reflects a
2328 * corrupt state (pointing to trees with unwritten nodes/leaves).
2329 */
2330 if (BTRFS_FS_ERROR(fs_info)) {
2331 spin_unlock(&fs_info->trans_lock);
2332 ret = -EROFS;
2333 goto lockdep_release;
2334 }
2335 }
2336
2337 cur_trans->state = TRANS_STATE_COMMIT_START;
2338 wake_up(&fs_info->transaction_blocked_wait);
2339 spin_unlock(&fs_info->trans_lock);
2340
2341 /*
2342 * Record the start time so we measure the time spent on the commit
2343 * work itself, not the time spent waiting on a previous commit.
2344 */
2345 fs_info->commit_stats.critical_section_start_time = ktime_get_ns();
2346 extwriter_counter_dec(cur_trans, trans->type);
2347
2348 ret = btrfs_start_delalloc_flush(fs_info);
2349 if (unlikely(ret))
2350 goto lockdep_release;
2351
2352 ret = btrfs_run_delayed_items(trans);
2353 if (unlikely(ret))
2354 goto lockdep_release;
2355
2356 /*
2357 * The thread has started/joined the transaction thus it holds the
2358 * lockdep map as a reader. It has to release it before acquiring the
2359 * lockdep map as a writer.
2360 */
2361 btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
2362 btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters);
2363 wait_event(cur_trans->writer_wait,
2364 extwriter_counter_read(cur_trans) == 0);
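/*
 * All extwriters are gone now. Extwriters are handles from start/attach
 * style callers that may queue new work; plain joins are not counted
 * here and may keep attaching until TRANS_STATE_COMMIT_DOING below.
 */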
2365
2366 /* Some pending items might have been added after the previous flush. */
2367 ret = btrfs_run_delayed_items(trans);
2368 if (unlikely(ret)) {
2369 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2370 goto cleanup_transaction;
2371 }
2372
2373 btrfs_wait_delalloc_flush(fs_info);
2374
2375 /*
2376 * Wait for all ordered extents started by a fast fsync that joined this
2377 * transaction. Otherwise if this transaction commits before the ordered
2378 * extents complete we lose logged data after a power failure.
2379 */
2380 btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered);
2381 wait_event(cur_trans->pending_wait,
2382 atomic_read(&cur_trans->pending_ordered) == 0);
2383
2384 btrfs_scrub_pause(fs_info);
2385 /*
2386 * Ok now we need to make sure to block out any other joins while we
2387 * commit the transaction. We could have started a join before setting
2388 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
2389 */
2390 spin_lock(&fs_info->trans_lock);
2391 add_pending_snapshot(trans);
2392 cur_trans->state = TRANS_STATE_COMMIT_DOING;
2393 spin_unlock(&fs_info->trans_lock);
2394
2395 /*
2396 * The thread has started/joined the transaction thus it holds the
2397 * lockdep map as a reader. It has to release it before acquiring the
2398 * lockdep map as a writer.
2399 */
2400 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2401 btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
2402 wait_event(cur_trans->writer_wait,
2403 atomic_read(&cur_trans->num_writers) == 1);
2404
2405 /*
2406 * Make lockdep happy by acquiring the state locks after
2407 * btrfs_trans_num_writers is released. If we acquired the state locks
2408 * before releasing the btrfs_trans_num_writers lock then lockdep would
2409 * complain because we did not follow the reverse order unlocking rule.
2410 */
2411 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2412 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2413 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2414
2415 /*
2416 * We've started the commit, clear the flag in case we were triggered to
2417 * do an async commit but somebody else started before the transaction
2418 * kthread could do the work.
2419 */
2420 clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
2421
2422 if (TRANS_ABORTED(cur_trans)) {
2423 ret = cur_trans->aborted;
2424 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2425 goto scrub_continue;
2426 }
2427 /*
2428 * the reloc mutex makes sure that we stop
2429 * the balancing code from coming in and moving
2430 * extents around in the middle of the commit
2431 */
2432 mutex_lock(&fs_info->reloc_mutex);
2433
2434 /*
2435 * We needn't worry about the delayed items because we will
2436 * deal with them in create_pending_snapshot(), which is the
2437 * core function of the snapshot creation.
2438 */
2439 ret = create_pending_snapshots(trans);
2440 if (unlikely(ret))
2441 goto unlock_reloc;
2442
2443 /*
2444 * We insert the dir indexes of the snapshots and update the inode
2445 * of the snapshots' parents after the snapshot creation, so there
2446 * are some delayed items which are not dealt with. Now deal with
2447 * them.
2448 *
2449 * We needn't worry that this operation will corrupt the snapshots,
2450 * because all the trees which are snapshotted will be forced to COW
2451 * their nodes and leaves.
2452 */
2453 ret = btrfs_run_delayed_items(trans);
2454 if (unlikely(ret))
2455 goto unlock_reloc;
2456
2457 ret = btrfs_run_delayed_refs(trans, U64_MAX);
2458 if (unlikely(ret))
2459 goto unlock_reloc;
2460
2461 /*
2462 * make sure none of the code above managed to slip in a
2463 * delayed item
2464 */
2465 btrfs_assert_delayed_root_empty(fs_info);
2466
2467 WARN_ON(cur_trans != trans->transaction);
2468
2469 ret = commit_fs_roots(trans);
2470 if (unlikely(ret))
2471 goto unlock_reloc;
2472
2473 /* commit_fs_roots() gets rid of all the tree log roots, so it is
2474 * now safe to free the log root tree.
2475 */
2476 btrfs_free_log_root_tree(trans, fs_info);
2477
2478 /*
2479 * Since the fs roots are all committed, we can get a quite accurate
2480 * new_roots, so let's do the quota accounting now.
2481 */
2482 ret = btrfs_qgroup_account_extents(trans);
2483 if (unlikely(ret < 0))
2484 goto unlock_reloc;
2485
2486 ret = commit_cowonly_roots(trans);
2487 if (unlikely(ret))
2488 goto unlock_reloc;
2489
2490 /*
2491 * The tasks which save the space cache and inode cache may also
2492 * update ->aborted, check it.
2493 */
2494 if (TRANS_ABORTED(cur_trans)) {
2495 ret = cur_trans->aborted;
2496 goto unlock_reloc;
2497 }
2498
2499 cur_trans = fs_info->running_transaction;
2500
2501 btrfs_set_root_node(&fs_info->tree_root->root_item,
2502 fs_info->tree_root->node);
2503 list_add_tail(&fs_info->tree_root->dirty_list,
2504 &cur_trans->switch_commits);
2505
2506 btrfs_set_root_node(&fs_info->chunk_root->root_item,
2507 fs_info->chunk_root->node);
2508 list_add_tail(&fs_info->chunk_root->dirty_list,
2509 &cur_trans->switch_commits);
2510
2511 switch_commit_roots(trans);
2512
2513 ASSERT(list_empty(&cur_trans->dirty_bgs));
2514 ASSERT(list_empty(&cur_trans->io_bgs));
2515 update_super_roots(fs_info);
2516
2517 btrfs_set_super_log_root(fs_info->super_copy, 0);
2518 btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2519 memcpy(fs_info->super_for_commit, fs_info->super_copy,
2520 sizeof(*fs_info->super_copy));
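/*
 * super_for_commit is the stable copy that write_all_supers() works
 * from; super_copy may be modified again as soon as the next
 * transaction starts.
 */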
2521
2522 btrfs_commit_device_sizes(cur_trans);
2523
2524 clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2525 clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2526
2527 btrfs_trans_release_chunk_metadata(trans);
2528
2529 /*
2530 * Before changing the transaction state to TRANS_STATE_UNBLOCKED and
2531 * setting fs_info->running_transaction to NULL, lock tree_log_mutex to
2532 * make sure that no other task can start a new transaction and
2533 * commit a log tree before we commit our superblock. Anyone trying
2534 * to commit a log tree locks this mutex before writing its
2535 * superblock.
2536 */
2537 mutex_lock(&fs_info->tree_log_mutex);
2538
2539 spin_lock(&fs_info->trans_lock);
2540 cur_trans->state = TRANS_STATE_UNBLOCKED;
2541 fs_info->running_transaction = NULL;
2542 spin_unlock(&fs_info->trans_lock);
2543 mutex_unlock(&fs_info->reloc_mutex);
2544
2545 wake_up(&fs_info->transaction_wait);
2546 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2547
2548 /* If we have features changed, wake up the cleaner to update sysfs. */
2549 if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) &&
2550 fs_info->cleaner_kthread)
2551 wake_up_process(fs_info->cleaner_kthread);
2552
2553 ret = btrfs_write_and_wait_transaction(trans);
2554 if (unlikely(ret)) {
2555 btrfs_err(fs_info, "error while writing out transaction: %d", ret);
2556 mutex_unlock(&fs_info->tree_log_mutex);
2557 goto scrub_continue;
2558 }
2559
2560 ret = write_all_supers(fs_info, 0);
2561 /*
2562 * the super is written, we can safely allow the tree-loggers
2563 * to go about their business
2564 */
2565 mutex_unlock(&fs_info->tree_log_mutex);
2566 if (unlikely(ret))
2567 goto scrub_continue;
2568
2569 update_commit_stats(fs_info);
2570 /*
2571 * We needn't acquire the lock here because there is no other task
2572 * which can change it.
2573 */
2574 cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
2575 wake_up(&cur_trans->commit_wait);
2576 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2577
2578 ret = btrfs_finish_extent_commit(trans);
2579 if (unlikely(ret))
2580 goto scrub_continue;
2581
2582 if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2583 btrfs_clear_space_info_full(fs_info);
2584
2585 btrfs_set_last_trans_committed(fs_info, cur_trans->transid);
2586 /*
2587 * We needn't acquire the lock here because there is no other task
2588 * which can change it.
2589 */
2590 cur_trans->state = TRANS_STATE_COMPLETED;
2591 wake_up(&cur_trans->commit_wait);
2592 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2593
2594 spin_lock(&fs_info->trans_lock);
2595 list_del_init(&cur_trans->list);
2596 spin_unlock(&fs_info->trans_lock);
2597
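/*
 * Two puts: one for the reference our handle has held since the
 * transaction was joined, and one for the reference taken when the
 * transaction was created and added to fs_info->trans_list.
 */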
2598 btrfs_put_transaction(cur_trans);
2599 btrfs_put_transaction(cur_trans);
2600
2601 if (trans->type & __TRANS_FREEZABLE)
2602 sb_end_intwrite(fs_info->sb);
2603
2604 trace_btrfs_transaction_commit(fs_info);
2605
2606 btrfs_scrub_continue(fs_info);
2607
2608 if (current->journal_info == trans)
2609 current->journal_info = NULL;
2610
2611 kmem_cache_free(btrfs_trans_handle_cachep, trans);
2612
2613 return ret;
2614
2615 unlock_reloc:
2616 mutex_unlock(&fs_info->reloc_mutex);
2617 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2618 scrub_continue:
2619 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2620 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2621 btrfs_scrub_continue(fs_info);
2622 cleanup_transaction:
2623 btrfs_trans_release_metadata(trans);
2624 btrfs_cleanup_pending_block_groups(trans);
2625 btrfs_trans_release_chunk_metadata(trans);
2626 trans->block_rsv = NULL;
2627 btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2628 if (current->journal_info == trans)
2629 current->journal_info = NULL;
2630 cleanup_transaction(trans, ret);
2631
2632 return ret;
2633
2634 lockdep_release:
2635 btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
2636 btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2637 goto cleanup_transaction;
2638
2639 lockdep_trans_commit_start_release:
2640 btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2641 btrfs_end_transaction(trans);
2642 return ret;
2643 }
2644
2645 /*
2646 * Return 0 if there are no more dead roots at the time of the call
2647 * (errors from dropping a root are also mapped to 0), or 1 if one root
2648 * was processed, in which case there may be more, so call it again.
2649 *
2650 * A return of 1 doesn't guarantee more snapshots to delete; if a new
2651 * one appears during processing this may return 0. We don't mind,
2652 * because btrfs_commit_super() will poke the cleaner thread and it
2653 * will process it a few seconds later.
2654 */
2655 int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
2656 {
2657 struct btrfs_root *root;
2658 int ret;
2659
2660 spin_lock(&fs_info->trans_lock);
2661 if (list_empty(&fs_info->dead_roots)) {
2662 spin_unlock(&fs_info->trans_lock);
2663 return 0;
2664 }
2665 root = list_first_entry(&fs_info->dead_roots,
2666 struct btrfs_root, root_list);
2667 list_del_init(&root->root_list);
2668 spin_unlock(&fs_info->trans_lock);
2669
2670 btrfs_debug(fs_info, "cleaner removing %llu", btrfs_root_id(root));
2671
2672 btrfs_kill_all_delayed_nodes(root);
2673
2674 if (btrfs_header_backref_rev(root->node) <
2675 BTRFS_MIXED_BACKREF_REV)
2676 ret = btrfs_drop_snapshot(root, false, false);
2677 else
2678 ret = btrfs_drop_snapshot(root, true, false);
2679
2680 btrfs_put_root(root);
2681 return (ret < 0) ? 0 : 1;
2682 }
2683
2684 /*
2685 * We only mark the transaction aborted and then set the file system read-only.
2686 * This will prevent new transactions from starting or trying to join this
2687 * one.
2688 *
2689 * This means that error recovery at the call site is limited to freeing
2690 * any local memory allocations and passing the error code up without
2691 * further cleanup. The transaction should complete as it normally would
2692 * in the call path but will return -EIO.
2693 *
2694 * We'll complete the cleanup in btrfs_end_transaction and
2695 * btrfs_commit_transaction.
2696 */
2697 void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
2698 const char *function,
2699 unsigned int line, int error, bool first_hit)
2700 {
2701 struct btrfs_fs_info *fs_info = trans->fs_info;
2702
2703 WRITE_ONCE(trans->aborted, error);
2704 WRITE_ONCE(trans->transaction->aborted, error);
2705 if (first_hit && error == -ENOSPC)
2706 btrfs_dump_space_info_for_trans_abort(fs_info);
2707 /* Wake up anybody who may be waiting on this transaction */
2708 wake_up(&fs_info->transaction_wait);
2709 wake_up(&fs_info->transaction_blocked_wait);
2710 __btrfs_handle_fs_error(fs_info, function, line, error, NULL);
2711 }
2712
2713 int __init btrfs_transaction_init(void)
2714 {
2715 btrfs_trans_handle_cachep = KMEM_CACHE(btrfs_trans_handle, SLAB_TEMPORARY);
2716 if (!btrfs_trans_handle_cachep)
2717 return -ENOMEM;
2718 return 0;
2719 }
2720
2721 void __cold btrfs_transaction_exit(void)
2722 {
2723 kmem_cache_destroy(btrfs_trans_handle_cachep);
2724 }
2725