// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include <linux/timekeeping.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "extent_io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "dir-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "relocation.h"
#include "scrub.h"
#include "ordered-data.h"
#include "delayed-inode.h"

static struct kmem_cache *btrfs_trans_handle_cachep;

/*
 * Transaction states and transitions
 *
 * No running transaction (fs tree blocks are not modified)
 * |
 * | To next stage:
 * |  Call one of the start_transaction() variants, except
 * |  btrfs_join_transaction_nostart().
 * V
 * Transaction N [[TRANS_STATE_RUNNING]]
 * |
 * | New trans handles can be attached to transaction N by calling any of the
 * | start_transaction() variants.
 * |
 * | To next stage:
 * |  Call btrfs_commit_transaction() on any trans handle attached to
 * |  transaction N
 * V
 * Transaction N [[TRANS_STATE_COMMIT_PREP]]
 * |
 * | If there are simultaneous calls to btrfs_commit_transaction() one will win
 * | the race and the rest will wait for the winner to commit the transaction.
 * |
 * | The winner will wait for the previous running transaction to completely
 * | finish, if there is one.
 * |
 * Transaction N [[TRANS_STATE_COMMIT_START]]
 * |
 * | Then one of the following happens:
 * | - Wait for all other trans handle holders to release.
 * |   The btrfs_commit_transaction() caller will do the commit work.
 * | - Wait for current transaction to be committed by others.
 * |   Other btrfs_commit_transaction() caller will do the commit work.
 * |
 * | At this stage, only btrfs_join_transaction*() variants can attach
 * | to this running transaction.
 * | All other variants will wait for the current one to finish and attach to
 * | transaction N+1.
 * |
 * | To next stage:
 * |  Caller is chosen to commit transaction N, and all other trans handles
 * |  have been released.
 * V
 * Transaction N [[TRANS_STATE_COMMIT_DOING]]
 * |
 * | The heavy lifting transaction work is started.
 * | From running delayed refs (modifying the extent tree) to creating pending
 * | snapshots and running qgroups.
 * | In short, modify supporting trees to reflect modifications of subvolume
 * | trees.
 * |
 * | At this stage, all start_transaction() calls will wait for this
 * | transaction to finish and attach to transaction N+1.
 * |
 * | To next stage:
 * |  Until all supporting trees are updated.
 * V
 * Transaction N [[TRANS_STATE_UNBLOCKED]]
 * |						    Transaction N+1
 * | All needed trees are modified, thus we only    [[TRANS_STATE_RUNNING]]
 * | need to write them back to disk and update	    |
 * | super blocks.				    |
 * |						    |
 * | At this stage, new transaction is allowed to   |
 * | start.					    |
 * | All new start_transaction() calls will be	    |
 * | attached to transid N+1.			    |
 * |						    |
 * | To next stage:				    |
 * |  Until all tree blocks and super blocks are    |
 * |  written to block devices			    |
 * V						    |
 * Transaction N [[TRANS_STATE_COMPLETED]]	    V
 *   All tree blocks and super blocks are written.  Transaction N+1
 *   This transaction is finished and all its	    [[TRANS_STATE_COMMIT_START]]
 *   data structures will be cleaned up.	    | Life goes on
 */
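/*
 * Typical caller-side lifecycle (an illustrative sketch, not code used by
 * this file; error handling trimmed):
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	// ... modify btrees through this handle ...
 *	return btrfs_commit_transaction(trans);
 *
 * Callers that only need to piggyback on a running transaction use
 * btrfs_join_transaction() or btrfs_attach_transaction() instead, and may
 * finish with btrfs_end_transaction() rather than committing themselves.
 */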
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_PREP]	= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_SUPER_COMMITTED]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};

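/*
 * How this table gates new handles (an illustrative restatement of the
 * check done in join_transaction() below, not extra logic):
 *
 *	if (btrfs_blocked_trans_types[cur_trans->state] & type)
 *		return -EBUSY;	// caller must wait and attach to N+1
 *
 * e.g. once a commit reaches TRANS_STATE_COMMIT_DOING, only the
 * __TRANS_JOIN_NOLOCK type may still attach to transaction N.
 */
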
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!xa_empty(&transaction->delayed_refs.head_refs));
		WARN_ON(!xa_empty(&transaction->delayed_refs.dirty_extents));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * If any block groups are found in ->deleted_bgs then it's
		 * because the transaction was aborted and a commit did not
		 * happen (things failed before writing the new superblock
		 * and calling btrfs_finish_extent_commit()), so we can not
		 * discard the physical locations of the block groups.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			/*
			 * Not strictly necessary to lock, as no other task will be using a
			 * block_group on the deleted_bgs list during a transaction abort.
			 */
			spin_lock(&transaction->fs_info->unused_bgs_lock);
			list_del_init(&cache->bg_list);
			spin_unlock(&transaction->fs_info->unused_bgs_lock);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}

static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING,
	       "cur_trans->state=%d", cur_trans->state);

	down_write(&fs_info->commit_root_sem);

	if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
		fs_info->last_reloc_trans = trans->transid;

	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		btrfs_extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* We can free old roots now. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);

	up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
					 unsigned int type)
{
	if (type & TRANS_EXTWRITERS)
		atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
					  unsigned int type)
{
	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
	return atomic_read(&trans->num_extwriters);
}

/*
 * To be called after doing the chunk btree updates right after allocating a
 * new chunk (after btrfs_chunk_alloc_add_chunk_item() is called), after all
 * chunk btree updates when removing a chunk, and after finishing the second
 * phase of chunk allocation (btrfs_create_pending_block_groups()) in case
 * some block group had its chunk item insertion delayed to the second phase.
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
				trans->chunk_bytes_reserved, NULL);
	trans->chunk_bytes_reserved = 0;
}

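/*
 * Illustrative call sequence for the first case above (a sketch only; the
 * helper names come from the comment above, but the exact call shape in
 * block-group.c may differ):
 *
 *	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
 *	// ... remaining chunk btree updates ...
 *	btrfs_trans_release_chunk_metadata(trans);
 */
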
/*
 * Either allocate a new transaction or hop into the existing one.
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (unlikely(BTRFS_FS_ERROR(fs_info))) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			const int abort_error = cur_trans->aborted;

			spin_unlock(&fs_info->trans_lock);
			return abort_error;
		}
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
		btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
	 * current transaction and commit it. If there is no transaction, just
	 * return -ENOENT.
	 */
	if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK only happens during the transaction commit, so
	 * it is impossible that ->running_transaction is NULL.
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc_obj(*cur_trans, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers);
	btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters);

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * Someone started a transaction after we unlocked.  Make sure
		 * to redo the checks above.
		 */
		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		kfree(cur_trans);
		goto loop;
	} else if (unlikely(BTRFS_FS_ERROR(fs_info))) {
		spin_unlock(&fs_info->trans_lock);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->pending_ordered, 0);
	init_waitqueue_head(&cur_trans->pending_wait);
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	xa_init(&cur_trans->delayed_refs.head_refs);
	xa_init(&cur_trans->delayed_refs.dirty_extents);

	/*
	 * Although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	btrfs_extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
				  IO_TREE_TRANS_DIRTY_PAGES);
	btrfs_extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
				  IO_TREE_FS_PINNED_EXTENTS);
	btrfs_set_fs_generation(fs_info, fs_info->generation + 1);
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * This does all the record keeping required to make sure that a shareable root
 * is properly recorded in a given transaction.  This is required to make sure
 * the old root from before we joined the transaction is deleted when the
 * transaction commits.
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       bool force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    btrfs_get_root_last_trans(root) < trans->transid) || force) {
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * See below for IN_TRANS_SETUP usage rules.  We have the reloc
		 * mutex held now, so there is only one writer in this function.
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/*
		 * Make sure readers find IN_TRANS_SETUP before they find our
		 * root->last_trans update.
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		if (btrfs_get_root_last_trans(root) == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)btrfs_root_id(root),
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		btrfs_set_root_last_trans(root, trans->transid);

		/*
		 * This is pretty tricky.  We don't want to take the relocation
		 * lock in btrfs_record_root_in_trans unless we're really doing
		 * the first setup for this root in this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root IN_TRANS_SETUP.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below.
		 */
		ret = btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return ret;
}

void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add ourselves to the transaction dropped list */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	/* Make sure we don't try to update the root at commit time */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)btrfs_root_id(root),
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/*
	 * See record_root_in_trans for comments about IN_TRANS_SETUP usage
	 * and barriers.
	 */
	smp_rmb();
	if (btrfs_get_root_last_trans(root) == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	ret = record_root_in_trans(trans, root, false);
	mutex_unlock(&fs_info->reloc_mutex);

	return ret;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
	return (trans->state >= TRANS_STATE_COMMIT_START &&
		trans->state < TRANS_STATE_UNBLOCKED &&
		!TRANS_ABORTED(trans));
}

/*
 * Wait for a commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info, unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans) &&
	    (btrfs_blocked_trans_types[cur_trans->state] & type)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}

static bool may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return false;

	if (type == TRANS_START)
		return true;

	return false;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!fs_info->reloc_ctl ||
	    !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
	    root->reloc_root)
		return false;

	return true;
}

static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
					enum btrfs_reserve_flush_enum flush,
					u64 num_bytes,
					u64 *delayed_refs_bytes)
{
	struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
	u64 bytes = num_bytes + *delayed_refs_bytes;
	int ret;

	/*
	 * We want to reserve all the bytes we may need all at once, so we only
	 * do 1 enospc flushing cycle per transaction start.
	 */
	ret = btrfs_reserve_metadata_bytes(si, bytes, flush);

	/*
	 * If we are an emergency flush, which can steal from the global block
	 * reserve, then attempt to not reserve space for the delayed refs, as
	 * we will consume space for them from the global block reserve.
	 */
	if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
		bytes -= *delayed_refs_bytes;
		*delayed_refs_bytes = 0;
		ret = btrfs_reserve_metadata_bytes(si, bytes, flush);
	}

	return ret;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	u64 delayed_refs_bytes = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	if (unlikely(BTRFS_FS_ERROR(fs_info)))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items && root != fs_info->chunk_root) {
		qgroup_reserved = num_items * fs_info->nodesize;
		/*
		 * Use prealloc for now, as there might be a currently running
		 * transaction that could free this reserved space prematurely
		 * by committing.
		 */
		ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
							 enforce_qgroups, false);
		if (ret)
			return ERR_PTR(ret);

		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		/*
		 * If we plan to insert/update/delete "num_items" from a btree,
		 * we will also generate delayed refs for extent buffers in the
		 * respective btree paths, so reserve space for the delayed refs
		 * that will be generated by the caller as it modifies btrees.
		 * Try to reserve them to avoid excessive use of the global
		 * block reserve.
		 */
		delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items);

		/*
		 * Do the reservation for the relocation root creation.
		 */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes,
						   &delayed_refs_bytes);
		if (ret)
			goto reserve_fail;

		btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true);

		if (trans_rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !btrfs_block_rsv_full(delayed_refs_rsv)) {
		/*
		 * Some people call with btrfs_start_transaction(root, 0)
		 * because they can be throttled, but have some other mechanism
		 * for reserving space.  We still want these guys to refill the
		 * delayed block_rsv so just add one item's worth of reservation
		 * here.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret == -EAGAIN) {
			ASSERT(btrfs_is_zoned(fs_info));
			ret = btrfs_commit_current_transaction(root);
			if (ret)
				goto reserve_fail;
			ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		}

		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	xa_init(&h->writeback_inhibited_ebs);

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info, type);

	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info, type);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	INIT_LIST_HEAD(&h->new_bgs);
	btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELREFS);

	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = trans_rsv;
		h->bytes_reserved = num_bytes;
		if (delayed_refs_bytes > 0) {
			trace_btrfs_space_reservation(fs_info,
						      "local_delayed_refs_rsv",
						      h->transid,
						      delayed_refs_bytes, 1);
			h->delayed_refs_bytes_reserved = delayed_refs_bytes;
			btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true);
			delayed_refs_bytes = 0;
		}
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
	 * ALLOC_FORCE the first run through, and then we won't allocate for
	 * anybody else who races in later.  We don't care about the return
	 * value here.
	 */
	if (do_chunk_alloc && num_bytes) {
		struct btrfs_space_info *space_info = h->block_rsv->space_info;
		u64 flags = space_info->flags;

		btrfs_chunk_alloc(h, space_info, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * btrfs_record_root_in_trans() needs to allocate new extents, and may
	 * call btrfs_join_transaction() while we're also starting a
	 * transaction.
	 *
	 * Thus it needs to be called after current->journal_info is
	 * initialized, or we can deadlock.
	 */
	ret = btrfs_record_root_in_trans(h, root);
	if (ret) {
		/*
		 * The transaction handle is fully initialized and linked with
		 * other structures so it needs to be ended in case of errors,
		 * not just freed.
		 */
		btrfs_end_transaction(h);
		goto reserve_fail;
	}
	/*
	 * Now that we have found a transaction to be a part of, convert the
	 * qgroup reservation from prealloc to pertrans. A different transaction
	 * can't race in and free our pertrans out from under us.
	 */
	if (qgroup_reserved)
		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
	if (delayed_refs_bytes)
		btrfs_space_info_free_bytes_may_use(trans_rsv->space_info, delayed_refs_bytes);
reserve_fail:
	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
	return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
				 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Similar to regular join but it never starts a transaction when none is
 * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED.
 * This is similar to btrfs_attach_transaction() but it allows the join to
 * happen if the transaction commit already started but it's not yet in the
 * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING).
 */
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOSTART,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Catch the running transaction.
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH,
				 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * Catch the running transaction.
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH,
				  BTRFS_RESERVE_NO_FLUSH, true);
	if (trans == ERR_PTR(-ENOENT)) {
		int ret;

		ret = btrfs_wait_for_commit(root->fs_info, 0);
		if (ret)
			return ERR_PTR(ret);
	}

	return trans;
}

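/*
 * Choosing between the two attach variants (an illustrative sketch, not
 * code used by this file):
 *
 *	// Opportunistic: grab the running transaction if there is one,
 *	// return ERR_PTR(-ENOENT) otherwise.
 *	trans = btrfs_attach_transaction(root);
 *
 *	// Stronger: on -ENOENT, additionally wait for any inactive
 *	// (already committing) transaction to fully complete.
 *	trans = btrfs_attach_transaction_barrier(root);
 */
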
/* Wait for a transaction commit to reach at least the given state. */
static noinline void wait_for_commit(struct btrfs_transaction *commit,
				     const enum btrfs_trans_state min_state)
{
	struct btrfs_fs_info *fs_info = commit->fs_info;
	u64 transid = commit->transid;
	bool put = false;

	/*
	 * At the moment this function is called with min_state either being
	 * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED.
	 */
	if (min_state == TRANS_STATE_COMPLETED)
		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
	else
		btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);

	while (1) {
		wait_event(commit->commit_wait, commit->state >= min_state);
		if (put)
			btrfs_put_transaction(commit);

		if (min_state < TRANS_STATE_COMPLETED)
			break;

		/*
		 * A transaction isn't really completed until all of the
		 * previous transactions are completed, but with fsync we can
		 * end up with SUPER_COMMITTED transactions before a COMPLETED
		 * transaction. Wait for those.
		 */

		spin_lock(&fs_info->trans_lock);
		commit = list_first_entry_or_null(&fs_info->trans_list,
						  struct btrfs_transaction,
						  list);
		if (!commit || commit->transid > transid) {
			spin_unlock(&fs_info->trans_lock);
			break;
		}
		refcount_inc(&commit->use_count);
		put = true;
		spin_unlock(&fs_info->trans_lock);
	}
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= btrfs_get_last_trans_committed(fs_info))
			return 0;

		/* Find the specified transaction. */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry(t, &fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);

		/*
		 * The specified transaction doesn't exist, or we
		 * raced with btrfs_commit_transaction.
		 */
		if (!cur_trans) {
			if (transid > btrfs_get_last_trans_committed(fs_info))
				ret = -EINVAL;
			return ret;
		}
	} else {
		/* Find the newest transaction that is committing or committed. */
		spin_lock(&fs_info->trans_lock);
		list_for_each_entry_reverse(t, &fs_info->trans_list,
					    list) {
			if (t->state >= TRANS_STATE_COMMIT_START) {
				if (t->state == TRANS_STATE_COMPLETED)
					break;
				cur_trans = t;
				refcount_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&fs_info->trans_lock);
		/* Nothing committing or committed. */
		if (!cur_trans)
			return ret;
	}

	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
	ret = cur_trans->aborted;
	btrfs_put_transaction(cur_trans);

	return ret;
}

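/*
 * Usage note (illustrative, not extra logic): passing a transid of 0 waits
 * for whatever transaction is currently committing, if any. That is how
 * btrfs_attach_transaction_barrier() above turns an -ENOENT attach into
 * "wait until nothing is committing":
 *
 *	ret = btrfs_wait_for_commit(fs_info, 0);
 */
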
void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info, TRANS_START);
}

bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
	    test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
		return true;

	if (btrfs_check_space_for_delayed_refs(trans->fs_info))
		return true;

	return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50);
}

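/*
 * Example of the pattern this helper supports (a sketch only; real callers
 * differ in detail, and do_some_work()/more_work are hypothetical names):
 *
 *	while (more_work) {
 *		do_some_work(trans);
 *		if (btrfs_should_end_transaction(trans)) {
 *			ret = btrfs_end_transaction(trans);
 *			if (ret)
 *				break;
 *			trans = btrfs_start_transaction(root, num_items);
 *			if (IS_ERR(trans))
 *				return PTR_ERR(trans);
 *		}
 *	}
 */
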
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->block_rsv) {
		ASSERT(trans->bytes_reserved == 0,
		       "trans->bytes_reserved=%llu", trans->bytes_reserved);
		ASSERT(trans->delayed_refs_bytes_reserved == 0,
		       "trans->delayed_refs_bytes_reserved=%llu",
		       trans->delayed_refs_bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved) {
		ASSERT(trans->delayed_refs_bytes_reserved == 0,
		       "trans->delayed_refs_bytes_reserved=%llu",
		       trans->delayed_refs_bytes_reserved);
		return;
	}

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
	trans->bytes_reserved = 0;

	if (!trans->delayed_refs_bytes_reserved)
		return;

	trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv",
				      trans->transid,
				      trans->delayed_refs_bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, &trans->delayed_rsv,
				trans->delayed_refs_bytes_reserved, NULL);
	trans->delayed_refs_bytes_reserved = 0;
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;

	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	/*
	 * Uninhibit extent buffer writeback before decrementing num_writers,
	 * since the decrement wakes the committing thread which needs all
	 * buffers uninhibited to write them to disk.
	 */
	btrfs_uninhibit_all_eb_writeback(trans);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	cond_wake_up(&cur_trans->writer_wait);

	btrfs_lockdep_release(info, btrfs_trans_num_extwriters);
	btrfs_lockdep_release(info, btrfs_trans_num_writers);

	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (unlikely(TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info))) {
		wake_up_process(info->transaction_kthread);
		if (TRANS_ABORTED(trans))
			ret = trans->aborted;
		else
			ret = -EROFS;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return ret;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}

/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk, but it does not wait on them.
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
					   mark, &cached_state)) {
		bool wait_writeback = false;

		ret = btrfs_convert_extent_bit(dirty_pages, start, end,
					       EXTENT_NEED_WAIT,
					       mark, &cached_state);
		/*
		 * convert_extent_bit can return -ENOMEM, which is most of the
		 * time a temporary error. So when it happens, ignore the error
		 * and wait for writeback of this range to finish - because we
		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
		 * to __btrfs_wait_marked_extents() would not know that
		 * writeback for this range started and therefore wouldn't
		 * wait for it to finish - we don't want to commit a
		 * superblock that points to btree nodes/leafs for which
		 * writeback hasn't finished yet (and without errors).
		 * We cleanup any entries left in the io tree when committing
		 * the transaction (through extent_io_tree_release()).
		 */
		if (ret == -ENOMEM) {
			ret = 0;
			wait_writeback = true;
		}
		if (!ret)
			ret = filemap_fdatawrite_range(mapping, start, end);
		if (!ret && wait_writeback)
			btrfs_btree_wait_writeback_range(fs_info, start, end);
		btrfs_free_extent_state(cached_state);
		if (ret)
			break;
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return ret;
}

/*
 * When btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree.
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	int ret = 0;

	while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
					   EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Ignore -ENOMEM errors returned by clear_extent_bit().
		 * When committing the transaction, we'll remove any entries
		 * left in the io tree. For a log commit, we don't remove them
		 * after committing the log because the tree can be accessed
		 * concurrently - we do it only at transaction commit time when
		 * it's safe to do it (through extent_io_tree_release()).
		 */
		ret = btrfs_clear_extent_bit(dirty_pages, start, end,
					     EXTENT_NEED_WAIT, &cached_state);
		if (ret == -ENOMEM)
			ret = 0;
		if (!ret)
			btrfs_btree_wait_writeback_range(fs_info, start, end);
		btrfs_free_extent_state(cached_state);
		if (ret)
			break;
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	return ret;
}

static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
		       struct extent_io_tree *dirty_pages)
{
	bool errors = false;
	int ret;

	ret = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
		errors = true;

	if (errors && !ret)
		ret = -EIO;
	return ret;
}

int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
	struct btrfs_fs_info *fs_info = log_root->fs_info;
	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
	bool errors = false;
	int ret;

	ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID,
	       "root_id(log_root)=%llu", btrfs_root_id(log_root));

	ret = __btrfs_wait_marked_extents(fs_info, dirty_pages);
	if ((mark & EXTENT_DIRTY_LOG1) &&
	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
		errors = true;

	if ((mark & EXTENT_DIRTY_LOG2) &&
	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
		errors = true;

	if (errors && !ret)
		ret = -EIO;
	return ret;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
	int ret;
	int ret2;
	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
	blk_finish_plug(&plug);
	ret2 = btrfs_wait_extents(fs_info, dirty_pages);

	if (ret)
		return ret;
	if (ret2)
		return ret2;

	btrfs_extent_io_tree_release(&trans->transaction->dirty_pages);
	return 0;
}

/*
 * This is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}

/*
 * Update all the cowonly tree roots on disk.
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct extent_buffer *eb;
	int ret;

	/*
	 * At this point no one can be using this transaction to modify any tree
	 * and no one can start another transaction to modify any tree either.
	 */
	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING,
	       "trans->transaction->state=%d", trans->transaction->state);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

again:
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;

		root = list_first_entry(&fs_info->dirty_cowonly_roots,
					struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);
		list_move_tail(&root->dirty_list,
			       &trans->transaction->switch_commits);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	/* Now flush any delayed refs generated by updating all of the roots */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);
	if (ret)
		return ret;

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;

		/*
		 * We're writing the dirty block groups, which could generate
		 * delayed refs, which could generate more dirty block groups,
		 * so we want to keep this flushing in this loop to make sure
		 * everything gets run.
		 */
		ret = btrfs_run_delayed_refs(trans, U64_MAX);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	/* Update dev-replace pointer once everything is committed */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}

/*
 * If we had a pending drop we need to see if there are any others left in our
 * dead roots list, and if not clear our bit and wake any waiters.
 */
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	/*
	 * We put the drop in progress roots at the front of the list, so if the
	 * first entry doesn't have UNFINISHED_DROP set we can wake everybody
	 * up.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&fs_info->dead_roots)) {
		struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
							   struct btrfs_root,
							   root_list);
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
			spin_unlock(&fs_info->trans_lock);
			return;
		}
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_wake_unfinished_drop(fs_info);
}

/*
 * Dead roots are old snapshots that need to be deleted.  This grabs a
 * reference on the root and adds it to the list of dead roots that need to
 * be deleted.
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&root->root_list)) {
		btrfs_grab_root(root);

		/* We want to process the partially complete drops first. */
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
			list_add(&root->root_list, &fs_info->dead_roots);
		else
			list_add_tail(&root->root_list, &fs_info->dead_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

1484 /*
1485  * Update each subvolume root and its relocation root, if it exists, in the tree
1486  * of tree roots. Also free log roots if they exist.
1487  */
commit_fs_roots(struct btrfs_trans_handle * trans)1488 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
1489 {
1490 	struct btrfs_fs_info *fs_info = trans->fs_info;
1491 	struct btrfs_root *gang[8];
1492 	int i;
1493 	int ret;
1494 
1495 	/*
1496 	 * At this point no one can be using this transaction to modify any tree
1497 	 * and no one can start another transaction to modify any tree either.
1498 	 */
1499 	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING,
1500 	       "trans->transaction->state=%d", trans->transaction->state);
1501 
1502 	spin_lock(&fs_info->fs_roots_radix_lock);
1503 	while (1) {
1504 		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1505 						 (void **)gang, 0,
1506 						 ARRAY_SIZE(gang),
1507 						 BTRFS_ROOT_TRANS_TAG);
1508 		if (ret == 0)
1509 			break;
1510 		for (i = 0; i < ret; i++) {
1511 			struct btrfs_root *root = gang[i];
1512 			int ret2;
1513 
1514 			/*
1515 			 * At this point we can neither have tasks logging inodes
1516 			 * from a root nor trying to commit a log tree.
1517 			 */
1518 			ASSERT(atomic_read(&root->log_writers) == 0,
1519 			       "atomic_read(&root->log_writers)=%d",
1520 			       atomic_read(&root->log_writers));
1521 			ASSERT(atomic_read(&root->log_commit[0]) == 0,
1522 			       "atomic_read(&root->log_commit[0])=%d",
1523 			       atomic_read(&root->log_commit[0]));
1524 			ASSERT(atomic_read(&root->log_commit[1]) == 0,
1525 			       "atomic_read(&root->log_commit[1])=%d",
1526 			       atomic_read(&root->log_commit[1]));
1527 
1528 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
1529 					(unsigned long)btrfs_root_id(root),
1530 					BTRFS_ROOT_TRANS_TAG);
1531 			btrfs_qgroup_free_meta_all_pertrans(root);
1532 			spin_unlock(&fs_info->fs_roots_radix_lock);
1533 
1534 			btrfs_free_log(trans, root);
1535 			ret2 = btrfs_update_reloc_root(trans, root);
1536 			if (unlikely(ret2))
1537 				return ret2;
1538 
1539 			/* see comments in should_cow_block() */
1540 			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1541 			smp_mb__after_atomic();
1542 
1543 			if (root->commit_root != root->node) {
1544 				list_add_tail(&root->dirty_list,
1545 					&trans->transaction->switch_commits);
1546 				btrfs_set_root_node(&root->root_item,
1547 						    root->node);
1548 			}
1549 
1550 			ret2 = btrfs_update_root(trans, fs_info->tree_root,
1551 						&root->root_key,
1552 						&root->root_item);
1553 			if (unlikely(ret2))
1554 				return ret2;
1555 			spin_lock(&fs_info->fs_roots_radix_lock);
1556 		}
1557 	}
1558 	spin_unlock(&fs_info->fs_roots_radix_lock);
1559 	return 0;
1560 }
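
/*
 * Editorial sketch (hypothetical helper, not part of the original file):
 * the loop in commit_fs_roots() above follows the standard tagged gang
 * lookup idiom, shown here in isolation. A small batch is looked up under
 * the lock, the tag is cleared per entry so that restarting from index 0
 * never revisits it, and the lock is dropped around the per-root work.
 */
static void example_for_each_trans_tagged_root(struct btrfs_fs_info *fs_info,
					       void (*process)(struct btrfs_root *root))
{
	struct btrfs_root *gang[8];
	int i;
	int ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					     (unsigned long)btrfs_root_id(gang[i]),
					     BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);
			process(gang[i]);	/* heavy work runs unlocked */
			spin_lock(&fs_info->fs_roots_radix_lock);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
}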
1561 
1562 /*
1563  * Do all the special snapshot-related qgroup dirty hacks.
1564  *
1565  * This does the needed qgroup inherit and dirty tricks, such as switching
1566  * commit roots inside one transaction and writing all btree blocks to
1567  * disk, so that qgroup accounting works.
1568  */
1569 static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1570 				   struct btrfs_root *src,
1571 				   struct btrfs_root *parent,
1572 				   struct btrfs_qgroup_inherit *inherit,
1573 				   u64 dst_objectid)
1574 {
1575 	struct btrfs_fs_info *fs_info = src->fs_info;
1576 	int ret;
1577 
1578 	/*
1579 	 * Save some performance in the case that qgroups are not enabled. If
1580 	 * this check races with the ioctl, rescan will kick in anyway.
1581 	 */
1582 	if (!btrfs_qgroup_full_accounting(fs_info))
1583 		return 0;
1584 
1585 	/*
1586 	 * Ensure the dirty @src will be committed. Otherwise, after the
1587 	 * coming commit_fs_roots() and switch_commit_roots(), any dirty but
1588 	 * not recorded root will never be updated again, leaving an outdated
1589 	 * root item.
1590 	 */
1591 	ret = record_root_in_trans(trans, src, true);
1592 	if (ret)
1593 		return ret;
1594 
1595 	/*
1596 	 * btrfs_qgroup_inherit relies on a consistent view of the usage for the
1597 	 * src root, so we must run the delayed refs here.
1598 	 *
1599 	 * However this isn't particularly foolproof, because there's no
1600 	 * synchronization keeping us from changing the tree after this point
1601 	 * before we do the qgroup_inherit, or even from making changes while
1602 	 * we're doing the qgroup_inherit.  But that's a problem for the future,
1603 	 * for now flush the delayed refs to narrow the race window where the
1604 	 * qgroup counters could end up wrong.
1605 	 */
1606 	ret = btrfs_run_delayed_refs(trans, U64_MAX);
1607 	if (unlikely(ret)) {
1608 		btrfs_abort_transaction(trans, ret);
1609 		return ret;
1610 	}
1611 
1612 	ret = commit_fs_roots(trans);
1613 	if (ret)
1614 		return ret;
1615 	ret = btrfs_qgroup_account_extents(trans);
1616 	if (ret < 0)
1617 		return ret;
1618 
1619 	/* Now that qgroups are all updated, inherit them to the new qgroups. */
1620 	ret = btrfs_qgroup_inherit(trans, btrfs_root_id(src), dst_objectid,
1621 				   btrfs_root_id(parent), inherit);
1622 	if (ret < 0)
1623 		return ret;
1624 
1625 	/*
1626 	 * Now we do a simplified commit transaction, which will:
1627 	 * 1) commit all subvolume and extent trees,
1628 	 *    to ensure all subvolume and extent trees have a valid
1629 	 *    commit_root for the later insert_dir_item() accounting
1630 	 * 2) write all btree blocks to disk,
1631 	 *    to make sure later btree modifications will be COWed;
1632 	 *    otherwise commit_root could be repopulated, causing wrong qgroup numbers
1633 	 * In this simplified commit, we don't really care about other trees
1634 	 * like the chunk and root trees, as they won't affect qgroups.
1635 	 * And we don't write the super block, to avoid a half-committed state.
1636 	 */
1637 	ret = commit_cowonly_roots(trans);
1638 	if (ret)
1639 		return ret;
1640 	switch_commit_roots(trans);
1641 	ret = btrfs_write_and_wait_transaction(trans);
1642 	if (unlikely(ret)) {
1643 		btrfs_err(fs_info,
1644 "error while writing out transaction during qgroup snapshot accounting: %d", ret);
1645 		return ret;
1646 	}
1647 
1648 	/*
1649 	 * Force the parent root to be updated, as we recorded it before so
1650 	 * its last_trans == cur_transid.
1651 	 * Otherwise it won't be committed to disk again after the later
1652 	 * insert_dir_item().
1653 	 */
1654 	return record_root_in_trans(trans, parent, true);
1655 }
1656 
1657 /*
1658  * New snapshots need to be created at a very specific time in the
1659  * transaction commit.  This does the actual creation.
1660  *
1661  * Note:
1662  * If an error occurs that may affect the commit of the current
1663  * transaction, return the error number. If an error only affects the
1664  * creation of the pending snapshots, return 0.
1665  */
1666 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1667 				   struct btrfs_pending_snapshot *pending)
1668 {
1669 
1670 	struct btrfs_fs_info *fs_info = trans->fs_info;
1671 	struct btrfs_key key;
1672 	struct btrfs_root_item *new_root_item;
1673 	struct btrfs_root *tree_root = fs_info->tree_root;
1674 	struct btrfs_root *root = pending->root;
1675 	struct btrfs_root *parent_root;
1676 	struct btrfs_block_rsv *rsv;
1677 	struct btrfs_inode *parent_inode = pending->dir;
1678 	BTRFS_PATH_AUTO_FREE(path);
1679 	struct btrfs_dir_item *dir_item;
1680 	struct extent_buffer *tmp;
1681 	struct extent_buffer *root_eb;
1682 	struct timespec64 cur_time;
1683 	int ret = 0;
1684 	u64 to_reserve = 0;
1685 	u64 index = 0;
1686 	u64 objectid;
1687 	u64 root_flags;
1688 	unsigned int nofs_flags;
1689 	struct fscrypt_name fname;
1690 
1691 	ASSERT(pending->path);
1692 	path = pending->path;
1693 
1694 	ASSERT(pending->root_item);
1695 	new_root_item = pending->root_item;
1696 
1697 	/*
1698 	 * We're inside a transaction and must make sure that any potential
1699 	 * allocations with GFP_KERNEL in fscrypt won't recurse back to
1700 	 * filesystem.
1701 	 */
1702 	nofs_flags = memalloc_nofs_save();
1703 	pending->error = fscrypt_setup_filename(&parent_inode->vfs_inode,
1704 						&pending->dentry->d_name, 0,
1705 						&fname);
1706 	memalloc_nofs_restore(nofs_flags);
1707 	if (unlikely(pending->error))
1708 		goto free_pending;
1709 
1710 	pending->error = btrfs_get_free_objectid(tree_root, &objectid);
1711 	if (unlikely(pending->error))
1712 		goto free_fname;
1713 
1714 	/*
1715 	 * Make the qgroup code skip the new snapshot's qgroupid, as it is
1716 	 * accounted for by the later btrfs_qgroup_inherit().
1717 	 */
1718 	btrfs_set_skip_qgroup(trans, objectid);
1719 
1720 	btrfs_reloc_pre_snapshot(pending, &to_reserve);
1721 
1722 	if (to_reserve > 0) {
1723 		pending->error = btrfs_block_rsv_add(fs_info,
1724 						     &pending->block_rsv,
1725 						     to_reserve,
1726 						     BTRFS_RESERVE_NO_FLUSH);
1727 		if (unlikely(pending->error))
1728 			goto clear_skip_qgroup;
1729 	}
1730 
1731 	rsv = trans->block_rsv;
1732 	trans->block_rsv = &pending->block_rsv;
1733 	trans->bytes_reserved = trans->block_rsv->reserved;
1734 	trace_btrfs_space_reservation(fs_info, "transaction",
1735 				      trans->transid,
1736 				      trans->bytes_reserved, 1);
1737 	parent_root = parent_inode->root;
1738 	ret = record_root_in_trans(trans, parent_root, false);
1739 	if (unlikely(ret))
1740 		goto fail;
1741 	cur_time = current_time(&parent_inode->vfs_inode);
1742 
1743 	/*
1744 	 * insert the directory item
1745 	 */
1746 	ret = btrfs_set_inode_index(parent_inode, &index);
1747 	if (unlikely(ret)) {
1748 		btrfs_abort_transaction(trans, ret);
1749 		goto fail;
1750 	}
1751 
1752 	/* check if there is a file/dir which has the same name. */
1753 	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1754 					 btrfs_ino(parent_inode),
1755 					 &fname.disk_name, 0);
1756 	if (!IS_ERR_OR_NULL(dir_item)) {
1757 		pending->error = -EEXIST;
1758 		goto dir_item_existed;
1759 	} else if (IS_ERR(dir_item)) {
1760 		ret = PTR_ERR(dir_item);
1761 		btrfs_abort_transaction(trans, ret);
1762 		goto fail;
1763 	}
1764 	btrfs_release_path(path);
1765 
1766 	ret = btrfs_create_qgroup(trans, objectid);
1767 	if (ret && ret != -EEXIST) {
1768 		if (unlikely(ret != -ENOTCONN || btrfs_qgroup_enabled(fs_info))) {
1769 			btrfs_abort_transaction(trans, ret);
1770 			goto fail;
1771 		}
1772 	}
1773 
1774 	/*
1775 	 * Pull in the delayed directory update
1776 	 * and the delayed inode item;
1777 	 * otherwise we corrupt the filesystem
1778 	 * during the snapshot.
1779 	 */
1780 	ret = btrfs_run_delayed_items(trans);
1781 	if (unlikely(ret)) {
1782 		btrfs_abort_transaction(trans, ret);
1783 		goto fail;
1784 	}
1785 
1786 	ret = record_root_in_trans(trans, root, false);
1787 	if (unlikely(ret)) {
1788 		btrfs_abort_transaction(trans, ret);
1789 		goto fail;
1790 	}
1791 	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1792 	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1793 	btrfs_check_and_init_root_item(new_root_item);
1794 
1795 	root_flags = btrfs_root_flags(new_root_item);
1796 	if (pending->readonly)
1797 		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1798 	else
1799 		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1800 	btrfs_set_root_flags(new_root_item, root_flags);
1801 
1802 	btrfs_set_root_generation_v2(new_root_item,
1803 			trans->transid);
1804 	generate_random_guid(new_root_item->uuid);
1805 	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1806 			BTRFS_UUID_SIZE);
1807 	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1808 		memset(new_root_item->received_uuid, 0,
1809 		       sizeof(new_root_item->received_uuid));
1810 		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1811 		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1812 		btrfs_set_root_stransid(new_root_item, 0);
1813 		btrfs_set_root_rtransid(new_root_item, 0);
1814 	}
1815 	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1816 	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1817 	btrfs_set_root_otransid(new_root_item, trans->transid);
1818 
1819 	root_eb = btrfs_lock_root_node(root);
1820 	ret = btrfs_copy_root(trans, root, root_eb, &tmp, objectid);
1821 	btrfs_tree_unlock(root_eb);
1822 	free_extent_buffer(root_eb);
1823 	if (unlikely(ret)) {
1824 		btrfs_abort_transaction(trans, ret);
1825 		goto fail;
1826 	}
1827 	/* see comments in should_cow_block() */
1828 	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1829 	smp_mb__after_atomic();
1830 
1831 	btrfs_set_root_node(new_root_item, tmp);
1832 	/* record when the snapshot was created in key.offset */
1833 	key.objectid = objectid;
1834 	key.type = BTRFS_ROOT_ITEM_KEY;
1835 	key.offset = trans->transid;
1836 	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1837 	btrfs_tree_unlock(tmp);
1838 	free_extent_buffer(tmp);
1839 	if (unlikely(ret)) {
1840 		btrfs_abort_transaction(trans, ret);
1841 		goto fail;
1842 	}
1843 
1844 	/*
1845 	 * insert root back/forward references
1846 	 */
1847 	ret = btrfs_add_root_ref(trans, objectid,
1848 				 btrfs_root_id(parent_root),
1849 				 btrfs_ino(parent_inode), index,
1850 				 &fname.disk_name);
1851 	if (unlikely(ret)) {
1852 		btrfs_abort_transaction(trans, ret);
1853 		goto fail;
1854 	}
1855 
1856 	key.offset = (u64)-1;
1857 	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
1858 	if (IS_ERR(pending->snap)) {
1859 		ret = PTR_ERR(pending->snap);
1860 		pending->snap = NULL;
1861 		btrfs_abort_transaction(trans, ret);
1862 		goto fail;
1863 	}
1864 
1865 	ret = btrfs_reloc_post_snapshot(trans, pending);
1866 	if (unlikely(ret)) {
1867 		btrfs_abort_transaction(trans, ret);
1868 		goto fail;
1869 	}
1870 
1871 	/*
1872 	 * Do special qgroup accounting for the snapshot, as we use a qgroup
1873 	 * snapshot hack to make snapshot creation fast.
1874 	 * To cooperate with that hack, we apply another hack here; otherwise
1875 	 * the snapshot would be greatly slowed down by a subtree qgroup rescan.
1876 	 */
1877 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL)
1878 		ret = qgroup_account_snapshot(trans, root, parent_root,
1879 					      pending->inherit, objectid);
1880 	else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1881 		ret = btrfs_qgroup_inherit(trans, btrfs_root_id(root), objectid,
1882 					   btrfs_root_id(parent_root), pending->inherit);
1883 	if (unlikely(ret < 0))
1884 		goto fail;
1885 
1886 	ret = btrfs_insert_dir_item(trans, &fname.disk_name,
1887 				    parent_inode, &key, BTRFS_FT_DIR,
1888 				    index);
1889 	if (unlikely(ret)) {
1890 		btrfs_abort_transaction(trans, ret);
1891 		goto fail;
1892 	}
1893 
1894 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
1895 						  fname.disk_name.len * 2);
1896 	inode_set_mtime_to_ts(&parent_inode->vfs_inode,
1897 			      inode_set_ctime_current(&parent_inode->vfs_inode));
1898 	ret = btrfs_update_inode_fallback(trans, parent_inode);
1899 	if (unlikely(ret)) {
1900 		btrfs_abort_transaction(trans, ret);
1901 		goto fail;
1902 	}
1903 	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
1904 				  BTRFS_UUID_KEY_SUBVOL,
1905 				  objectid);
1906 	if (unlikely(ret)) {
1907 		btrfs_abort_transaction(trans, ret);
1908 		goto fail;
1909 	}
1910 	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1911 		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1912 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1913 					  objectid);
1914 		/*
1915 		 * We are creating a lot of snapshots of the same root that was
1916 		 * received (has a received UUID) and reached a leaf's limit for
1917 		 * an item. We can safely ignore this and avoid a transaction
1918 		 * abort. A deletion of this snapshot will still work since we
1919 		 * ignore if an item with a BTRFS_UUID_KEY_RECEIVED_SUBVOL key
1920 		 * is missing (see btrfs_delete_subvolume()). Send/receive will
1921 		 * work too since it peeks the first root id from the existing
1922 		 * item (it could peek any), and in case it's missing it
1923 		 * falls back to search by BTRFS_UUID_KEY_SUBVOL keys.
1924 		 * Creation of a snapshot does not require CAP_SYS_ADMIN, so
1925 		 * we don't want users triggering transaction aborts, either
1926 		 * intentionally or not.
1927 		 */
1928 		if (ret == -EOVERFLOW)
1929 			ret = 0;
1930 		if (unlikely(ret)) {
1931 			btrfs_abort_transaction(trans, ret);
1932 			goto fail;
1933 		}
1934 	}
1935 
1936 fail:
1937 	pending->error = ret;
1938 dir_item_existed:
1939 	trans->block_rsv = rsv;
1940 	trans->bytes_reserved = 0;
1941 clear_skip_qgroup:
1942 	btrfs_clear_skip_qgroup(trans);
1943 free_fname:
1944 	fscrypt_free_filename(&fname);
1945 free_pending:
1946 	kfree(new_root_item);
1947 	pending->root_item = NULL;
1948 	pending->path = NULL;
1949 
1950 	return ret;
1951 }
1952 
1953 /*
1954  * create all the snapshots we've scheduled for creation
1955  */
1956 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1957 {
1958 	struct btrfs_pending_snapshot *pending, *next;
1959 	struct list_head *head = &trans->transaction->pending_snapshots;
1960 	int ret = 0;
1961 
1962 	list_for_each_entry_safe(pending, next, head, list) {
1963 		list_del(&pending->list);
1964 		ret = create_pending_snapshot(trans, pending);
1965 		if (unlikely(ret))
1966 			break;
1967 	}
1968 	return ret;
1969 }
1970 
1971 static void update_super_roots(struct btrfs_fs_info *fs_info)
1972 {
1973 	struct btrfs_root_item *root_item;
1974 	struct btrfs_super_block *super;
1975 
1976 	super = fs_info->super_copy;
1977 
1978 	root_item = &fs_info->chunk_root->root_item;
1979 	super->chunk_root = root_item->bytenr;
1980 	super->chunk_root_generation = root_item->generation;
1981 	super->chunk_root_level = root_item->level;
1982 
1983 	root_item = &fs_info->tree_root->root_item;
1984 	super->root = root_item->bytenr;
1985 	super->generation = root_item->generation;
1986 	super->root_level = root_item->level;
1987 	if (btrfs_test_opt(fs_info, SPACE_CACHE))
1988 		super->cache_generation = root_item->generation;
1989 	else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
1990 		super->cache_generation = 0;
1991 	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1992 		super->uuid_tree_generation = root_item->generation;
1993 
1994 	if (btrfs_fs_incompat(fs_info, REMAP_TREE)) {
1995 		root_item = &fs_info->remap_root->root_item;
1996 		super->remap_root = root_item->bytenr;
1997 		super->remap_root_generation = root_item->generation;
1998 		super->remap_root_level = root_item->level;
1999 	}
2000 }
2001 
2002 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
2003 {
2004 	struct btrfs_transaction *trans;
2005 	int ret = 0;
2006 
2007 	spin_lock(&info->trans_lock);
2008 	trans = info->running_transaction;
2009 	if (trans)
2010 		ret = is_transaction_blocked(trans);
2011 	spin_unlock(&info->trans_lock);
2012 	return ret;
2013 }
2014 
2015 void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
2016 {
2017 	struct btrfs_fs_info *fs_info = trans->fs_info;
2018 	struct btrfs_transaction *cur_trans;
2019 
2020 	/* Kick the transaction kthread. */
2021 	set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
2022 	wake_up_process(fs_info->transaction_kthread);
2023 
2024 	/* take transaction reference */
2025 	cur_trans = trans->transaction;
2026 	refcount_inc(&cur_trans->use_count);
2027 
2028 	btrfs_end_transaction(trans);
2029 
2030 	/*
2031 	 * Wait for the current transaction commit to start and block
2032 	 * subsequent transaction joins
2033 	 */
2034 	btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2035 	wait_event(fs_info->transaction_blocked_wait,
2036 		   cur_trans->state >= TRANS_STATE_COMMIT_START ||
2037 		   TRANS_ABORTED(cur_trans));
2038 	btrfs_put_transaction(cur_trans);
2039 }
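
/*
 * Editorial sketch (hypothetical helper modeled on the START_SYNC ioctl,
 * not part of the original file): an async commit returns once the commit
 * has started; the caller can remember the transid and wait on it later
 * (e.g. with btrfs_wait_for_commit() for a WAIT_SYNC style operation).
 */
static int example_start_async_commit(struct btrfs_root *root, u64 *transid)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_attach_transaction_barrier(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);	/* -ENOENT means nothing to commit */
	if (transid)
		*transid = trans->transid;
	btrfs_commit_transaction_async(trans);	/* consumes @trans */
	return 0;
}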
2040 
2041 /*
2042  * If there is a running transaction commit it or if it's already committing,
2043  * wait for its commit to complete. Does not start and commit a new transaction
2044  * if there isn't any running.
2045  */
2046 int btrfs_commit_current_transaction(struct btrfs_root *root)
2047 {
2048 	struct btrfs_trans_handle *trans;
2049 
2050 	trans = btrfs_attach_transaction_barrier(root);
2051 	if (IS_ERR(trans)) {
2052 		int ret = PTR_ERR(trans);
2053 
2054 		return (ret == -ENOENT) ? 0 : ret;
2055 	}
2056 
2057 	return btrfs_commit_transaction(trans);
2058 }
2059 
2060 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
2061 {
2062 	struct btrfs_fs_info *fs_info = trans->fs_info;
2063 	struct btrfs_transaction *cur_trans = trans->transaction;
2064 
2065 	WARN_ON(refcount_read(&trans->use_count) > 1);
2066 
2067 	btrfs_abort_transaction(trans, err);
2068 
2069 	spin_lock(&fs_info->trans_lock);
2070 
2071 	/*
2072 	 * If the transaction is removed from the list, it means this
2073 	 * transaction has been committed successfully, so it is impossible
2074 	 * to call the cleanup function.
2075 	 */
2076 	BUG_ON(list_empty(&cur_trans->list));
2077 
2078 	if (cur_trans == fs_info->running_transaction) {
2079 		cur_trans->state = TRANS_STATE_COMMIT_DOING;
2080 		spin_unlock(&fs_info->trans_lock);
2081 
2082 		/*
2083 		 * The thread has already released the lockdep map as reader
2084 		 * already in btrfs_commit_transaction().
2085 		 */
2086 		btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
2087 		wait_event(cur_trans->writer_wait,
2088 			   atomic_read(&cur_trans->num_writers) == 1);
2089 
2090 		spin_lock(&fs_info->trans_lock);
2091 	}
2092 
2093 	/*
2094 	 * Now that we know no one else is still using the transaction we can
2095 	 * remove the transaction from the list of transactions. This prevents
2096 	 * the transaction kthread from cleaning up the transaction while some
2097 	 * other task is still using it, which could result in a use-after-free
2098 	 * on things like log trees, as it forces the transaction kthread to
2099 	 * wait for this transaction to be cleaned up by us.
2100 	 */
2101 	list_del_init(&cur_trans->list);
2102 
2103 	spin_unlock(&fs_info->trans_lock);
2104 
2105 	btrfs_cleanup_one_transaction(trans->transaction);
2106 
2107 	spin_lock(&fs_info->trans_lock);
2108 	if (cur_trans == fs_info->running_transaction)
2109 		fs_info->running_transaction = NULL;
2110 	spin_unlock(&fs_info->trans_lock);
2111 
2112 	if (trans->type & __TRANS_FREEZABLE)
2113 		sb_end_intwrite(fs_info->sb);
2114 	btrfs_put_transaction(cur_trans);
2115 	btrfs_put_transaction(cur_trans);
2116 
2117 	trace_btrfs_transaction_commit(fs_info);
2118 
2119 	if (current->journal_info == trans)
2120 		current->journal_info = NULL;
2121 
2122 	/*
2123 	 * If relocation is running, we can't cancel scrub because that will
2124 	 * result in a deadlock. Before relocating a block group, relocation
2125 	 * pauses scrub, then starts and commits a transaction before unpausing
2126 	 * scrub. If the transaction commit is being done by the relocation
2127 	 * task or triggered by another task and the relocation task is waiting
2128 	 * for the commit, and we end up here due to an error in the commit
2129 	 * path, then calling btrfs_scrub_cancel() will deadlock, as we are
2130 	 * asking for scrub to stop while having it asked to be paused higher
2131 	 * above in relocation code.
2132 	 */
2133 	if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
2134 		btrfs_scrub_cancel(fs_info);
2135 
2136 	btrfs_uninhibit_all_eb_writeback(trans);
2137 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
2138 }
2139 
2140 /*
2141  * Release reserved delayed ref space of all pending block groups of the
2142  * transaction and remove them from the list
2143  */
2144 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
2145 {
2146 	struct btrfs_fs_info *fs_info = trans->fs_info;
2147 	struct btrfs_block_group *block_group, *tmp;
2148 
2149 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
2150 		btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
2151 		/*
2152 		 * Not strictly necessary to lock, as no other task will be using a
2153 		 * block_group on the new_bgs list during a transaction abort.
2154 		 */
2155 		spin_lock(&fs_info->unused_bgs_lock);
2156 		list_del_init(&block_group->bg_list);
2157 		btrfs_put_block_group(block_group);
2158 		spin_unlock(&fs_info->unused_bgs_lock);
2159 	}
2160 }
2161 
2162 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
2163 {
2164 	/*
2165 	 * We use try_to_writeback_inodes_sb() here because if we used
2166 	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
2167 	 * We are currently holding the fs freeze lock; if we do an async
2168 	 * flush we'll do btrfs_join_transaction() and deadlock because we
2169 	 * need to wait for the fs freeze lock.  Using direct flushing we benefit
2170 	 * from already being in a transaction and our join_transaction doesn't
2171 	 * have to re-take the fs freeze lock.
2172 	 *
2173 	 * Note that try_to_writeback_inodes_sb() will only trigger writeback
2174 	 * if it can read lock sb->s_umount. It will always be able to lock it,
2175 	 * except when the filesystem is being unmounted or being frozen, but in
2176 	 * those cases sync_filesystem() is called, which results in calling
2177 	 * writeback_inodes_sb() while holding a write lock on sb->s_umount.
2178 	 * Note that we don't call writeback_inodes_sb() directly, because it
2179 	 * will emit a warning if sb->s_umount is not locked.
2180 	 */
2181 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2182 		try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
2183 	return 0;
2184 }
2185 
2186 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
2187 {
2188 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2189 		btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
2190 }
2191 
2192 /*
2193  * Add a pending snapshot associated with the given transaction handle to the
2194  * respective transaction. This must be called after the transaction commit started
2195  * and while holding fs_info->trans_lock.
2196  * This serves to guarantee a caller of btrfs_commit_transaction() that it can
2197  * safely free the pending snapshot pointer in case btrfs_commit_transaction()
2198  * returns an error.
2199  */
2200 static void add_pending_snapshot(struct btrfs_trans_handle *trans)
2201 {
2202 	struct btrfs_transaction *cur_trans = trans->transaction;
2203 
2204 	if (!trans->pending_snapshot)
2205 		return;
2206 
2207 	lockdep_assert_held(&trans->fs_info->trans_lock);
2208 	ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP,
2209 	       "cur_trans->state=%d", cur_trans->state);
2210 
2211 	list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
2212 }
2213 
2214 static void update_commit_stats(struct btrfs_fs_info *fs_info)
2215 {
2216 	ktime_t now = ktime_get_ns();
2217 	ktime_t interval = now - fs_info->commit_stats.critical_section_start_time;
2218 
2219 	ASSERT(fs_info->commit_stats.critical_section_start_time);
2220 
2221 	fs_info->commit_stats.commit_count++;
2222 	fs_info->commit_stats.last_commit_dur = interval;
2223 	fs_info->commit_stats.max_commit_dur =
2224 			max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
2225 	fs_info->commit_stats.total_commit_dur += interval;
2226 	fs_info->commit_stats.critical_section_start_time = 0;
2227 }
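
/*
 * Editorial sketch (hypothetical helper, not part of the original file):
 * the counters maintained above are enough to derive an average commit
 * duration, e.g. for a sysfs read-out. Assumes div64_u64() from
 * <linux/math64.h>.
 */
static u64 example_avg_commit_dur_ns(const struct btrfs_fs_info *fs_info)
{
	u64 count = fs_info->commit_stats.commit_count;

	if (count == 0)
		return 0;
	return div64_u64(fs_info->commit_stats.total_commit_dur, count);
}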
2228 
2229 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2230 {
2231 	struct btrfs_fs_info *fs_info = trans->fs_info;
2232 	struct btrfs_transaction *cur_trans = trans->transaction;
2233 	struct btrfs_transaction *prev_trans = NULL;
2234 	int ret;
2235 
2236 	ASSERT(refcount_read(&trans->use_count) == 1,
2237 	       "refcount_read(&trans->use_count)=%d", refcount_read(&trans->use_count));
2238 	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2239 
2240 	clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
2241 
2242 	/* Stop the commit early if ->aborted is set */
2243 	if (TRANS_ABORTED(cur_trans)) {
2244 		ret = cur_trans->aborted;
2245 		goto lockdep_trans_commit_start_release;
2246 	}
2247 
2248 	btrfs_trans_release_metadata(trans);
2249 	trans->block_rsv = NULL;
2250 
2251 	/*
2252 	 * We only want one transaction commit doing the flushing so we do not
2253 	 * waste a bunch of time on lock contention on the extent root node.
2254 	 */
2255 	if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
2256 			      &cur_trans->delayed_refs.flags)) {
2257 		/*
2258 		 * Make a pass through all the delayed refs we have so far.
2259 		 * Any running threads may add more while we are here.
2260 		 */
2261 		ret = btrfs_run_delayed_refs(trans, 0);
2262 		if (ret)
2263 			goto lockdep_trans_commit_start_release;
2264 	}
2265 
2266 	btrfs_create_pending_block_groups(trans);
2267 
2268 	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
2269 		int run_it = 0;
2270 
2271 		/*
2272 		 * This mutex is also taken before trying to set block groups
2273 		 * read-only.  We need to make sure that nobody has set a block
2274 		 * group read-only after extents from that block group have been
2275 		 * allocated for cache files.  btrfs_set_block_group_ro
2276 		 * will wait for the transaction to commit if it
2277 		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
2278 		 *
2279 		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2280 		 * only one process starts all the block group IO.  It wouldn't
2281 		 * hurt to have more than one go through, but there's no
2282 		 * real advantage to it either.
2283 		 */
2284 		mutex_lock(&fs_info->ro_block_group_mutex);
2285 		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2286 				      &cur_trans->flags))
2287 			run_it = 1;
2288 		mutex_unlock(&fs_info->ro_block_group_mutex);
2289 
2290 		if (run_it) {
2291 			ret = btrfs_start_dirty_block_groups(trans);
2292 			if (unlikely(ret))
2293 				goto lockdep_trans_commit_start_release;
2294 		}
2295 	}
2296 
2297 	spin_lock(&fs_info->trans_lock);
2298 	if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
2299 		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2300 
2301 		add_pending_snapshot(trans);
2302 
2303 		spin_unlock(&fs_info->trans_lock);
2304 		refcount_inc(&cur_trans->use_count);
2305 
2306 		if (trans->in_fsync)
2307 			want_state = TRANS_STATE_SUPER_COMMITTED;
2308 
2309 		btrfs_trans_state_lockdep_release(fs_info,
2310 						  BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2311 		ret = btrfs_end_transaction(trans);
2312 		wait_for_commit(cur_trans, want_state);
2313 
2314 		if (TRANS_ABORTED(cur_trans))
2315 			ret = cur_trans->aborted;
2316 
2317 		btrfs_put_transaction(cur_trans);
2318 
2319 		return ret;
2320 	}
2321 
2322 	cur_trans->state = TRANS_STATE_COMMIT_PREP;
2323 	wake_up(&fs_info->transaction_blocked_wait);
2324 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2325 
2326 	if (!list_is_first(&cur_trans->list, &fs_info->trans_list)) {
2327 		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2328 
2329 		if (trans->in_fsync)
2330 			want_state = TRANS_STATE_SUPER_COMMITTED;
2331 
2332 		prev_trans = list_prev_entry(cur_trans, list);
2333 		if (prev_trans->state < want_state) {
2334 			refcount_inc(&prev_trans->use_count);
2335 			spin_unlock(&fs_info->trans_lock);
2336 
2337 			wait_for_commit(prev_trans, want_state);
2338 
2339 			ret = READ_ONCE(prev_trans->aborted);
2340 
2341 			btrfs_put_transaction(prev_trans);
2342 			if (unlikely(ret))
2343 				goto lockdep_release;
2344 			spin_lock(&fs_info->trans_lock);
2345 		}
2346 	} else {
2347 		/*
2348 		 * The previous transaction was aborted and was already removed
2349 		 * from the list of transactions at fs_info->trans_list. So we
2350 		 * abort to prevent writing a new superblock that reflects a
2351 		 * corrupt state (pointing to trees with unwritten nodes/leafs).
2352 		 */
2353 		if (unlikely(BTRFS_FS_ERROR(fs_info))) {
2354 			spin_unlock(&fs_info->trans_lock);
2355 			ret = -EROFS;
2356 			goto lockdep_release;
2357 		}
2358 	}
2359 
2360 	cur_trans->state = TRANS_STATE_COMMIT_START;
2361 	wake_up(&fs_info->transaction_blocked_wait);
2362 	spin_unlock(&fs_info->trans_lock);
2363 
2364 	/*
2365 	 * Get the time spent on the work done by the commit thread and not
2366 	 * the time spent waiting on a previous commit
2367 	 */
2368 	fs_info->commit_stats.critical_section_start_time = ktime_get_ns();
2369 	extwriter_counter_dec(cur_trans, trans->type);
2370 
2371 	ret = btrfs_start_delalloc_flush(fs_info);
2372 	if (unlikely(ret))
2373 		goto lockdep_release;
2374 
2375 	ret = btrfs_run_delayed_items(trans);
2376 	if (unlikely(ret))
2377 		goto lockdep_release;
2378 
2379 	/*
2380 	 * The thread has started/joined the transaction thus it holds the
2381 	 * lockdep map as a reader. It has to release it before acquiring the
2382 	 * lockdep map as a writer.
2383 	 */
2384 	btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
2385 	btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters);
2386 	wait_event(cur_trans->writer_wait,
2387 		   extwriter_counter_read(cur_trans) == 0);
2388 
2389 	/* Some pending items might have been added after the previous flush. */
2390 	ret = btrfs_run_delayed_items(trans);
2391 	if (unlikely(ret)) {
2392 		btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2393 		goto cleanup_transaction;
2394 	}
2395 
2396 	btrfs_wait_delalloc_flush(fs_info);
2397 
2398 	/*
2399 	 * Wait for all ordered extents started by a fast fsync that joined this
2400 	 * transaction. Otherwise if this transaction commits before the ordered
2401 	 * extents complete we lose logged data after a power failure.
2402 	 */
2403 	btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered);
2404 	wait_event(cur_trans->pending_wait,
2405 		   atomic_read(&cur_trans->pending_ordered) == 0);
2406 
2407 	btrfs_scrub_pause(fs_info);
2408 	/*
2409 	 * Ok now we need to make sure to block out any other joins while we
2410 	 * commit the transaction.  We could have started a join before setting
2411 	 * COMMIT_DOING, so make sure to wait for num_writers to reach 1 again.
2412 	 */
2413 	spin_lock(&fs_info->trans_lock);
2414 	add_pending_snapshot(trans);
2415 	cur_trans->state = TRANS_STATE_COMMIT_DOING;
2416 	spin_unlock(&fs_info->trans_lock);
2417 
2418 	/*
2419 	 * The thread has started/joined the transaction thus it holds the
2420 	 * lockdep map as a reader. It has to release it before acquiring the
2421 	 * lockdep map as a writer.
2422 	 */
2423 	btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2424 	btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers);
2425 	wait_event(cur_trans->writer_wait,
2426 		   atomic_read(&cur_trans->num_writers) == 1);
2427 
2428 	/*
2429 	 * Make lockdep happy by acquiring the state locks after
2430 	 * btrfs_trans_num_writers is released. If we acquired the state locks
2431 	 * before releasing the btrfs_trans_num_writers lock then lockdep would
2432 	 * complain because we did not follow the reverse order unlocking rule.
2433 	 */
2434 	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2435 	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2436 	btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2437 
2438 	/*
2439 	 * We've started the commit, clear the flag in case we were triggered to
2440 	 * do an async commit but somebody else started before the transaction
2441 	 * kthread could do the work.
2442 	 */
2443 	clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
2444 
2445 	if (TRANS_ABORTED(cur_trans)) {
2446 		ret = cur_trans->aborted;
2447 		btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2448 		goto scrub_continue;
2449 	}
2450 	/*
2451 	 * the reloc mutex makes sure that we stop
2452 	 * the balancing code from coming in and moving
2453 	 * extents around in the middle of the commit
2454 	 */
2455 	mutex_lock(&fs_info->reloc_mutex);
2456 
2457 	/*
2458 	 * We needn't worry about the delayed items because we will
2459 	 * deal with them in create_pending_snapshot(), which is the
2460 	 * core function of the snapshot creation.
2461 	 */
2462 	ret = create_pending_snapshots(trans);
2463 	if (unlikely(ret))
2464 		goto unlock_reloc;
2465 
2466 	/*
2467 	 * We insert the dir indexes of the snapshots and update the inode
2468 	 * of the snapshots' parents after the snapshot creation, so there
2469 	 * are some delayed items which are not dealt with. Now deal with
2470 	 * them.
2471 	 *
2472 	 * We needn't worry that this operation will corrupt the snapshots,
2473 	 * because all the trees which are snapshotted will be forced to COW
2474 	 * the nodes and leaves.
2475 	 */
2476 	ret = btrfs_run_delayed_items(trans);
2477 	if (unlikely(ret))
2478 		goto unlock_reloc;
2479 
2480 	ret = btrfs_run_delayed_refs(trans, U64_MAX);
2481 	if (unlikely(ret))
2482 		goto unlock_reloc;
2483 
2484 	/*
2485 	 * make sure none of the code above managed to slip in a
2486 	 * delayed item
2487 	 */
2488 	btrfs_assert_delayed_root_empty(fs_info);
2489 
2490 	WARN_ON(cur_trans != trans->transaction);
2491 
2492 	ret = commit_fs_roots(trans);
2493 	if (unlikely(ret))
2494 		goto unlock_reloc;
2495 
2496 	/* commit_fs_roots() got rid of all the tree log roots, so it is now
2497 	 * safe to free the log root tree.
2498 	 */
2499 	btrfs_free_log_root_tree(trans, fs_info);
2500 
2501 	/*
2502 	 * Since fs roots are all committed, we can get a quite accurate
2503 	 * new_roots. So let's do quota accounting.
2504 	 */
2505 	ret = btrfs_qgroup_account_extents(trans);
2506 	if (unlikely(ret < 0))
2507 		goto unlock_reloc;
2508 
2509 	ret = commit_cowonly_roots(trans);
2510 	if (unlikely(ret))
2511 		goto unlock_reloc;
2512 
2513 	/*
2514 	 * The tasks which save the space cache and inode cache may also
2515 	 * update ->aborted, check it.
2516 	 */
2517 	if (TRANS_ABORTED(cur_trans)) {
2518 		ret = cur_trans->aborted;
2519 		goto unlock_reloc;
2520 	}
2521 
2522 	cur_trans = fs_info->running_transaction;
2523 
2524 	btrfs_set_root_node(&fs_info->tree_root->root_item,
2525 			    fs_info->tree_root->node);
2526 	list_add_tail(&fs_info->tree_root->dirty_list,
2527 		      &cur_trans->switch_commits);
2528 
2529 	btrfs_set_root_node(&fs_info->chunk_root->root_item,
2530 			    fs_info->chunk_root->node);
2531 	list_add_tail(&fs_info->chunk_root->dirty_list,
2532 		      &cur_trans->switch_commits);
2533 
2534 	switch_commit_roots(trans);
2535 
2536 	ASSERT(list_empty(&cur_trans->dirty_bgs));
2537 	ASSERT(list_empty(&cur_trans->io_bgs));
2538 	update_super_roots(fs_info);
2539 
2540 	btrfs_set_super_log_root(fs_info->super_copy, 0);
2541 	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2542 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2543 	       sizeof(*fs_info->super_copy));
2544 
2545 	btrfs_commit_device_sizes(cur_trans);
2546 
2547 	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2548 	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2549 
2550 	btrfs_trans_release_chunk_metadata(trans);
2551 
2552 	/*
2553 	 * Before changing the transaction state to TRANS_STATE_UNBLOCKED and
2554 	 * setting fs_info->running_transaction to NULL, lock tree_log_mutex to
2555 	 * make sure that before we commit our superblock, no other task can
2556 	 * start a new transaction and commit a log tree before we commit our
2557 	 * superblock. Anyone trying to commit a log tree locks this mutex before
2558 	 * writing its superblock.
2559 	 */
2560 	mutex_lock(&fs_info->tree_log_mutex);
2561 
2562 	spin_lock(&fs_info->trans_lock);
2563 	cur_trans->state = TRANS_STATE_UNBLOCKED;
2564 	fs_info->running_transaction = NULL;
2565 	spin_unlock(&fs_info->trans_lock);
2566 	mutex_unlock(&fs_info->reloc_mutex);
2567 
2568 	wake_up(&fs_info->transaction_wait);
2569 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2570 
2571 	/* If we have features changed, wake up the cleaner to update sysfs. */
2572 	if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) &&
2573 	    fs_info->cleaner_kthread)
2574 		wake_up_process(fs_info->cleaner_kthread);
2575 
2576 	/*
2577 	 * Uninhibit writeback on all extent buffers inhibited during this
2578 	 * transaction before writing them to disk. Inhibiting prevented
2579 	 * writeback while the transaction was building, but now we need
2580 	 * them written.
2581 	 */
2582 	btrfs_uninhibit_all_eb_writeback(trans);
2583 
2584 	ret = btrfs_write_and_wait_transaction(trans);
2585 	if (unlikely(ret)) {
2586 		btrfs_err(fs_info, "error while writing out transaction: %d", ret);
2587 		mutex_unlock(&fs_info->tree_log_mutex);
2588 		goto scrub_continue;
2589 	}
2590 
2591 	ret = write_all_supers(trans);
2592 	/*
2593 	 * the super is written, we can safely allow the tree-loggers
2594 	 * to go about their business
2595 	 */
2596 	mutex_unlock(&fs_info->tree_log_mutex);
2597 	if (unlikely(ret))
2598 		goto scrub_continue;
2599 
2600 	update_commit_stats(fs_info);
2601 	/*
2602 	 * We needn't acquire the lock here because there is no other task
2603 	 * which can change it.
2604 	 */
2605 	cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
2606 	wake_up(&cur_trans->commit_wait);
2607 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2608 
2609 	ret = btrfs_finish_extent_commit(trans);
2610 	if (unlikely(ret))
2611 		goto scrub_continue;
2612 
2613 	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2614 		btrfs_clear_space_info_full(fs_info);
2615 
2616 	btrfs_set_last_trans_committed(fs_info, cur_trans->transid);
2617 	/*
2618 	 * We needn't acquire the lock here because there is no other task
2619 	 * which can change it.
2620 	 */
2621 	cur_trans->state = TRANS_STATE_COMPLETED;
2622 	wake_up(&cur_trans->commit_wait);
2623 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2624 
2625 	spin_lock(&fs_info->trans_lock);
2626 	list_del_init(&cur_trans->list);
2627 	spin_unlock(&fs_info->trans_lock);
2628 
2629 	btrfs_put_transaction(cur_trans);
2630 	btrfs_put_transaction(cur_trans);
2631 
2632 	if (trans->type & __TRANS_FREEZABLE)
2633 		sb_end_intwrite(fs_info->sb);
2634 
2635 	trace_btrfs_transaction_commit(fs_info);
2636 
2637 	btrfs_scrub_continue(fs_info);
2638 
2639 	if (current->journal_info == trans)
2640 		current->journal_info = NULL;
2641 
2642 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
2643 
2644 	return ret;
2645 
2646 unlock_reloc:
2647 	mutex_unlock(&fs_info->reloc_mutex);
2648 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2649 scrub_continue:
2650 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2651 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED);
2652 	btrfs_scrub_continue(fs_info);
2653 cleanup_transaction:
2654 	btrfs_trans_release_metadata(trans);
2655 	btrfs_cleanup_pending_block_groups(trans);
2656 	btrfs_trans_release_chunk_metadata(trans);
2657 	trans->block_rsv = NULL;
2658 	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2659 	cleanup_transaction(trans, ret);
2660 
2661 	return ret;
2662 
2663 lockdep_release:
2664 	btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters);
2665 	btrfs_lockdep_release(fs_info, btrfs_trans_num_writers);
2666 	goto cleanup_transaction;
2667 
2668 lockdep_trans_commit_start_release:
2669 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2670 	btrfs_end_transaction(trans);
2671 	return ret;
2672 }
2673 
2674 /*
2675  * Return 0 if there are no more dead_roots at the time of the call (errors
2676  * from dropping a snapshot are swallowed and also yield 0), or
2677  * 1 if a dead root was processed; there may be more, so call again.
2678  *
2679  * A return of 1 means there are certainly more snapshots to delete, but
2680  * if a new one arrives during processing, this may return 0. We don't mind,
2681  * because btrfs_commit_super() will poke the cleaner thread and it will
2682  * process it a few seconds later.
2683  */
2684 int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
2685 {
2686 	struct btrfs_root *root;
2687 	int ret;
2688 
2689 	spin_lock(&fs_info->trans_lock);
2690 	if (list_empty(&fs_info->dead_roots)) {
2691 		spin_unlock(&fs_info->trans_lock);
2692 		return 0;
2693 	}
2694 	root = list_first_entry(&fs_info->dead_roots,
2695 			struct btrfs_root, root_list);
2696 	list_del_init(&root->root_list);
2697 	spin_unlock(&fs_info->trans_lock);
2698 
2699 	btrfs_debug(fs_info, "cleaner removing %llu", btrfs_root_id(root));
2700 
2701 	btrfs_kill_all_delayed_nodes(root);
2702 
2703 	if (btrfs_header_backref_rev(root->node) <
2704 			BTRFS_MIXED_BACKREF_REV)
2705 		ret = btrfs_drop_snapshot(root, false, false);
2706 	else
2707 		ret = btrfs_drop_snapshot(root, true, false);
2708 
2709 	btrfs_put_root(root);
2710 	return (ret < 0) ? 0 : 1;
2711 }
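
/*
 * Editorial sketch (hypothetical caller, not part of the original file):
 * the return convention above is meant for a loop like the cleaner
 * kthread's - keep calling while 1 is returned, and stop on 0 (no more
 * dead roots at the moment, or the last drop failed).
 */
static void example_drain_dead_roots(struct btrfs_fs_info *fs_info)
{
	while (btrfs_clean_one_deleted_snapshot(fs_info) == 1)
		cond_resched();	/* be nice between long drop operations */
}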
2712 
2713 /*
2714  * We only mark the transaction aborted and then set the file system read-only.
2715  * This will prevent new transactions from starting or trying to join this
2716  * one.
2717  *
2718  * This means that error recovery at the call site is limited to freeing
2719  * any local memory allocations and passing the error code up without
2720  * further cleanup. The transaction should complete as it normally would
2721  * in the call path but will return -EIO.
2722  *
2723  * We'll complete the cleanup in btrfs_end_transaction and
2724  * btrfs_commit_transaction.
2725  */
2726 void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
2727 				      const char *function,
2728 				      unsigned int line, int error, bool first_hit)
2729 {
2730 	struct btrfs_fs_info *fs_info = trans->fs_info;
2731 
2732 	WRITE_ONCE(trans->aborted, error);
2733 	WRITE_ONCE(trans->transaction->aborted, error);
2734 	if (first_hit && error == -ENOSPC)
2735 		btrfs_dump_space_info_for_trans_abort(fs_info);
2736 	/* Wake up anybody who may be waiting on this transaction */
2737 	wake_up(&fs_info->transaction_wait);
2738 	wake_up(&fs_info->transaction_blocked_wait);
2739 	__btrfs_handle_fs_error(fs_info, function, line, error, NULL);
2740 }
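
/*
 * Editorial sketch (hypothetical function, not part of the original file):
 * callers normally reach the function above through the
 * btrfs_abort_transaction() macro used throughout this file, which fills
 * in the function name, line number and first_hit tracking.
 */
static int example_do_tree_work(struct btrfs_trans_handle *trans)
{
	int ret;

	ret = btrfs_run_delayed_items(trans);	/* any tree modification */
	if (ret) {
		btrfs_abort_transaction(trans, ret);	/* wraps __btrfs_abort_transaction() */
		return ret;
	}
	return 0;
}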
2741 
2742 int __init btrfs_transaction_init(void)
2743 {
2744 	btrfs_trans_handle_cachep = KMEM_CACHE(btrfs_trans_handle, SLAB_TEMPORARY);
2745 	if (!btrfs_trans_handle_cachep)
2746 		return -ENOMEM;
2747 	return 0;
2748 }
2749 
2750 void __cold btrfs_transaction_exit(void)
2751 {
2752 	kmem_cache_destroy(btrfs_trans_handle_cachep);
2753 }
2754