xref: /linux/fs/btrfs/transaction.c (revision ec63e2a4897075e427c121d863bd89c44578094f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/slab.h>
8 #include <linux/sched.h>
9 #include <linux/writeback.h>
10 #include <linux/pagemap.h>
11 #include <linux/blkdev.h>
12 #include <linux/uuid.h>
13 #include "ctree.h"
14 #include "disk-io.h"
15 #include "transaction.h"
16 #include "locking.h"
17 #include "tree-log.h"
18 #include "inode-map.h"
19 #include "volumes.h"
20 #include "dev-replace.h"
21 #include "qgroup.h"
22 
23 #define BTRFS_ROOT_TRANS_TAG 0
24 
25 static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
26 	[TRANS_STATE_RUNNING]		= 0U,
27 	[TRANS_STATE_BLOCKED]		=  __TRANS_START,
28 	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
29 	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
30 					   __TRANS_ATTACH |
31 					   __TRANS_JOIN),
32 	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
33 					   __TRANS_ATTACH |
34 					   __TRANS_JOIN |
35 					   __TRANS_JOIN_NOLOCK),
36 	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
37 					   __TRANS_ATTACH |
38 					   __TRANS_JOIN |
39 					   __TRANS_JOIN_NOLOCK),
40 };
41 
42 void btrfs_put_transaction(struct btrfs_transaction *transaction)
43 {
44 	WARN_ON(refcount_read(&transaction->use_count) == 0);
45 	if (refcount_dec_and_test(&transaction->use_count)) {
46 		BUG_ON(!list_empty(&transaction->list));
47 		WARN_ON(!RB_EMPTY_ROOT(
48 				&transaction->delayed_refs.href_root.rb_root));
49 		if (transaction->delayed_refs.pending_csums)
50 			btrfs_err(transaction->fs_info,
51 				  "pending csums is %llu",
52 				  transaction->delayed_refs.pending_csums);
53 		while (!list_empty(&transaction->pending_chunks)) {
54 			struct extent_map *em;
55 
56 			em = list_first_entry(&transaction->pending_chunks,
57 					      struct extent_map, list);
58 			list_del_init(&em->list);
59 			free_extent_map(em);
60 		}
61 		/*
62 		 * If any block groups are found in ->deleted_bgs then it's
63 		 * because the transaction was aborted and a commit did not
64 		 * happen (things failed before writing the new superblock
65 		 * and calling btrfs_finish_extent_commit()), so we can not
66 		 * discard the physical locations of the block groups.
67 		 */
68 		while (!list_empty(&transaction->deleted_bgs)) {
69 			struct btrfs_block_group_cache *cache;
70 
71 			cache = list_first_entry(&transaction->deleted_bgs,
72 						 struct btrfs_block_group_cache,
73 						 bg_list);
74 			list_del_init(&cache->bg_list);
75 			btrfs_put_block_group_trimming(cache);
76 			btrfs_put_block_group(cache);
77 		}
78 		kfree(transaction);
79 	}
80 }
81 
82 static void clear_btree_io_tree(struct extent_io_tree *tree)
83 {
84 	spin_lock(&tree->lock);
85 	/*
86 	 * Do a single barrier for the waitqueue_active check here, the state
87 	 * of the waitqueue should not change once clear_btree_io_tree is
88 	 * called.
89 	 */
90 	smp_mb();
91 	while (!RB_EMPTY_ROOT(&tree->state)) {
92 		struct rb_node *node;
93 		struct extent_state *state;
94 
95 		node = rb_first(&tree->state);
96 		state = rb_entry(node, struct extent_state, rb_node);
97 		rb_erase(&state->rb_node, &tree->state);
98 		RB_CLEAR_NODE(&state->rb_node);
99 		/*
100 		 * btree io trees aren't supposed to have tasks waiting for
101 		 * changes in the flags of extent states ever.
102 		 */
103 		ASSERT(!waitqueue_active(&state->wq));
104 		free_extent_state(state);
105 
106 		cond_resched_lock(&tree->lock);
107 	}
108 	spin_unlock(&tree->lock);
109 }
110 
111 static noinline void switch_commit_roots(struct btrfs_transaction *trans)
112 {
113 	struct btrfs_fs_info *fs_info = trans->fs_info;
114 	struct btrfs_root *root, *tmp;
115 
116 	down_write(&fs_info->commit_root_sem);
117 	list_for_each_entry_safe(root, tmp, &trans->switch_commits,
118 				 dirty_list) {
119 		list_del_init(&root->dirty_list);
120 		free_extent_buffer(root->commit_root);
121 		root->commit_root = btrfs_root_node(root);
122 		if (is_fstree(root->root_key.objectid))
123 			btrfs_unpin_free_ino(root);
124 		clear_btree_io_tree(&root->dirty_log_pages);
125 		btrfs_qgroup_clean_swapped_blocks(root);
126 	}
127 
128 	/* We can free old roots now. */
129 	spin_lock(&trans->dropped_roots_lock);
130 	while (!list_empty(&trans->dropped_roots)) {
131 		root = list_first_entry(&trans->dropped_roots,
132 					struct btrfs_root, root_list);
133 		list_del_init(&root->root_list);
134 		spin_unlock(&trans->dropped_roots_lock);
135 		btrfs_drop_and_free_fs_root(fs_info, root);
136 		spin_lock(&trans->dropped_roots_lock);
137 	}
138 	spin_unlock(&trans->dropped_roots_lock);
139 	up_write(&fs_info->commit_root_sem);
140 }
141 
142 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
143 					 unsigned int type)
144 {
145 	if (type & TRANS_EXTWRITERS)
146 		atomic_inc(&trans->num_extwriters);
147 }
148 
149 static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
150 					 unsigned int type)
151 {
152 	if (type & TRANS_EXTWRITERS)
153 		atomic_dec(&trans->num_extwriters);
154 }
155 
156 static inline void extwriter_counter_init(struct btrfs_transaction *trans,
157 					  unsigned int type)
158 {
159 	atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
160 }
161 
162 static inline int extwriter_counter_read(struct btrfs_transaction *trans)
163 {
164 	return atomic_read(&trans->num_extwriters);
165 }
166 
167 /*
168  * either allocate a new transaction or hop into the existing one
169  */
170 static noinline int join_transaction(struct btrfs_fs_info *fs_info,
171 				     unsigned int type)
172 {
173 	struct btrfs_transaction *cur_trans;
174 
175 	spin_lock(&fs_info->trans_lock);
176 loop:
177 	/* The file system has been taken offline. No new transactions. */
178 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
179 		spin_unlock(&fs_info->trans_lock);
180 		return -EROFS;
181 	}
182 
183 	cur_trans = fs_info->running_transaction;
184 	if (cur_trans) {
185 		if (cur_trans->aborted) {
186 			spin_unlock(&fs_info->trans_lock);
187 			return cur_trans->aborted;
188 		}
189 		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
190 			spin_unlock(&fs_info->trans_lock);
191 			return -EBUSY;
192 		}
193 		refcount_inc(&cur_trans->use_count);
194 		atomic_inc(&cur_trans->num_writers);
195 		extwriter_counter_inc(cur_trans, type);
196 		spin_unlock(&fs_info->trans_lock);
197 		return 0;
198 	}
199 	spin_unlock(&fs_info->trans_lock);
200 
201 	/*
202 	 * If we are ATTACH, we just want to catch the current transaction,
203 	 * and commit it. If there is no transaction, just return ENOENT.
204 	 */
205 	if (type == TRANS_ATTACH)
206 		return -ENOENT;
207 
208 	/*
209 	 * JOIN_NOLOCK only happens during the transaction commit, so
210 	 * it is impossible that ->running_transaction is NULL
211 	 */
212 	BUG_ON(type == TRANS_JOIN_NOLOCK);
213 
214 	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
215 	if (!cur_trans)
216 		return -ENOMEM;
217 
218 	spin_lock(&fs_info->trans_lock);
219 	if (fs_info->running_transaction) {
220 		/*
221 		 * someone started a transaction after we unlocked.  Make sure
222 		 * to redo the checks above
223 		 */
224 		kfree(cur_trans);
225 		goto loop;
226 	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
227 		spin_unlock(&fs_info->trans_lock);
228 		kfree(cur_trans);
229 		return -EROFS;
230 	}
231 
232 	cur_trans->fs_info = fs_info;
233 	atomic_set(&cur_trans->num_writers, 1);
234 	extwriter_counter_init(cur_trans, type);
235 	init_waitqueue_head(&cur_trans->writer_wait);
236 	init_waitqueue_head(&cur_trans->commit_wait);
237 	cur_trans->state = TRANS_STATE_RUNNING;
238 	/*
239 	 * One for this trans handle, one so it will live on until we
240 	 * commit the transaction.
241 	 */
242 	refcount_set(&cur_trans->use_count, 2);
243 	cur_trans->flags = 0;
244 	cur_trans->start_time = ktime_get_seconds();
245 
246 	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
247 
248 	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
249 	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
250 	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
251 
252 	/*
253 	 * although the tree mod log is per file system and not per transaction,
254 	 * the log must never go across transaction boundaries.
255 	 */
256 	smp_mb();
257 	if (!list_empty(&fs_info->tree_mod_seq_list))
258 		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
259 	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
260 		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
261 	atomic64_set(&fs_info->tree_mod_seq, 0);
262 
263 	spin_lock_init(&cur_trans->delayed_refs.lock);
264 
265 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
266 	INIT_LIST_HEAD(&cur_trans->pending_chunks);
267 	INIT_LIST_HEAD(&cur_trans->switch_commits);
268 	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
269 	INIT_LIST_HEAD(&cur_trans->io_bgs);
270 	INIT_LIST_HEAD(&cur_trans->dropped_roots);
271 	mutex_init(&cur_trans->cache_write_mutex);
272 	cur_trans->num_dirty_bgs = 0;
273 	spin_lock_init(&cur_trans->dirty_bgs_lock);
274 	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
275 	spin_lock_init(&cur_trans->dropped_roots_lock);
276 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
277 	extent_io_tree_init(&cur_trans->dirty_pages,
278 			     fs_info->btree_inode);
279 	fs_info->generation++;
280 	cur_trans->transid = fs_info->generation;
281 	fs_info->running_transaction = cur_trans;
282 	cur_trans->aborted = 0;
283 	spin_unlock(&fs_info->trans_lock);
284 
285 	return 0;
286 }
287 
288 /*
289  * this does all the record keeping required to make sure that a reference
290  * counted root is properly recorded in a given transaction.  This is required
291  * to make sure the old root from before we joined the transaction is deleted
292  * when the transaction commits
293  */
294 static int record_root_in_trans(struct btrfs_trans_handle *trans,
295 			       struct btrfs_root *root,
296 			       int force)
297 {
298 	struct btrfs_fs_info *fs_info = root->fs_info;
299 
300 	if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
301 	    root->last_trans < trans->transid) || force) {
302 		WARN_ON(root == fs_info->extent_root);
303 		WARN_ON(!force && root->commit_root != root->node);
304 
305 		/*
306 		 * see below for IN_TRANS_SETUP usage rules
307 		 * we have the reloc mutex held now, so there
308 		 * is only one writer in this function
309 		 */
310 		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
311 
312 		/* make sure readers find IN_TRANS_SETUP before
313 		 * they find our root->last_trans update
314 		 */
315 		smp_wmb();
316 
317 		spin_lock(&fs_info->fs_roots_radix_lock);
318 		if (root->last_trans == trans->transid && !force) {
319 			spin_unlock(&fs_info->fs_roots_radix_lock);
320 			return 0;
321 		}
322 		radix_tree_tag_set(&fs_info->fs_roots_radix,
323 				   (unsigned long)root->root_key.objectid,
324 				   BTRFS_ROOT_TRANS_TAG);
325 		spin_unlock(&fs_info->fs_roots_radix_lock);
326 		root->last_trans = trans->transid;
327 
328 		/* this is pretty tricky.  We don't want to
329 		 * take the relocation lock in btrfs_record_root_in_trans
330 		 * unless we're really doing the first setup for this root in
331 		 * this transaction.
332 		 *
333 		 * Normally we'd use root->last_trans as a flag to decide
334 		 * if we want to take the expensive mutex.
335 		 *
336 		 * But, we have to set root->last_trans before we
337 		 * init the relocation root, otherwise, we trip over warnings
338 		 * in ctree.c.  The solution used here is to flag ourselves
339 		 * with root IN_TRANS_SETUP.  When this is 1, we're still
340 		 * fixing up the reloc trees and everyone must wait.
341 		 *
342 		 * When this is zero, they can trust root->last_trans and fly
343 		 * through btrfs_record_root_in_trans without having to take the
344 		 * lock.  smp_wmb() makes sure that all the writes above are
345 		 * done before we pop in the zero below
346 		 */
347 		btrfs_init_reloc_root(trans, root);
348 		smp_mb__before_atomic();
349 		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
350 	}
351 	return 0;
352 }
353 
354 
355 void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
356 			    struct btrfs_root *root)
357 {
358 	struct btrfs_fs_info *fs_info = root->fs_info;
359 	struct btrfs_transaction *cur_trans = trans->transaction;
360 
361 	/* Add ourselves to the transaction dropped list */
362 	spin_lock(&cur_trans->dropped_roots_lock);
363 	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
364 	spin_unlock(&cur_trans->dropped_roots_lock);
365 
366 	/* Make sure we don't try to update the root at commit time */
367 	spin_lock(&fs_info->fs_roots_radix_lock);
368 	radix_tree_tag_clear(&fs_info->fs_roots_radix,
369 			     (unsigned long)root->root_key.objectid,
370 			     BTRFS_ROOT_TRANS_TAG);
371 	spin_unlock(&fs_info->fs_roots_radix_lock);
372 }
373 
374 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
375 			       struct btrfs_root *root)
376 {
377 	struct btrfs_fs_info *fs_info = root->fs_info;
378 
379 	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
380 		return 0;
381 
382 	/*
383 	 * see record_root_in_trans for comments about IN_TRANS_SETUP usage
384 	 * and barriers
385 	 */
386 	smp_rmb();
387 	if (root->last_trans == trans->transid &&
388 	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
389 		return 0;
390 
391 	mutex_lock(&fs_info->reloc_mutex);
392 	record_root_in_trans(trans, root, 0);
393 	mutex_unlock(&fs_info->reloc_mutex);
394 
395 	return 0;
396 }
397 
398 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
399 {
400 	return (trans->state >= TRANS_STATE_BLOCKED &&
401 		trans->state < TRANS_STATE_UNBLOCKED &&
402 		!trans->aborted);
403 }
404 
405 /* wait for commit against the current transaction to become unblocked
406  * when this is done, it is safe to start a new transaction, but the current
407  * transaction might not be fully on disk.
408  */
409 static void wait_current_trans(struct btrfs_fs_info *fs_info)
410 {
411 	struct btrfs_transaction *cur_trans;
412 
413 	spin_lock(&fs_info->trans_lock);
414 	cur_trans = fs_info->running_transaction;
415 	if (cur_trans && is_transaction_blocked(cur_trans)) {
416 		refcount_inc(&cur_trans->use_count);
417 		spin_unlock(&fs_info->trans_lock);
418 
419 		wait_event(fs_info->transaction_wait,
420 			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
421 			   cur_trans->aborted);
422 		btrfs_put_transaction(cur_trans);
423 	} else {
424 		spin_unlock(&fs_info->trans_lock);
425 	}
426 }
427 
428 static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
429 {
430 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
431 		return 0;
432 
433 	if (type == TRANS_START)
434 		return 1;
435 
436 	return 0;
437 }
438 
439 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
440 {
441 	struct btrfs_fs_info *fs_info = root->fs_info;
442 
443 	if (!fs_info->reloc_ctl ||
444 	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
445 	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
446 	    root->reloc_root)
447 		return false;
448 
449 	return true;
450 }
451 
452 static struct btrfs_trans_handle *
453 start_transaction(struct btrfs_root *root, unsigned int num_items,
454 		  unsigned int type, enum btrfs_reserve_flush_enum flush,
455 		  bool enforce_qgroups)
456 {
457 	struct btrfs_fs_info *fs_info = root->fs_info;
458 	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
459 	struct btrfs_trans_handle *h;
460 	struct btrfs_transaction *cur_trans;
461 	u64 num_bytes = 0;
462 	u64 qgroup_reserved = 0;
463 	bool reloc_reserved = false;
464 	int ret;
465 
466 	/* Send isn't supposed to start transactions. */
467 	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
468 
469 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
470 		return ERR_PTR(-EROFS);
471 
472 	if (current->journal_info) {
473 		WARN_ON(type & TRANS_EXTWRITERS);
474 		h = current->journal_info;
475 		refcount_inc(&h->use_count);
476 		WARN_ON(refcount_read(&h->use_count) > 2);
477 		h->orig_rsv = h->block_rsv;
478 		h->block_rsv = NULL;
479 		goto got_it;
480 	}
481 
482 	/*
483 	 * Do the reservation before we join the transaction so we can do all
484 	 * the appropriate flushing if need be.
485 	 */
486 	if (num_items && root != fs_info->chunk_root) {
487 		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
488 		u64 delayed_refs_bytes = 0;
489 
490 		qgroup_reserved = num_items * fs_info->nodesize;
491 		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
492 				enforce_qgroups);
493 		if (ret)
494 			return ERR_PTR(ret);
495 
496 		/*
497 		 * We want to reserve all the bytes we may need all at once, so
498 		 * we only do 1 enospc flushing cycle per transaction start.  We
499 		 * accomplish this by simply assuming we'll do 2 x num_items
500 		 * worth of delayed refs updates in this trans handle, and
501 		 * refill that amount for whatever is missing in the reserve.
502 		 */
503 		num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
504 		if (delayed_refs_rsv->full == 0) {
505 			delayed_refs_bytes = num_bytes;
506 			num_bytes <<= 1;
507 		}
508 
509 		/*
510 		 * Do the reservation for the relocation root creation
511 		 */
512 		if (need_reserve_reloc_root(root)) {
513 			num_bytes += fs_info->nodesize;
514 			reloc_reserved = true;
515 		}
516 
517 		ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
518 		if (ret)
519 			goto reserve_fail;
520 		if (delayed_refs_bytes) {
521 			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
522 							  delayed_refs_bytes);
523 			num_bytes -= delayed_refs_bytes;
524 		}
525 	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
526 		   !delayed_refs_rsv->full) {
527 		/*
528 		 * Some people call with btrfs_start_transaction(root, 0)
529 		 * because they can be throttled, but have some other mechanism
530 		 * for reserving space.  We still want these guys to refill the
531 		 * delayed block_rsv so just add 1 items worth of reservation
532 		 * here.
533 		 */
534 		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
535 		if (ret)
536 			goto reserve_fail;
537 	}
538 again:
539 	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
540 	if (!h) {
541 		ret = -ENOMEM;
542 		goto alloc_fail;
543 	}
544 
545 	/*
546 	 * If we are JOIN_NOLOCK we're already committing a transaction and
547 	 * waiting on this guy, so we don't need to do the sb_start_intwrite
548 	 * because we're already holding a ref.  We need this because we could
549 	 * have raced in and did an fsync() on a file which can kick a commit
550 	 * and then we deadlock with somebody doing a freeze.
551 	 *
552 	 * If we are ATTACH, it means we just want to catch the current
553 	 * transaction and commit it, so we needn't do sb_start_intwrite().
554 	 */
555 	if (type & __TRANS_FREEZABLE)
556 		sb_start_intwrite(fs_info->sb);
557 
558 	if (may_wait_transaction(fs_info, type))
559 		wait_current_trans(fs_info);
560 
561 	do {
562 		ret = join_transaction(fs_info, type);
563 		if (ret == -EBUSY) {
564 			wait_current_trans(fs_info);
565 			if (unlikely(type == TRANS_ATTACH))
566 				ret = -ENOENT;
567 		}
568 	} while (ret == -EBUSY);
569 
570 	if (ret < 0)
571 		goto join_fail;
572 
573 	cur_trans = fs_info->running_transaction;
574 
575 	h->transid = cur_trans->transid;
576 	h->transaction = cur_trans;
577 	h->root = root;
578 	refcount_set(&h->use_count, 1);
579 	h->fs_info = root->fs_info;
580 
581 	h->type = type;
582 	h->can_flush_pending_bgs = true;
583 	INIT_LIST_HEAD(&h->new_bgs);
584 
585 	smp_mb();
586 	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
587 	    may_wait_transaction(fs_info, type)) {
588 		current->journal_info = h;
589 		btrfs_commit_transaction(h);
590 		goto again;
591 	}
592 
593 	if (num_bytes) {
594 		trace_btrfs_space_reservation(fs_info, "transaction",
595 					      h->transid, num_bytes, 1);
596 		h->block_rsv = &fs_info->trans_block_rsv;
597 		h->bytes_reserved = num_bytes;
598 		h->reloc_reserved = reloc_reserved;
599 	}
600 
601 got_it:
602 	btrfs_record_root_in_trans(h, root);
603 
604 	if (!current->journal_info)
605 		current->journal_info = h;
606 	return h;
607 
608 join_fail:
609 	if (type & __TRANS_FREEZABLE)
610 		sb_end_intwrite(fs_info->sb);
611 	kmem_cache_free(btrfs_trans_handle_cachep, h);
612 alloc_fail:
613 	if (num_bytes)
614 		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
615 					num_bytes);
616 reserve_fail:
617 	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
618 	return ERR_PTR(ret);
619 }
620 
621 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
622 						   unsigned int num_items)
623 {
624 	return start_transaction(root, num_items, TRANS_START,
625 				 BTRFS_RESERVE_FLUSH_ALL, true);
626 }
627 
628 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
629 					struct btrfs_root *root,
630 					unsigned int num_items,
631 					int min_factor)
632 {
633 	struct btrfs_fs_info *fs_info = root->fs_info;
634 	struct btrfs_trans_handle *trans;
635 	u64 num_bytes;
636 	int ret;
637 
638 	/*
639 	 * We have two callers: unlink and block group removal.  The
640 	 * former should succeed even if we will temporarily exceed
641 	 * quota and the latter operates on the extent root so
642 	 * qgroup enforcement is ignored anyway.
643 	 */
644 	trans = start_transaction(root, num_items, TRANS_START,
645 				  BTRFS_RESERVE_FLUSH_ALL, false);
646 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
647 		return trans;
648 
649 	trans = btrfs_start_transaction(root, 0);
650 	if (IS_ERR(trans))
651 		return trans;
652 
653 	num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
654 	ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
655 				       num_bytes, min_factor);
656 	if (ret) {
657 		btrfs_end_transaction(trans);
658 		return ERR_PTR(ret);
659 	}
660 
661 	trans->block_rsv = &fs_info->trans_block_rsv;
662 	trans->bytes_reserved = num_bytes;
663 	trace_btrfs_space_reservation(fs_info, "transaction",
664 				      trans->transid, num_bytes, 1);
665 
666 	return trans;
667 }
668 
669 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
670 {
671 	return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
672 				 true);
673 }
674 
675 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
676 {
677 	return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
678 				 BTRFS_RESERVE_NO_FLUSH, true);
679 }
680 
681 /*
682  * btrfs_attach_transaction() - catch the running transaction
683  *
684  * It is used when we want to commit the current the transaction, but
685  * don't want to start a new one.
686  *
687  * Note: If this function return -ENOENT, it just means there is no
688  * running transaction. But it is possible that the inactive transaction
689  * is still in the memory, not fully on disk. If you hope there is no
690  * inactive transaction in the fs when -ENOENT is returned, you should
691  * invoke
692  *     btrfs_attach_transaction_barrier()
693  */
694 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
695 {
696 	return start_transaction(root, 0, TRANS_ATTACH,
697 				 BTRFS_RESERVE_NO_FLUSH, true);
698 }
699 
700 /*
701  * btrfs_attach_transaction_barrier() - catch the running transaction
702  *
703  * It is similar to the above function, the difference is this one
704  * will wait for all the inactive transactions until they fully
705  * complete.
706  */
707 struct btrfs_trans_handle *
708 btrfs_attach_transaction_barrier(struct btrfs_root *root)
709 {
710 	struct btrfs_trans_handle *trans;
711 
712 	trans = start_transaction(root, 0, TRANS_ATTACH,
713 				  BTRFS_RESERVE_NO_FLUSH, true);
714 	if (trans == ERR_PTR(-ENOENT))
715 		btrfs_wait_for_commit(root->fs_info, 0);
716 
717 	return trans;
718 }
719 
720 /* wait for a transaction commit to be fully complete */
721 static noinline void wait_for_commit(struct btrfs_transaction *commit)
722 {
723 	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
724 }
725 
726 int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
727 {
728 	struct btrfs_transaction *cur_trans = NULL, *t;
729 	int ret = 0;
730 
731 	if (transid) {
732 		if (transid <= fs_info->last_trans_committed)
733 			goto out;
734 
735 		/* find specified transaction */
736 		spin_lock(&fs_info->trans_lock);
737 		list_for_each_entry(t, &fs_info->trans_list, list) {
738 			if (t->transid == transid) {
739 				cur_trans = t;
740 				refcount_inc(&cur_trans->use_count);
741 				ret = 0;
742 				break;
743 			}
744 			if (t->transid > transid) {
745 				ret = 0;
746 				break;
747 			}
748 		}
749 		spin_unlock(&fs_info->trans_lock);
750 
751 		/*
752 		 * The specified transaction doesn't exist, or we
753 		 * raced with btrfs_commit_transaction
754 		 */
755 		if (!cur_trans) {
756 			if (transid > fs_info->last_trans_committed)
757 				ret = -EINVAL;
758 			goto out;
759 		}
760 	} else {
761 		/* find newest transaction that is committing | committed */
762 		spin_lock(&fs_info->trans_lock);
763 		list_for_each_entry_reverse(t, &fs_info->trans_list,
764 					    list) {
765 			if (t->state >= TRANS_STATE_COMMIT_START) {
766 				if (t->state == TRANS_STATE_COMPLETED)
767 					break;
768 				cur_trans = t;
769 				refcount_inc(&cur_trans->use_count);
770 				break;
771 			}
772 		}
773 		spin_unlock(&fs_info->trans_lock);
774 		if (!cur_trans)
775 			goto out;  /* nothing committing|committed */
776 	}
777 
778 	wait_for_commit(cur_trans);
779 	btrfs_put_transaction(cur_trans);
780 out:
781 	return ret;
782 }
783 
784 void btrfs_throttle(struct btrfs_fs_info *fs_info)
785 {
786 	wait_current_trans(fs_info);
787 }
788 
789 static int should_end_transaction(struct btrfs_trans_handle *trans)
790 {
791 	struct btrfs_fs_info *fs_info = trans->fs_info;
792 
793 	if (btrfs_check_space_for_delayed_refs(fs_info))
794 		return 1;
795 
796 	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
797 }
798 
799 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
800 {
801 	struct btrfs_transaction *cur_trans = trans->transaction;
802 
803 	smp_mb();
804 	if (cur_trans->state >= TRANS_STATE_BLOCKED ||
805 	    cur_trans->delayed_refs.flushing)
806 		return 1;
807 
808 	return should_end_transaction(trans);
809 }
810 
811 static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
812 
813 {
814 	struct btrfs_fs_info *fs_info = trans->fs_info;
815 
816 	if (!trans->block_rsv) {
817 		ASSERT(!trans->bytes_reserved);
818 		return;
819 	}
820 
821 	if (!trans->bytes_reserved)
822 		return;
823 
824 	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
825 	trace_btrfs_space_reservation(fs_info, "transaction",
826 				      trans->transid, trans->bytes_reserved, 0);
827 	btrfs_block_rsv_release(fs_info, trans->block_rsv,
828 				trans->bytes_reserved);
829 	trans->bytes_reserved = 0;
830 }
831 
832 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
833 				   int throttle)
834 {
835 	struct btrfs_fs_info *info = trans->fs_info;
836 	struct btrfs_transaction *cur_trans = trans->transaction;
837 	int lock = (trans->type != TRANS_JOIN_NOLOCK);
838 	int err = 0;
839 
840 	if (refcount_read(&trans->use_count) > 1) {
841 		refcount_dec(&trans->use_count);
842 		trans->block_rsv = trans->orig_rsv;
843 		return 0;
844 	}
845 
846 	btrfs_trans_release_metadata(trans);
847 	trans->block_rsv = NULL;
848 
849 	btrfs_create_pending_block_groups(trans);
850 
851 	btrfs_trans_release_chunk_metadata(trans);
852 
853 	if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
854 		if (throttle)
855 			return btrfs_commit_transaction(trans);
856 		else
857 			wake_up_process(info->transaction_kthread);
858 	}
859 
860 	if (trans->type & __TRANS_FREEZABLE)
861 		sb_end_intwrite(info->sb);
862 
863 	WARN_ON(cur_trans != info->running_transaction);
864 	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
865 	atomic_dec(&cur_trans->num_writers);
866 	extwriter_counter_dec(cur_trans, trans->type);
867 
868 	cond_wake_up(&cur_trans->writer_wait);
869 	btrfs_put_transaction(cur_trans);
870 
871 	if (current->journal_info == trans)
872 		current->journal_info = NULL;
873 
874 	if (throttle)
875 		btrfs_run_delayed_iputs(info);
876 
877 	if (trans->aborted ||
878 	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
879 		wake_up_process(info->transaction_kthread);
880 		err = -EIO;
881 	}
882 
883 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
884 	return err;
885 }
886 
887 int btrfs_end_transaction(struct btrfs_trans_handle *trans)
888 {
889 	return __btrfs_end_transaction(trans, 0);
890 }
891 
892 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
893 {
894 	return __btrfs_end_transaction(trans, 1);
895 }
896 
897 /*
898  * when btree blocks are allocated, they have some corresponding bits set for
899  * them in one of two extent_io trees.  This is used to make sure all of
900  * those extents are sent to disk but does not wait on them
901  */
902 int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
903 			       struct extent_io_tree *dirty_pages, int mark)
904 {
905 	int err = 0;
906 	int werr = 0;
907 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
908 	struct extent_state *cached_state = NULL;
909 	u64 start = 0;
910 	u64 end;
911 
912 	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
913 	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
914 				      mark, &cached_state)) {
915 		bool wait_writeback = false;
916 
917 		err = convert_extent_bit(dirty_pages, start, end,
918 					 EXTENT_NEED_WAIT,
919 					 mark, &cached_state);
920 		/*
921 		 * convert_extent_bit can return -ENOMEM, which is most of the
922 		 * time a temporary error. So when it happens, ignore the error
923 		 * and wait for writeback of this range to finish - because we
924 		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
925 		 * to __btrfs_wait_marked_extents() would not know that
926 		 * writeback for this range started and therefore wouldn't
927 		 * wait for it to finish - we don't want to commit a
928 		 * superblock that points to btree nodes/leafs for which
929 		 * writeback hasn't finished yet (and without errors).
930 		 * We cleanup any entries left in the io tree when committing
931 		 * the transaction (through clear_btree_io_tree()).
932 		 */
933 		if (err == -ENOMEM) {
934 			err = 0;
935 			wait_writeback = true;
936 		}
937 		if (!err)
938 			err = filemap_fdatawrite_range(mapping, start, end);
939 		if (err)
940 			werr = err;
941 		else if (wait_writeback)
942 			werr = filemap_fdatawait_range(mapping, start, end);
943 		free_extent_state(cached_state);
944 		cached_state = NULL;
945 		cond_resched();
946 		start = end + 1;
947 	}
948 	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
949 	return werr;
950 }
951 
952 /*
953  * when btree blocks are allocated, they have some corresponding bits set for
954  * them in one of two extent_io trees.  This is used to make sure all of
955  * those extents are on disk for transaction or log commit.  We wait
956  * on all the pages and clear them from the dirty pages state tree
957  */
958 static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
959 				       struct extent_io_tree *dirty_pages)
960 {
961 	int err = 0;
962 	int werr = 0;
963 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
964 	struct extent_state *cached_state = NULL;
965 	u64 start = 0;
966 	u64 end;
967 
968 	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
969 				      EXTENT_NEED_WAIT, &cached_state)) {
970 		/*
971 		 * Ignore -ENOMEM errors returned by clear_extent_bit().
972 		 * When committing the transaction, we'll remove any entries
973 		 * left in the io tree. For a log commit, we don't remove them
974 		 * after committing the log because the tree can be accessed
975 		 * concurrently - we do it only at transaction commit time when
976 		 * it's safe to do it (through clear_btree_io_tree()).
977 		 */
978 		err = clear_extent_bit(dirty_pages, start, end,
979 				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
980 		if (err == -ENOMEM)
981 			err = 0;
982 		if (!err)
983 			err = filemap_fdatawait_range(mapping, start, end);
984 		if (err)
985 			werr = err;
986 		free_extent_state(cached_state);
987 		cached_state = NULL;
988 		cond_resched();
989 		start = end + 1;
990 	}
991 	if (err)
992 		werr = err;
993 	return werr;
994 }
995 
996 int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
997 		       struct extent_io_tree *dirty_pages)
998 {
999 	bool errors = false;
1000 	int err;
1001 
1002 	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1003 	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
1004 		errors = true;
1005 
1006 	if (errors && !err)
1007 		err = -EIO;
1008 	return err;
1009 }
1010 
1011 int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
1012 {
1013 	struct btrfs_fs_info *fs_info = log_root->fs_info;
1014 	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
1015 	bool errors = false;
1016 	int err;
1017 
1018 	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
1019 
1020 	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1021 	if ((mark & EXTENT_DIRTY) &&
1022 	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
1023 		errors = true;
1024 
1025 	if ((mark & EXTENT_NEW) &&
1026 	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
1027 		errors = true;
1028 
1029 	if (errors && !err)
1030 		err = -EIO;
1031 	return err;
1032 }
1033 
1034 /*
1035  * When btree blocks are allocated the corresponding extents are marked dirty.
1036  * This function ensures such extents are persisted on disk for transaction or
1037  * log commit.
1038  *
1039  * @trans: transaction whose dirty pages we'd like to write
1040  */
1041 static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
1042 {
1043 	int ret;
1044 	int ret2;
1045 	struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
1046 	struct btrfs_fs_info *fs_info = trans->fs_info;
1047 	struct blk_plug plug;
1048 
1049 	blk_start_plug(&plug);
1050 	ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
1051 	blk_finish_plug(&plug);
1052 	ret2 = btrfs_wait_extents(fs_info, dirty_pages);
1053 
1054 	clear_btree_io_tree(&trans->transaction->dirty_pages);
1055 
1056 	if (ret)
1057 		return ret;
1058 	else if (ret2)
1059 		return ret2;
1060 	else
1061 		return 0;
1062 }
1063 
1064 /*
1065  * this is used to update the root pointer in the tree of tree roots.
1066  *
1067  * But, in the case of the extent allocation tree, updating the root
1068  * pointer may allocate blocks which may change the root of the extent
1069  * allocation tree.
1070  *
1071  * So, this loops and repeats and makes sure the cowonly root didn't
1072  * change while the root pointer was being updated in the metadata.
1073  */
1074 static int update_cowonly_root(struct btrfs_trans_handle *trans,
1075 			       struct btrfs_root *root)
1076 {
1077 	int ret;
1078 	u64 old_root_bytenr;
1079 	u64 old_root_used;
1080 	struct btrfs_fs_info *fs_info = root->fs_info;
1081 	struct btrfs_root *tree_root = fs_info->tree_root;
1082 
1083 	old_root_used = btrfs_root_used(&root->root_item);
1084 
1085 	while (1) {
1086 		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
1087 		if (old_root_bytenr == root->node->start &&
1088 		    old_root_used == btrfs_root_used(&root->root_item))
1089 			break;
1090 
1091 		btrfs_set_root_node(&root->root_item, root->node);
1092 		ret = btrfs_update_root(trans, tree_root,
1093 					&root->root_key,
1094 					&root->root_item);
1095 		if (ret)
1096 			return ret;
1097 
1098 		old_root_used = btrfs_root_used(&root->root_item);
1099 	}
1100 
1101 	return 0;
1102 }
1103 
1104 /*
1105  * update all the cowonly tree roots on disk
1106  *
1107  * The error handling in this function may not be obvious. Any of the
1108  * failures will cause the file system to go offline. We still need
1109  * to clean up the delayed refs.
1110  */
1111 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
1112 {
1113 	struct btrfs_fs_info *fs_info = trans->fs_info;
1114 	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
1115 	struct list_head *io_bgs = &trans->transaction->io_bgs;
1116 	struct list_head *next;
1117 	struct extent_buffer *eb;
1118 	int ret;
1119 
1120 	eb = btrfs_lock_root_node(fs_info->tree_root);
1121 	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
1122 			      0, &eb);
1123 	btrfs_tree_unlock(eb);
1124 	free_extent_buffer(eb);
1125 
1126 	if (ret)
1127 		return ret;
1128 
1129 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1130 	if (ret)
1131 		return ret;
1132 
1133 	ret = btrfs_run_dev_stats(trans, fs_info);
1134 	if (ret)
1135 		return ret;
1136 	ret = btrfs_run_dev_replace(trans, fs_info);
1137 	if (ret)
1138 		return ret;
1139 	ret = btrfs_run_qgroups(trans);
1140 	if (ret)
1141 		return ret;
1142 
1143 	ret = btrfs_setup_space_cache(trans, fs_info);
1144 	if (ret)
1145 		return ret;
1146 
1147 	/* run_qgroups might have added some more refs */
1148 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1149 	if (ret)
1150 		return ret;
1151 again:
1152 	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1153 		struct btrfs_root *root;
1154 		next = fs_info->dirty_cowonly_roots.next;
1155 		list_del_init(next);
1156 		root = list_entry(next, struct btrfs_root, dirty_list);
1157 		clear_bit(BTRFS_ROOT_DIRTY, &root->state);
1158 
1159 		if (root != fs_info->extent_root)
1160 			list_add_tail(&root->dirty_list,
1161 				      &trans->transaction->switch_commits);
1162 		ret = update_cowonly_root(trans, root);
1163 		if (ret)
1164 			return ret;
1165 		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1166 		if (ret)
1167 			return ret;
1168 	}
1169 
1170 	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
1171 		ret = btrfs_write_dirty_block_groups(trans, fs_info);
1172 		if (ret)
1173 			return ret;
1174 		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1175 		if (ret)
1176 			return ret;
1177 	}
1178 
1179 	if (!list_empty(&fs_info->dirty_cowonly_roots))
1180 		goto again;
1181 
1182 	list_add_tail(&fs_info->extent_root->dirty_list,
1183 		      &trans->transaction->switch_commits);
1184 
1185 	/* Update dev-replace pointer once everything is committed */
1186 	fs_info->dev_replace.committed_cursor_left =
1187 		fs_info->dev_replace.cursor_left_last_write_of_item;
1188 
1189 	return 0;
1190 }
1191 
1192 /*
1193  * dead roots are old snapshots that need to be deleted.  This allocates
1194  * a dirty root struct and adds it into the list of dead roots that need to
1195  * be deleted
1196  */
1197 void btrfs_add_dead_root(struct btrfs_root *root)
1198 {
1199 	struct btrfs_fs_info *fs_info = root->fs_info;
1200 
1201 	spin_lock(&fs_info->trans_lock);
1202 	if (list_empty(&root->root_list))
1203 		list_add_tail(&root->root_list, &fs_info->dead_roots);
1204 	spin_unlock(&fs_info->trans_lock);
1205 }
1206 
1207 /*
1208  * update all the cowonly tree roots on disk
1209  */
1210 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
1211 {
1212 	struct btrfs_fs_info *fs_info = trans->fs_info;
1213 	struct btrfs_root *gang[8];
1214 	int i;
1215 	int ret;
1216 	int err = 0;
1217 
1218 	spin_lock(&fs_info->fs_roots_radix_lock);
1219 	while (1) {
1220 		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1221 						 (void **)gang, 0,
1222 						 ARRAY_SIZE(gang),
1223 						 BTRFS_ROOT_TRANS_TAG);
1224 		if (ret == 0)
1225 			break;
1226 		for (i = 0; i < ret; i++) {
1227 			struct btrfs_root *root = gang[i];
1228 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
1229 					(unsigned long)root->root_key.objectid,
1230 					BTRFS_ROOT_TRANS_TAG);
1231 			spin_unlock(&fs_info->fs_roots_radix_lock);
1232 
1233 			btrfs_free_log(trans, root);
1234 			btrfs_update_reloc_root(trans, root);
1235 
1236 			btrfs_save_ino_cache(root, trans);
1237 
1238 			/* see comments in should_cow_block() */
1239 			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1240 			smp_mb__after_atomic();
1241 
1242 			if (root->commit_root != root->node) {
1243 				list_add_tail(&root->dirty_list,
1244 					&trans->transaction->switch_commits);
1245 				btrfs_set_root_node(&root->root_item,
1246 						    root->node);
1247 			}
1248 
1249 			err = btrfs_update_root(trans, fs_info->tree_root,
1250 						&root->root_key,
1251 						&root->root_item);
1252 			spin_lock(&fs_info->fs_roots_radix_lock);
1253 			if (err)
1254 				break;
1255 			btrfs_qgroup_free_meta_all_pertrans(root);
1256 		}
1257 	}
1258 	spin_unlock(&fs_info->fs_roots_radix_lock);
1259 	return err;
1260 }
1261 
1262 /*
1263  * defrag a given btree.
1264  * Every leaf in the btree is read and defragged.
1265  */
1266 int btrfs_defrag_root(struct btrfs_root *root)
1267 {
1268 	struct btrfs_fs_info *info = root->fs_info;
1269 	struct btrfs_trans_handle *trans;
1270 	int ret;
1271 
1272 	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
1273 		return 0;
1274 
1275 	while (1) {
1276 		trans = btrfs_start_transaction(root, 0);
1277 		if (IS_ERR(trans))
1278 			return PTR_ERR(trans);
1279 
1280 		ret = btrfs_defrag_leaves(trans, root);
1281 
1282 		btrfs_end_transaction(trans);
1283 		btrfs_btree_balance_dirty(info);
1284 		cond_resched();
1285 
1286 		if (btrfs_fs_closing(info) || ret != -EAGAIN)
1287 			break;
1288 
1289 		if (btrfs_defrag_cancelled(info)) {
1290 			btrfs_debug(info, "defrag_root cancelled");
1291 			ret = -EAGAIN;
1292 			break;
1293 		}
1294 	}
1295 	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
1296 	return ret;
1297 }
1298 
1299 /*
1300  * Do all special snapshot related qgroup dirty hack.
1301  *
1302  * Will do all needed qgroup inherit and dirty hack like switch commit
1303  * roots inside one transaction and write all btree into disk, to make
1304  * qgroup works.
1305  */
1306 static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1307 				   struct btrfs_root *src,
1308 				   struct btrfs_root *parent,
1309 				   struct btrfs_qgroup_inherit *inherit,
1310 				   u64 dst_objectid)
1311 {
1312 	struct btrfs_fs_info *fs_info = src->fs_info;
1313 	int ret;
1314 
1315 	/*
1316 	 * Save some performance in the case that qgroups are not
1317 	 * enabled. If this check races with the ioctl, rescan will
1318 	 * kick in anyway.
1319 	 */
1320 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1321 		return 0;
1322 
1323 	/*
1324 	 * Ensure dirty @src will be committed.  Or, after coming
1325 	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
1326 	 * recorded root will never be updated again, causing an outdated root
1327 	 * item.
1328 	 */
1329 	record_root_in_trans(trans, src, 1);
1330 
1331 	/*
1332 	 * We are going to commit transaction, see btrfs_commit_transaction()
1333 	 * comment for reason locking tree_log_mutex
1334 	 */
1335 	mutex_lock(&fs_info->tree_log_mutex);
1336 
1337 	ret = commit_fs_roots(trans);
1338 	if (ret)
1339 		goto out;
1340 	ret = btrfs_qgroup_account_extents(trans);
1341 	if (ret < 0)
1342 		goto out;
1343 
1344 	/* Now qgroup are all updated, we can inherit it to new qgroups */
1345 	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
1346 				   inherit);
1347 	if (ret < 0)
1348 		goto out;
1349 
1350 	/*
1351 	 * Now we do a simplified commit transaction, which will:
1352 	 * 1) commit all subvolume and extent tree
1353 	 *    To ensure all subvolume and extent tree have a valid
1354 	 *    commit_root to accounting later insert_dir_item()
1355 	 * 2) write all btree blocks onto disk
1356 	 *    This is to make sure later btree modification will be cowed
1357 	 *    Or commit_root can be populated and cause wrong qgroup numbers
1358 	 * In this simplified commit, we don't really care about other trees
1359 	 * like chunk and root tree, as they won't affect qgroup.
1360 	 * And we don't write super to avoid half committed status.
1361 	 */
1362 	ret = commit_cowonly_roots(trans);
1363 	if (ret)
1364 		goto out;
1365 	switch_commit_roots(trans->transaction);
1366 	ret = btrfs_write_and_wait_transaction(trans);
1367 	if (ret)
1368 		btrfs_handle_fs_error(fs_info, ret,
1369 			"Error while writing out transaction for qgroup");
1370 
1371 out:
1372 	mutex_unlock(&fs_info->tree_log_mutex);
1373 
1374 	/*
1375 	 * Force parent root to be updated, as we recorded it before so its
1376 	 * last_trans == cur_transid.
1377 	 * Or it won't be committed again onto disk after later
1378 	 * insert_dir_item()
1379 	 */
1380 	if (!ret)
1381 		record_root_in_trans(trans, parent, 1);
1382 	return ret;
1383 }
1384 
1385 /*
1386  * new snapshots need to be created at a very specific time in the
1387  * transaction commit.  This does the actual creation.
1388  *
1389  * Note:
1390  * If the error which may affect the commitment of the current transaction
1391  * happens, we should return the error number. If the error which just affect
1392  * the creation of the pending snapshots, just return 0.
1393  */
1394 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1395 				   struct btrfs_pending_snapshot *pending)
1396 {
1397 
1398 	struct btrfs_fs_info *fs_info = trans->fs_info;
1399 	struct btrfs_key key;
1400 	struct btrfs_root_item *new_root_item;
1401 	struct btrfs_root *tree_root = fs_info->tree_root;
1402 	struct btrfs_root *root = pending->root;
1403 	struct btrfs_root *parent_root;
1404 	struct btrfs_block_rsv *rsv;
1405 	struct inode *parent_inode;
1406 	struct btrfs_path *path;
1407 	struct btrfs_dir_item *dir_item;
1408 	struct dentry *dentry;
1409 	struct extent_buffer *tmp;
1410 	struct extent_buffer *old;
1411 	struct timespec64 cur_time;
1412 	int ret = 0;
1413 	u64 to_reserve = 0;
1414 	u64 index = 0;
1415 	u64 objectid;
1416 	u64 root_flags;
1417 	uuid_le new_uuid;
1418 
1419 	ASSERT(pending->path);
1420 	path = pending->path;
1421 
1422 	ASSERT(pending->root_item);
1423 	new_root_item = pending->root_item;
1424 
1425 	pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1426 	if (pending->error)
1427 		goto no_free_objectid;
1428 
1429 	/*
1430 	 * Make qgroup to skip current new snapshot's qgroupid, as it is
1431 	 * accounted by later btrfs_qgroup_inherit().
1432 	 */
1433 	btrfs_set_skip_qgroup(trans, objectid);
1434 
1435 	btrfs_reloc_pre_snapshot(pending, &to_reserve);
1436 
1437 	if (to_reserve > 0) {
1438 		pending->error = btrfs_block_rsv_add(root,
1439 						     &pending->block_rsv,
1440 						     to_reserve,
1441 						     BTRFS_RESERVE_NO_FLUSH);
1442 		if (pending->error)
1443 			goto clear_skip_qgroup;
1444 	}
1445 
1446 	key.objectid = objectid;
1447 	key.offset = (u64)-1;
1448 	key.type = BTRFS_ROOT_ITEM_KEY;
1449 
1450 	rsv = trans->block_rsv;
1451 	trans->block_rsv = &pending->block_rsv;
1452 	trans->bytes_reserved = trans->block_rsv->reserved;
1453 	trace_btrfs_space_reservation(fs_info, "transaction",
1454 				      trans->transid,
1455 				      trans->bytes_reserved, 1);
1456 	dentry = pending->dentry;
1457 	parent_inode = pending->dir;
1458 	parent_root = BTRFS_I(parent_inode)->root;
1459 	record_root_in_trans(trans, parent_root, 0);
1460 
1461 	cur_time = current_time(parent_inode);
1462 
1463 	/*
1464 	 * insert the directory item
1465 	 */
1466 	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
1467 	BUG_ON(ret); /* -ENOMEM */
1468 
1469 	/* check if there is a file/dir which has the same name. */
1470 	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1471 					 btrfs_ino(BTRFS_I(parent_inode)),
1472 					 dentry->d_name.name,
1473 					 dentry->d_name.len, 0);
1474 	if (dir_item != NULL && !IS_ERR(dir_item)) {
1475 		pending->error = -EEXIST;
1476 		goto dir_item_existed;
1477 	} else if (IS_ERR(dir_item)) {
1478 		ret = PTR_ERR(dir_item);
1479 		btrfs_abort_transaction(trans, ret);
1480 		goto fail;
1481 	}
1482 	btrfs_release_path(path);
1483 
1484 	/*
1485 	 * pull in the delayed directory update
1486 	 * and the delayed inode item
1487 	 * otherwise we corrupt the FS during
1488 	 * snapshot
1489 	 */
1490 	ret = btrfs_run_delayed_items(trans);
1491 	if (ret) {	/* Transaction aborted */
1492 		btrfs_abort_transaction(trans, ret);
1493 		goto fail;
1494 	}
1495 
1496 	record_root_in_trans(trans, root, 0);
1497 	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1498 	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1499 	btrfs_check_and_init_root_item(new_root_item);
1500 
1501 	root_flags = btrfs_root_flags(new_root_item);
1502 	if (pending->readonly)
1503 		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1504 	else
1505 		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1506 	btrfs_set_root_flags(new_root_item, root_flags);
1507 
1508 	btrfs_set_root_generation_v2(new_root_item,
1509 			trans->transid);
1510 	uuid_le_gen(&new_uuid);
1511 	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1512 	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1513 			BTRFS_UUID_SIZE);
1514 	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1515 		memset(new_root_item->received_uuid, 0,
1516 		       sizeof(new_root_item->received_uuid));
1517 		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1518 		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1519 		btrfs_set_root_stransid(new_root_item, 0);
1520 		btrfs_set_root_rtransid(new_root_item, 0);
1521 	}
1522 	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1523 	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1524 	btrfs_set_root_otransid(new_root_item, trans->transid);
1525 
1526 	old = btrfs_lock_root_node(root);
1527 	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1528 	if (ret) {
1529 		btrfs_tree_unlock(old);
1530 		free_extent_buffer(old);
1531 		btrfs_abort_transaction(trans, ret);
1532 		goto fail;
1533 	}
1534 
1535 	btrfs_set_lock_blocking_write(old);
1536 
1537 	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1538 	/* clean up in any case */
1539 	btrfs_tree_unlock(old);
1540 	free_extent_buffer(old);
1541 	if (ret) {
1542 		btrfs_abort_transaction(trans, ret);
1543 		goto fail;
1544 	}
1545 	/* see comments in should_cow_block() */
1546 	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1547 	smp_wmb();
1548 
1549 	btrfs_set_root_node(new_root_item, tmp);
1550 	/* record when the snapshot was created in key.offset */
1551 	key.offset = trans->transid;
1552 	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1553 	btrfs_tree_unlock(tmp);
1554 	free_extent_buffer(tmp);
1555 	if (ret) {
1556 		btrfs_abort_transaction(trans, ret);
1557 		goto fail;
1558 	}
1559 
1560 	/*
1561 	 * insert root back/forward references
1562 	 */
1563 	ret = btrfs_add_root_ref(trans, objectid,
1564 				 parent_root->root_key.objectid,
1565 				 btrfs_ino(BTRFS_I(parent_inode)), index,
1566 				 dentry->d_name.name, dentry->d_name.len);
1567 	if (ret) {
1568 		btrfs_abort_transaction(trans, ret);
1569 		goto fail;
1570 	}
1571 
1572 	key.offset = (u64)-1;
1573 	pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
1574 	if (IS_ERR(pending->snap)) {
1575 		ret = PTR_ERR(pending->snap);
1576 		btrfs_abort_transaction(trans, ret);
1577 		goto fail;
1578 	}
1579 
1580 	ret = btrfs_reloc_post_snapshot(trans, pending);
1581 	if (ret) {
1582 		btrfs_abort_transaction(trans, ret);
1583 		goto fail;
1584 	}
1585 
1586 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1587 	if (ret) {
1588 		btrfs_abort_transaction(trans, ret);
1589 		goto fail;
1590 	}
1591 
1592 	/*
1593 	 * Do special qgroup accounting for snapshot, as we do some qgroup
1594 	 * snapshot hack to do fast snapshot.
1595 	 * To co-operate with that hack, we do hack again.
1596 	 * Or snapshot will be greatly slowed down by a subtree qgroup rescan
1597 	 */
1598 	ret = qgroup_account_snapshot(trans, root, parent_root,
1599 				      pending->inherit, objectid);
1600 	if (ret < 0)
1601 		goto fail;
1602 
1603 	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
1604 				    dentry->d_name.len, BTRFS_I(parent_inode),
1605 				    &key, BTRFS_FT_DIR, index);
1606 	/* We have check then name at the beginning, so it is impossible. */
1607 	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1608 	if (ret) {
1609 		btrfs_abort_transaction(trans, ret);
1610 		goto fail;
1611 	}
1612 
1613 	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
1614 					 dentry->d_name.len * 2);
1615 	parent_inode->i_mtime = parent_inode->i_ctime =
1616 		current_time(parent_inode);
1617 	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1618 	if (ret) {
1619 		btrfs_abort_transaction(trans, ret);
1620 		goto fail;
1621 	}
1622 	ret = btrfs_uuid_tree_add(trans, new_uuid.b, BTRFS_UUID_KEY_SUBVOL,
1623 				  objectid);
1624 	if (ret) {
1625 		btrfs_abort_transaction(trans, ret);
1626 		goto fail;
1627 	}
1628 	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1629 		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1630 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1631 					  objectid);
1632 		if (ret && ret != -EEXIST) {
1633 			btrfs_abort_transaction(trans, ret);
1634 			goto fail;
1635 		}
1636 	}
1637 
1638 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1639 	if (ret) {
1640 		btrfs_abort_transaction(trans, ret);
1641 		goto fail;
1642 	}
1643 
1644 fail:
1645 	pending->error = ret;
1646 dir_item_existed:
1647 	trans->block_rsv = rsv;
1648 	trans->bytes_reserved = 0;
1649 clear_skip_qgroup:
1650 	btrfs_clear_skip_qgroup(trans);
1651 no_free_objectid:
1652 	kfree(new_root_item);
1653 	pending->root_item = NULL;
1654 	btrfs_free_path(path);
1655 	pending->path = NULL;
1656 
1657 	return ret;
1658 }
1659 
1660 /*
1661  * create all the snapshots we've scheduled for creation
1662  */
1663 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1664 {
1665 	struct btrfs_pending_snapshot *pending, *next;
1666 	struct list_head *head = &trans->transaction->pending_snapshots;
1667 	int ret = 0;
1668 
1669 	list_for_each_entry_safe(pending, next, head, list) {
1670 		list_del(&pending->list);
1671 		ret = create_pending_snapshot(trans, pending);
1672 		if (ret)
1673 			break;
1674 	}
1675 	return ret;
1676 }
1677 
1678 static void update_super_roots(struct btrfs_fs_info *fs_info)
1679 {
1680 	struct btrfs_root_item *root_item;
1681 	struct btrfs_super_block *super;
1682 
1683 	super = fs_info->super_copy;
1684 
1685 	root_item = &fs_info->chunk_root->root_item;
1686 	super->chunk_root = root_item->bytenr;
1687 	super->chunk_root_generation = root_item->generation;
1688 	super->chunk_root_level = root_item->level;
1689 
1690 	root_item = &fs_info->tree_root->root_item;
1691 	super->root = root_item->bytenr;
1692 	super->generation = root_item->generation;
1693 	super->root_level = root_item->level;
1694 	if (btrfs_test_opt(fs_info, SPACE_CACHE))
1695 		super->cache_generation = root_item->generation;
1696 	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1697 		super->uuid_tree_generation = root_item->generation;
1698 }
1699 
1700 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1701 {
1702 	struct btrfs_transaction *trans;
1703 	int ret = 0;
1704 
1705 	spin_lock(&info->trans_lock);
1706 	trans = info->running_transaction;
1707 	if (trans)
1708 		ret = (trans->state >= TRANS_STATE_COMMIT_START);
1709 	spin_unlock(&info->trans_lock);
1710 	return ret;
1711 }
1712 
1713 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1714 {
1715 	struct btrfs_transaction *trans;
1716 	int ret = 0;
1717 
1718 	spin_lock(&info->trans_lock);
1719 	trans = info->running_transaction;
1720 	if (trans)
1721 		ret = is_transaction_blocked(trans);
1722 	spin_unlock(&info->trans_lock);
1723 	return ret;
1724 }
1725 
1726 /*
1727  * wait for the current transaction commit to start and block subsequent
1728  * transaction joins
1729  */
1730 static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
1731 					    struct btrfs_transaction *trans)
1732 {
1733 	wait_event(fs_info->transaction_blocked_wait,
1734 		   trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
1735 }
1736 
1737 /*
1738  * wait for the current transaction to start and then become unblocked.
1739  * caller holds ref.
1740  */
1741 static void wait_current_trans_commit_start_and_unblock(
1742 					struct btrfs_fs_info *fs_info,
1743 					struct btrfs_transaction *trans)
1744 {
1745 	wait_event(fs_info->transaction_wait,
1746 		   trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
1747 }
1748 
1749 /*
1750  * commit transactions asynchronously. once btrfs_commit_transaction_async
1751  * returns, any subsequent transaction will not be allowed to join.
1752  */
1753 struct btrfs_async_commit {
1754 	struct btrfs_trans_handle *newtrans;
1755 	struct work_struct work;
1756 };
1757 
1758 static void do_async_commit(struct work_struct *work)
1759 {
1760 	struct btrfs_async_commit *ac =
1761 		container_of(work, struct btrfs_async_commit, work);
1762 
1763 	/*
1764 	 * We've got freeze protection passed with the transaction.
1765 	 * Tell lockdep about it.
1766 	 */
1767 	if (ac->newtrans->type & __TRANS_FREEZABLE)
1768 		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
1769 
1770 	current->journal_info = ac->newtrans;
1771 
1772 	btrfs_commit_transaction(ac->newtrans);
1773 	kfree(ac);
1774 }
1775 
1776 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1777 				   int wait_for_unblock)
1778 {
1779 	struct btrfs_fs_info *fs_info = trans->fs_info;
1780 	struct btrfs_async_commit *ac;
1781 	struct btrfs_transaction *cur_trans;
1782 
1783 	ac = kmalloc(sizeof(*ac), GFP_NOFS);
1784 	if (!ac)
1785 		return -ENOMEM;
1786 
1787 	INIT_WORK(&ac->work, do_async_commit);
1788 	ac->newtrans = btrfs_join_transaction(trans->root);
1789 	if (IS_ERR(ac->newtrans)) {
1790 		int err = PTR_ERR(ac->newtrans);
1791 		kfree(ac);
1792 		return err;
1793 	}
1794 
1795 	/* take transaction reference */
1796 	cur_trans = trans->transaction;
1797 	refcount_inc(&cur_trans->use_count);
1798 
1799 	btrfs_end_transaction(trans);
1800 
1801 	/*
1802 	 * Tell lockdep we've released the freeze rwsem, since the
1803 	 * async commit thread will be the one to unlock it.
1804 	 */
1805 	if (ac->newtrans->type & __TRANS_FREEZABLE)
1806 		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);
1807 
1808 	schedule_work(&ac->work);
1809 
1810 	/* wait for transaction to start and unblock */
1811 	if (wait_for_unblock)
1812 		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
1813 	else
1814 		wait_current_trans_commit_start(fs_info, cur_trans);
1815 
1816 	if (current->journal_info == trans)
1817 		current->journal_info = NULL;
1818 
1819 	btrfs_put_transaction(cur_trans);
1820 	return 0;
1821 }
1822 
1823 
1824 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1825 {
1826 	struct btrfs_fs_info *fs_info = trans->fs_info;
1827 	struct btrfs_transaction *cur_trans = trans->transaction;
1828 
1829 	WARN_ON(refcount_read(&trans->use_count) > 1);
1830 
1831 	btrfs_abort_transaction(trans, err);
1832 
1833 	spin_lock(&fs_info->trans_lock);
1834 
1835 	/*
1836 	 * If the transaction is removed from the list, it means this
1837 	 * transaction has been committed successfully, so it is impossible
1838 	 * to call the cleanup function.
1839 	 */
1840 	BUG_ON(list_empty(&cur_trans->list));
1841 
1842 	list_del_init(&cur_trans->list);
1843 	if (cur_trans == fs_info->running_transaction) {
1844 		cur_trans->state = TRANS_STATE_COMMIT_DOING;
1845 		spin_unlock(&fs_info->trans_lock);
1846 		wait_event(cur_trans->writer_wait,
1847 			   atomic_read(&cur_trans->num_writers) == 1);
1848 
1849 		spin_lock(&fs_info->trans_lock);
1850 	}
1851 	spin_unlock(&fs_info->trans_lock);
1852 
1853 	btrfs_cleanup_one_transaction(trans->transaction, fs_info);
1854 
1855 	spin_lock(&fs_info->trans_lock);
1856 	if (cur_trans == fs_info->running_transaction)
1857 		fs_info->running_transaction = NULL;
1858 	spin_unlock(&fs_info->trans_lock);
1859 
1860 	if (trans->type & __TRANS_FREEZABLE)
1861 		sb_end_intwrite(fs_info->sb);
1862 	btrfs_put_transaction(cur_trans);
1863 	btrfs_put_transaction(cur_trans);
1864 
1865 	trace_btrfs_transaction_commit(trans->root);
1866 
1867 	if (current->journal_info == trans)
1868 		current->journal_info = NULL;
1869 	btrfs_scrub_cancel(fs_info);
1870 
1871 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
1872 }
1873 
1874 /*
1875  * Release reserved delayed ref space of all pending block groups of the
1876  * transaction and remove them from the list
1877  */
1878 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1879 {
1880        struct btrfs_fs_info *fs_info = trans->fs_info;
1881        struct btrfs_block_group_cache *block_group, *tmp;
1882 
1883        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
1884                btrfs_delayed_refs_rsv_release(fs_info, 1);
1885                list_del_init(&block_group->bg_list);
1886        }
1887 }
1888 
1889 static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
1890 {
1891 	struct btrfs_fs_info *fs_info = trans->fs_info;
1892 
1893 	/*
1894 	 * We use writeback_inodes_sb here because if we used
1895 	 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
1896 	 * Currently are holding the fs freeze lock, if we do an async flush
1897 	 * we'll do btrfs_join_transaction() and deadlock because we need to
1898 	 * wait for the fs freeze lock.  Using the direct flushing we benefit
1899 	 * from already being in a transaction and our join_transaction doesn't
1900 	 * have to re-take the fs freeze lock.
1901 	 */
1902 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1903 		writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
1904 	} else {
1905 		struct btrfs_pending_snapshot *pending;
1906 		struct list_head *head = &trans->transaction->pending_snapshots;
1907 
1908 		/*
1909 		 * Flush dellaloc for any root that is going to be snapshotted.
1910 		 * This is done to avoid a corrupted version of files, in the
1911 		 * snapshots, that had both buffered and direct IO writes (even
1912 		 * if they were done sequentially) due to an unordered update of
1913 		 * the inode's size on disk.
1914 		 */
1915 		list_for_each_entry(pending, head, list) {
1916 			int ret;
1917 
1918 			ret = btrfs_start_delalloc_snapshot(pending->root);
1919 			if (ret)
1920 				return ret;
1921 		}
1922 	}
1923 	return 0;
1924 }
1925 
1926 static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
1927 {
1928 	struct btrfs_fs_info *fs_info = trans->fs_info;
1929 
1930 	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1931 		btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1932 	} else {
1933 		struct btrfs_pending_snapshot *pending;
1934 		struct list_head *head = &trans->transaction->pending_snapshots;
1935 
1936 		/*
1937 		 * Wait for any dellaloc that we started previously for the roots
1938 		 * that are going to be snapshotted. This is to avoid a corrupted
1939 		 * version of files in the snapshots that had both buffered and
1940 		 * direct IO writes (even if they were done sequentially).
1941 		 */
1942 		list_for_each_entry(pending, head, list)
1943 			btrfs_wait_ordered_extents(pending->root,
1944 						   U64_MAX, 0, U64_MAX);
1945 	}
1946 }
1947 
1948 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
1949 {
1950 	struct btrfs_fs_info *fs_info = trans->fs_info;
1951 	struct btrfs_transaction *cur_trans = trans->transaction;
1952 	struct btrfs_transaction *prev_trans = NULL;
1953 	int ret;
1954 
1955 	/* Stop the commit early if ->aborted is set */
1956 	if (unlikely(READ_ONCE(cur_trans->aborted))) {
1957 		ret = cur_trans->aborted;
1958 		btrfs_end_transaction(trans);
1959 		return ret;
1960 	}
1961 
1962 	btrfs_trans_release_metadata(trans);
1963 	trans->block_rsv = NULL;
1964 
1965 	/* make a pass through all the delayed refs we have so far
1966 	 * any runnings procs may add more while we are here
1967 	 */
1968 	ret = btrfs_run_delayed_refs(trans, 0);
1969 	if (ret) {
1970 		btrfs_end_transaction(trans);
1971 		return ret;
1972 	}
1973 
1974 	cur_trans = trans->transaction;
1975 
1976 	/*
1977 	 * set the flushing flag so procs in this transaction have to
1978 	 * start sending their work down.
1979 	 */
1980 	cur_trans->delayed_refs.flushing = 1;
1981 	smp_wmb();
1982 
1983 	btrfs_create_pending_block_groups(trans);
1984 
1985 	ret = btrfs_run_delayed_refs(trans, 0);
1986 	if (ret) {
1987 		btrfs_end_transaction(trans);
1988 		return ret;
1989 	}
1990 
1991 	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
1992 		int run_it = 0;
1993 
1994 		/* this mutex is also taken before trying to set
1995 		 * block groups readonly.  We need to make sure
1996 		 * that nobody has set a block group readonly
1997 		 * after a extents from that block group have been
1998 		 * allocated for cache files.  btrfs_set_block_group_ro
1999 		 * will wait for the transaction to commit if it
2000 		 * finds BTRFS_TRANS_DIRTY_BG_RUN set.
2001 		 *
2002 		 * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
2003 		 * only one process starts all the block group IO.  It wouldn't
2004 		 * hurt to have more than one go through, but there's no
2005 		 * real advantage to it either.
2006 		 */
2007 		mutex_lock(&fs_info->ro_block_group_mutex);
2008 		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
2009 				      &cur_trans->flags))
2010 			run_it = 1;
2011 		mutex_unlock(&fs_info->ro_block_group_mutex);
2012 
2013 		if (run_it) {
2014 			ret = btrfs_start_dirty_block_groups(trans);
2015 			if (ret) {
2016 				btrfs_end_transaction(trans);
2017 				return ret;
2018 			}
2019 		}
2020 	}
2021 
2022 	spin_lock(&fs_info->trans_lock);
2023 	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2024 		spin_unlock(&fs_info->trans_lock);
2025 		refcount_inc(&cur_trans->use_count);
2026 		ret = btrfs_end_transaction(trans);
2027 
2028 		wait_for_commit(cur_trans);
2029 
2030 		if (unlikely(cur_trans->aborted))
2031 			ret = cur_trans->aborted;
2032 
2033 		btrfs_put_transaction(cur_trans);
2034 
2035 		return ret;
2036 	}
2037 
2038 	cur_trans->state = TRANS_STATE_COMMIT_START;
2039 	wake_up(&fs_info->transaction_blocked_wait);
2040 
2041 	if (cur_trans->list.prev != &fs_info->trans_list) {
2042 		prev_trans = list_entry(cur_trans->list.prev,
2043 					struct btrfs_transaction, list);
2044 		if (prev_trans->state != TRANS_STATE_COMPLETED) {
2045 			refcount_inc(&prev_trans->use_count);
2046 			spin_unlock(&fs_info->trans_lock);
2047 
2048 			wait_for_commit(prev_trans);
2049 			ret = prev_trans->aborted;
2050 
2051 			btrfs_put_transaction(prev_trans);
2052 			if (ret)
2053 				goto cleanup_transaction;
2054 		} else {
2055 			spin_unlock(&fs_info->trans_lock);
2056 		}
2057 	} else {
2058 		spin_unlock(&fs_info->trans_lock);
2059 	}
2060 
2061 	extwriter_counter_dec(cur_trans, trans->type);
2062 
2063 	ret = btrfs_start_delalloc_flush(trans);
2064 	if (ret)
2065 		goto cleanup_transaction;
2066 
2067 	ret = btrfs_run_delayed_items(trans);
2068 	if (ret)
2069 		goto cleanup_transaction;
2070 
2071 	wait_event(cur_trans->writer_wait,
2072 		   extwriter_counter_read(cur_trans) == 0);
2073 
2074 	/* some pending stuffs might be added after the previous flush. */
2075 	ret = btrfs_run_delayed_items(trans);
2076 	if (ret)
2077 		goto cleanup_transaction;
2078 
2079 	btrfs_wait_delalloc_flush(trans);
2080 
2081 	btrfs_scrub_pause(fs_info);
2082 	/*
2083 	 * Ok now we need to make sure to block out any other joins while we
2084 	 * commit the transaction.  We could have started a join before setting
2085 	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
2086 	 */
2087 	spin_lock(&fs_info->trans_lock);
2088 	cur_trans->state = TRANS_STATE_COMMIT_DOING;
2089 	spin_unlock(&fs_info->trans_lock);
2090 	wait_event(cur_trans->writer_wait,
2091 		   atomic_read(&cur_trans->num_writers) == 1);
2092 
2093 	/* ->aborted might be set after the previous check, so check it */
2094 	if (unlikely(READ_ONCE(cur_trans->aborted))) {
2095 		ret = cur_trans->aborted;
2096 		goto scrub_continue;
2097 	}
2098 	/*
2099 	 * the reloc mutex makes sure that we stop
2100 	 * the balancing code from coming in and moving
2101 	 * extents around in the middle of the commit
2102 	 */
2103 	mutex_lock(&fs_info->reloc_mutex);
2104 
2105 	/*
2106 	 * We needn't worry about the delayed items because we will
2107 	 * deal with them in create_pending_snapshot(), which is the
2108 	 * core function of the snapshot creation.
2109 	 */
2110 	ret = create_pending_snapshots(trans);
2111 	if (ret) {
2112 		mutex_unlock(&fs_info->reloc_mutex);
2113 		goto scrub_continue;
2114 	}
2115 
2116 	/*
2117 	 * We insert the dir indexes of the snapshots and update the inode
2118 	 * of the snapshots' parents after the snapshot creation, so there
2119 	 * are some delayed items which are not dealt with. Now deal with
2120 	 * them.
2121 	 *
2122 	 * We needn't worry that this operation will corrupt the snapshots,
2123 	 * because all the tree which are snapshoted will be forced to COW
2124 	 * the nodes and leaves.
2125 	 */
2126 	ret = btrfs_run_delayed_items(trans);
2127 	if (ret) {
2128 		mutex_unlock(&fs_info->reloc_mutex);
2129 		goto scrub_continue;
2130 	}
2131 
2132 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2133 	if (ret) {
2134 		mutex_unlock(&fs_info->reloc_mutex);
2135 		goto scrub_continue;
2136 	}
2137 
2138 	/*
2139 	 * make sure none of the code above managed to slip in a
2140 	 * delayed item
2141 	 */
2142 	btrfs_assert_delayed_root_empty(fs_info);
2143 
2144 	WARN_ON(cur_trans != trans->transaction);
2145 
2146 	/* btrfs_commit_tree_roots is responsible for getting the
2147 	 * various roots consistent with each other.  Every pointer
2148 	 * in the tree of tree roots has to point to the most up to date
2149 	 * root for every subvolume and other tree.  So, we have to keep
2150 	 * the tree logging code from jumping in and changing any
2151 	 * of the trees.
2152 	 *
2153 	 * At this point in the commit, there can't be any tree-log
2154 	 * writers, but a little lower down we drop the trans mutex
2155 	 * and let new people in.  By holding the tree_log_mutex
2156 	 * from now until after the super is written, we avoid races
2157 	 * with the tree-log code.
2158 	 */
2159 	mutex_lock(&fs_info->tree_log_mutex);
2160 
2161 	ret = commit_fs_roots(trans);
2162 	if (ret) {
2163 		mutex_unlock(&fs_info->tree_log_mutex);
2164 		mutex_unlock(&fs_info->reloc_mutex);
2165 		goto scrub_continue;
2166 	}
2167 
2168 	/*
2169 	 * Since the transaction is done, we can apply the pending changes
2170 	 * before the next transaction.
2171 	 */
2172 	btrfs_apply_pending_changes(fs_info);
2173 
2174 	/* commit_fs_roots gets rid of all the tree log roots, it is now
2175 	 * safe to free the root of tree log roots
2176 	 */
2177 	btrfs_free_log_root_tree(trans, fs_info);
2178 
2179 	/*
2180 	 * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
2181 	 * new delayed refs. Must handle them or qgroup can be wrong.
2182 	 */
2183 	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2184 	if (ret) {
2185 		mutex_unlock(&fs_info->tree_log_mutex);
2186 		mutex_unlock(&fs_info->reloc_mutex);
2187 		goto scrub_continue;
2188 	}
2189 
2190 	/*
2191 	 * Since fs roots are all committed, we can get a quite accurate
2192 	 * new_roots. So let's do quota accounting.
2193 	 */
2194 	ret = btrfs_qgroup_account_extents(trans);
2195 	if (ret < 0) {
2196 		mutex_unlock(&fs_info->tree_log_mutex);
2197 		mutex_unlock(&fs_info->reloc_mutex);
2198 		goto scrub_continue;
2199 	}
2200 
2201 	ret = commit_cowonly_roots(trans);
2202 	if (ret) {
2203 		mutex_unlock(&fs_info->tree_log_mutex);
2204 		mutex_unlock(&fs_info->reloc_mutex);
2205 		goto scrub_continue;
2206 	}
2207 
2208 	/*
2209 	 * The tasks which save the space cache and inode cache may also
2210 	 * update ->aborted, check it.
2211 	 */
2212 	if (unlikely(READ_ONCE(cur_trans->aborted))) {
2213 		ret = cur_trans->aborted;
2214 		mutex_unlock(&fs_info->tree_log_mutex);
2215 		mutex_unlock(&fs_info->reloc_mutex);
2216 		goto scrub_continue;
2217 	}
2218 
2219 	btrfs_prepare_extent_commit(fs_info);
2220 
2221 	cur_trans = fs_info->running_transaction;
2222 
2223 	btrfs_set_root_node(&fs_info->tree_root->root_item,
2224 			    fs_info->tree_root->node);
2225 	list_add_tail(&fs_info->tree_root->dirty_list,
2226 		      &cur_trans->switch_commits);
2227 
2228 	btrfs_set_root_node(&fs_info->chunk_root->root_item,
2229 			    fs_info->chunk_root->node);
2230 	list_add_tail(&fs_info->chunk_root->dirty_list,
2231 		      &cur_trans->switch_commits);
2232 
2233 	switch_commit_roots(cur_trans);
2234 
2235 	ASSERT(list_empty(&cur_trans->dirty_bgs));
2236 	ASSERT(list_empty(&cur_trans->io_bgs));
2237 	update_super_roots(fs_info);
2238 
2239 	btrfs_set_super_log_root(fs_info->super_copy, 0);
2240 	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2241 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2242 	       sizeof(*fs_info->super_copy));
2243 
2244 	btrfs_update_commit_device_size(fs_info);
2245 	btrfs_update_commit_device_bytes_used(cur_trans);
2246 
2247 	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2248 	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2249 
2250 	btrfs_trans_release_chunk_metadata(trans);
2251 
2252 	spin_lock(&fs_info->trans_lock);
2253 	cur_trans->state = TRANS_STATE_UNBLOCKED;
2254 	fs_info->running_transaction = NULL;
2255 	spin_unlock(&fs_info->trans_lock);
2256 	mutex_unlock(&fs_info->reloc_mutex);
2257 
2258 	wake_up(&fs_info->transaction_wait);
2259 
2260 	ret = btrfs_write_and_wait_transaction(trans);
2261 	if (ret) {
2262 		btrfs_handle_fs_error(fs_info, ret,
2263 				      "Error while writing out transaction");
2264 		mutex_unlock(&fs_info->tree_log_mutex);
2265 		goto scrub_continue;
2266 	}
2267 
2268 	ret = write_all_supers(fs_info, 0);
2269 	/*
2270 	 * the super is written, we can safely allow the tree-loggers
2271 	 * to go about their business
2272 	 */
2273 	mutex_unlock(&fs_info->tree_log_mutex);
2274 	if (ret)
2275 		goto scrub_continue;
2276 
2277 	btrfs_finish_extent_commit(trans);
2278 
2279 	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2280 		btrfs_clear_space_info_full(fs_info);
2281 
2282 	fs_info->last_trans_committed = cur_trans->transid;
2283 	/*
2284 	 * We needn't acquire the lock here because there is no other task
2285 	 * which can change it.
2286 	 */
2287 	cur_trans->state = TRANS_STATE_COMPLETED;
2288 	wake_up(&cur_trans->commit_wait);
2289 	clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
2290 
2291 	spin_lock(&fs_info->trans_lock);
2292 	list_del_init(&cur_trans->list);
2293 	spin_unlock(&fs_info->trans_lock);
2294 
2295 	btrfs_put_transaction(cur_trans);
2296 	btrfs_put_transaction(cur_trans);
2297 
2298 	if (trans->type & __TRANS_FREEZABLE)
2299 		sb_end_intwrite(fs_info->sb);
2300 
2301 	trace_btrfs_transaction_commit(trans->root);
2302 
2303 	btrfs_scrub_continue(fs_info);
2304 
2305 	if (current->journal_info == trans)
2306 		current->journal_info = NULL;
2307 
2308 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
2309 
2310 	return ret;
2311 
2312 scrub_continue:
2313 	btrfs_scrub_continue(fs_info);
2314 cleanup_transaction:
2315 	btrfs_trans_release_metadata(trans);
2316 	btrfs_cleanup_pending_block_groups(trans);
2317 	btrfs_trans_release_chunk_metadata(trans);
2318 	trans->block_rsv = NULL;
2319 	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2320 	if (current->journal_info == trans)
2321 		current->journal_info = NULL;
2322 	cleanup_transaction(trans, ret);
2323 
2324 	return ret;
2325 }
2326 
2327 /*
2328  * return < 0 if error
2329  * 0 if there are no more dead_roots at the time of call
2330  * 1 there are more to be processed, call me again
2331  *
2332  * The return value indicates there are certainly more snapshots to delete, but
2333  * if there comes a new one during processing, it may return 0. We don't mind,
2334  * because btrfs_commit_super will poke cleaner thread and it will process it a
2335  * few seconds later.
2336  */
2337 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2338 {
2339 	int ret;
2340 	struct btrfs_fs_info *fs_info = root->fs_info;
2341 
2342 	spin_lock(&fs_info->trans_lock);
2343 	if (list_empty(&fs_info->dead_roots)) {
2344 		spin_unlock(&fs_info->trans_lock);
2345 		return 0;
2346 	}
2347 	root = list_first_entry(&fs_info->dead_roots,
2348 			struct btrfs_root, root_list);
2349 	list_del_init(&root->root_list);
2350 	spin_unlock(&fs_info->trans_lock);
2351 
2352 	btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2353 
2354 	btrfs_kill_all_delayed_nodes(root);
2355 
2356 	if (btrfs_header_backref_rev(root->node) <
2357 			BTRFS_MIXED_BACKREF_REV)
2358 		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2359 	else
2360 		ret = btrfs_drop_snapshot(root, NULL, 1, 0);
2361 
2362 	return (ret < 0) ? 0 : 1;
2363 }
2364 
2365 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2366 {
2367 	unsigned long prev;
2368 	unsigned long bit;
2369 
2370 	prev = xchg(&fs_info->pending_changes, 0);
2371 	if (!prev)
2372 		return;
2373 
2374 	bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2375 	if (prev & bit)
2376 		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2377 	prev &= ~bit;
2378 
2379 	bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2380 	if (prev & bit)
2381 		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2382 	prev &= ~bit;
2383 
2384 	bit = 1 << BTRFS_PENDING_COMMIT;
2385 	if (prev & bit)
2386 		btrfs_debug(fs_info, "pending commit done");
2387 	prev &= ~bit;
2388 
2389 	if (prev)
2390 		btrfs_warn(fs_info,
2391 			"unknown pending changes left 0x%lx, ignoring", prev);
2392 }
2393