/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"
#include "misc.h"

enum btrfs_trans_state {
	TRANS_STATE_RUNNING,
	TRANS_STATE_COMMIT_PREP,
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_SUPER_COMMITTED,
	TRANS_STATE_COMPLETED,
	TRANS_STATE_MAX,
};
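
/*
 * Note (added, not from the original header): the states above are ordered
 * and transaction.c compares them numerically (checks of the form
 * "cur_trans->state >= TRANS_STATE_COMMIT_START" appear in the commit path),
 * so any new state has to preserve the commit ordering.
 */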

#define BTRFS_TRANS_HAVE_FREE_BGS	0
#define BTRFS_TRANS_DIRTY_BG_RUN	1
#define BTRFS_TRANS_CACHE_ENOSPC	2

struct btrfs_transaction {
	u64 transid;
	/*
	 * Total number of external writers (USERSPACE/START/ATTACH) in this
	 * transaction. It must be zero before the transaction can be
	 * committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total number of writers in this transaction. It must be zero before
	 * the transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;

	unsigned long flags;

	/* Protected by fs_info->trans_lock when changing it. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 *   run by one of the transaction committers. Refer to the
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction.
	 *
	 * - btrfs_write_dirty_block_groups - This is called by
	 *   commit_cowonly_roots from the transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING).
	 *
	 * - btrfs_cleanup_dirty_bgs - Called on transaction abort.
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;
	struct extent_io_tree pinned_extents;

	/*
	 * We need to make sure block group deletion doesn't race with free
	 * space cache writeout.  This mutex keeps them from stomping on each
	 * other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;

	/*
	 * Number of ordered extents the transaction must wait for before
	 * committing. These are ordered extents started by a fast fsync.
	 */
	atomic_t pending_ordered;
	wait_queue_head_t pending_wait;
};

enum {
	ENUM_BIT(__TRANS_FREEZABLE),
	ENUM_BIT(__TRANS_START),
	ENUM_BIT(__TRANS_ATTACH),
	ENUM_BIT(__TRANS_JOIN),
	ENUM_BIT(__TRANS_JOIN_NOLOCK),
	ENUM_BIT(__TRANS_DUMMY),
	ENUM_BIT(__TRANS_JOIN_NOSTART),
};

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)

#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)

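/*
 * Illustrative note (added, not from the original header): these bits
 * describe how a handle joined the transaction and are kept in
 * btrfs_trans_handle::type, so transaction.c tests them with plain mask
 * checks, along the lines of:
 *
 *	if (trans->type & __TRANS_FREEZABLE)
 *		sb_end_intwrite(trans->fs_info->sb);
 *
 * and only handles whose type matches TRANS_EXTWRITERS are counted in
 * btrfs_transaction::num_extwriters.
 */
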
struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 delayed_refs_bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	unsigned long delayed_ref_csum_deletions;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Set by a task that wants to create a snapshot. */
	struct btrfs_pending_snapshot *pending_snapshot;
	refcount_t use_count;
	unsigned int type;
	/*
	 * Error code of a transaction abort, set outside of locks and must be
	 * accessed with READ_ONCE/WRITE_ONCE.
	 */
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool removing_chunk;
	bool reloc_reserved;
	bool in_fsync;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
	struct btrfs_block_rsv delayed_rsv;
};

/*
 * The abort status can change between calls and is not protected by locks.
 * This macro accepts both btrfs_transaction and btrfs_trans_handle as types.
 * Once the status is set to a non-zero value it does not change, so the macro
 * should be used in checks, but is not necessary for further reads of the
 * value.
 */
#define TRANS_ABORTED(trans)		(unlikely(READ_ONCE((trans)->aborted)))

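/*
 * Illustrative usage sketch (added, not from the original header): long
 * running work typically checks the macro and bails out once another task has
 * aborted the transaction; -EIO below is just an example error value:
 *
 *	if (TRANS_ABORTED(trans))
 *		return -EIO;
 *
 * After such a check, plain reads of ->aborted are sufficient.
 */
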
struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* Block reservation for the operation. */
	struct btrfs_block_rsv block_rsv;
	/* Error code from the snapshot creation, 0 on success. */
	int error;
	/* Preallocated anonymous block device number */
	dev_t anon_dev;
	bool readonly;
	struct list_head list;
};

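/*
 * Note (added description, not from the original header): record that the
 * inode was modified in the current transaction. last_sub_trans is set to the
 * root's current log transid and last_log_commit to one less, i.e. "not yet
 * logged in this log transaction"; the fsync path uses these fields to decide
 * whether the inode still needs to be logged.
 */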
static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
					      struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_trans = trans->transaction->transid;
	inode->last_sub_trans = btrfs_get_root_log_transid(inode->root);
	inode->last_log_commit = inode->last_sub_trans - 1;
	spin_unlock(&inode->lock);
}

/*
 * Make the qgroup code skip the given qgroupid, which means the old/new_roots
 * for the qgroup won't contain that qgroupid.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = 0;
}

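/*
 * Illustrative pairing sketch (added, not from the original header; the helper
 * name is hypothetical): the skip marker is set around an operation whose
 * extent changes should not be accounted to the given qgroup, then cleared:
 *
 *	btrfs_set_skip_qgroup(trans, objectid);
 *	ret = drop_subtree_refs(trans);		// hypothetical helper
 *	btrfs_clear_skip_qgroup(trans);
 */
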
bool __cold abort_should_print_stack(int error);

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, so that the exact stack trace is reported for some errors.
 */
#define btrfs_abort_transaction(trans, error)		\
do {								\
	bool first = false;					\
	/* Report first abort since mount */			\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
			&((trans)->fs_info->fs_state))) {	\
		first = true;					\
		if (WARN(abort_should_print_stack(error),	\
			KERN_ERR				\
			"BTRFS: Transaction aborted (error %d)\n",	\
			(error))) {					\
			/* Stack trace printed. */			\
		} else {						\
			btrfs_err((trans)->fs_info,			\
				  "Transaction aborted (error %d)",	\
				  (error));			\
		}						\
	}							\
	__btrfs_abort_transaction((trans), __func__,		\
				  __LINE__, (error), first);	\
} while (0)

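/*
 * Illustrative usage sketch (added, not from the original header): abort right
 * where the error is first detected so the stack trace points at the failing
 * operation, e.g.:
 *
 *	ret = btrfs_insert_empty_item(trans, root, path, &key, item_size);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		return ret;
 *	}
 */
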
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);

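/*
 * Illustrative open/commit sketch (added, not from the original header; the
 * modification helper is hypothetical): reserve space for a number of tree
 * items, check for ERR_PTR, then end or commit the handle:
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = do_modification(trans);		// hypothetical helper
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		btrfs_end_transaction(trans);
 *		return ret;
 *	}
 *	return btrfs_commit_transaction(trans);
 */
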
void btrfs_add_dead_root(struct btrfs_root *root);
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
				struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
				      const char *function,
				      unsigned int line, int error, bool first_hit);

int __init btrfs_transaction_init(void);
void __cold btrfs_transaction_exit(void);

#endif