/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <crypto/blake2b.h>
#include <crypto/sha2.h>
#include <linux/blkdev.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/compiler.h>
#include <linux/math.h>
#include <linux/atomic.h>
#include <linux/percpu_counter.h>
#include <linux/completion.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/wait_bit.h>
#include <linux/sched.h>
#include <linux/rbtree.h>
#include <linux/xxhash.h>
#include <uapi/linux/btrfs.h>
#include <uapi/linux/btrfs_tree.h>
#include "extent-io-tree.h"
#include "async-thread.h"
#include "block-rsv.h"
#include "messages.h"

struct inode;
struct super_block;
struct kobject;
struct reloc_control;
struct ulist;
struct btrfs_device;
struct btrfs_block_group;
struct btrfs_root;
struct btrfs_fs_devices;
struct btrfs_transaction;
struct btrfs_balance_control;
struct btrfs_subpage_info;
struct btrfs_stripe_hash_table;
struct btrfs_space_info;

/*
 * Minimum data and metadata block size.
 *
 * Normally it's 4K, but for testing subpage block size on 4K page systems,
 * we allow DEBUG builds to accept a 2K block size.
 */
#ifdef CONFIG_BTRFS_DEBUG
#define BTRFS_MIN_BLOCKSIZE	(SZ_2K)
#else
#define BTRFS_MIN_BLOCKSIZE	(SZ_4K)
#endif

#define BTRFS_MAX_BLOCKSIZE	(SZ_64K)

#define BTRFS_MAX_EXTENT_SIZE SZ_128M

/*
 * Maximum length to trim in a single iteration, to avoid holding the device
 * list mutex for too long.
 */
#define BTRFS_MAX_TRIM_LENGTH			SZ_2G

#define BTRFS_OLDEST_GENERATION	0ULL

#define BTRFS_EMPTY_DIR_SIZE 0

#define BTRFS_DIRTY_METADATA_THRESH		SZ_32M

#define BTRFS_SUPER_INFO_OFFSET			SZ_64K
#define BTRFS_SUPER_INFO_SIZE			4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define BTRFS_CSUM_FMT				"0x%*phN"
#define BTRFS_CSUM_FMT_VALUE(size, bytes)	size, bytes

#define BTRFS_KEY_FMT			"(%llu %u %llu)"
#define BTRFS_KEY_FMT_VALUE(key)	(key)->objectid, (key)->type, (key)->offset

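/*
 * Illustrative usage of the two format helpers above (hypothetical message,
 * not taken from a real call site):
 *
 *	btrfs_warn(fs_info, "csum mismatch " BTRFS_CSUM_FMT " for key " BTRFS_KEY_FMT,
 *		   BTRFS_CSUM_FMT_VALUE(fs_info->csum_size, csum),
 *		   BTRFS_KEY_FMT_VALUE(&key));
 */
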
/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS		6

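/*
 * Illustrative (hypothetical call site, not taken from real code): the
 * metadata reservation for one unlink can be sized with the helper defined
 * later in this header:
 *
 *	u64 bytes = btrfs_calc_metadata_size(fs_info, BTRFS_UNLINK_METADATA_UNITS);
 */
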
/*
 * The reserved space at the beginning of each device.  It covers the primary
 * super block, leaves room for potential use by other tools such as
 * bootloaders, and lowers the damage caused by an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED			(SZ_1M)

/*
 * Runtime (in-memory) states of the filesystem.
 */
enum {
	/*
	 * Filesystem is being remounted, allowing some operations, like
	 * defrag, to be skipped.
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/* Track if log replay has failed. */
	BTRFS_FS_STATE_LOG_REPLAY_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace.
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	/* Checksum errors are ignored. */
	BTRFS_FS_STATE_NO_DATA_CSUMS,
	BTRFS_FS_STATE_SKIP_META_CSUMS,

	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	/* No more delayed iputs can be queued. */
	BTRFS_FS_STATE_NO_DELAYED_IPUT,

	/*
	 * Emergency shutdown, a step further than a transaction abort in that
	 * it rejects all operations.
	 */
	BTRFS_FS_STATE_EMERGENCY_SHUTDOWN,

	BTRFS_FS_STATE_COUNT
};

enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,
	BTRFS_FS_OPEN,
	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,
	BTRFS_FS_BTREE_ERR,
	BTRFS_FS_LOG1_ERR,
	BTRFS_FS_LOG2_ERR,
	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether the fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started; it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues.
	 */
	BTRFS_FS_CSUM_IMPL_FAST,

	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to clean up space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half-completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do the next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

	/* This is set when active zone tracking is needed. */
	BTRFS_FS_ACTIVE_ZONE_TRACKING,

	/*
	 * Indicate that some features have changed; this is mostly for the
	 * cleaner thread to update the sysfs interface.
	 */
	BTRFS_FS_FEATURE_CHANGED,

	/*
	 * Indicate that we have found a tree block which is only aligned to
	 * sectorsize, but not to nodesize.  This should be rare nowadays.
	 */
	BTRFS_FS_UNALIGNED_TREE_BLOCK,

#if BITS_PER_LONG == 32
	/* Indicate whether an error/warning message has been printed on 32-bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};

/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM			= (1ULL << 0),
	BTRFS_MOUNT_NODATACOW			= (1ULL << 1),
	BTRFS_MOUNT_NOBARRIER			= (1ULL << 2),
	BTRFS_MOUNT_SSD				= (1ULL << 3),
	BTRFS_MOUNT_DEGRADED			= (1ULL << 4),
	BTRFS_MOUNT_COMPRESS			= (1ULL << 5),
	BTRFS_MOUNT_NOTREELOG			= (1ULL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT		= (1ULL << 7),
	BTRFS_MOUNT_SSD_SPREAD			= (1ULL << 8),
	BTRFS_MOUNT_NOSSD			= (1ULL << 9),
	BTRFS_MOUNT_DISCARD_SYNC		= (1ULL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS		= (1ULL << 11),
	BTRFS_MOUNT_SPACE_CACHE			= (1ULL << 12),
	BTRFS_MOUNT_CLEAR_CACHE		= (1ULL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED	= (1ULL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG		= (1ULL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG			= (1ULL << 16),
	BTRFS_MOUNT_USEBACKUPROOT		= (1ULL << 17),
	BTRFS_MOUNT_SKIP_BALANCE		= (1ULL << 18),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	= (1ULL << 19),
	BTRFS_MOUNT_RESCAN_UUID_TREE		= (1ULL << 20),
	BTRFS_MOUNT_FRAGMENT_DATA		= (1ULL << 21),
	BTRFS_MOUNT_FRAGMENT_METADATA		= (1ULL << 22),
	BTRFS_MOUNT_FREE_SPACE_TREE		= (1ULL << 23),
	BTRFS_MOUNT_NOLOGREPLAY			= (1ULL << 24),
	BTRFS_MOUNT_REF_VERIFY			= (1ULL << 25),
	BTRFS_MOUNT_DISCARD_ASYNC		= (1ULL << 26),
	BTRFS_MOUNT_IGNOREBADROOTS		= (1ULL << 27),
	BTRFS_MOUNT_IGNOREDATACSUMS		= (1ULL << 28),
	BTRFS_MOUNT_NODISCARD			= (1ULL << 29),
	BTRFS_MOUNT_NOSPACECACHE		= (1ULL << 30),
	BTRFS_MOUNT_IGNOREMETACSUMS		= (1ULL << 31),
	BTRFS_MOUNT_IGNORESUPERFLAGS		= (1ULL << 32),
	BTRFS_MOUNT_REF_TRACKER			= (1ULL << 33),
};

/* These mount options require a full read-only fs, no new transaction is allowed. */
#define BTRFS_MOUNT_FULL_RO_MASK		\
	(BTRFS_MOUNT_NOLOGREPLAY |		\
	 BTRFS_MOUNT_IGNOREBADROOTS |		\
	 BTRFS_MOUNT_IGNOREDATACSUMS |		\
	 BTRFS_MOUNT_IGNOREMETACSUMS |		\
	 BTRFS_MOUNT_IGNORESUPERFLAGS)

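/*
 * Illustrative mount-time check (hypothetical, not a real call site): these
 * options only make sense on a read-only mount, e.g.:
 *
 *	if ((mount_opt & BTRFS_MOUNT_FULL_RO_MASK) && !sb_rdonly(sb))
 *		return -EINVAL;
 */
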
/*
 * Compat flags that we support.  Unknown compat flags are safely ignored;
 * if any incompat flags other than the ones in BTRFS_FEATURE_INCOMPAT_SUPP
 * below are set, we will fail to mount.
 */
#define BTRFS_FEATURE_COMPAT_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE |	\
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
	 BTRFS_FEATURE_COMPAT_RO_VERITY |		\
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE		\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD |		\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES	|	\
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID	|	\
	 BTRFS_FEATURE_INCOMPAT_RAID1C34	|	\
	 BTRFS_FEATURE_INCOMPAT_ZONED		|	\
	 BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)

#ifdef CONFIG_BTRFS_EXPERIMENTAL
	/*
	 * Features under development, like extent tree v2, are enabled only
	 * under CONFIG_BTRFS_EXPERIMENTAL.
	 */
#define BTRFS_FEATURE_INCOMPAT_SUPP		\
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE |	\
	 BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE | \
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 | \
	 BTRFS_FEATURE_INCOMPAT_REMAP_TREE)

#else

#define BTRFS_FEATURE_INCOMPAT_SUPP		\
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)

#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET			\
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR		0ULL

#define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
#define BTRFS_WARNING_COMMIT_INTERVAL	(300)
#define BTRFS_DEFAULT_MAX_INLINE	(2048)

342 	BTRFS_COMPRESS_NONE  = 0,
343 	BTRFS_COMPRESS_ZLIB  = 1,
344 	BTRFS_COMPRESS_LZO   = 2,
345 	BTRFS_COMPRESS_ZSTD  = 3,
346 	BTRFS_NR_COMPRESS_TYPES = 4,
347 
348 	BTRFS_DEFRAG_DONT_COMPRESS,
349 };
350 
351 struct btrfs_dev_replace {
352 	/* See #define above */
353 	u64 replace_state;
354 	/* Seconds since 1-Jan-1970 */
355 	time64_t time_started;
356 	/* Seconds since 1-Jan-1970 */
357 	time64_t time_stopped;
358 	atomic64_t num_write_errors;
359 	atomic64_t num_uncorrectable_read_errors;
360 
361 	u64 cursor_left;
362 	u64 committed_cursor_left;
363 	u64 cursor_left_last_write_of_item;
364 	u64 cursor_right;
365 
366 	/* See #define above */
367 	u64 cont_reading_from_srcdev_mode;
368 
369 	int is_valid;
370 	int item_needs_writeback;
371 	struct btrfs_device *srcdev;
372 	struct btrfs_device *tgtdev;
373 
374 	struct mutex lock_finishing_cancel_unmount;
375 	struct rw_semaphore rwsem;
376 
377 	struct btrfs_scrub_progress scrub_progress;
378 
379 	struct percpu_counter bio_counter;
380 	wait_queue_head_t replace_wait;
381 
382 	struct task_struct *replace_task;
383 };
384 
/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};

/*
 * Discard control.
 *
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters.  Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy.  Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_UNUSED	0
#define BTRFS_DISCARD_INDEX_START	1

struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};

/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};

/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
	/* Start of the last critical section in ns. */
	u64 critical_section_start_time;
};

struct btrfs_delayed_root {
	spinlock_t lock;
	int nodes;		/* for delayed nodes */
	struct list_head node_list;
	/*
	 * Used for delayed nodes that are waiting to be dealt with by the
	 * worker. If a delayed node is inserted into the work queue, we drop
	 * it from this list.
	 */
	struct list_head prepare_list;
	atomic_t items;		/* for delayed items */
	atomic_t items_seq;	/* for delayed items */
	wait_queue_head_t wait;
};

struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;
	struct btrfs_root *stripe_root;
	struct btrfs_root *remap_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log tree blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct rb_root_cached mapping_tree;
	rwlock_t mapping_tree_lock;

	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for remap tree. */
	struct btrfs_block_rsv remap_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;
	/* Block reservation for treelog tree */
	struct btrfs_block_rsv treelog_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	/*
	 * Updated while holding the lock 'trans_lock'. Due to the life cycle
	 * of a transaction, it can be directly read while holding a
	 * transaction handle; everywhere else it must be read with
	 * btrfs_get_fs_generation(). Should always be updated using
	 * btrfs_set_fs_generation().
	 */
	u64 generation;
	/*
	 * Always use btrfs_get_last_trans_committed() and
	 * btrfs_set_last_trans_committed() to read and update this field.
	 */
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;

	/*
	 * This is updated to the current trans every time a full commit is
	 * required instead of the faster short fsync log commits.
	 */
	u64 last_trans_log_full_commit;
	unsigned long long mount_opt;

	/* Compress related structures. */
	void *compr_wsm[BTRFS_NR_COMPRESS_TYPES];

	int compress_type;
	int compress_level;
	u32 commit_interval;
	/*
	 * This is a suggestive number; the read side is safe even if it gets
	 * a wrong number because we will write out the data into a regular
	 * extent. The write side (mount/remount) is under the ->s_umount
	 * lock, so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags and compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear the flags, we don't need the lock on the
	 * read side.
	 *
	 * We also don't need the lock when we mount the fs, because there is
	 * no other task which can update the flags.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;
	struct mutex remap_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two ios are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it.  This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock; it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;

	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads.  This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks.  We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write.  It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* Track the number of blocks (sectors) read by the filesystem. */
	struct percpu_counter stats_read_blocks;

	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	struct percpu_counter evictable_extent_maps;
	u64 em_shrinker_last_root;
	u64 em_shrinker_last_ino;
	atomic64_t em_shrinker_nr_to_scan;
	struct work_struct em_shrinker_work;

	/* Protected by 'trans_lock'. */
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed.  RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* Balance state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, i.e. scrub is
	 * not running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;

	struct btrfs_discard_ctl discard_ctl;

	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * Protects user-initiated changes for quota operations. If a
	 * transaction is needed, it must be started before this lock is
	 * taken.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;

	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;
	u64 qgroup_enable_gen;

	/*
	 * If this is not 0, then it indicates a serious filesystem error has
	 * happened and it contains that error (negative errno value).
	 */
	int fs_error;

	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root delayed_root;

	/* Entries are eb->start >> nodesize_bits */
	struct xarray buffer_tree;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	/* Protected by unused_bgs_lock. */
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	/* Protects the lists unused_bgs, reclaim_bgs, and fully_remapped_bgs. */
	spinlock_t unused_bgs_lock;
	/* Protected by unused_bgs_lock. */
	struct list_head unused_bgs;
	struct list_head fully_remapped_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 nodesize_bits;
	u32 sectorsize;
	/* ilog2 of sectorsize, used to avoid 64bit division */
	u32 sectorsize_bits;
	u32 block_min_order;
	u32 block_max_order;
	u32 stripesize;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 csum_type;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem; on zoned filesystems it depends on device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size is > 0 when in ZONED mode, otherwise it's 0, which also
	 * serves as the check for whether the mode is enabled.
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;

	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	struct btrfs_block_group *active_meta_bg;
	struct btrfs_block_group *active_system_bg;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_DEBUG
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;

	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};

#define folio_to_inode(_folio)	(BTRFS_I(_Generic((_folio),			\
					  struct folio *: (_folio))->mapping->host))

#define folio_to_fs_info(_folio) (folio_to_inode(_folio)->root->fs_info)

#define inode_to_fs_info(_inode) (BTRFS_I(_Generic((_inode),			\
					   struct inode *: (_inode)))->root->fs_info)

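/*
 * The _Generic() expressions above make the helpers type-safe: passing
 * anything but a struct folio * (or struct inode *) pointer fails to
 * compile. Illustrative use:
 *
 *	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 */
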
static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

/* Return the minimal folio size of the fs. */
static inline unsigned int btrfs_min_folio_size(struct btrfs_fs_info *fs_info)
{
	return 1U << (PAGE_SHIFT + fs_info->block_min_order);
}

static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->generation);
}

static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->generation, gen);
}

static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_trans_committed);
}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->last_trans_committed, gen);
}

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}

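/*
 * Example for the helper above (illustrative numbers only): with a 4K sector
 * size, checksumming 1MiB produces 256 csums; if csums_per_leaf were 1024,
 * DIV_ROUND_UP_ULL(256, 1024) rounds that up to a single leaf.
 */
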
/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

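/*
 * Example for the helper above (illustrative): with the common 16K nodesize
 * and BTRFS_MAX_LEVEL == 8, each item accounts for 16K * 8 * 2 = 256K of
 * reserved metadata space; btrfs_calc_metadata_size() below reserves half of
 * that, as no new nodes or leaves can be created.
 */
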
/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
						 unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}

#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) ((bytes) >> (fs_info)->sectorsize_bits)

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
}

/*
 * Count how many extents of at most fs_info->max_extent_size are needed to
 * cover @size.
 */
static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}

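/*
 * Example for the helper above (illustrative): with the default 128M
 * max_extent_size, a 300M range counts as 3 extents.
 */
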
static inline unsigned int btrfs_blocks_per_folio(const struct btrfs_fs_info *fs_info,
						  const struct folio *folio)
{
	return folio_size(folio) >> fs_info->sectorsize_bits;
}

bool __attribute_const__ btrfs_supported_blocksize(u32 blocksize);
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);

int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

struct btrfs_csum_ctx {
	u16 csum_type;
	union {
		u32 crc32;
		struct xxh64_state xxh64;
		struct sha256_ctx sha256;
		struct blake2b_ctx blake2b;
	};
};

void btrfs_csum(u16 csum_type, const u8 *data, size_t len, u8 *out);
void btrfs_csum_init(struct btrfs_csum_ctx *ctx, u16 csum_type);
void btrfs_csum_update(struct btrfs_csum_ctx *ctx, const u8 *data, size_t len);
void btrfs_csum_final(struct btrfs_csum_ctx *ctx, u8 *out);

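/*
 * Illustrative usage of the checksum context API above (hypothetical
 * buffers, not a real call site), hashing two fragments as one stream:
 *
 *	struct btrfs_csum_ctx ctx;
 *	u8 result[BTRFS_CSUM_SIZE];
 *
 *	btrfs_csum_init(&ctx, fs_info->csum_type);
 *	btrfs_csum_update(&ctx, buf1, len1);
 *	btrfs_csum_update(&ctx, buf2, len2);
 *	btrfs_csum_final(&ctx, result);
 *
 * For a single contiguous buffer, btrfs_csum() does the same in one call.
 */
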
static inline bool btrfs_is_empty_uuid(const u8 *uuid)
{
	return uuid_is_null((const uuid_t *)uuid);
}

/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags)				\
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags)				\
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt)				\
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt)				\
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt)					\
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt)				\
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt)			\
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt)				\
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

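/*
 * Illustrative use of the feature helpers above: the short names paste the
 * BTRFS_FEATURE_* prefix back on, so e.g.:
 *
 *	if (!btrfs_fs_incompat(fs_info, ZONED))
 *		btrfs_set_fs_incompat(fs_info, ZONED);
 *
 * tests and sets BTRFS_FEATURE_INCOMPAT_ZONED; the setter also receives the
 * stringified feature name for reporting.
 */
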
#define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt)	((fs_info)->mount_opt & \
					 BTRFS_MOUNT_##opt)

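/*
 * Illustrative use of the mount option helpers above (hypothetical check):
 *
 *	if (btrfs_test_opt(fs_info, SSD_SPREAD))
 *		cluster = &fs_info->data_alloc_cluster;
 */
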
static inline bool btrfs_fs_closing(const struct btrfs_fs_info *fs_info)
{
	return unlikely(test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags));
}

static inline bool btrfs_fs_closing_done(const struct btrfs_fs_info *fs_info)
{
	if (btrfs_fs_closing(fs_info) && test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
		return true;

	return false;
}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleep. This function is used to check the status of the
 * fs.
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(const struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
		btrfs_fs_closing(fs_info);
}

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}

#define BTRFS_FS_ERROR(fs_info)	(READ_ONCE((fs_info)->fs_error))

#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info)				\
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,		\
			   &(fs_info)->fs_state)))

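/*
 * Typical check pattern (illustrative, not a specific call site):
 * BTRFS_FS_ERROR() evaluates to the recorded negative errno, or 0 when no
 * error has happened, so callers can bail out early:
 *
 *	if (unlikely(BTRFS_FS_ERROR(fs_info)))
 *		return BTRFS_FS_ERROR(fs_info);
 */
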
static inline bool btrfs_is_shutdown(const struct btrfs_fs_info *fs_info)
{
	return unlikely(test_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state));
}

static inline void btrfs_force_shutdown(struct btrfs_fs_info *fs_info)
{
	/*
	 * Here we do not want to use handle_fs_error(), which would mark the
	 * fs read-only.
	 * Some call sites, like the shutdown ioctl, may mark the fs shutdown
	 * while it is frozen, and the thaw path handles RO and RW filesystems
	 * differently.
	 *
	 * So here we only record the fs error without flipping the fs RO.
	 */
	WRITE_ONCE(fs_info->fs_error, -EIO);
	if (!test_and_set_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state))
		btrfs_crit(fs_info, "emergency shutdown");
}

/*
 * We use folio flag owner_2 to indicate there is an ordered extent with
 * unfinished IO.
 */
#define folio_test_ordered(folio)	folio_test_owner_2(folio)
#define folio_set_ordered(folio)	folio_set_owner_2(folio)
#define folio_clear_ordered(folio)	folio_clear_owner_2(folio)

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state));
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return false;
}
#endif

#endif