/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <linux/blkdev.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/compiler.h>
#include <linux/math.h>
#include <linux/atomic.h>
#include <linux/percpu_counter.h>
#include <linux/completion.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/wait_bit.h>
#include <linux/sched.h>
#include <linux/rbtree.h>
#include <uapi/linux/btrfs.h>
#include <uapi/linux/btrfs_tree.h>
#include "extent-io-tree.h"
#include "async-thread.h"
#include "block-rsv.h"
#include "messages.h"

struct inode;
struct super_block;
struct kobject;
struct reloc_control;
struct crypto_shash;
struct ulist;
struct btrfs_device;
struct btrfs_block_group;
struct btrfs_root;
struct btrfs_fs_devices;
struct btrfs_transaction;
struct btrfs_delayed_root;
struct btrfs_balance_control;
struct btrfs_subpage_info;
struct btrfs_stripe_hash_table;
struct btrfs_space_info;

/*
 * Minimum data and metadata block size.
 *
 * Normally it's 4K, but for testing subpage block size on 4K page systems,
 * DEBUG builds are allowed to use a 2K block size.
 */
#ifdef CONFIG_BTRFS_DEBUG
#define BTRFS_MIN_BLOCKSIZE (SZ_2K)
#else
#define BTRFS_MIN_BLOCKSIZE (SZ_4K)
#endif

#define BTRFS_MAX_BLOCKSIZE (SZ_64K)

#define BTRFS_MAX_EXTENT_SIZE SZ_128M

#define BTRFS_OLDEST_GENERATION 0ULL

#define BTRFS_EMPTY_DIR_SIZE 0

#define BTRFS_DIRTY_METADATA_THRESH SZ_32M

#define BTRFS_SUPER_INFO_OFFSET SZ_64K
#define BTRFS_SUPER_INFO_SIZE 4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define BTRFS_CSUM_FMT "0x%*phN"
#define BTRFS_CSUM_FMT_VALUE(size, bytes) size, bytes

#define BTRFS_KEY_FMT "(%llu %u %llu)"
#define BTRFS_KEY_FMT_VALUE(key) (key)->objectid, (key)->type, (key)->offset
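
/*
 * Illustrative use of the format helpers above (hypothetical call site, not
 * part of this header):
 *
 *	btrfs_warn(fs_info, "bad csum at key " BTRFS_KEY_FMT ", csum " BTRFS_CSUM_FMT,
 *		   BTRFS_KEY_FMT_VALUE(&key),
 *		   BTRFS_CSUM_FMT_VALUE(fs_info->csum_size, csum));
 */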

/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS 6

/*
 * The reserved space at the beginning of each device. It covers the primary
 * super block, leaves room for potential use by other tools such as
 * bootloaders, and lowers the damage caused by an accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
/*
 * Runtime (in-memory) states of filesystem
 */
enum {
	/*
	 * Filesystem is being remounted, which allows skipping some
	 * operations, like defrag.
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/* Track if log replay has failed. */
	BTRFS_FS_STATE_LOG_REPLAY_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	/* Checksum errors are ignored. */
	BTRFS_FS_STATE_NO_DATA_CSUMS,
	BTRFS_FS_STATE_SKIP_META_CSUMS,

	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	/* No more delayed iputs can be queued. */
	BTRFS_FS_STATE_NO_DELAYED_IPUT,

	/*
	 * Emergency shutdown, a step further than transaction abort: all
	 * operations are rejected.
	 */
	BTRFS_FS_STATE_EMERGENCY_SHUTDOWN,

	BTRFS_FS_STATE_COUNT
};

enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,
	BTRFS_FS_OPEN,
	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,
	BTRFS_FS_BTREE_ERR,
	BTRFS_FS_LOG1_ERR,
	BTRFS_FS_LOG2_ERR,
	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started, it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues.
	 */
	BTRFS_FS_CSUM_IMPL_FAST,

	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to cleanup space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

	/* This is set when active zone tracking is needed. */
	BTRFS_FS_ACTIVE_ZONE_TRACKING,

	/*
	 * Indicate if we have some features changed, this is mostly for
	 * cleaner thread to update the sysfs interface.
	 */
	BTRFS_FS_FEATURE_CHANGED,

	/*
	 * Indicate that we have found a tree block which is only aligned to
	 * sectorsize, but not to nodesize. This should be rare nowadays.
	 */
	BTRFS_FS_UNALIGNED_TREE_BLOCK,

#if BITS_PER_LONG == 32
	/* Indicate if we have error/warn message printed on 32bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};

/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM = (1ULL << 0),
	BTRFS_MOUNT_NODATACOW = (1ULL << 1),
	BTRFS_MOUNT_NOBARRIER = (1ULL << 2),
	BTRFS_MOUNT_SSD = (1ULL << 3),
	BTRFS_MOUNT_DEGRADED = (1ULL << 4),
	BTRFS_MOUNT_COMPRESS = (1ULL << 5),
	BTRFS_MOUNT_NOTREELOG = (1ULL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT = (1ULL << 7),
	BTRFS_MOUNT_SSD_SPREAD = (1ULL << 8),
	BTRFS_MOUNT_NOSSD = (1ULL << 9),
	BTRFS_MOUNT_DISCARD_SYNC = (1ULL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS = (1ULL << 11),
	BTRFS_MOUNT_SPACE_CACHE = (1ULL << 12),
	BTRFS_MOUNT_CLEAR_CACHE = (1ULL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1ULL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG = (1ULL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG = (1ULL << 16),
	BTRFS_MOUNT_USEBACKUPROOT = (1ULL << 17),
	BTRFS_MOUNT_SKIP_BALANCE = (1ULL << 18),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1ULL << 19),
	BTRFS_MOUNT_RESCAN_UUID_TREE = (1ULL << 20),
	BTRFS_MOUNT_FRAGMENT_DATA = (1ULL << 21),
	BTRFS_MOUNT_FRAGMENT_METADATA = (1ULL << 22),
	BTRFS_MOUNT_FREE_SPACE_TREE = (1ULL << 23),
	BTRFS_MOUNT_NOLOGREPLAY = (1ULL << 24),
	BTRFS_MOUNT_REF_VERIFY = (1ULL << 25),
	BTRFS_MOUNT_DISCARD_ASYNC = (1ULL << 26),
	BTRFS_MOUNT_IGNOREBADROOTS = (1ULL << 27),
	BTRFS_MOUNT_IGNOREDATACSUMS = (1ULL << 28),
	BTRFS_MOUNT_NODISCARD = (1ULL << 29),
	BTRFS_MOUNT_NOSPACECACHE = (1ULL << 30),
	BTRFS_MOUNT_IGNOREMETACSUMS = (1ULL << 31),
	BTRFS_MOUNT_IGNORESUPERFLAGS = (1ULL << 32),
	BTRFS_MOUNT_REF_TRACKER = (1ULL << 33),
};

/* These mount options require a full read-only fs, no new transaction is allowed. */
#define BTRFS_MOUNT_FULL_RO_MASK \
	(BTRFS_MOUNT_NOLOGREPLAY | \
	 BTRFS_MOUNT_IGNOREBADROOTS | \
	 BTRFS_MOUNT_IGNOREDATACSUMS | \
	 BTRFS_MOUNT_IGNOREMETACSUMS | \
	 BTRFS_MOUNT_IGNORESUPERFLAGS)

/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below then we will fail to mount
 */
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP \
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
	 BTRFS_FEATURE_COMPAT_RO_VERITY | \
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE \
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
	 BTRFS_FEATURE_INCOMPAT_RAID56 | \
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
	 BTRFS_FEATURE_INCOMPAT_ZONED | \
	 BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)

#ifdef CONFIG_BTRFS_EXPERIMENTAL
/*
 * Features under development, like extent tree v2, are enabled only under
 * CONFIG_BTRFS_EXPERIMENTAL.
 */
#define BTRFS_FEATURE_INCOMPAT_SUPP \
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
	 BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE | \
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)

#else

#define BTRFS_FEATURE_INCOMPAT_SUPP \
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)

#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL

#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
#define BTRFS_WARNING_COMMIT_INTERVAL (300)
#define BTRFS_DEFAULT_MAX_INLINE (2048)

enum btrfs_compression_type {
	BTRFS_COMPRESS_NONE = 0,
	BTRFS_COMPRESS_ZLIB = 1,
	BTRFS_COMPRESS_LZO = 2,
	BTRFS_COMPRESS_ZSTD = 3,
	BTRFS_NR_COMPRESS_TYPES = 4,

	BTRFS_DEFRAG_DONT_COMPRESS,
};

struct btrfs_dev_replace {
	/* See #define above */
	u64 replace_state;
	/* Seconds since 1-Jan-1970 */
	time64_t time_started;
	/* Seconds since 1-Jan-1970 */
	time64_t time_stopped;
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	/* See #define above */
	u64 cont_reading_from_srcdev_mode;

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;

	struct task_struct *replace_task;
};

/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};

/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS 3
#define BTRFS_DISCARD_INDEX_UNUSED 0
#define BTRFS_DISCARD_INDEX_START 1

struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};

/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};

/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
	/* Start of the last critical section in ns. */
	u64 critical_section_start_time;
};

struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;
	struct btrfs_root *stripe_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log trees blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct rb_root_cached mapping_tree;
	rwlock_t mapping_tree_lock;

	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;
	/* Block reservation for treelog tree */
	struct btrfs_block_rsv treelog_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	/*
	 * Updated while holding the lock 'trans_lock'. Due to the life cycle
	 * of a transaction, it can be read directly while holding a
	 * transaction handle; everywhere else it must be read with
	 * btrfs_get_fs_generation(). Should always be updated using
	 * btrfs_set_fs_generation().
	 */
	u64 generation;
	/*
	 * Always use btrfs_get_last_trans_committed() and
	 * btrfs_set_last_trans_committed() to read and update this field.
	 */
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;

	/*
	 * This is updated to the current trans every time a full commit is
	 * required instead of the faster short fsync log commits
	 */
	u64 last_trans_log_full_commit;
	unsigned long long mount_opt;

	/* Compress related structures. */
	void *compr_wsm[BTRFS_NR_COMPRESS_TYPES];

	int compress_type;
	int compress_level;
	u32 commit_interval;
	/*
	 * This is an advisory number. The read side is safe even if it sees a
	 * stale value, because we will simply write the data out as a regular
	 * extent. The write side (mount/remount) is serialized by the
	 * ->s_umount lock, so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because the flags are never cleared, the lock is not needed on the
	 * read side.
	 *
	 * The lock is also not needed while mounting the fs, because no other
	 * task can update the flags at that point.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two ios are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock, it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;

	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads. This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write. It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* Track the number of blocks (sectors) read by the filesystem. */
	struct percpu_counter stats_read_blocks;

	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	struct percpu_counter evictable_extent_maps;
	u64 em_shrinker_last_root;
	u64 em_shrinker_last_ino;
	atomic64_t em_shrinker_nr_to_scan;
	struct work_struct em_shrinker_work;

	/* Protected by 'trans_lock'. */
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* Balance state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, i.e. scrub is
	 * not running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;

	struct btrfs_discard_ctl discard_ctl;

	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * Protect user change for quota operations. If a transaction is needed,
	 * it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;

	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;
	u64 qgroup_enable_gen;

	/*
	 * If this is not 0, then it indicates a serious filesystem error has
	 * happened and it contains that error (negative errno value).
	 */
	int fs_error;

	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Entries are eb->start >> nodesize_bits */
	struct xarray buffer_tree;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	/* Protected by unused_bgs_lock. */
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	/* Protects the lists unused_bgs and reclaim_bgs. */
	spinlock_t unused_bgs_lock;
	/* Protected by unused_bgs_lock. */
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 nodesize_bits;
	u32 sectorsize;
	/* ilog2 of sectorsize, use to avoid 64bit division */
	u32 sectorsize_bits;
	u32 block_min_order;
	u32 block_max_order;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on regular
	 * filesystem, on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode; otherwise it's used to check
	 * whether the mode is enabled.
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;

	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	struct btrfs_block_group *active_meta_bg;
	struct btrfs_block_group *active_system_bg;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_DEBUG
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;

	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};

#define folio_to_inode(_folio) (BTRFS_I(_Generic((_folio), \
	struct folio *: (_folio))->mapping->host))

#define folio_to_fs_info(_folio) (folio_to_inode(_folio)->root->fs_info)

#define inode_to_fs_info(_inode) (BTRFS_I(_Generic((_inode), \
	struct inode *: (_inode)))->root->fs_info)

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

/* Return the minimal folio size of the fs. */
static inline unsigned int btrfs_min_folio_size(struct btrfs_fs_info *fs_info)
{
	return 1U << (PAGE_SHIFT + fs_info->block_min_order);
}
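
/*
 * Illustrative note: with block_min_order == 0 (the common case) the minimal
 * folio size is simply PAGE_SIZE; a non-zero order means the block size is
 * larger than a page and the fs must use higher-order folios.
 */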

static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->generation);
}

static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->generation, gen);
}

static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_trans_committed);
}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->last_trans_committed, gen);
}

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}
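
/*
 * Worked example (illustrative numbers only): with a 4K sectorsize, 1M of
 * data yields 256 checksums; if csums_per_leaf were, say, 2000, this returns
 * DIV_ROUND_UP(256, 2000) == 1 leaf.
 */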

/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
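
/*
 * Worked example (illustrative): with a 16K nodesize and BTRFS_MAX_LEVEL (8),
 * each item accounts for 16K * 8 * 2 = 256K of reservation, covering a COW of
 * the full tree path plus possible node splits.
 */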

/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
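
/*
 * Worked example (illustrative): at a 16K nodesize, six items (e.g.
 * BTRFS_UNLINK_METADATA_UNITS) account for 16K * 8 * 6 = 768K here, half of
 * what the insert variant above would reserve.
 */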

#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) ((bytes) >> (fs_info)->sectorsize_bits)

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
}

/*
 * Count how many fs_info->max_extent_size cover the @size
 */
static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
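
/*
 * Worked example (illustrative): on a regular (non-zoned) filesystem
 * max_extent_size is BTRFS_MAX_EXTENT_SIZE (128M), so a 256M range counts as
 * 2 extents, while a 1-byte range still counts as 1.
 */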

static inline unsigned int btrfs_blocks_per_folio(const struct btrfs_fs_info *fs_info,
						  const struct folio *folio)
{
	return folio_size(folio) >> fs_info->sectorsize_bits;
}

bool __attribute_const__ btrfs_supported_blocksize(u32 blocksize);
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);

int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

static inline bool btrfs_is_empty_uuid(const u8 *uuid)
{
	return uuid_is_null((const uuid_t *)uuid);
}

/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags) \
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags) \
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
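
/*
 * Illustrative use of the feature helpers (hypothetical call site): the short
 * feature name is token-pasted into the full flag name:
 *
 *	if (!btrfs_fs_incompat(fs_info, ZONED))
 *		btrfs_set_fs_incompat(fs_info, ZONED);
 */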

#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \
				      BTRFS_MOUNT_##opt)
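
/*
 * Illustrative use of the mount option helpers (hypothetical call site):
 *
 *	btrfs_set_opt(fs_info->mount_opt, FLUSHONCOMMIT);
 *	if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
 *		btrfs_info(fs_info, "turning on flush-on-commit");
 */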

static inline int btrfs_fs_closing(const struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner doesn't need
 * to do anything except sleep. This function is used to check the status of
 * the fs.
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(const struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
	       btrfs_fs_closing(fs_info);
}

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}

#define BTRFS_FS_ERROR(fs_info) (READ_ONCE((fs_info)->fs_error))

#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
			   &(fs_info)->fs_state)))

static inline bool btrfs_is_shutdown(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state);
}

static inline void btrfs_force_shutdown(struct btrfs_fs_info *fs_info)
{
	/*
	 * Here we do not want to use handle_fs_error(), which would mark the
	 * fs read-only.
	 * Some call sites, like the shutdown ioctl, mark the fs shutdown while
	 * it is frozen, and the thaw path handles RO and RW filesystems
	 * differently.
	 *
	 * So here we only mark the fs error without flipping it RO.
	 */
	WRITE_ONCE(fs_info->fs_error, -EIO);
	if (!test_and_set_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state))
		btrfs_crit(fs_info, "emergency shutdown");
}

/*
 * We use folio flag owner_2 to indicate there is an ordered extent with
 * unfinished IO.
 */
#define folio_test_ordered(folio) folio_test_owner_2(folio)
#define folio_set_ordered(folio) folio_set_owner_2(folio)
#define folio_clear_ordered(folio) folio_clear_owner_2(folio)

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state));
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return false;
}
#endif

#endif