/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <linux/blkdev.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/compiler.h>
#include <linux/math.h>
#include <linux/atomic.h>
#include <linux/percpu_counter.h>
#include <linux/completion.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/wait_bit.h>
#include <linux/sched.h>
#include <linux/rbtree.h>
#include <uapi/linux/btrfs.h>
#include <uapi/linux/btrfs_tree.h>
#include "extent-io-tree.h"
#include "async-thread.h"
#include "block-rsv.h"
#include "messages.h"

struct inode;
struct super_block;
struct kobject;
struct reloc_control;
struct crypto_shash;
struct ulist;
struct btrfs_device;
struct btrfs_block_group;
struct btrfs_root;
struct btrfs_fs_devices;
struct btrfs_transaction;
struct btrfs_delayed_root;
struct btrfs_balance_control;
struct btrfs_subpage_info;
struct btrfs_stripe_hash_table;
struct btrfs_space_info;

/*
 * Minimum data and metadata block size.
 *
 * Normally it's 4K, but for testing subpage block size on 4K page systems, we
 * allow DEBUG builds to accept a 2K block size.
 */
#ifdef CONFIG_BTRFS_DEBUG
#define BTRFS_MIN_BLOCKSIZE	(SZ_2K)
#else
#define BTRFS_MIN_BLOCKSIZE	(SZ_4K)
#endif

#define BTRFS_MAX_BLOCKSIZE	(SZ_64K)

#define BTRFS_MAX_EXTENT_SIZE	SZ_128M

#define BTRFS_OLDEST_GENERATION	0ULL

#define BTRFS_EMPTY_DIR_SIZE	0

#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M

#define BTRFS_SUPER_INFO_OFFSET	SZ_64K
#define BTRFS_SUPER_INFO_SIZE	4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define BTRFS_CSUM_FMT				"0x%*phN"
#define BTRFS_CSUM_FMT_VALUE(size, bytes)	size, bytes

#define BTRFS_KEY_FMT				"(%llu %u %llu)"
#define BTRFS_KEY_FMT_VALUE(key)		(key)->objectid, (key)->type, (key)->offset
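/*
 * Example (illustrative only): printing a checksum mismatch together with the
 * key it belongs to. The locals (csum_size, csum, expected, key) are
 * hypothetical, not part of this header.
 *
 *	btrfs_warn(fs_info,
 *		   "csum mismatch " BTRFS_CSUM_FMT " expected " BTRFS_CSUM_FMT " key " BTRFS_KEY_FMT,
 *		   BTRFS_CSUM_FMT_VALUE(csum_size, csum),
 *		   BTRFS_CSUM_FMT_VALUE(csum_size, expected),
 *		   BTRFS_KEY_FMT_VALUE(&key));
 */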
/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS	6

/*
 * The reserved space at the beginning of each device. It covers the primary
 * super block and leaves space for potential use by other tools like
 * bootloaders or to lower potential damage of accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED	(SZ_1M)

/*
 * Runtime (in-memory) states of filesystem
 */
enum {
	/*
	 * Filesystem is being remounted, allow to skip some operations, like
	 * defrag
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/* Track if log replay has failed. */
	BTRFS_FS_STATE_LOG_REPLAY_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	/* Checksum errors are ignored. */
	BTRFS_FS_STATE_NO_DATA_CSUMS,
	BTRFS_FS_STATE_SKIP_META_CSUMS,

	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	/* No more delayed iputs can be queued. */
	BTRFS_FS_STATE_NO_DELAYED_IPUT,

	/*
	 * Emergency shutdown, a step further than a transaction abort: all
	 * operations are rejected.
	 */
	BTRFS_FS_STATE_EMERGENCY_SHUTDOWN,

	BTRFS_FS_STATE_COUNT
};

enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,
	BTRFS_FS_OPEN,
	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,
	BTRFS_FS_BTREE_ERR,
	BTRFS_FS_LOG1_ERR,
	BTRFS_FS_LOG2_ERR,
	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started; it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues.
	 */
	BTRFS_FS_CSUM_IMPL_FAST,

	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to clean up space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

	/* This is set when active zone tracking is needed. */
	BTRFS_FS_ACTIVE_ZONE_TRACKING,

	/*
	 * Indicate if we have some features changed, this is mostly for
	 * cleaner thread to update the sysfs interface.
	 */
	BTRFS_FS_FEATURE_CHANGED,

	/*
	 * Indicate that we have found a tree block which is only aligned to
	 * sectorsize, but not to nodesize. This should be rare nowadays.
	 */
	BTRFS_FS_UNALIGNED_TREE_BLOCK,

#if BITS_PER_LONG == 32
	/* Indicate if we have error/warn message printed on 32bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};
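/*
 * Both enums above are bit numbers: the first indexes btrfs_fs_info::fs_state,
 * the second btrfs_fs_info::flags, and both are used with the atomic bitops.
 * A minimal sketch (illustrative only):
 *
 *	if (test_bit(BTRFS_FS_OPEN, &fs_info->flags) &&
 *	    !test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state))
 *		set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
 */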
/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM = (1ULL << 0),
	BTRFS_MOUNT_NODATACOW = (1ULL << 1),
	BTRFS_MOUNT_NOBARRIER = (1ULL << 2),
	BTRFS_MOUNT_SSD = (1ULL << 3),
	BTRFS_MOUNT_DEGRADED = (1ULL << 4),
	BTRFS_MOUNT_COMPRESS = (1ULL << 5),
	BTRFS_MOUNT_NOTREELOG = (1ULL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT = (1ULL << 7),
	BTRFS_MOUNT_SSD_SPREAD = (1ULL << 8),
	BTRFS_MOUNT_NOSSD = (1ULL << 9),
	BTRFS_MOUNT_DISCARD_SYNC = (1ULL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS = (1ULL << 11),
	BTRFS_MOUNT_SPACE_CACHE = (1ULL << 12),
	BTRFS_MOUNT_CLEAR_CACHE = (1ULL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1ULL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG = (1ULL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG = (1ULL << 16),
	BTRFS_MOUNT_USEBACKUPROOT = (1ULL << 17),
	BTRFS_MOUNT_SKIP_BALANCE = (1ULL << 18),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1ULL << 19),
	BTRFS_MOUNT_RESCAN_UUID_TREE = (1ULL << 20),
	BTRFS_MOUNT_FRAGMENT_DATA = (1ULL << 21),
	BTRFS_MOUNT_FRAGMENT_METADATA = (1ULL << 22),
	BTRFS_MOUNT_FREE_SPACE_TREE = (1ULL << 23),
	BTRFS_MOUNT_NOLOGREPLAY = (1ULL << 24),
	BTRFS_MOUNT_REF_VERIFY = (1ULL << 25),
	BTRFS_MOUNT_DISCARD_ASYNC = (1ULL << 26),
	BTRFS_MOUNT_IGNOREBADROOTS = (1ULL << 27),
	BTRFS_MOUNT_IGNOREDATACSUMS = (1ULL << 28),
	BTRFS_MOUNT_NODISCARD = (1ULL << 29),
	BTRFS_MOUNT_NOSPACECACHE = (1ULL << 30),
	BTRFS_MOUNT_IGNOREMETACSUMS = (1ULL << 31),
	BTRFS_MOUNT_IGNORESUPERFLAGS = (1ULL << 32),
	BTRFS_MOUNT_REF_TRACKER = (1ULL << 33),
};

/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below then we will fail to mount.
 */
#define BTRFS_FEATURE_COMPAT_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE |	\
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
	 BTRFS_FEATURE_COMPAT_RO_VERITY |		\
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE		\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD |		\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES |		\
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID |		\
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 |		\
	 BTRFS_FEATURE_INCOMPAT_ZONED |			\
	 BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)

#ifdef CONFIG_BTRFS_EXPERIMENTAL
/*
 * Features under development, like extent tree v2, are enabled only under
 * CONFIG_BTRFS_EXPERIMENTAL.
 */
#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE |		\
	 BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE |	\
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)

#else

#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)

#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET		\
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR	0ULL
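/*
 * Sketch of the mount-time gate the masks above imply (illustrative only; the
 * real check lives in the superblock validation code, and 'disk_super' is a
 * hypothetical local):
 *
 *	u64 incompat = btrfs_super_incompat_flags(disk_super);
 *
 *	if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP)
 *		return -EINVAL;	// unknown incompat bit, refuse to mount
 */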
#define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
#define BTRFS_WARNING_COMMIT_INTERVAL	(300)
#define BTRFS_DEFAULT_MAX_INLINE	(2048)

enum btrfs_compression_type {
	BTRFS_COMPRESS_NONE  = 0,
	BTRFS_COMPRESS_ZLIB  = 1,
	BTRFS_COMPRESS_LZO   = 2,
	BTRFS_COMPRESS_ZSTD  = 3,
	BTRFS_NR_COMPRESS_TYPES = 4,

	BTRFS_DEFRAG_DONT_COMPRESS,
};

struct btrfs_dev_replace {
	/* See #define above */
	u64 replace_state;
	/* Seconds since 1-Jan-1970 */
	time64_t time_started;
	/* Seconds since 1-Jan-1970 */
	time64_t time_stopped;
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	/* See #define above */
	u64 cont_reading_from_srcdev_mode;

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;

	struct task_struct *replace_task;
};

/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};

/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_UNUSED	0
#define BTRFS_DISCARD_INDEX_START	1

struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};

/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};
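/*
 * Only one exclusive operation may run at a time. A minimal usage sketch of
 * the helpers declared later in this header (illustrative only; the real
 * callers report a more specific error than -EBUSY):
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE))
 *		return -EBUSY;	// another exclusive op is running
 *	// ... do the resize ...
 *	btrfs_exclop_finish(fs_info);
 */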
/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
	/* Start of the last critical section in ns. */
	u64 critical_section_start_time;
};
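/*
 * Derived metric sketch (illustrative only): the mean commit duration in ns
 * follows from the counters above; guard against a fresh filesystem where
 * commit_count is still zero.
 *
 *	u64 avg = stats->commit_count ?
 *		div64_u64(stats->total_commit_dur, stats->commit_count) : 0;
 */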
struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;
	struct btrfs_root *stripe_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log trees blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct rb_root_cached mapping_tree;
	rwlock_t mapping_tree_lock;

	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;
	/* Block reservation for treelog tree */
	struct btrfs_block_rsv treelog_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	/*
	 * Updated while holding the lock 'trans_lock'. Due to the life cycle
	 * of a transaction, it can be directly read while holding a
	 * transaction handle; everywhere else it must be read with
	 * btrfs_get_fs_generation(). Should always be updated using
	 * btrfs_set_fs_generation().
	 */
	u64 generation;
	/*
	 * Always use btrfs_get_last_trans_committed() and
	 * btrfs_set_last_trans_committed() to read and update this field.
	 */
	u64 last_trans_committed;
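	/*
	 * Accessor sketch (illustrative only): outside of a transaction
	 * context the generation must be read through the helper defined
	 * later in this header, never directly.
	 *
	 *	u64 gen = btrfs_get_fs_generation(fs_info);
	 */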
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;

	/*
	 * This is updated to the current trans every time a full commit is
	 * required instead of the faster short fsync log commits
	 */
	u64 last_trans_log_full_commit;
	unsigned long long mount_opt;

	/* Compress related structures. */
	void *compr_wsm[BTRFS_NR_COMPRESS_TYPES];

	int compress_type;
	int compress_level;
	u32 commit_interval;
	/*
	 * This is a suggested value only; the read side is safe even if it
	 * sees a stale value, because we will write out the data into a
	 * regular extent. The write side (mount/remount) is under the
	 * ->s_umount lock, so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear these flags, we don't need to take the lock
	 * on the read side.
	 *
	 * We also don't need the lock when we mount the fs, because there is
	 * no other task which can update the flags then.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two ios are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock, it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;

	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads. This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write. It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* Track the number of blocks (sectors) read by the filesystem. */
	struct percpu_counter stats_read_blocks;

	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;
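	/*
	 * Batching sketch (illustrative only): btree writeback is kicked once
	 * the counter above crosses BTRFS_DIRTY_METADATA_THRESH, defined near
	 * the top of this file.
	 *
	 *	if (percpu_counter_compare(&fs_info->dirty_metadata_bytes,
	 *				   BTRFS_DIRTY_METADATA_THRESH) > 0)
	 *		// ... kick writeback of the btree inode ...
	 */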
	struct percpu_counter evictable_extent_maps;
	u64 em_shrinker_last_root;
	u64 em_shrinker_last_ino;
	atomic64_t em_shrinker_nr_to_scan;
	struct work_struct em_shrinker_work;

	/* Protected by 'trans_lock'. */
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* Balance state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
	 * running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;

	struct btrfs_discard_ctl discard_ctl;

	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * Protect user change for quota operations. If a transaction is needed,
	 * it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;

	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;
	u64 qgroup_enable_gen;

	/*
	 * If this is not 0, then it indicates a serious filesystem error has
	 * happened and it contains that error (negative errno value).
	 */
	int fs_error;

	/* Filesystem state */
	unsigned long fs_state;
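	/*
	 * Read sketch (illustrative only): callers check for a previous fatal
	 * error through the BTRFS_FS_ERROR() macro defined later in this file.
	 *
	 *	int ret = BTRFS_FS_ERROR(fs_info);
	 *
	 *	if (ret)
	 *		return ret;	// ret is a negative errno
	 */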
	struct btrfs_delayed_root *delayed_root;

	/* Entries are eb->start >> nodesize_bits */
	struct xarray buffer_tree;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	/* Protected by unused_bgs_lock. */
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	/* Protects the lists unused_bgs and reclaim_bgs. */
	spinlock_t unused_bgs_lock;
	/* Protected by unused_bgs_lock. */
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 nodesize_bits;
	u32 sectorsize;
	/* ilog2 of sectorsize, used to avoid 64bit division */
	u32 sectorsize_bits;
	u32 block_min_order;
	u32 block_max_order;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;
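	/*
	 * The *_bits fields cache ilog2() of the respective block sizes so
	 * hot paths can replace 64-bit divisions with shifts. Equivalence
	 * sketch (illustrative only):
	 *
	 *	u64 blocks = len >> fs_info->sectorsize_bits; // len / sectorsize
	 */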
	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem; on zoned filesystems it depends on the device
	 * constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size is > 0 when in ZONED mode, otherwise it's 0, which also
	 * serves as the check for whether the mode is enabled.
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;

	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	struct btrfs_block_group *active_meta_bg;
	struct btrfs_block_group *active_system_bg;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_DEBUG
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;

	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};

#define folio_to_inode(_folio)	(BTRFS_I(_Generic((_folio),		\
		struct folio *: (_folio))->mapping->host))

#define folio_to_fs_info(_folio) (folio_to_inode(_folio)->root->fs_info)

#define inode_to_fs_info(_inode) (BTRFS_I(_Generic((_inode),		\
		struct inode *: (_inode)))->root->fs_info)

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

/* Return the minimal folio size of the fs. */
static inline unsigned int btrfs_min_folio_size(struct btrfs_fs_info *fs_info)
{
	return 1U << (PAGE_SHIFT + fs_info->block_min_order);
}

static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->generation);
}

static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->generation, gen);
}

static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_trans_committed);
}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->last_trans_committed, gen);
}

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}

/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
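/*
 * Example (illustrative only): reserving metadata space for one unlink,
 * using the unit count defined near the top of this file.
 *
 *	u64 bytes = btrfs_calc_insert_metadata_size(fs_info,
 *						BTRFS_UNLINK_METADATA_UNITS);
 */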
/*
 * Doing a truncate or a modification won't result in new nodes or leaves,
 * just what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}

#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) ((bytes) >> (fs_info)->sectorsize_bits)

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
}

/*
 * Count how many fs_info->max_extent_size units cover @size.
 */
static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}

static inline unsigned int btrfs_blocks_per_folio(const struct btrfs_fs_info *fs_info,
						  const struct folio *folio)
{
	return folio_size(folio) >> fs_info->sectorsize_bits;
}

bool __attribute_const__ btrfs_supported_blocksize(u32 blocksize);
bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);

int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

static inline bool btrfs_is_empty_uuid(const u8 *uuid)
{
	return uuid_is_null((const uuid_t *)uuid);
}

/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags) \
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags) \
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
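/*
 * Usage sketch (illustrative only): the wrappers above expand the short
 * feature name, e.g. the first use of zstd compression flips the
 * corresponding incompat bit so that older kernels refuse to mount the fs.
 *
 *	if (!btrfs_fs_incompat(fs_info, COMPRESS_ZSTD))
 *		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
 */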
#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

#define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt)	((fs_info)->mount_opt & \
					 BTRFS_MOUNT_##opt)

static inline int btrfs_fs_closing(const struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleeping. This function is used to check the status of
 * the fs.
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(const struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
	       btrfs_fs_closing(fs_info);
}

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}

#define BTRFS_FS_ERROR(fs_info)	(READ_ONCE((fs_info)->fs_error))

#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info)			\
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,	\
			   &(fs_info)->fs_state)))

static inline bool btrfs_is_shutdown(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state);
}

static inline void btrfs_force_shutdown(struct btrfs_fs_info *fs_info)
{
	/*
	 * Here we do not want to use handle_fs_error(), which would mark the
	 * fs read-only.
	 * Some call sites, like the shutdown ioctl, may mark the fs shutdown
	 * while it is frozen, and the thaw path handles RO and RW filesystems
	 * differently.
	 *
	 * So here we only record the error without flipping the fs to RO.
	 */
	WRITE_ONCE(fs_info->fs_error, -EIO);
	if (!test_and_set_bit(BTRFS_FS_STATE_EMERGENCY_SHUTDOWN, &fs_info->fs_state))
		btrfs_crit(fs_info, "emergency shutdown");
}

/*
 * We use the folio flag owner_2 to indicate there is an ordered extent with
 * unfinished IO.
 */
#define folio_test_ordered(folio)	folio_test_owner_2(folio)
#define folio_set_ordered(folio)	folio_set_owner_2(folio)
#define folio_clear_ordered(folio)	folio_clear_owner_2(folio)

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state));
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline bool btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return false;
}
#endif

#endif