/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __BTRFS_CTREE__
#define __BTRFS_CTREE__

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include <linux/pagemap.h>
#include <linux/btrfs.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"

struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
#define STATIC noinline
#else
#define STATIC static noinline
#endif

#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */

#define BTRFS_MAX_MIRRORS 3

#define BTRFS_MAX_LEVEL 8

#define BTRFS_COMPAT_EXTENT_TREE_V0

/* holds pointers to all of the tree roots */
#define BTRFS_ROOT_TREE_OBJECTID 1ULL

/* stores information about which extents are in use, and reference counts */
#define BTRFS_EXTENT_TREE_OBJECTID 2ULL

/*
 * chunk tree stores translations from logical -> physical block numbering
 * the super block points to the chunk tree
 */
#define BTRFS_CHUNK_TREE_OBJECTID 3ULL

/*
 * stores information about which areas of a given device are in use.
 * one per device.  The tree of tree roots points to the device tree
 */
#define BTRFS_DEV_TREE_OBJECTID 4ULL

/* one per subvolume, storing files and directories */
#define BTRFS_FS_TREE_OBJECTID 5ULL

/* directory objectid inside the root tree */
#define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL

/* holds checksums of all the data extents */
#define BTRFS_CSUM_TREE_OBJECTID 7ULL

/* holds quota configuration and tracking */
#define BTRFS_QUOTA_TREE_OBJECTID 8ULL

/* for storing items that use the BTRFS_UUID_KEY* types */
#define BTRFS_UUID_TREE_OBJECTID 9ULL

/* for storing balance parameters in the root tree */
#define BTRFS_BALANCE_OBJECTID -4ULL

/* orphan objectid for tracking unlinked/truncated files */
#define BTRFS_ORPHAN_OBJECTID -5ULL

/* does write ahead logging to speed up fsyncs */
#define BTRFS_TREE_LOG_OBJECTID -6ULL
#define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL

/* for space balancing */
#define BTRFS_TREE_RELOC_OBJECTID -8ULL
#define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL

/*
 * extent checksums all have this objectid
 * this allows them to share the logging tree
 * for fsyncs
 */
#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL

/* For storing free space cache */
#define BTRFS_FREE_SPACE_OBJECTID -11ULL

/*
 * The inode number assigned to the special inode for storing
 * free ino cache
 */
#define BTRFS_FREE_INO_OBJECTID -12ULL

/* dummy objectid represents multiple objectids */
#define BTRFS_MULTIPLE_OBJECTIDS -255ULL

/*
 * All files have objectids in this range.
 */
#define BTRFS_FIRST_FREE_OBJECTID 256ULL
#define BTRFS_LAST_FREE_OBJECTID -256ULL
#define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL


/*
 * the device items go into the chunk tree.  The key is in the form
 * [ 1 BTRFS_DEV_ITEM_KEY device_id ]
 */
#define BTRFS_DEV_ITEMS_OBJECTID 1ULL

#define BTRFS_BTREE_INODE_OBJECTID 1

#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2

#define BTRFS_DEV_REPLACE_DEVID 0ULL

/*
 * the max metadata block size.  This limit is somewhat artificial,
 * but the memmove costs go through the roof for larger blocks.
 */
#define BTRFS_MAX_METADATA_BLOCKSIZE 65536

/*
 * we can actually store much bigger names, but let's not confuse the rest
 * of linux
 */
#define BTRFS_NAME_LEN 255

/*
 * Theoretical limit is larger, but we keep this down to a sane
 * value.  That should limit greatly the possibility of collisions on
 * inode ref items.
 */
#define BTRFS_LINK_MAX 65535U

/* 32 bytes in various csum fields */
#define BTRFS_CSUM_SIZE 32

/* csum types */
#define BTRFS_CSUM_TYPE_CRC32	0

static int btrfs_csum_sizes[] = { 4, 0 };

/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0

/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
#define REQ_GET_READ_MIRRORS	(1 << 30)

#define BTRFS_FT_UNKNOWN	0
#define BTRFS_FT_REG_FILE	1
#define BTRFS_FT_DIR		2
#define BTRFS_FT_CHRDEV		3
#define BTRFS_FT_BLKDEV		4
#define BTRFS_FT_FIFO		5
#define BTRFS_FT_SOCK		6
#define BTRFS_FT_SYMLINK	7
#define BTRFS_FT_XATTR		8
#define BTRFS_FT_MAX		9

/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))

#define BTRFS_DIRTY_METADATA_THRESH	(32 * 1024 * 1024)

#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)

/*
 * The key defines the order in the tree, and so it also defines (optimal)
 * block layout.
 *
 * objectid corresponds to the inode number.
 *
 * type tells us things about the object, and is a kind of stream selector.
 * so for a given inode, keys with type of 1 might refer to the inode data,
 * type of 2 may point to file data in the btree and type == 3 may point to
 * extents.
 *
 * offset is the starting byte offset for this key in the stream.
 *
 * btrfs_disk_key is in disk byte order.  struct btrfs_key is always
 * in cpu native order.  Otherwise they are identical and their sizes
 * should be the same (ie both packed)
 */
struct btrfs_disk_key {
	__le64 objectid;
	u8 type;
	__le64 offset;
} __attribute__ ((__packed__));

struct btrfs_key {
	u64 objectid;
	u8 type;
	u64 offset;
} __attribute__ ((__packed__));
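
/*
 * Illustrative sketch, not part of the original header: btrfs_disk_key is
 * the little-endian on-disk form and btrfs_key the CPU-order in-memory form,
 * so converting between them is just a byte-order fixup of the two 64-bit
 * fields.  btrfs has its own conversion helpers; this standalone version
 * only shows the relationship between the two structs.
 */
static inline void btrfs_example_disk_key_to_cpu(struct btrfs_key *cpu,
						 const struct btrfs_disk_key *disk)
{
	cpu->objectid = le64_to_cpu(disk->objectid);
	cpu->type = disk->type;
	cpu->offset = le64_to_cpu(disk->offset);
}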
struct btrfs_mapping_tree {
	struct extent_map_tree map_tree;
};

struct btrfs_dev_item {
	/* the internal btrfs device id */
	__le64 devid;

	/* size of the device */
	__le64 total_bytes;

	/* bytes used */
	__le64 bytes_used;

	/* optimal io alignment for this device */
	__le32 io_align;

	/* optimal io width for this device */
	__le32 io_width;

	/* minimal io size for this device */
	__le32 sector_size;

	/* type and info about this device */
	__le64 type;

	/* expected generation for this device */
	__le64 generation;

	/*
	 * starting byte of this partition on the device,
	 * to allow for stripe alignment in the future
	 */
	__le64 start_offset;

	/* grouping information for allocation decisions */
	__le32 dev_group;

	/* seek speed 0-100 where 100 is fastest */
	u8 seek_speed;

	/* bandwidth 0-100 where 100 is fastest */
	u8 bandwidth;

	/* btrfs generated uuid for this device */
	u8 uuid[BTRFS_UUID_SIZE];

	/* uuid of FS who owns this device */
	u8 fsid[BTRFS_UUID_SIZE];
} __attribute__ ((__packed__));

struct btrfs_stripe {
	__le64 devid;
	__le64 offset;
	u8 dev_uuid[BTRFS_UUID_SIZE];
} __attribute__ ((__packed__));

struct btrfs_chunk {
	/* size of this chunk in bytes */
	__le64 length;

	/* objectid of the root referencing this chunk */
	__le64 owner;

	__le64 stripe_len;
	__le64 type;

	/* optimal io alignment for this chunk */
	__le32 io_align;

	/* optimal io width for this chunk */
	__le32 io_width;

	/* minimal io size for this chunk */
	__le32 sector_size;

	/* 2^16 stripes is quite a lot, a second limit is the size of a single
	 * item in the btree
	 */
	__le16 num_stripes;

	/* sub stripes only matter for raid10 */
	__le16 sub_stripes;
	struct btrfs_stripe stripe;
	/* additional stripes go here */
} __attribute__ ((__packed__));

#define BTRFS_FREE_SPACE_EXTENT	1
#define BTRFS_FREE_SPACE_BITMAP	2

struct btrfs_free_space_entry {
	__le64 offset;
	__le64 bytes;
	u8 type;
} __attribute__ ((__packed__));

struct btrfs_free_space_header {
	struct btrfs_disk_key location;
	__le64 generation;
	__le64 num_entries;
	__le64 num_bitmaps;
} __attribute__ ((__packed__));

static inline unsigned long btrfs_chunk_item_size(int num_stripes)
{
	BUG_ON(num_stripes == 0);
	return sizeof(struct btrfs_chunk) +
		sizeof(struct btrfs_stripe) * (num_stripes - 1);
}

#define BTRFS_HEADER_FLAG_WRITTEN	(1ULL << 0)
#define BTRFS_HEADER_FLAG_RELOC		(1ULL << 1)

/*
 * File system states
 */
#define BTRFS_FS_STATE_ERROR		0
#define BTRFS_FS_STATE_REMOUNTING	1
#define BTRFS_FS_STATE_TRANS_ABORTED	2
#define BTRFS_FS_STATE_DEV_REPLACING	3

/* Super block flags */
/* Errors detected */
#define BTRFS_SUPER_FLAG_ERROR		(1ULL << 2)

#define BTRFS_SUPER_FLAG_SEEDING	(1ULL << 32)
#define BTRFS_SUPER_FLAG_METADUMP	(1ULL << 33)

#define BTRFS_BACKREF_REV_MAX		256
#define BTRFS_BACKREF_REV_SHIFT		56
#define BTRFS_BACKREF_REV_MASK		(((u64)BTRFS_BACKREF_REV_MAX - 1) << \
					 BTRFS_BACKREF_REV_SHIFT)

#define BTRFS_OLD_BACKREF_REV		0
#define BTRFS_MIXED_BACKREF_REV		1

/*
 * every tree block (leaf or node) starts with this header.
 */
struct btrfs_header {
	/* these first four must match the super block */
	u8 csum[BTRFS_CSUM_SIZE];
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	__le64 bytenr; /* which block this node is supposed to live in */
	__le64 flags;

	/* allowed to be different from the super from here on down */
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	__le64 generation;
	__le64 owner;
	__le32 nritems;
	u8 level;
} __attribute__ ((__packed__));

#define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \
				      sizeof(struct btrfs_header)) / \
				     sizeof(struct btrfs_key_ptr))
#define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header))
#define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->nodesize))
#define BTRFS_FILE_EXTENT_INLINE_DATA_START		\
		(offsetof(struct btrfs_file_extent_item, disk_bytenr))
#define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
					sizeof(struct btrfs_item) - \
					BTRFS_FILE_EXTENT_INLINE_DATA_START)
#define BTRFS_MAX_XATTR_SIZE(r)	(BTRFS_LEAF_DATA_SIZE(r) - \
				 sizeof(struct btrfs_item) - \
				 sizeof(struct btrfs_dir_item))


/*
 * this is a very generous portion of the super block, giving us
 * room to translate 14 chunks with 3 stripes each.
 */
#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048
#define BTRFS_LABEL_SIZE 256
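
/*
 * Illustrative sketch, not part of the original header: the item + data area
 * of a tree block is whatever is left after the header, which is what the
 * __BTRFS_LEAF_DATA_SIZE() macro above computes.  For a 16KiB nodesize that
 * is 16384 - sizeof(struct btrfs_header) bytes.
 */
static inline u32 btrfs_example_leaf_data_size(u32 nodesize)
{
	return __BTRFS_LEAF_DATA_SIZE(nodesize);
}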
/*
 * just in case we somehow lose the roots and are not able to mount,
 * we store an array of the roots from previous transactions
 * in the super.
 */
#define BTRFS_NUM_BACKUP_ROOTS 4
struct btrfs_root_backup {
	__le64 tree_root;
	__le64 tree_root_gen;

	__le64 chunk_root;
	__le64 chunk_root_gen;

	__le64 extent_root;
	__le64 extent_root_gen;

	__le64 fs_root;
	__le64 fs_root_gen;

	__le64 dev_root;
	__le64 dev_root_gen;

	__le64 csum_root;
	__le64 csum_root_gen;

	__le64 total_bytes;
	__le64 bytes_used;
	__le64 num_devices;
	/* future */
	__le64 unused_64[4];

	u8 tree_root_level;
	u8 chunk_root_level;
	u8 extent_root_level;
	u8 fs_root_level;
	u8 dev_root_level;
	u8 csum_root_level;
	/* future and to align */
	u8 unused_8[10];
} __attribute__ ((__packed__));

/*
 * the super block basically lists the main trees of the FS
 * it currently lacks any block count etc etc
 */
struct btrfs_super_block {
	u8 csum[BTRFS_CSUM_SIZE];
	/* the first 4 fields must match struct btrfs_header */
	u8 fsid[BTRFS_FSID_SIZE];    /* FS specific uuid */
	__le64 bytenr; /* this block number */
	__le64 flags;

	/* allowed to be different from the btrfs_header from here on down */
	__le64 magic;
	__le64 generation;
	__le64 root;
	__le64 chunk_root;
	__le64 log_root;

	/* this will help find the new super based on the log root */
	__le64 log_root_transid;
	__le64 total_bytes;
	__le64 bytes_used;
	__le64 root_dir_objectid;
	__le64 num_devices;
	__le32 sectorsize;
	__le32 nodesize;
	__le32 __unused_leafsize;
	__le32 stripesize;
	__le32 sys_chunk_array_size;
	__le64 chunk_root_generation;
	__le64 compat_flags;
	__le64 compat_ro_flags;
	__le64 incompat_flags;
	__le16 csum_type;
	u8 root_level;
	u8 chunk_root_level;
	u8 log_root_level;
	struct btrfs_dev_item dev_item;

	char label[BTRFS_LABEL_SIZE];

	__le64 cache_generation;
	__le64 uuid_tree_generation;

	/* future expansion */
	__le64 reserved[30];
	u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
	struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
} __attribute__ ((__packed__));

/*
 * Compat flags that we support.  If any incompat flags are set other than the
 * ones specified below then we will fail to mount
 */
#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF	(1ULL << 0)
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS	(1ULL << 2)
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO	(1ULL << 3)
/*
 * some patches floated around with a second compression method
 * let's save that incompat here for when they do get in
 * Note we don't actually support it, we're just reserving the
 * number
 */
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2	(1ULL << 4)
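
/*
 * Illustrative sketch, not part of the original header: the feature bits
 * above are tested against the little-endian incompat_flags field of the
 * super block, for example to see whether lzo compression has ever been
 * used on a filesystem.
 */
static inline bool btrfs_example_super_has_lzo(const struct btrfs_super_block *sb)
{
	return (le64_to_cpu(sb->incompat_flags) &
		BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO) != 0;
}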
/*
 * older kernels tried to do bigger metadata blocks, but the
 * code was pretty buggy.  Let's not let them try anymore.
 */
#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA	(1ULL << 5)

#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF	(1ULL << 6)
#define BTRFS_FEATURE_INCOMPAT_RAID56		(1ULL << 7)
#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA	(1ULL << 8)
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES		(1ULL << 9)

#define BTRFS_FEATURE_COMPAT_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES)

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET			\
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR		0ULL

/*
 * A leaf is full of items. offset and size tell us where to find
 * the item in the leaf (relative to the start of the data area)
 */
struct btrfs_item {
	struct btrfs_disk_key key;
	__le32 offset;
	__le32 size;
} __attribute__ ((__packed__));

/*
 * leaves have an item area and a data area:
 * [item0, item1....itemN] [free space] [dataN...data1, data0]
 *
 * The data is separate from the items to get the keys closer together
 * during searches.
 */
struct btrfs_leaf {
	struct btrfs_header header;
	struct btrfs_item items[];
} __attribute__ ((__packed__));

/*
 * all non-leaf blocks are nodes, they hold only keys and pointers to
 * other blocks
 */
struct btrfs_key_ptr {
	struct btrfs_disk_key key;
	__le64 blockptr;
	__le64 generation;
} __attribute__ ((__packed__));

struct btrfs_node {
	struct btrfs_header header;
	struct btrfs_key_ptr ptrs[];
} __attribute__ ((__packed__));
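
/*
 * Illustrative sketch, not part of the original header: on disk, the i-th
 * child of a node is described by ptrs[i]; decoding its little-endian
 * fields yields the logical byte number and expected generation of the
 * child block.  (The kernel normally reads these through extent_buffer
 * accessors rather than by mapping the struct directly.)
 */
static inline u64 btrfs_example_node_blockptr(const struct btrfs_node *node,
					      int nr)
{
	return le64_to_cpu(node->ptrs[nr].blockptr);
}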
/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	int locks[BTRFS_MAX_LEVEL];
	int reada;
	/* keep some upper locks as we walk down */
	int lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int leave_spinning:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
};

/*
 * items in the extent btree are used to record the objectid of the
 * owner of the block and the number of references
 */

struct btrfs_extent_item {
	__le64 refs;
	__le64 generation;
	__le64 flags;
} __attribute__ ((__packed__));

struct btrfs_extent_item_v0 {
	__le32 refs;
} __attribute__ ((__packed__));

#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \
					sizeof(struct btrfs_item))

#define BTRFS_EXTENT_FLAG_DATA		(1ULL << 0)
#define BTRFS_EXTENT_FLAG_TREE_BLOCK	(1ULL << 1)

/* following flags only apply to tree blocks */

/* use full backrefs for extent pointers in the block */
#define BTRFS_BLOCK_FLAG_FULL_BACKREF	(1ULL << 8)

/*
 * this flag is only used internally by scrub and may be changed at any time
 * it is only declared here to avoid collisions
 */
#define BTRFS_EXTENT_FLAG_SUPER		(1ULL << 48)

struct btrfs_tree_block_info {
	struct btrfs_disk_key key;
	u8 level;
} __attribute__ ((__packed__));

struct btrfs_extent_data_ref {
	__le64 root;
	__le64 objectid;
	__le64 offset;
	__le32 count;
} __attribute__ ((__packed__));

struct btrfs_shared_data_ref {
	__le32 count;
} __attribute__ ((__packed__));

struct btrfs_extent_inline_ref {
	u8 type;
	__le64 offset;
} __attribute__ ((__packed__));

/* old style backrefs item */
struct btrfs_extent_ref_v0 {
	__le64 root;
	__le64 generation;
	__le64 objectid;
	__le32 count;
} __attribute__ ((__packed__));
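
/*
 * Illustrative sketch, not part of the original header: the flags of a
 * (CPU-order) extent item say whether the extent backs file data or a tree
 * block, which in turn decides how the inline backrefs that follow the item
 * are interpreted.
 */
static inline bool btrfs_example_extent_is_data(u64 extent_flags)
{
	return (extent_flags & BTRFS_EXTENT_FLAG_DATA) != 0;
}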
/* dev extents record free space on individual devices.  The owner
 * field points back to the chunk allocation mapping tree that allocated
 * the extent.  The chunk tree uuid field is a way to double check the owner
 */
struct btrfs_dev_extent {
	__le64 chunk_tree;
	__le64 chunk_objectid;
	__le64 chunk_offset;
	__le64 length;
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
} __attribute__ ((__packed__));

struct btrfs_inode_ref {
	__le64 index;
	__le16 name_len;
	/* name goes here */
} __attribute__ ((__packed__));

struct btrfs_inode_extref {
	__le64 parent_objectid;
	__le64 index;
	__le16 name_len;
	__u8   name[0];
	/* name goes here */
} __attribute__ ((__packed__));

struct btrfs_timespec {
	__le64 sec;
	__le32 nsec;
} __attribute__ ((__packed__));

enum btrfs_compression_type {
	BTRFS_COMPRESS_NONE  = 0,
	BTRFS_COMPRESS_ZLIB  = 1,
	BTRFS_COMPRESS_LZO   = 2,
	BTRFS_COMPRESS_TYPES = 2,
	BTRFS_COMPRESS_LAST  = 3,
};

struct btrfs_inode_item {
	/* nfs style generation number */
	__le64 generation;
	/* transid that last touched this inode */
	__le64 transid;
	__le64 size;
	__le64 nbytes;
	__le64 block_group;
	__le32 nlink;
	__le32 uid;
	__le32 gid;
	__le32 mode;
	__le64 rdev;
	__le64 flags;

	/* modification sequence number for NFS */
	__le64 sequence;

	/*
	 * a little future expansion, for more than this we can
	 * just grow the inode item and version it
	 */
	__le64 reserved[4];
	struct btrfs_timespec atime;
	struct btrfs_timespec ctime;
	struct btrfs_timespec mtime;
	struct btrfs_timespec otime;
} __attribute__ ((__packed__));

struct btrfs_dir_log_item {
	__le64 end;
} __attribute__ ((__packed__));

struct btrfs_dir_item {
	struct btrfs_disk_key location;
	__le64 transid;
	__le16 data_len;
	__le16 name_len;
	u8 type;
} __attribute__ ((__packed__));

#define BTRFS_ROOT_SUBVOL_RDONLY	(1ULL << 0)

/*
 * Internal in-memory flag that a subvolume has been marked for deletion but
 * still visible as a directory
 */
#define BTRFS_ROOT_SUBVOL_DEAD		(1ULL << 48)
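
/*
 * Illustrative sketch, not part of the original header: the name (and, for
 * xattr items, the value) is stored immediately after the fixed part of the
 * dir item, so the space one entry takes in the leaf data area is the struct
 * plus both variable-length payloads.
 */
static inline u32 btrfs_example_dir_item_size(const struct btrfs_dir_item *di)
{
	return sizeof(*di) + le16_to_cpu(di->name_len) +
	       le16_to_cpu(di->data_len);
}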
struct btrfs_root_item {
	struct btrfs_inode_item inode;
	__le64 generation;
	__le64 root_dirid;
	__le64 bytenr;
	__le64 byte_limit;
	__le64 bytes_used;
	__le64 last_snapshot;
	__le64 flags;
	__le32 refs;
	struct btrfs_disk_key drop_progress;
	u8 drop_level;
	u8 level;

	/*
	 * The following fields appear after subvol_uuids+subvol_times
	 * were introduced.
	 */

	/*
	 * This generation number is used to test if the new fields are valid
	 * and up to date while reading the root item. Every time the root item
	 * is written out, the "generation" field is copied into this field. If
	 * anyone ever mounted the fs with an older kernel, we will have
	 * mismatching generation values here and thus must invalidate the
	 * new fields. See btrfs_update_root and btrfs_find_last_root for
	 * details.
	 * the offset of generation_v2 is also used as the start for the memset
	 * when invalidating the fields.
	 */
	__le64 generation_v2;
	u8 uuid[BTRFS_UUID_SIZE];
	u8 parent_uuid[BTRFS_UUID_SIZE];
	u8 received_uuid[BTRFS_UUID_SIZE];
	__le64 ctransid; /* updated when an inode changes */
	__le64 otransid; /* trans when created */
	__le64 stransid; /* trans when sent. non-zero for received subvol */
	__le64 rtransid; /* trans when received. non-zero for received subvol */
	struct btrfs_timespec ctime;
	struct btrfs_timespec otime;
	struct btrfs_timespec stime;
	struct btrfs_timespec rtime;
	__le64 reserved[8]; /* for future */
} __attribute__ ((__packed__));

/*
 * this is used for both forward and backward root refs
 */
struct btrfs_root_ref {
	__le64 dirid;
	__le64 sequence;
	__le16 name_len;
} __attribute__ ((__packed__));

struct btrfs_disk_balance_args {
	/*
	 * profiles to operate on, single is denoted by
	 * BTRFS_AVAIL_ALLOC_BIT_SINGLE
	 */
	__le64 profiles;

	/* usage filter */
	__le64 usage;

	/* devid filter */
	__le64 devid;

	/* devid subset filter [pstart..pend) */
	__le64 pstart;
	__le64 pend;

	/* btrfs virtual address space subset filter [vstart..vend) */
	__le64 vstart;
	__le64 vend;

	/*
	 * profile to convert to, single is denoted by
	 * BTRFS_AVAIL_ALLOC_BIT_SINGLE
	 */
	__le64 target;

	/* BTRFS_BALANCE_ARGS_* */
	__le64 flags;

	/* BTRFS_BALANCE_ARGS_LIMIT value */
	__le64 limit;

	__le64 unused[7];
} __attribute__ ((__packed__));

/*
 * store balance parameters to disk so that balance can be properly
 * resumed after crash or unmount
 */
struct btrfs_balance_item {
	/* BTRFS_BALANCE_* */
	__le64 flags;

	struct btrfs_disk_balance_args data;
	struct btrfs_disk_balance_args meta;
	struct btrfs_disk_balance_args sys;

	__le64 unused[4];
} __attribute__ ((__packed__));

#define BTRFS_FILE_EXTENT_INLINE 0
#define BTRFS_FILE_EXTENT_REG 1
#define BTRFS_FILE_EXTENT_PREALLOC 2
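
/*
 * Illustrative sketch, not part of the original header: following the
 * comment in struct btrfs_root_item above, the uuid, ctransid/otransid and
 * time fields are only meaningful when generation_v2 matches the generation
 * that last wrote the item; a mismatch means an older kernel touched the
 * root item and the new fields must be treated as invalid.
 */
static inline bool btrfs_example_root_item_v2_valid(const struct btrfs_root_item *ri)
{
	return le64_to_cpu(ri->generation_v2) == le64_to_cpu(ri->generation);
}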
struct btrfs_file_extent_item {
	/*
	 * transaction id that created this extent
	 */
	__le64 generation;
	/*
	 * max number of bytes to hold this extent in ram
	 * when we split a compressed extent we can't know how big
	 * each of the resulting pieces will be.  So, this is
	 * an upper limit on the size of the extent in ram instead of
	 * an exact limit.
	 */
	__le64 ram_bytes;

	/*
	 * 32 bits for the various ways we might encode the data,
	 * including compression and encryption.  If any of these
	 * are set to something a given disk format doesn't understand
	 * it is treated like an incompat flag for reading and writing,
	 * but not for stat.
	 */
	u8 compression;
	u8 encryption;
	__le16 other_encoding; /* spare for later use */

	/* are we inline data or a real extent? */
	u8 type;

	/*
	 * disk space consumed by the extent, checksum blocks are included
	 * in these numbers
	 *
	 * At this offset in the structure, the inline extent data start.
	 */
	__le64 disk_bytenr;
	__le64 disk_num_bytes;
	/*
	 * the logical offset in file blocks (no csums)
	 * this extent record is for.  This allows a file extent to point
	 * into the middle of an existing extent on disk, sharing it
	 * between two snapshots (useful if some bytes in the middle of the
	 * extent have changed)
	 */
	__le64 offset;
	/*
	 * the logical number of file blocks (no csums included).  This
	 * always reflects the size uncompressed and without encoding.
	 */
	__le64 num_bytes;

} __attribute__ ((__packed__));

struct btrfs_csum_item {
	u8 csum;
} __attribute__ ((__packed__));

struct btrfs_dev_stats_item {
	/*
	 * grow this item struct at the end for future enhancements and keep
	 * the existing values unchanged
	 */
	__le64 values[BTRFS_DEV_STAT_VALUES_MAX];
} __attribute__ ((__packed__));

#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS	0
#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID	1
#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED	0
#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED		1
#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED		2
#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED		3
#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED		4

struct btrfs_dev_replace {
	u64 replace_state;	/* see #define above */
	u64 time_started;	/* seconds since 1-Jan-1970 */
	u64 time_stopped;	/* seconds since 1-Jan-1970 */
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	u64 cont_reading_from_srcdev_mode;	/* see #define above */

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	pid_t lock_owner;
	atomic_t nesting_level;
	struct mutex lock_finishing_cancel_unmount;
	struct mutex lock_management_lock;
	struct mutex lock;

	struct btrfs_scrub_progress scrub_progress;
};

struct btrfs_dev_replace_item {
	/*
	 * grow this item struct at the end for future enhancements and keep
	 * the existing values unchanged
	 */
	__le64 src_devid;
	__le64 cursor_left;
	__le64 cursor_right;
	__le64 cont_reading_from_srcdev_mode;

	__le64 replace_state;
	__le64 time_started;
	__le64 time_stopped;
	__le64 num_write_errors;
	__le64 num_uncorrectable_read_errors;
} __attribute__ ((__packed__));

/* different types of block groups (and chunks) */
#define BTRFS_BLOCK_GROUP_DATA		(1ULL << 0)
#define BTRFS_BLOCK_GROUP_SYSTEM	(1ULL << 1)
#define BTRFS_BLOCK_GROUP_METADATA	(1ULL << 2)
#define BTRFS_BLOCK_GROUP_RAID0		(1ULL << 3)
#define BTRFS_BLOCK_GROUP_RAID1		(1ULL << 4)
#define BTRFS_BLOCK_GROUP_DUP		(1ULL << 5)
#define BTRFS_BLOCK_GROUP_RAID10	(1ULL << 6)
#define BTRFS_BLOCK_GROUP_RAID5		(1ULL << 7)
#define BTRFS_BLOCK_GROUP_RAID6		(1ULL << 8)
#define BTRFS_BLOCK_GROUP_RESERVED	(BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
					 BTRFS_SPACE_INFO_GLOBAL_RSV)

enum btrfs_raid_types {
	BTRFS_RAID_RAID10,
	BTRFS_RAID_RAID1,
	BTRFS_RAID_DUP,
	BTRFS_RAID_RAID0,
	BTRFS_RAID_SINGLE,
	BTRFS_RAID_RAID5,
	BTRFS_RAID_RAID6,
	BTRFS_NR_RAID_TYPES
};

#define BTRFS_BLOCK_GROUP_TYPE_MASK	(BTRFS_BLOCK_GROUP_DATA |    \
					 BTRFS_BLOCK_GROUP_SYSTEM |  \
					 BTRFS_BLOCK_GROUP_METADATA)

#define BTRFS_BLOCK_GROUP_PROFILE_MASK	(BTRFS_BLOCK_GROUP_RAID0 |   \
					 BTRFS_BLOCK_GROUP_RAID1 |   \
					 BTRFS_BLOCK_GROUP_RAID5 |   \
					 BTRFS_BLOCK_GROUP_RAID6 |   \
					 BTRFS_BLOCK_GROUP_DUP |     \
					 BTRFS_BLOCK_GROUP_RAID10)
#define BTRFS_BLOCK_GROUP_RAID56_MASK	(BTRFS_BLOCK_GROUP_RAID5 |   \
					 BTRFS_BLOCK_GROUP_RAID6)
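
/*
 * Illustrative sketch, not part of the original header: the profile masks
 * above make flag tests one-liners, e.g. checking whether a block group
 * (or chunk) uses one of the parity RAID profiles.
 */
static inline bool btrfs_example_is_raid56(u64 bg_flags)
{
	return (bg_flags & BTRFS_BLOCK_GROUP_RAID56_MASK) != 0;
}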
/*
 * We need a bit for restriper to be able to tell when chunks of type
 * SINGLE are available.  This "extended" profile format is used in
 * fs_info->avail_*_alloc_bits (in-memory) and balance item fields
 * (on-disk).  The corresponding on-disk bit in chunk.type is reserved
 * to avoid remappings between two formats in the future.
 */
#define BTRFS_AVAIL_ALLOC_BIT_SINGLE	(1ULL << 48)

/*
 * A fake block group type that is used to communicate global block reserve
 * size to userspace via the SPACE_INFO ioctl.
 */
#define BTRFS_SPACE_INFO_GLOBAL_RSV	(1ULL << 49)

#define BTRFS_EXTENDED_PROFILE_MASK	(BTRFS_BLOCK_GROUP_PROFILE_MASK | \
					 BTRFS_AVAIL_ALLOC_BIT_SINGLE)

static inline u64 chunk_to_extended(u64 flags)
{
	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
		flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	return flags;
}
static inline u64 extended_to_chunk(u64 flags)
{
	return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
}

struct btrfs_block_group_item {
	__le64 used;
	__le64 chunk_objectid;
	__le64 flags;
} __attribute__ ((__packed__));

/*
 * is subvolume quota turned on?
 */
#define BTRFS_QGROUP_STATUS_FLAG_ON		(1ULL << 0)
/*
 * RESCAN is set during the initialization phase
 */
#define BTRFS_QGROUP_STATUS_FLAG_RESCAN		(1ULL << 1)
/*
 * Some qgroup entries are known to be out of date,
 * either because the configuration has changed in a way that
 * makes a rescan necessary, or because the fs has been mounted
 * with a non-qgroup-aware version.
 * Turning quota off and on again makes it inconsistent, too.
 */
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT	(1ULL << 2)

#define BTRFS_QGROUP_STATUS_VERSION        1
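
/*
 * Illustrative sketch, not part of the original header: the status flags
 * above are usually combined, e.g. qgroup numbers can only be trusted when
 * quotas are enabled and neither a rescan nor a recorded inconsistency is
 * pending.
 */
static inline bool btrfs_example_qgroup_numbers_reliable(u64 qg_status_flags)
{
	return (qg_status_flags & BTRFS_QGROUP_STATUS_FLAG_ON) &&
	       !(qg_status_flags & (BTRFS_QGROUP_STATUS_FLAG_RESCAN |
				    BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT));
}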
struct btrfs_qgroup_status_item {
	__le64 version;
	/*
	 * the generation is updated during every commit. As older
	 * versions of btrfs are not aware of qgroups, it will be
	 * possible to detect inconsistencies by checking the
	 * generation at mount time
	 */
	__le64 generation;

	/* flag definitions see above */
	__le64 flags;

	/*
	 * only used during scanning to record the progress
	 * of the scan. It contains a logical address
	 */
	__le64 rescan;
} __attribute__ ((__packed__));

struct btrfs_qgroup_info_item {
	__le64 generation;
	__le64 rfer;
	__le64 rfer_cmpr;
	__le64 excl;
	__le64 excl_cmpr;
} __attribute__ ((__packed__));

/* flags definition for qgroup limits */
#define BTRFS_QGROUP_LIMIT_MAX_RFER	(1ULL << 0)
#define BTRFS_QGROUP_LIMIT_MAX_EXCL	(1ULL << 1)
#define BTRFS_QGROUP_LIMIT_RSV_RFER	(1ULL << 2)
#define BTRFS_QGROUP_LIMIT_RSV_EXCL	(1ULL << 3)
#define BTRFS_QGROUP_LIMIT_RFER_CMPR	(1ULL << 4)
#define BTRFS_QGROUP_LIMIT_EXCL_CMPR	(1ULL << 5)

struct btrfs_qgroup_limit_item {
	/*
	 * only updated when any of the other values change
	 */
	__le64 flags;
	__le64 max_rfer;
	__le64 max_excl;
	__le64 rsv_rfer;
	__le64 rsv_excl;
} __attribute__ ((__packed__));

/* For raid type sysfs entries */
struct raid_kobject {
	int raid_type;
	struct kobject kobj;
};

struct btrfs_space_info {
	spinlock_t lock;

	u64 total_bytes;	/* total bytes in the space,
				   this doesn't take mirrors into account */
	u64 bytes_used;		/* total bytes used,
				   this doesn't take mirrors into account */
	u64 bytes_pinned;	/* total bytes pinned, will be freed when the
				   transaction finishes */
	u64 bytes_reserved;	/* total bytes the allocator has reserved for
				   current allocations */
	u64 bytes_may_use;	/* number of bytes that may be used for
				   delalloc/allocations */
	u64 bytes_readonly;	/* total bytes that are read only */

	unsigned int full:1;	/* indicates that we cannot allocate any more
				   chunks for this space */
	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */

	unsigned int flush:1;		/* set if we are trying to make space */

	unsigned int force_alloc;	/* set if we need to force a chunk
					   alloc for this space */

	u64 disk_used;		/* total bytes used on disk */
	u64 disk_total;		/* total bytes on disk, takes mirrors into
				   account */

	u64 flags;

	/*
	 * bytes_pinned is kept in line with what is actually pinned, as in
	 * we've called update_block_group and dropped the bytes_used counter
	 * and increased the bytes_pinned counter.  However this means that
	 * bytes_pinned does not reflect the bytes that will be pinned once the
	 * delayed refs are flushed, so this counter is inc'ed every time we
	 * call btrfs_free_extent so it is a realtime count of what will be
	 * freed once the transaction is committed.  It will be zeroed every
	 * time the transaction commits.
	 */
	struct percpu_counter total_bytes_pinned;

	struct list_head list;
	/* Protected by the spinlock 'lock'. */
	struct list_head ro_bgs;

	struct rw_semaphore groups_sem;
	/* for block groups in our same type */
	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
	wait_queue_head_t wait;

	struct kobject kobj;
	struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
};

#define	BTRFS_BLOCK_RSV_GLOBAL		1
#define	BTRFS_BLOCK_RSV_DELALLOC	2
#define	BTRFS_BLOCK_RSV_TRANS		3
#define	BTRFS_BLOCK_RSV_CHUNK		4
#define	BTRFS_BLOCK_RSV_DELOPS		5
#define	BTRFS_BLOCK_RSV_EMPTY		6
#define	BTRFS_BLOCK_RSV_TEMP		7

struct btrfs_block_rsv {
	u64 size;
	u64 reserved;
	struct btrfs_space_info *space_info;
	spinlock_t lock;
	unsigned short full;
	unsigned short type;
	unsigned short failfast;
};

/*
 * free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes.  They are used for all metadata
 * allocations and data allocations in ssd mode.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* largest extent in this cluster */
	u64 max_size;

	/* first extent starting offset */
	u64 window_start;

	struct btrfs_block_group_cache *block_group;
	/*
	 * when a cluster is allocated from a block group, we put the
	 * cluster onto a list in the block group so that it can
	 * be freed before the block group is freed.
	 */
	struct list_head block_group_list;
};

enum btrfs_caching_type {
	BTRFS_CACHE_NO		= 0,
	BTRFS_CACHE_STARTED	= 1,
	BTRFS_CACHE_FAST	= 2,
	BTRFS_CACHE_FINISHED	= 3,
	BTRFS_CACHE_ERROR	= 4,
};

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN	= 0,
	BTRFS_DC_ERROR		= 1,
	BTRFS_DC_CLEAR		= 2,
	BTRFS_DC_SETUP		= 3,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group_cache *block_group;
	u64 progress;
	atomic_t count;
};
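
/*
 * Illustrative sketch, not part of the original header: a block group's
 * free-space caching is finished (successfully or not) once it reaches one
 * of the two terminal states of enum btrfs_caching_type above.
 */
static inline bool btrfs_example_caching_finished(int cache_state)
{
	return cache_state == BTRFS_CACHE_FINISHED ||
	       cache_state == BTRFS_CACHE_ERROR;
}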
struct btrfs_block_group_cache {
	struct btrfs_key key;
	struct btrfs_block_group_item item;
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 pinned;
	u64 reserved;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 sectorsize;
	u64 cache_generation;

	/*
	 * It is just used for the delayed data space allocation because
	 * only the data space allocation and the relative metadata update
	 * can be done across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* for raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int ro:1;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;

	int disk_cache_state;

	/* cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* block group cache stuff */
	struct rb_node cache_node;

	/* for block groups in the same raid type */
	struct list_head list;

	/* usage count */
	atomic_t count;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	atomic_t trimming;

	/* For dirty block groups */
	struct list_head dirty_list;
};

/* delayed seq elem */
struct seq_list {
	struct list_head list;
	u64 seq;
};

enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};

/* used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	wait_queue_head_t wait;
	spinlock_t lock;
};

/* used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

#define BTRFS_STRIPE_HASH_TABLE_BITS 11

void btrfs_init_async_reclaim_work(struct work_struct *work);

/* fs_info */
struct reloc_control;
struct btrfs_device;
struct btrfs_fs_devices;
struct btrfs_balance_control;
struct btrfs_delayed_root;
struct btrfs_fs_info {
	u8 fsid[BTRFS_FSID_SIZE];
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	struct btrfs_root *extent_root;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *csum_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;

	/* the log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* block group cache stuff */
	spinlock_t block_group_cache_lock;
	u64 first_logical_byte;
	struct rb_root block_group_cache_tree;

	/* keep track of unallocated space */
	spinlock_t free_chunk_lock;
	u64 free_chunk_space;

	struct extent_io_tree freed_extents[2];
	struct extent_io_tree *pinned_extents;

	/* logical->physical extent mapping */
	struct btrfs_mapping_tree mapping_tree;

	/*
	 * block reservation for extent, checksum, root tree and
	 * delayed dir index item
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* block reservation for delay allocation */
	struct btrfs_block_rsv delalloc_block_rsv;
	/* block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	u64 generation;
	u64 last_trans_committed;
	u64 avg_delayed_ref_runtime;

	/*
	 * this is updated to the current trans every time a full commit
	 * is required instead of the faster short fsync log commits
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;
	/*
	 * Track requests for actions that need to be done during transaction
	 * commit (like for some mount options).
	 */
	unsigned long pending_changes;
	unsigned long compress_type:4;
	int commit_interval;
	/*
	 * It is a suggestive number, the read side is safe even if it gets a
	 * wrong number because we will write out the data into a regular
	 * extent.  The write side (mount/remount) is under ->s_umount lock,
	 * so it is also safe.
	 */
	u64 max_inline;
	/*
	 * Protected by ->chunk_mutex and sb->s_umount.
	 *
	 * The reason we use two locks to protect it is that only remount and
	 * mount operations can change it, and these two operations are under
	 * sb->s_umount, but the read side (chunk allocation) cannot acquire
	 * sb->s_umount or a deadlock would happen.  So we use two locks to
	 * protect it.  On the write side, we must acquire both locks, and on
	 * the read side, we just need to acquire one of them.
	 */
	u64 alloc_start;
	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear the flags, we needn't use the lock on the
	 * read side.
	 *
	 * We also needn't use the lock when we mount the fs, because
	 * there is no other task which will update the flag.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct block_device *__bdev;
	struct super_block *sb;
	struct inode *btree_inode;
	struct backing_dev_info bdi;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;
	struct mutex volume_mutex;

	/*
	 * this is used during read/modify/write to make sure
	 * no two ios are trying to mod the same stripe at the same
	 * time
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * this protects the ordered operations list only while we are
	 * processing all of the entries on it.  This way we make
	 * sure the commit code doesn't find the list temporarily empty
	 * because another function happens to be doing non-waiting preflush
	 * before jumping into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	/*
	 * Same as ordered_operations_mutex except this is for ordered extents
	 * and not the operations.
	 */
	struct mutex ordered_extent_flush_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;
	struct srcu_struct subvol_srcu;

	spinlock_t trans_lock;
	/*
	 * the reloc mutex goes with the trans lock, it is taken
	 * during commit to protect us from the relocation code
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;

	/* this protects tree_mod_seq_list */
	spinlock_t tree_mod_seq_lock;
	atomic64_t tree_mod_seq;
	struct list_head tree_mod_seq_list;

	/* this protects tree_mod_log */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;

	atomic_t nr_async_submits;
	atomic_t async_submit_draining;
	atomic_t nr_async_bios;
	atomic_t async_delalloc_pages;
	atomic_t open_ioctl_trans;

	/*
	 * this is used to protect the following list -- ordered_roots.
	 */
	spinlock_t ordered_root_lock;

	/*
	 * all fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * these can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* all fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * there is a pool of worker threads for checksumming during writes
	 * and a pool for checksumming after reads.  This is because readers
	 * can run with FS locks held, and the writers may be waiting for
	 * those locks.  We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other
	 * two
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct btrfs_workqueue *endio_workers;
	struct btrfs_workqueue *endio_meta_workers;
	struct btrfs_workqueue *endio_raid56_workers;
	struct btrfs_workqueue *endio_repair_workers;
	struct btrfs_workqueue *rmw_workers;
	struct btrfs_workqueue *endio_meta_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *submit_workers;
	struct btrfs_workqueue *caching_workers;
	struct btrfs_workqueue *readahead_workers;

	/*
	 * fixup workers take dirty pages that didn't properly go through
	 * the cow mechanism and make them safe to write.  It happens
	 * for the sys_munmap function call path
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	/* the extent workers do delayed refs on the extent allocation tree */
	struct btrfs_workqueue *extent_workers;
	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	int thread_pool_size;

	struct kobject super_kobj;
	struct kobject *space_info_kobj;
	struct kobject *device_dir_kobj;
	struct completion kobj_unregister;
	int do_barriers;
	int closing;
	int log_root_recovering;
	int open;

	u64 total_pinned;

	/* used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * the space_info list is almost entirely read only.  It only changes
	 * when we add a new raid type to the FS, and that happens
	 * very rarely.  RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* all metadata allocations go through this cluster */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* auto defrag inodes go here */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * these three are in extended format (availability of single
	 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
	 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* restriper state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_running;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	unsigned data_chunk_allocations;
	unsigned metadata_ratio;

	void *bdev_holder;

	/* private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	int scrub_workers_refcnt;
	struct btrfs_workqueue *scrub_workers;
	struct btrfs_workqueue *scrub_wr_completion_workers;
	struct btrfs_workqueue *scrub_nocow_workers;

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	u32 check_integrity_print_mask;
#endif
	/*
	 * quota information
	 */
	unsigned int quota_enabled:1;

	/*
	 * quota_enabled only changes state after a commit. This holds the
	 * next state.
	 */
	unsigned int pending_quota_state:1;

	/* is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* holds configuration and tracking.  Protected by qgroup_lock */
	struct rb_root qgroup_tree;
	struct rb_root qgroup_op_tree;
	spinlock_t qgroup_lock;
	spinlock_t qgroup_op_lock;
	atomic_t qgroup_op_seq;

	/*
	 * used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/* protect user change for quota operations */
	struct mutex qgroup_ioctl_lock;

	/* list of dirty qgroups to be written at next commit */
	struct list_head dirty_qgroups;

	/* used by btrfs_qgroup_record_ref for an efficient tree traversal */
	u64 qgroup_seq;

	/* qgroup rescan items */
	struct mutex qgroup_rescan_lock; /* protects the progress item */
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;

	/* filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* readahead tree */
	spinlock_t reada_lock;
	struct radix_tree_root reada_tree;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	struct radix_tree_root buffer_radix;

	/* next backup root to be overwritten */
	int backup_root_index;

	int num_tolerated_disk_barrier_failures;

	/* device replace state */
	struct btrfs_dev_replace dev_replace;

	atomic_t mutually_exclusive_operation_running;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;

	struct semaphore uuid_tree_rescan_sem;
	unsigned int update_uuid_tree_gen:1;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;

	/* For btrfs to record security options */
	struct security_mnt_opts security_opts;

	/*
	 * Chunks that can't be freed yet (under a trim/discard operation)
	 * and will be later freed.  Protected by fs_info->chunk_mutex.
	 */
	struct list_head pinned_chunks;
};

struct btrfs_subvolume_writers {
	struct percpu_counter counter;
	wait_queue_head_t	wait;
};

/*
 * The state of btrfs root
 */
/*
 * btrfs_record_root_in_trans is a multi-step process,
 * and it can race with the balancing code.  But the
 * race is very small, and only the first time the root
 * is added to each transaction.  So IN_TRANS_SETUP
 * is used to tell us when more checks are required
 */
#define BTRFS_ROOT_IN_TRANS_SETUP	0
#define BTRFS_ROOT_REF_COWS		1
#define BTRFS_ROOT_TRACK_DIRTY		2
#define BTRFS_ROOT_IN_RADIX		3
#define BTRFS_ROOT_DUMMY_ROOT		4
#define BTRFS_ROOT_ORPHAN_ITEM_INSERTED	5
#define BTRFS_ROOT_DEFRAG_RUNNING	6
#define BTRFS_ROOT_FORCE_COW		7
#define BTRFS_ROOT_MULTI_LOG_TASKS	8
#define BTRFS_ROOT_DIRTY		9

/*
 * in ram representation of the tree.  extent_root is used for all allocations
 * and for the extent tree extent_root root.
 */
struct btrfs_root {
	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	/* free ino cache stuff */
	struct btrfs_free_space_ctl *free_ino_ctl;
	enum btrfs_caching_type ino_cache_state;
	spinlock_t ino_cache_lock;
	wait_queue_head_t ino_cache_wait;
	struct btrfs_free_space_ctl *free_ino_pinned;
	u64 ino_cache_progress;
	struct inode *ino_cache_inode;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	atomic_t log_writers;
	atomic_t log_commit[2];
	atomic_t log_batch;
	int log_transid;
	/* Updated no matter whether the commit succeeds or not. */
	int log_transid_committed;
	/* Only updated when the commit succeeds. */
	int last_log_commit;
	pid_t log_start_pid;

	u64 objectid;
	u64 last_trans;

	/* data allocations are done in sectorsize units */
	u32 sectorsize;

	/* node allocations are done in nodesize units */
	u32 nodesize;

	u32 stripesize;

	u32 type;

	u64 highest_objectid;

	/* only used when CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */
	u64 alloc_bytenr;

	u64 defrag_trans_start;
	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;
	char *name;

	/* the dirty list is only used by non-reference counted roots */
	struct list_head dirty_list;

	struct list_head root_list;

	spinlock_t log_extents_lock[2];
	struct list_head logged_list[2];

	spinlock_t orphan_lock;
	atomic_t orphan_inodes;
	struct btrfs_block_rsv *orphan_block_rsv;
	int orphan_cleanup_state;

	spinlock_t inode_lock;
	/* red-black tree that keeps track of in-memory inodes */
	struct rb_root inode_tree;

	/*
	 * radix tree that keeps track of delayed nodes of every inode,
	 * protected by inode_lock
	 */
	struct radix_tree_root delayed_nodes_tree;
	/*
	 * right now this just gets used so that a root has its own devid
	 * for stat.  It may be used for more later
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	atomic_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * all of the inodes that have delalloc bytes.  It is possible for
	 * this list to be empty even when there is still dirty data=ordered
	 * extents waiting to finish IO.
	 */
1893 */ 1894 struct list_head delalloc_inodes; 1895 struct list_head delalloc_root; 1896 u64 nr_delalloc_inodes; 1897 1898 struct mutex ordered_extent_mutex; 1899 /* 1900 * this is used by the balancing code to wait for all the pending 1901 * ordered extents 1902 */ 1903 spinlock_t ordered_extent_lock; 1904 1905 /* 1906 * all of the data=ordered extents pending writeback 1907 * these can span multiple transactions and basically include 1908 * every dirty data page that isn't from nodatacow 1909 */ 1910 struct list_head ordered_extents; 1911 struct list_head ordered_root; 1912 u64 nr_ordered_extents; 1913 1914 /* 1915 * Number of currently running SEND ioctls to prevent 1916 * manipulation with the read-only status via SUBVOL_SETFLAGS 1917 */ 1918 int send_in_progress; 1919 struct btrfs_subvolume_writers *subv_writers; 1920 atomic_t will_be_snapshoted; 1921 }; 1922 1923 struct btrfs_ioctl_defrag_range_args { 1924 /* start of the defrag operation */ 1925 __u64 start; 1926 1927 /* number of bytes to defrag, use (u64)-1 to say all */ 1928 __u64 len; 1929 1930 /* 1931 * flags for the operation, which can include turning 1932 * on compression for this one defrag 1933 */ 1934 __u64 flags; 1935 1936 /* 1937 * any extent bigger than this will be considered 1938 * already defragged. Use 0 to take the kernel default 1939 * Use 1 to say every single extent must be rewritten 1940 */ 1941 __u32 extent_thresh; 1942 1943 /* 1944 * which compression method to use if turning on compression 1945 * for this defrag operation. If unspecified, zlib will 1946 * be used 1947 */ 1948 __u32 compress_type; 1949 1950 /* spare for later */ 1951 __u32 unused[4]; 1952 }; 1953 1954 1955 /* 1956 * inode items have the data typically returned from stat and store other 1957 * info about object characteristics. There is one for every file and dir in 1958 * the FS 1959 */ 1960 #define BTRFS_INODE_ITEM_KEY 1 1961 #define BTRFS_INODE_REF_KEY 12 1962 #define BTRFS_INODE_EXTREF_KEY 13 1963 #define BTRFS_XATTR_ITEM_KEY 24 1964 #define BTRFS_ORPHAN_ITEM_KEY 48 1965 /* reserve 2-15 close to the inode for later flexibility */ 1966 1967 /* 1968 * dir items are the name -> inode pointers in a directory. There is one 1969 * for every name in a directory. 1970 */ 1971 #define BTRFS_DIR_LOG_ITEM_KEY 60 1972 #define BTRFS_DIR_LOG_INDEX_KEY 72 1973 #define BTRFS_DIR_ITEM_KEY 84 1974 #define BTRFS_DIR_INDEX_KEY 96 1975 /* 1976 * extent data is for file data 1977 */ 1978 #define BTRFS_EXTENT_DATA_KEY 108 1979 1980 /* 1981 * extent csums are stored in a separate tree and hold csums for 1982 * an entire extent on disk. 1983 */ 1984 #define BTRFS_EXTENT_CSUM_KEY 128 1985 1986 /* 1987 * root items point to tree roots. They are typically in the root 1988 * tree used by the super block to find all the other trees 1989 */ 1990 #define BTRFS_ROOT_ITEM_KEY 132 1991 1992 /* 1993 * root backrefs tie subvols and snapshots to the directory entries that 1994 * reference them 1995 */ 1996 #define BTRFS_ROOT_BACKREF_KEY 144 1997 1998 /* 1999 * root refs make a fast index for listing all of the snapshots and 2000 * subvolumes referenced by a given root. They point directly to the 2001 * directory item in the root that references the subvol 2002 */ 2003 #define BTRFS_ROOT_REF_KEY 156 2004 2005 /* 2006 * extent items are in the extent map tree. 
These record which blocks 2007 * are used, and how many references there are to each block 2008 */ 2009 #define BTRFS_EXTENT_ITEM_KEY 168 2010 2011 /* 2012 * The same as BTRFS_EXTENT_ITEM_KEY, except that for metadata we already know 2013 * the length, so we store the level in key->offset instead of the length. 2014 */ 2015 #define BTRFS_METADATA_ITEM_KEY 169 2016 2017 #define BTRFS_TREE_BLOCK_REF_KEY 176 2018 2019 #define BTRFS_EXTENT_DATA_REF_KEY 178 2020 2021 #define BTRFS_EXTENT_REF_V0_KEY 180 2022 2023 #define BTRFS_SHARED_BLOCK_REF_KEY 182 2024 2025 #define BTRFS_SHARED_DATA_REF_KEY 184 2026 2027 /* 2028 * block groups give us hints into the extent allocation trees: which 2029 * blocks are free, etc. 2030 */ 2031 #define BTRFS_BLOCK_GROUP_ITEM_KEY 192 2032 2033 #define BTRFS_DEV_EXTENT_KEY 204 2034 #define BTRFS_DEV_ITEM_KEY 216 2035 #define BTRFS_CHUNK_ITEM_KEY 228 2036 2037 /* 2038 * Records the overall state of the qgroups. 2039 * There's only one instance of this key present, 2040 * (0, BTRFS_QGROUP_STATUS_KEY, 0) 2041 */ 2042 #define BTRFS_QGROUP_STATUS_KEY 240 2043 /* 2044 * Records the currently used space of the qgroup. 2045 * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid). 2046 */ 2047 #define BTRFS_QGROUP_INFO_KEY 242 2048 /* 2049 * Contains the user configured limits for the qgroup. 2050 * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid). 2051 */ 2052 #define BTRFS_QGROUP_LIMIT_KEY 244 2053 /* 2054 * Records the child-parent relationship of qgroups. For 2055 * each relation, 2 keys are present: 2056 * (childid, BTRFS_QGROUP_RELATION_KEY, parentid) 2057 * (parentid, BTRFS_QGROUP_RELATION_KEY, childid) 2058 */ 2059 #define BTRFS_QGROUP_RELATION_KEY 246 2060 2061 #define BTRFS_BALANCE_ITEM_KEY 248 2062 2063 /* 2064 * Persistently stores the IO stats in the device tree. 2065 * One key for all stats, (0, BTRFS_DEV_STATS_KEY, devid). 2066 */ 2067 #define BTRFS_DEV_STATS_KEY 249 2068 2069 /* 2070 * Persistently stores the device replace state in the device tree. 2071 * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0). 2072 */ 2073 #define BTRFS_DEV_REPLACE_KEY 250 2074 2075 /* 2076 * Stores items that allow UUIDs to be quickly mapped to something else. 2077 * These items are part of the filesystem UUID tree. 2078 * The key is built like this: 2079 * (UUID_upper_64_bits, BTRFS_UUID_KEY*, UUID_lower_64_bits). 2080 */ 2081 #if BTRFS_UUID_SIZE != 16 2082 #error "UUID items require BTRFS_UUID_SIZE == 16!" 2083 #endif 2084 #define BTRFS_UUID_KEY_SUBVOL 251 /* for UUIDs assigned to subvols */ 2085 #define BTRFS_UUID_KEY_RECEIVED_SUBVOL 252 /* for UUIDs assigned to 2086 * received subvols */ 2087 2088 /* 2089 * string items are for debugging. They just store a short string of 2090 * data in the FS 2091 */ 2092 #define BTRFS_STRING_ITEM_KEY 253 2093 2094 /* 2095 * Flags for mount options.
2096 * 2097 * Note: don't forget to add new options to btrfs_show_options() 2098 */ 2099 #define BTRFS_MOUNT_NODATASUM (1 << 0) 2100 #define BTRFS_MOUNT_NODATACOW (1 << 1) 2101 #define BTRFS_MOUNT_NOBARRIER (1 << 2) 2102 #define BTRFS_MOUNT_SSD (1 << 3) 2103 #define BTRFS_MOUNT_DEGRADED (1 << 4) 2104 #define BTRFS_MOUNT_COMPRESS (1 << 5) 2105 #define BTRFS_MOUNT_NOTREELOG (1 << 6) 2106 #define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7) 2107 #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) 2108 #define BTRFS_MOUNT_NOSSD (1 << 9) 2109 #define BTRFS_MOUNT_DISCARD (1 << 10) 2110 #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) 2111 #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) 2112 #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) 2113 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 2114 #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 2115 #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) 2116 #define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) 2117 #define BTRFS_MOUNT_RECOVERY (1 << 18) 2118 #define BTRFS_MOUNT_SKIP_BALANCE (1 << 19) 2119 #define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20) 2120 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) 2121 #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22) 2122 #define BTRFS_MOUNT_RESCAN_UUID_TREE (1 << 23) 2123 2124 #define BTRFS_DEFAULT_COMMIT_INTERVAL (30) 2125 #define BTRFS_DEFAULT_MAX_INLINE (8192) 2126 2127 #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 2128 #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 2129 #define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt) 2130 #define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \ 2131 BTRFS_MOUNT_##opt) 2132 2133 #define btrfs_set_and_info(root, opt, fmt, args...) \ 2134 { \ 2135 if (!btrfs_test_opt(root, opt)) \ 2136 btrfs_info(root->fs_info, fmt, ##args); \ 2137 btrfs_set_opt(root->fs_info->mount_opt, opt); \ 2138 } 2139 2140 #define btrfs_clear_and_info(root, opt, fmt, args...) \ 2141 { \ 2142 if (btrfs_test_opt(root, opt)) \ 2143 btrfs_info(root->fs_info, fmt, ##args); \ 2144 btrfs_clear_opt(root->fs_info->mount_opt, opt); \ 2145 } 2146 2147 /* 2148 * Requests for changes that need to be done during transaction commit. 2149 * 2150 * Internal mount options that are used for special handling of the real 2151 * mount options (eg. cannot be set during remount and have to be set during 2152 * transaction commit) 2153 */ 2154 2155 #define BTRFS_PENDING_SET_INODE_MAP_CACHE (0) 2156 #define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE (1) 2157 #define BTRFS_PENDING_COMMIT (2) 2158 2159 #define btrfs_test_pending(info, opt) \ 2160 test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2161 #define btrfs_set_pending(info, opt) \ 2162 set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2163 #define btrfs_clear_pending(info, opt) \ 2164 clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2165 2166 /* 2167 * Helpers for setting pending mount option changes. 2168 * 2169 * Expects corresponding macros 2170 * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name 2171 */ 2172 #define btrfs_set_pending_and_info(info, opt, fmt, args...) \ 2173 do { \ 2174 if (!btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 2175 btrfs_info((info), fmt, ##args); \ 2176 btrfs_set_pending((info), SET_##opt); \ 2177 btrfs_clear_pending((info), CLEAR_##opt); \ 2178 } \ 2179 } while(0) 2180 2181 #define btrfs_clear_pending_and_info(info, opt, fmt, args...) 
\ 2182 do { \ 2183 if (btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 2184 btrfs_info((info), fmt, ##args); \ 2185 btrfs_set_pending((info), CLEAR_##opt); \ 2186 btrfs_clear_pending((info), SET_##opt); \ 2187 } \ 2188 } while(0) 2189 2190 /* 2191 * Inode flags 2192 */ 2193 #define BTRFS_INODE_NODATASUM (1 << 0) 2194 #define BTRFS_INODE_NODATACOW (1 << 1) 2195 #define BTRFS_INODE_READONLY (1 << 2) 2196 #define BTRFS_INODE_NOCOMPRESS (1 << 3) 2197 #define BTRFS_INODE_PREALLOC (1 << 4) 2198 #define BTRFS_INODE_SYNC (1 << 5) 2199 #define BTRFS_INODE_IMMUTABLE (1 << 6) 2200 #define BTRFS_INODE_APPEND (1 << 7) 2201 #define BTRFS_INODE_NODUMP (1 << 8) 2202 #define BTRFS_INODE_NOATIME (1 << 9) 2203 #define BTRFS_INODE_DIRSYNC (1 << 10) 2204 #define BTRFS_INODE_COMPRESS (1 << 11) 2205 2206 #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) 2207 2208 struct btrfs_map_token { 2209 struct extent_buffer *eb; 2210 char *kaddr; 2211 unsigned long offset; 2212 }; 2213 2214 static inline void btrfs_init_map_token (struct btrfs_map_token *token) 2215 { 2216 token->kaddr = NULL; 2217 } 2218 2219 /* some macros to generate set/get funcs for the struct fields. This 2220 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 2221 * one for u8: 2222 */ 2223 #define le8_to_cpu(v) (v) 2224 #define cpu_to_le8(v) (v) 2225 #define __le8 u8 2226 2227 #define read_eb_member(eb, ptr, type, member, result) ( \ 2228 read_extent_buffer(eb, (char *)(result), \ 2229 ((unsigned long)(ptr)) + \ 2230 offsetof(type, member), \ 2231 sizeof(((type *)0)->member))) 2232 2233 #define write_eb_member(eb, ptr, type, member, result) ( \ 2234 write_extent_buffer(eb, (char *)(result), \ 2235 ((unsigned long)(ptr)) + \ 2236 offsetof(type, member), \ 2237 sizeof(((type *)0)->member))) 2238 2239 #define DECLARE_BTRFS_SETGET_BITS(bits) \ 2240 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ 2241 unsigned long off, \ 2242 struct btrfs_map_token *token); \ 2243 void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \ 2244 unsigned long off, u##bits val, \ 2245 struct btrfs_map_token *token); \ 2246 static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \ 2247 unsigned long off) \ 2248 { \ 2249 return btrfs_get_token_##bits(eb, ptr, off, NULL); \ 2250 } \ 2251 static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ 2252 unsigned long off, u##bits val) \ 2253 { \ 2254 btrfs_set_token_##bits(eb, ptr, off, val, NULL); \ 2255 } 2256 2257 DECLARE_BTRFS_SETGET_BITS(8) 2258 DECLARE_BTRFS_SETGET_BITS(16) 2259 DECLARE_BTRFS_SETGET_BITS(32) 2260 DECLARE_BTRFS_SETGET_BITS(64) 2261 2262 #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ 2263 static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \ 2264 { \ 2265 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2266 return btrfs_get_##bits(eb, s, offsetof(type, member)); \ 2267 } \ 2268 static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \ 2269 u##bits val) \ 2270 { \ 2271 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2272 btrfs_set_##bits(eb, s, offsetof(type, member), val); \ 2273 } \ 2274 static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \ 2275 struct btrfs_map_token *token) \ 2276 { \ 2277 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2278 return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \ 2279 } \ 2280 static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ 2281 type *s, 
u##bits val, \ 2282 struct btrfs_map_token *token) \ 2283 { \ 2284 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2285 btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \ 2286 } 2287 2288 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ 2289 static inline u##bits btrfs_##name(struct extent_buffer *eb) \ 2290 { \ 2291 type *p = page_address(eb->pages[0]); \ 2292 u##bits res = le##bits##_to_cpu(p->member); \ 2293 return res; \ 2294 } \ 2295 static inline void btrfs_set_##name(struct extent_buffer *eb, \ 2296 u##bits val) \ 2297 { \ 2298 type *p = page_address(eb->pages[0]); \ 2299 p->member = cpu_to_le##bits(val); \ 2300 } 2301 2302 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ 2303 static inline u##bits btrfs_##name(type *s) \ 2304 { \ 2305 return le##bits##_to_cpu(s->member); \ 2306 } \ 2307 static inline void btrfs_set_##name(type *s, u##bits val) \ 2308 { \ 2309 s->member = cpu_to_le##bits(val); \ 2310 } 2311 2312 BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64); 2313 BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64); 2314 BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64); 2315 BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32); 2316 BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32); 2317 BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item, 2318 start_offset, 64); 2319 BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32); 2320 BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64); 2321 BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32); 2322 BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8); 2323 BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8); 2324 BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64); 2325 2326 BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64); 2327 BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item, 2328 total_bytes, 64); 2329 BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item, 2330 bytes_used, 64); 2331 BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item, 2332 io_align, 32); 2333 BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item, 2334 io_width, 32); 2335 BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item, 2336 sector_size, 32); 2337 BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64); 2338 BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item, 2339 dev_group, 32); 2340 BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item, 2341 seek_speed, 8); 2342 BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item, 2343 bandwidth, 8); 2344 BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item, 2345 generation, 64); 2346 2347 static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d) 2348 { 2349 return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid); 2350 } 2351 2352 static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d) 2353 { 2354 return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid); 2355 } 2356 2357 BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64); 2358 BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64); 2359 BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, 
stripe_len, 64); 2360 BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32); 2361 BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32); 2362 BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32); 2363 BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64); 2364 BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16); 2365 BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16); 2366 BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64); 2367 BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64); 2368 2369 static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s) 2370 { 2371 return (char *)s + offsetof(struct btrfs_stripe, dev_uuid); 2372 } 2373 2374 BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64); 2375 BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64); 2376 BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk, 2377 stripe_len, 64); 2378 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, 2379 io_align, 32); 2380 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, 2381 io_width, 32); 2382 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk, 2383 sector_size, 32); 2384 BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64); 2385 BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk, 2386 num_stripes, 16); 2387 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk, 2388 sub_stripes, 16); 2389 BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64); 2390 BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64); 2391 2392 static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, 2393 int nr) 2394 { 2395 unsigned long offset = (unsigned long)c; 2396 offset += offsetof(struct btrfs_chunk, stripe); 2397 offset += nr * sizeof(struct btrfs_stripe); 2398 return (struct btrfs_stripe *)offset; 2399 } 2400 2401 static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) 2402 { 2403 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); 2404 } 2405 2406 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, 2407 struct btrfs_chunk *c, int nr) 2408 { 2409 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); 2410 } 2411 2412 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, 2413 struct btrfs_chunk *c, int nr) 2414 { 2415 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); 2416 } 2417 2418 /* struct btrfs_block_group_item */ 2419 BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, 2420 used, 64); 2421 BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, 2422 used, 64); 2423 BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, 2424 struct btrfs_block_group_item, chunk_objectid, 64); 2425 2426 BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, 2427 struct btrfs_block_group_item, chunk_objectid, 64); 2428 BTRFS_SETGET_FUNCS(disk_block_group_flags, 2429 struct btrfs_block_group_item, flags, 64); 2430 BTRFS_SETGET_STACK_FUNCS(block_group_flags, 2431 struct btrfs_block_group_item, flags, 64); 2432 2433 /* struct btrfs_inode_ref */ 2434 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); 2435 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); 2436 2437 /* struct btrfs_inode_extref */ 2438 BTRFS_SETGET_FUNCS(inode_extref_parent, 
struct btrfs_inode_extref, 2439 parent_objectid, 64); 2440 BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref, 2441 name_len, 16); 2442 BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64); 2443 2444 /* struct btrfs_inode_item */ 2445 BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); 2446 BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); 2447 BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64); 2448 BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64); 2449 BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64); 2450 BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64); 2451 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); 2452 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); 2453 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); 2454 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); 2455 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); 2456 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); 2457 BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, 2458 generation, 64); 2459 BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, 2460 sequence, 64); 2461 BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, 2462 transid, 64); 2463 BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); 2464 BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, 2465 nbytes, 64); 2466 BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, 2467 block_group, 64); 2468 BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); 2469 BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); 2470 BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); 2471 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); 2472 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); 2473 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); 2474 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); 2475 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); 2476 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); 2477 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); 2478 2479 /* struct btrfs_dev_extent */ 2480 BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, 2481 chunk_tree, 64); 2482 BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, 2483 chunk_objectid, 64); 2484 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, 2485 chunk_offset, 64); 2486 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); 2487 2488 static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) 2489 { 2490 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); 2491 return (unsigned long)dev + ptr; 2492 } 2493 2494 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); 2495 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, 2496 generation, 64); 2497 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); 2498 2499 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); 2500 2501 
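/*
 * Illustrative sketch only (not part of the original header): the
 * BTRFS_SETGET_FUNCS() accessors above read and write structure fields
 * through the extent buffer, converting from the on-disk little-endian
 * layout, while the BTRFS_SETGET_STACK_FUNCS() variants operate on a
 * plain in-memory copy of the structure. The helper name below is made
 * up for the example, and it assumes the caller has already located the
 * btrfs_inode_item inside the leaf (normally done with btrfs_item_ptr(),
 * defined further down in this file).
 */
static inline void btrfs_example_copy_inode_sizes(struct extent_buffer *leaf,
						  struct btrfs_inode_item *leaf_item,
						  struct btrfs_inode_item *stack_copy)
{
	/* the eb-based getters handle the le->cpu conversion internally */
	u64 size = btrfs_inode_size(leaf, leaf_item);
	u32 nlink = btrfs_inode_nlink(leaf, leaf_item);

	/* the stack setters store the values back in little-endian form */
	btrfs_set_stack_inode_size(stack_copy, size);
	btrfs_set_stack_inode_nlink(stack_copy, nlink);
}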
2502 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); 2503 2504 static inline void btrfs_tree_block_key(struct extent_buffer *eb, 2505 struct btrfs_tree_block_info *item, 2506 struct btrfs_disk_key *key) 2507 { 2508 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 2509 } 2510 2511 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, 2512 struct btrfs_tree_block_info *item, 2513 struct btrfs_disk_key *key) 2514 { 2515 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 2516 } 2517 2518 BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, 2519 root, 64); 2520 BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref, 2521 objectid, 64); 2522 BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref, 2523 offset, 64); 2524 BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, 2525 count, 32); 2526 2527 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, 2528 count, 32); 2529 2530 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref, 2531 type, 8); 2532 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref, 2533 offset, 64); 2534 2535 static inline u32 btrfs_extent_inline_ref_size(int type) 2536 { 2537 if (type == BTRFS_TREE_BLOCK_REF_KEY || 2538 type == BTRFS_SHARED_BLOCK_REF_KEY) 2539 return sizeof(struct btrfs_extent_inline_ref); 2540 if (type == BTRFS_SHARED_DATA_REF_KEY) 2541 return sizeof(struct btrfs_shared_data_ref) + 2542 sizeof(struct btrfs_extent_inline_ref); 2543 if (type == BTRFS_EXTENT_DATA_REF_KEY) 2544 return sizeof(struct btrfs_extent_data_ref) + 2545 offsetof(struct btrfs_extent_inline_ref, offset); 2546 BUG(); 2547 return 0; 2548 } 2549 2550 BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); 2551 BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, 2552 generation, 64); 2553 BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); 2554 BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); 2555 2556 /* struct btrfs_node */ 2557 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); 2558 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); 2559 BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, 2560 blockptr, 64); 2561 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, 2562 generation, 64); 2563 2564 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) 2565 { 2566 unsigned long ptr; 2567 ptr = offsetof(struct btrfs_node, ptrs) + 2568 sizeof(struct btrfs_key_ptr) * nr; 2569 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); 2570 } 2571 2572 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, 2573 int nr, u64 val) 2574 { 2575 unsigned long ptr; 2576 ptr = offsetof(struct btrfs_node, ptrs) + 2577 sizeof(struct btrfs_key_ptr) * nr; 2578 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); 2579 } 2580 2581 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) 2582 { 2583 unsigned long ptr; 2584 ptr = offsetof(struct btrfs_node, ptrs) + 2585 sizeof(struct btrfs_key_ptr) * nr; 2586 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); 2587 } 2588 2589 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, 2590 int nr, u64 val) 2591 { 2592 unsigned long ptr; 2593 ptr = offsetof(struct btrfs_node, ptrs) + 2594 sizeof(struct 
btrfs_key_ptr) * nr; 2595 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); 2596 } 2597 2598 static inline unsigned long btrfs_node_key_ptr_offset(int nr) 2599 { 2600 return offsetof(struct btrfs_node, ptrs) + 2601 sizeof(struct btrfs_key_ptr) * nr; 2602 } 2603 2604 void btrfs_node_key(struct extent_buffer *eb, 2605 struct btrfs_disk_key *disk_key, int nr); 2606 2607 static inline void btrfs_set_node_key(struct extent_buffer *eb, 2608 struct btrfs_disk_key *disk_key, int nr) 2609 { 2610 unsigned long ptr; 2611 ptr = btrfs_node_key_ptr_offset(nr); 2612 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, 2613 struct btrfs_key_ptr, key, disk_key); 2614 } 2615 2616 /* struct btrfs_item */ 2617 BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32); 2618 BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32); 2619 BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32); 2620 BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32); 2621 2622 static inline unsigned long btrfs_item_nr_offset(int nr) 2623 { 2624 return offsetof(struct btrfs_leaf, items) + 2625 sizeof(struct btrfs_item) * nr; 2626 } 2627 2628 static inline struct btrfs_item *btrfs_item_nr(int nr) 2629 { 2630 return (struct btrfs_item *)btrfs_item_nr_offset(nr); 2631 } 2632 2633 static inline u32 btrfs_item_end(struct extent_buffer *eb, 2634 struct btrfs_item *item) 2635 { 2636 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); 2637 } 2638 2639 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) 2640 { 2641 return btrfs_item_end(eb, btrfs_item_nr(nr)); 2642 } 2643 2644 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) 2645 { 2646 return btrfs_item_offset(eb, btrfs_item_nr(nr)); 2647 } 2648 2649 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) 2650 { 2651 return btrfs_item_size(eb, btrfs_item_nr(nr)); 2652 } 2653 2654 static inline void btrfs_item_key(struct extent_buffer *eb, 2655 struct btrfs_disk_key *disk_key, int nr) 2656 { 2657 struct btrfs_item *item = btrfs_item_nr(nr); 2658 read_eb_member(eb, item, struct btrfs_item, key, disk_key); 2659 } 2660 2661 static inline void btrfs_set_item_key(struct extent_buffer *eb, 2662 struct btrfs_disk_key *disk_key, int nr) 2663 { 2664 struct btrfs_item *item = btrfs_item_nr(nr); 2665 write_eb_member(eb, item, struct btrfs_item, key, disk_key); 2666 } 2667 2668 BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64); 2669 2670 /* 2671 * struct btrfs_root_ref 2672 */ 2673 BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64); 2674 BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64); 2675 BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16); 2676 2677 /* struct btrfs_dir_item */ 2678 BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16); 2679 BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8); 2680 BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16); 2681 BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64); 2682 BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8); 2683 BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, 2684 data_len, 16); 2685 BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, 2686 name_len, 16); 2687 BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, 2688 transid, 64); 2689 2690 static inline void btrfs_dir_item_key(struct extent_buffer *eb, 2691 
struct btrfs_dir_item *item, 2692 struct btrfs_disk_key *key) 2693 { 2694 read_eb_member(eb, item, struct btrfs_dir_item, location, key); 2695 } 2696 2697 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, 2698 struct btrfs_dir_item *item, 2699 struct btrfs_disk_key *key) 2700 { 2701 write_eb_member(eb, item, struct btrfs_dir_item, location, key); 2702 } 2703 2704 BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, 2705 num_entries, 64); 2706 BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, 2707 num_bitmaps, 64); 2708 BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, 2709 generation, 64); 2710 2711 static inline void btrfs_free_space_key(struct extent_buffer *eb, 2712 struct btrfs_free_space_header *h, 2713 struct btrfs_disk_key *key) 2714 { 2715 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); 2716 } 2717 2718 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, 2719 struct btrfs_free_space_header *h, 2720 struct btrfs_disk_key *key) 2721 { 2722 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); 2723 } 2724 2725 /* struct btrfs_disk_key */ 2726 BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, 2727 objectid, 64); 2728 BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); 2729 BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); 2730 2731 static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, 2732 struct btrfs_disk_key *disk) 2733 { 2734 cpu->offset = le64_to_cpu(disk->offset); 2735 cpu->type = disk->type; 2736 cpu->objectid = le64_to_cpu(disk->objectid); 2737 } 2738 2739 static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, 2740 struct btrfs_key *cpu) 2741 { 2742 disk->offset = cpu_to_le64(cpu->offset); 2743 disk->type = cpu->type; 2744 disk->objectid = cpu_to_le64(cpu->objectid); 2745 } 2746 2747 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, 2748 struct btrfs_key *key, int nr) 2749 { 2750 struct btrfs_disk_key disk_key; 2751 btrfs_node_key(eb, &disk_key, nr); 2752 btrfs_disk_key_to_cpu(key, &disk_key); 2753 } 2754 2755 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, 2756 struct btrfs_key *key, int nr) 2757 { 2758 struct btrfs_disk_key disk_key; 2759 btrfs_item_key(eb, &disk_key, nr); 2760 btrfs_disk_key_to_cpu(key, &disk_key); 2761 } 2762 2763 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, 2764 struct btrfs_dir_item *item, 2765 struct btrfs_key *key) 2766 { 2767 struct btrfs_disk_key disk_key; 2768 btrfs_dir_item_key(eb, item, &disk_key); 2769 btrfs_disk_key_to_cpu(key, &disk_key); 2770 } 2771 2772 2773 static inline u8 btrfs_key_type(struct btrfs_key *key) 2774 { 2775 return key->type; 2776 } 2777 2778 static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val) 2779 { 2780 key->type = val; 2781 } 2782 2783 /* struct btrfs_header */ 2784 BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); 2785 BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, 2786 generation, 64); 2787 BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64); 2788 BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32); 2789 BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64); 2790 BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8); 2791 BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header, 2792 
generation, 64); 2793 BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64); 2794 BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, 2795 nritems, 32); 2796 BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); 2797 2798 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) 2799 { 2800 return (btrfs_header_flags(eb) & flag) == flag; 2801 } 2802 2803 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) 2804 { 2805 u64 flags = btrfs_header_flags(eb); 2806 btrfs_set_header_flags(eb, flags | flag); 2807 return (flags & flag) == flag; 2808 } 2809 2810 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) 2811 { 2812 u64 flags = btrfs_header_flags(eb); 2813 btrfs_set_header_flags(eb, flags & ~flag); 2814 return (flags & flag) == flag; 2815 } 2816 2817 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) 2818 { 2819 u64 flags = btrfs_header_flags(eb); 2820 return flags >> BTRFS_BACKREF_REV_SHIFT; 2821 } 2822 2823 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, 2824 int rev) 2825 { 2826 u64 flags = btrfs_header_flags(eb); 2827 flags &= ~BTRFS_BACKREF_REV_MASK; 2828 flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT; 2829 btrfs_set_header_flags(eb, flags); 2830 } 2831 2832 static inline unsigned long btrfs_header_fsid(void) 2833 { 2834 return offsetof(struct btrfs_header, fsid); 2835 } 2836 2837 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) 2838 { 2839 return offsetof(struct btrfs_header, chunk_tree_uuid); 2840 } 2841 2842 static inline int btrfs_is_leaf(struct extent_buffer *eb) 2843 { 2844 return btrfs_header_level(eb) == 0; 2845 } 2846 2847 /* struct btrfs_root_item */ 2848 BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, 2849 generation, 64); 2850 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); 2851 BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64); 2852 BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8); 2853 2854 BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, 2855 generation, 64); 2856 BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64); 2857 BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8); 2858 BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64); 2859 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); 2860 BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64); 2861 BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64); 2862 BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); 2863 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, 2864 last_snapshot, 64); 2865 BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item, 2866 generation_v2, 64); 2867 BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, 2868 ctransid, 64); 2869 BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, 2870 otransid, 64); 2871 BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, 2872 stransid, 64); 2873 BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, 2874 rtransid, 64); 2875 2876 static inline bool btrfs_root_readonly(struct btrfs_root *root) 2877 { 2878 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; 2879 } 2880 2881 static inline bool btrfs_root_dead(struct 
btrfs_root *root) 2882 { 2883 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; 2884 } 2885 2886 /* struct btrfs_root_backup */ 2887 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, 2888 tree_root, 64); 2889 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup, 2890 tree_root_gen, 64); 2891 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup, 2892 tree_root_level, 8); 2893 2894 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup, 2895 chunk_root, 64); 2896 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup, 2897 chunk_root_gen, 64); 2898 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup, 2899 chunk_root_level, 8); 2900 2901 BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup, 2902 extent_root, 64); 2903 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup, 2904 extent_root_gen, 64); 2905 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup, 2906 extent_root_level, 8); 2907 2908 BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup, 2909 fs_root, 64); 2910 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup, 2911 fs_root_gen, 64); 2912 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup, 2913 fs_root_level, 8); 2914 2915 BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup, 2916 dev_root, 64); 2917 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup, 2918 dev_root_gen, 64); 2919 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup, 2920 dev_root_level, 8); 2921 2922 BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup, 2923 csum_root, 64); 2924 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup, 2925 csum_root_gen, 64); 2926 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup, 2927 csum_root_level, 8); 2928 BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup, 2929 total_bytes, 64); 2930 BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, 2931 bytes_used, 64); 2932 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, 2933 num_devices, 64); 2934 2935 /* struct btrfs_balance_item */ 2936 BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); 2937 2938 static inline void btrfs_balance_data(struct extent_buffer *eb, 2939 struct btrfs_balance_item *bi, 2940 struct btrfs_disk_balance_args *ba) 2941 { 2942 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2943 } 2944 2945 static inline void btrfs_set_balance_data(struct extent_buffer *eb, 2946 struct btrfs_balance_item *bi, 2947 struct btrfs_disk_balance_args *ba) 2948 { 2949 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2950 } 2951 2952 static inline void btrfs_balance_meta(struct extent_buffer *eb, 2953 struct btrfs_balance_item *bi, 2954 struct btrfs_disk_balance_args *ba) 2955 { 2956 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2957 } 2958 2959 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, 2960 struct btrfs_balance_item *bi, 2961 struct btrfs_disk_balance_args *ba) 2962 { 2963 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2964 } 2965 2966 static inline void btrfs_balance_sys(struct extent_buffer *eb, 2967 struct btrfs_balance_item *bi, 2968 struct btrfs_disk_balance_args *ba) 2969 { 2970 read_eb_member(eb, bi, struct 
btrfs_balance_item, sys, ba); 2971 } 2972 2973 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, 2974 struct btrfs_balance_item *bi, 2975 struct btrfs_disk_balance_args *ba) 2976 { 2977 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 2978 } 2979 2980 static inline void 2981 btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 2982 struct btrfs_disk_balance_args *disk) 2983 { 2984 memset(cpu, 0, sizeof(*cpu)); 2985 2986 cpu->profiles = le64_to_cpu(disk->profiles); 2987 cpu->usage = le64_to_cpu(disk->usage); 2988 cpu->devid = le64_to_cpu(disk->devid); 2989 cpu->pstart = le64_to_cpu(disk->pstart); 2990 cpu->pend = le64_to_cpu(disk->pend); 2991 cpu->vstart = le64_to_cpu(disk->vstart); 2992 cpu->vend = le64_to_cpu(disk->vend); 2993 cpu->target = le64_to_cpu(disk->target); 2994 cpu->flags = le64_to_cpu(disk->flags); 2995 cpu->limit = le64_to_cpu(disk->limit); 2996 } 2997 2998 static inline void 2999 btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 3000 struct btrfs_balance_args *cpu) 3001 { 3002 memset(disk, 0, sizeof(*disk)); 3003 3004 disk->profiles = cpu_to_le64(cpu->profiles); 3005 disk->usage = cpu_to_le64(cpu->usage); 3006 disk->devid = cpu_to_le64(cpu->devid); 3007 disk->pstart = cpu_to_le64(cpu->pstart); 3008 disk->pend = cpu_to_le64(cpu->pend); 3009 disk->vstart = cpu_to_le64(cpu->vstart); 3010 disk->vend = cpu_to_le64(cpu->vend); 3011 disk->target = cpu_to_le64(cpu->target); 3012 disk->flags = cpu_to_le64(cpu->flags); 3013 disk->limit = cpu_to_le64(cpu->limit); 3014 } 3015 3016 /* struct btrfs_super_block */ 3017 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); 3018 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64); 3019 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, 3020 generation, 64); 3021 BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); 3022 BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, 3023 struct btrfs_super_block, sys_chunk_array_size, 32); 3024 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation, 3025 struct btrfs_super_block, chunk_root_generation, 64); 3026 BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, 3027 root_level, 8); 3028 BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, 3029 chunk_root, 64); 3030 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, 3031 chunk_root_level, 8); 3032 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, 3033 log_root, 64); 3034 BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block, 3035 log_root_transid, 64); 3036 BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block, 3037 log_root_level, 8); 3038 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block, 3039 total_bytes, 64); 3040 BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block, 3041 bytes_used, 64); 3042 BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block, 3043 sectorsize, 32); 3044 BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block, 3045 nodesize, 32); 3046 BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block, 3047 stripesize, 32); 3048 BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block, 3049 root_dir_objectid, 64); 3050 BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, 3051 num_devices, 64); 3052 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, 3053 compat_flags, 64); 3054 
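/*
 * Illustrative sketch only (not part of the original header): the stack
 * accessors above operate directly on a struct btrfs_super_block held in
 * memory (for example the copy read at mount time), converting from the
 * on-disk little-endian layout. The helper name and the exact checks are
 * made up for the example; the real mount-time validation is far more
 * thorough.
 */
static inline int btrfs_example_super_sizes_sane(struct btrfs_super_block *sb)
{
	u32 sectorsize = btrfs_super_sectorsize(sb);
	u32 nodesize = btrfs_super_nodesize(sb);

	/* both sizes must be non-zero powers of two */
	if (!sectorsize || (sectorsize & (sectorsize - 1)))
		return 0;
	if (!nodesize || (nodesize & (nodesize - 1)))
		return 0;

	/* metadata blocks span at least one sector and are bounded above */
	return nodesize >= sectorsize &&
	       nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
}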
BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, 3055 compat_ro_flags, 64); 3056 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, 3057 incompat_flags, 64); 3058 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, 3059 csum_type, 16); 3060 BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, 3061 cache_generation, 64); 3062 BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); 3063 BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, 3064 uuid_tree_generation, 64); 3065 3066 static inline int btrfs_super_csum_size(struct btrfs_super_block *s) 3067 { 3068 u16 t = btrfs_super_csum_type(s); 3069 /* 3070 * csum type is validated at mount time 3071 */ 3072 return btrfs_csum_sizes[t]; 3073 } 3074 3075 static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) 3076 { 3077 return offsetof(struct btrfs_leaf, items); 3078 } 3079 3080 /* struct btrfs_file_extent_item */ 3081 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); 3082 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, 3083 struct btrfs_file_extent_item, disk_bytenr, 64); 3084 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset, 3085 struct btrfs_file_extent_item, offset, 64); 3086 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation, 3087 struct btrfs_file_extent_item, generation, 64); 3088 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes, 3089 struct btrfs_file_extent_item, num_bytes, 64); 3090 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes, 3091 struct btrfs_file_extent_item, disk_num_bytes, 64); 3092 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, 3093 struct btrfs_file_extent_item, compression, 8); 3094 3095 static inline unsigned long 3096 btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) 3097 { 3098 return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START; 3099 } 3100 3101 static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) 3102 { 3103 return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize; 3104 } 3105 3106 BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, 3107 disk_bytenr, 64); 3108 BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, 3109 generation, 64); 3110 BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item, 3111 disk_num_bytes, 64); 3112 BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, 3113 offset, 64); 3114 BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, 3115 num_bytes, 64); 3116 BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item, 3117 ram_bytes, 64); 3118 BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item, 3119 compression, 8); 3120 BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item, 3121 encryption, 8); 3122 BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, 3123 other_encoding, 16); 3124 3125 /* 3126 * this returns the number of bytes used by the item on disk, minus the 3127 * size of any extent headers. If a file is compressed on disk, this is 3128 * the compressed size 3129 */ 3130 static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, 3131 struct btrfs_item *e) 3132 { 3133 return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; 3134 } 3135 3136 /* this returns the number of file bytes represented by the inline item. 
3137 * If an item is compressed, this is the uncompressed size 3138 */ 3139 static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, 3140 int slot, 3141 struct btrfs_file_extent_item *fi) 3142 { 3143 struct btrfs_map_token token; 3144 3145 btrfs_init_map_token(&token); 3146 /* 3147 * return the space used on disk if this item isn't 3148 * compressed or encoded 3149 */ 3150 if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 && 3151 btrfs_token_file_extent_encryption(eb, fi, &token) == 0 && 3152 btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) { 3153 return btrfs_file_extent_inline_item_len(eb, 3154 btrfs_item_nr(slot)); 3155 } 3156 3157 /* otherwise use the ram bytes field */ 3158 return btrfs_token_file_extent_ram_bytes(eb, fi, &token); 3159 } 3160 3161 3162 /* btrfs_dev_stats_item */ 3163 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, 3164 struct btrfs_dev_stats_item *ptr, 3165 int index) 3166 { 3167 u64 val; 3168 3169 read_extent_buffer(eb, &val, 3170 offsetof(struct btrfs_dev_stats_item, values) + 3171 ((unsigned long)ptr) + (index * sizeof(u64)), 3172 sizeof(val)); 3173 return val; 3174 } 3175 3176 static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, 3177 struct btrfs_dev_stats_item *ptr, 3178 int index, u64 val) 3179 { 3180 write_extent_buffer(eb, &val, 3181 offsetof(struct btrfs_dev_stats_item, values) + 3182 ((unsigned long)ptr) + (index * sizeof(u64)), 3183 sizeof(val)); 3184 } 3185 3186 /* btrfs_qgroup_status_item */ 3187 BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item, 3188 generation, 64); 3189 BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item, 3190 version, 64); 3191 BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item, 3192 flags, 64); 3193 BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item, 3194 rescan, 64); 3195 3196 /* btrfs_qgroup_info_item */ 3197 BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item, 3198 generation, 64); 3199 BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64); 3200 BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item, 3201 rfer_cmpr, 64); 3202 BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64); 3203 BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item, 3204 excl_cmpr, 64); 3205 3206 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation, 3207 struct btrfs_qgroup_info_item, generation, 64); 3208 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item, 3209 rfer, 64); 3210 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr, 3211 struct btrfs_qgroup_info_item, rfer_cmpr, 64); 3212 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item, 3213 excl, 64); 3214 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr, 3215 struct btrfs_qgroup_info_item, excl_cmpr, 64); 3216 3217 /* btrfs_qgroup_limit_item */ 3218 BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, 3219 flags, 64); 3220 BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item, 3221 max_rfer, 64); 3222 BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item, 3223 max_excl, 64); 3224 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item, 3225 rsv_rfer, 64); 3226 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item, 3227 rsv_excl, 64); 3228 3229 /* btrfs_dev_replace_item */ 3230 
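/*
 * Illustrative sketch only (not part of the original header):
 * btrfs_dev_stats_value() and btrfs_set_dev_stats_value() above address
 * the values[] array of a btrfs_dev_stats_item by index. A hypothetical
 * helper that sums every counter of one item could look like this; it
 * assumes the BTRFS_DEV_STAT_VALUES_MAX enum value provided by the uapi
 * header pulled in through <linux/btrfs.h>.
 */
static inline u64 btrfs_example_dev_stats_total(struct extent_buffer *eb,
						struct btrfs_dev_stats_item *ptr)
{
	u64 total = 0;
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		total += btrfs_dev_stats_value(eb, ptr, i);
	return total;
}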
BTRFS_SETGET_FUNCS(dev_replace_src_devid, 3231 struct btrfs_dev_replace_item, src_devid, 64); 3232 BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode, 3233 struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode, 3234 64); 3235 BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item, 3236 replace_state, 64); 3237 BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item, 3238 time_started, 64); 3239 BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item, 3240 time_stopped, 64); 3241 BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item, 3242 num_write_errors, 64); 3243 BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors, 3244 struct btrfs_dev_replace_item, num_uncorrectable_read_errors, 3245 64); 3246 BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item, 3247 cursor_left, 64); 3248 BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item, 3249 cursor_right, 64); 3250 3251 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid, 3252 struct btrfs_dev_replace_item, src_devid, 64); 3253 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode, 3254 struct btrfs_dev_replace_item, 3255 cont_reading_from_srcdev_mode, 64); 3256 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state, 3257 struct btrfs_dev_replace_item, replace_state, 64); 3258 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started, 3259 struct btrfs_dev_replace_item, time_started, 64); 3260 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped, 3261 struct btrfs_dev_replace_item, time_stopped, 64); 3262 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors, 3263 struct btrfs_dev_replace_item, num_write_errors, 64); 3264 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors, 3265 struct btrfs_dev_replace_item, 3266 num_uncorrectable_read_errors, 64); 3267 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left, 3268 struct btrfs_dev_replace_item, cursor_left, 64); 3269 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right, 3270 struct btrfs_dev_replace_item, cursor_right, 64); 3271 3272 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) 3273 { 3274 return sb->s_fs_info; 3275 } 3276 3277 /* helper function to cast into the data area of the leaf. */ 3278 #define btrfs_item_ptr(leaf, slot, type) \ 3279 ((type *)(btrfs_leaf_data(leaf) + \ 3280 btrfs_item_offset_nr(leaf, slot))) 3281 3282 #define btrfs_item_ptr_offset(leaf, slot) \ 3283 ((unsigned long)(btrfs_leaf_data(leaf) + \ 3284 btrfs_item_offset_nr(leaf, slot))) 3285 3286 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) 3287 { 3288 return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && 3289 (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); 3290 } 3291 3292 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) 3293 { 3294 return mapping_gfp_mask(mapping) & ~__GFP_FS; 3295 } 3296 3297 /* extent-tree.c */ 3298 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, 3299 unsigned num_items) 3300 { 3301 return (root->nodesize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * 3302 2 * num_items; 3303 } 3304 3305 /* 3306 * Doing a truncate won't result in new nodes or leaves, just what we need for 3307 * COW. 
3308 */ 3309 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root, 3310 unsigned num_items) 3311 { 3312 return root->nodesize * BTRFS_MAX_LEVEL * num_items; 3313 } 3314 3315 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 3316 struct btrfs_root *root); 3317 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, 3318 struct btrfs_root *root); 3319 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3320 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 3321 struct btrfs_root *root, unsigned long count); 3322 int btrfs_async_run_delayed_refs(struct btrfs_root *root, 3323 unsigned long count, int wait); 3324 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); 3325 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 3326 struct btrfs_root *root, u64 bytenr, 3327 u64 offset, int metadata, u64 *refs, u64 *flags); 3328 int btrfs_pin_extent(struct btrfs_root *root, 3329 u64 bytenr, u64 num, int reserved); 3330 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root, 3331 u64 bytenr, u64 num_bytes); 3332 int btrfs_exclude_logged_extents(struct btrfs_root *root, 3333 struct extent_buffer *eb); 3334 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 3335 struct btrfs_root *root, 3336 u64 objectid, u64 offset, u64 bytenr); 3337 struct btrfs_block_group_cache *btrfs_lookup_block_group( 3338 struct btrfs_fs_info *info, 3339 u64 bytenr); 3340 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3341 int get_block_group_index(struct btrfs_block_group_cache *cache); 3342 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 3343 struct btrfs_root *root, u64 parent, 3344 u64 root_objectid, 3345 struct btrfs_disk_key *key, int level, 3346 u64 hint, u64 empty_size); 3347 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3348 struct btrfs_root *root, 3349 struct extent_buffer *buf, 3350 u64 parent, int last_ref); 3351 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 3352 struct btrfs_root *root, 3353 u64 root_objectid, u64 owner, 3354 u64 offset, struct btrfs_key *ins); 3355 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 3356 struct btrfs_root *root, 3357 u64 root_objectid, u64 owner, u64 offset, 3358 struct btrfs_key *ins); 3359 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes, 3360 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 3361 struct btrfs_key *ins, int is_data, int delalloc); 3362 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3363 struct extent_buffer *buf, int full_backref); 3364 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3365 struct extent_buffer *buf, int full_backref); 3366 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 3367 struct btrfs_root *root, 3368 u64 bytenr, u64 num_bytes, u64 flags, 3369 int level, int is_data); 3370 int btrfs_free_extent(struct btrfs_trans_handle *trans, 3371 struct btrfs_root *root, 3372 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 3373 u64 owner, u64 offset, int no_quota); 3374 3375 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len, 3376 int delalloc); 3377 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 3378 u64 start, u64 len); 3379 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 3380 struct btrfs_root *root); 3381 int btrfs_finish_extent_commit(struct 
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_root *root,
                unsigned long count, int wait);
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 bytenr,
                u64 offset, int metadata, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
                u64 bytenr, u64 num, int reserved);
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
                u64 bytenr, u64 num_bytes);
int btrfs_exclude_logged_extents(struct btrfs_root *root,
                struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                u64 objectid, u64 offset, u64 bytenr);
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                struct btrfs_fs_info *info,
                u64 bytenr);
int get_block_group_index(struct btrfs_block_group_cache *cache);
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 parent,
                u64 root_objectid,
                struct btrfs_disk_key *key, int level,
                u64 hint, u64 empty_size);
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct extent_buffer *buf,
                u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                u64 root_objectid, u64 owner,
                u64 offset, struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                u64 root_objectid, u64 owner, u64 offset,
                struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
                u64 min_alloc_size, u64 empty_size, u64 hint_byte,
                struct btrfs_key *ins, int is_data, int delalloc);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                struct extent_buffer *buf, int full_backref);
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                struct extent_buffer *buf, int full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                u64 bytenr, u64 num_bytes, u64 flags,
                int level, int is_data);
int btrfs_free_extent(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
                u64 owner, u64 offset, int no_quota);

int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
                int delalloc);
int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
                u64 start, u64 len);
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                u64 bytenr, u64 num_bytes, u64 parent,
                u64 root_objectid, u64 owner, u64 offset, int no_quota);

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_root *root);
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 bytes_used,
                u64 type, u64 chunk_objectid, u64 chunk_offset,
                u64 size);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 group_start,
                struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);

enum btrfs_reserve_flush_enum {
        /* If we are in the transaction, we can't flush anything. */
        BTRFS_RESERVE_NO_FLUSH,
        /*
         * Flushing delalloc may cause a deadlock somewhere; in that
         * case, use FLUSH_LIMIT.
         */
        BTRFS_RESERVE_FLUSH_LIMIT,
        BTRFS_RESERVE_FLUSH_ALL,
};

int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
                struct inode *inode);
void btrfs_orphan_release_metadata(struct inode *inode);
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
                struct btrfs_block_rsv *rsv,
                int nitems,
                u64 *qgroup_reserved, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_root *root,
                struct btrfs_block_rsv *rsv,
                u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
                unsigned short type);
void btrfs_free_block_rsv(struct btrfs_root *root,
                struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
                struct btrfs_block_rsv *block_rsv, u64 num_bytes,
                enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_root *root,
                struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
                struct btrfs_block_rsv *block_rsv, u64 min_reserved,
                enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
                struct btrfs_block_rsv *dst_rsv,
                u64 num_bytes);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
                struct btrfs_block_rsv *dest, u64 num_bytes,
                int min_factor);
void btrfs_block_rsv_release(struct btrfs_root *root,
                struct btrfs_block_rsv *block_rsv,
                u64 num_bytes);
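/*
 * Usage sketch (editor's example, not part of the original header): a caller
 * that needs a temporary metadata reservation typically pairs these helpers
 * roughly as follows. BTRFS_BLOCK_RSV_TEMP is the temporary reservation type
 * defined earlier in this header; error handling is abbreviated:
 *
 *      struct btrfs_block_rsv *rsv;
 *      u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
 *      int ret;
 *
 *      rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *      if (!rsv)
 *              return -ENOMEM;
 *      ret = btrfs_block_rsv_add(root, rsv, bytes, BTRFS_RESERVE_FLUSH_ALL);
 *      if (!ret) {
 *              ... do the metadata work charged to rsv ...
 *              btrfs_block_rsv_release(root, rsv, (u64)-1);
 *      }
 *      btrfs_free_block_rsv(root, rsv);
 */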
int btrfs_set_block_group_ro(struct btrfs_root *root,
                struct btrfs_block_group_cache *cache);
void btrfs_set_block_group_rw(struct btrfs_root *root,
                struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
                u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 type);
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);

int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
                struct btrfs_fs_info *fs_info);
int __get_raid_index(u64 flags);
int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
void btrfs_end_write_no_snapshoting(struct btrfs_root *root);

/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
                int level, int *slot);
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
                struct btrfs_path *path, u64 min_objectid,
                int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
                struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
                struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
                struct btrfs_key *key, int lowest_level,
                u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
                struct btrfs_path *path,
                u64 min_trans);
enum btrfs_compare_tree_result {
        BTRFS_COMPARE_TREE_NEW,
        BTRFS_COMPARE_TREE_DELETED,
        BTRFS_COMPARE_TREE_CHANGED,
        BTRFS_COMPARE_TREE_SAME,
};
typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root,
                struct btrfs_root *right_root,
                struct btrfs_path *left_path,
                struct btrfs_path *right_path,
                struct btrfs_key *key,
                enum btrfs_compare_tree_result result,
                void *ctx);
int btrfs_compare_trees(struct btrfs_root *left_root,
                struct btrfs_root *right_root,
                btrfs_changed_cb_t cb, void *ctx);
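/*
 * Callback sketch (editor's example, not part of the original header): the
 * names changed_cb and my_ctx are placeholders. btrfs_compare_trees() walks
 * both trees and invokes the callback with a btrfs_compare_tree_result
 * describing how the item at @key differs between them:
 *
 *      static int changed_cb(struct btrfs_root *left_root,
 *                            struct btrfs_root *right_root,
 *                            struct btrfs_path *left_path,
 *                            struct btrfs_path *right_path,
 *                            struct btrfs_key *key,
 *                            enum btrfs_compare_tree_result result,
 *                            void *ctx)
 *      {
 *              if (result != BTRFS_COMPARE_TREE_SAME)
 *                      ... record that @key is new, deleted or changed ...
 *              return 0;
 *      }
 *
 *      ret = btrfs_compare_trees(left_root, right_root, changed_cb, my_ctx);
 */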
int btrfs_cow_block(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct extent_buffer *buf,
                struct extent_buffer *parent, int parent_slot,
                struct extent_buffer **cow_ret);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct extent_buffer *buf,
                struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
                struct extent_buffer *buf);
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
                u32 data_size);
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
                u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path,
                struct btrfs_key *new_key,
                unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path,
                struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
                u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
                *root, struct btrfs_key *key, struct btrfs_path *p, int
                ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
                struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
                struct btrfs_key *key, struct btrfs_path *p,
                int find_higher, int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct extent_buffer *parent,
                int start_slot, u64 *last_ret,
                struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_clear_path_blocking(struct btrfs_path *p,
                struct extent_buffer *held, int held_rw);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path)
{
        return btrfs_del_items(trans, root, path, path->slots[0], 1);
}

void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
                struct btrfs_key *cpu_key, u32 *data_size,
                u32 total_data, u32 total_size, int nr);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
                *root, struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path,
                struct btrfs_key *cpu_key, u32 *data_size, int nr);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path,
                struct btrfs_key *key,
                u32 data_size)
{
        return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1);
}
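/*
 * Insertion sketch (editor's example, not part of the original header): the
 * usual pattern reserves the slot with btrfs_insert_empty_item() and then
 * writes the item body through btrfs_item_ptr(). The key values and devid
 * are placeholders, and btrfs_mark_buffer_dirty() is declared in disk-io.h:
 *
 *      struct btrfs_path *path;
 *      struct btrfs_dev_replace_item *item;
 *      struct btrfs_key key;
 *      int ret;
 *
 *      path = btrfs_alloc_path();
 *      if (!path)
 *              return -ENOMEM;
 *      key.objectid = ...;
 *      key.type = ...;
 *      key.offset = ...;
 *      ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*item));
 *      if (!ret) {
 *              item = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *                                    struct btrfs_dev_replace_item);
 *              btrfs_set_dev_replace_src_devid(path->nodes[0], item, devid);
 *              btrfs_mark_buffer_dirty(path->nodes[0]);
 *      }
 *      btrfs_free_path(path);
 *      return ret;
 */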
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
                u64 time_seq);
static inline int btrfs_next_old_item(struct btrfs_root *root,
                struct btrfs_path *p, u64 time_seq)
{
        ++p->slots[0];
        if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
                return btrfs_next_old_leaf(root, p, time_seq);
        return 0;
}
static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
        return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
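/*
 * Iteration sketch (editor's example, not part of the original header):
 * walking every item of a given objectid pairs btrfs_search_slot() with the
 * leaf stepping helpers above. The variables objectid, key, path and ret are
 * placeholders; btrfs_header_nritems() and btrfs_item_key_to_cpu() are
 * defined earlier in this header:
 *
 *      key.objectid = objectid;
 *      key.type = 0;
 *      key.offset = 0;
 *
 *      ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *      if (ret < 0)
 *              goto out;
 *      while (1) {
 *              struct extent_buffer *leaf = path->nodes[0];
 *              int slot = path->slots[0];
 *
 *              if (slot >= btrfs_header_nritems(leaf)) {
 *                      ret = btrfs_next_leaf(root, path);
 *                      if (ret)        (< 0 is an error, > 0 is end of tree)
 *                              break;
 *                      continue;
 *              }
 *              btrfs_item_key_to_cpu(leaf, &key, slot);
 *              if (key.objectid != objectid)
 *                      break;
 *              ... use btrfs_item_ptr(leaf, slot, ...) here ...
 *              path->slots[0]++;
 *      }
 * out:
 *      btrfs_release_path(path);
 */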
int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
                struct btrfs_block_rsv *block_rsv,
                int update_ref, int for_reloc);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct extent_buffer *node,
                struct extent_buffer *parent);
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
        /*
         * Get synced with close_ctree()
         */
        smp_mb();
        return fs_info->closing;
}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleep. This function is used to check that state of the fs.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
{
        return (root->fs_info->sb->s_flags & MS_RDONLY ||
                btrfs_fs_closing(root->fs_info));
}

static inline void free_fs_info(struct btrfs_fs_info *fs_info)
{
        kfree(fs_info->balance_ctl);
        kfree(fs_info->delayed_root);
        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        kfree(fs_info->chunk_root);
        kfree(fs_info->dev_root);
        kfree(fs_info->csum_root);
        kfree(fs_info->quota_root);
        kfree(fs_info->uuid_root);
        kfree(fs_info->super_copy);
        kfree(fs_info->super_for_commit);
        security_free_mnt_opts(&fs_info->security_opts);
        kfree(fs_info);
}

/* tree mod log functions from ctree.c */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
                struct seq_list *elem);
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
                struct seq_list *elem);
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);

/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
                struct btrfs_path *path,
                u64 root_id, u64 ref_id);
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
                struct btrfs_root *tree_root,
                u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
                const char *name, int name_len);
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
                struct btrfs_root *tree_root,
                u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
                const char *name, int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                struct btrfs_key *key);
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
                *root, struct btrfs_key *key, struct btrfs_root_item
                *item);
int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_key *key,
                struct btrfs_root_item *item);
int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
                struct btrfs_path *path, struct btrfs_root_item *root_item,
                struct btrfs_key *root_key);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
void btrfs_set_root_node(struct btrfs_root_item *item,
                struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
void btrfs_update_root_times(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);

/* uuid-tree.c */
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
                struct btrfs_root *uuid_root, u8 *uuid, u8 type,
                u64 subid);
int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
                struct btrfs_root *uuid_root, u8 *uuid, u8 type,
                u64 subid);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
                int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
                u64));

/* dir-item.c */
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
                const char *name, int name_len);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, const char *name,
                int name_len, struct inode *dir,
                struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path, u64 dir,
                const char *name, int name_len,
                int mod);
struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path, u64 dir,
                u64 objectid, const char *name, int name_len,
                int mod);
struct btrfs_dir_item *
btrfs_search_dir_index_item(struct btrfs_root *root,
                struct btrfs_path *path, u64 dirid,
                const char *name, int name_len);
int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path,
                struct btrfs_dir_item *di);
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path, u64 objectid,
                const char *name, u16 name_len,
                const void *data, u16 data_len);
struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path, u64 dir,
                const char *name, u16 name_len,
                int mod);
int verify_dir_item(struct btrfs_root *root,
                struct extent_buffer *leaf,
                struct btrfs_dir_item *dir_item);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
                struct btrfs_path *path,
                const char *name,
                int name_len);

/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 offset);
int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 offset);
int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);

/* inode-item.c */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                const char *name, int name_len,
                u64 inode_objectid, u64 ref_objectid, u64 index);
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                const char *name, int name_len,
                u64 inode_objectid, u64 ref_objectid, u64 *index);
int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path, u64 objectid);
int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
                *root, struct btrfs_path *path,
                struct btrfs_key *location, int mod);

struct btrfs_inode_extref *
btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path,
                const char *name, int name_len,
                u64 inode_objectid, u64 ref_objectid, int ins_len,
                int cow);

int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
                u64 ref_objectid, const char *name,
                int name_len,
                struct btrfs_inode_extref **extref_ret);

/* file-item.c */
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, u64 bytenr, u64 len);
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
                struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
                struct bio *bio, u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                u64 objectid, u64 pos,
                u64 disk_offset, u64 disk_num_bytes,
                u64 num_bytes, u64 offset, u64 ram_bytes,
                u8 compression, u8 encryption, u16 other_encoding);
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_path *path, u64 objectid,
                u64 bytenr, int mod);
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                struct bio *bio, u64 file_start, int contig);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
                struct list_head *list, int search_commit);
void btrfs_extent_item_to_extent_map(struct inode *inode,
                const struct btrfs_path *path,
                struct btrfs_file_extent_item *fi,
                const bool new_inline,
                struct extent_map *em);

/* inode.c */
struct btrfs_delalloc_work {
        struct inode *inode;
        int wait;
        int delay_iput;
        struct completion completion;
        struct list_head list;
        struct btrfs_work work;
};

struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
                int wait, int delay_iput);
void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);

struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
                size_t pg_offset, u64 start, u64 len,
                int create);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
                u64 *orig_start, u64 *orig_block_len,
                u64 *ram_bytes);

/* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
#if defined(ClearPageFsMisc) && !defined(ClearPageChecked)
#define ClearPageChecked ClearPageFsMisc
#define SetPageChecked SetPageFsMisc
#define PageChecked PageFsMisc
#endif

/* This forces readahead on a given range of bytes in an inode */
static inline void btrfs_force_ra(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                pgoff_t offset, unsigned long req_size)
{
        page_cache_sync_readahead(mapping, ra, file, offset, req_size);
}

struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct inode *dir, struct inode *inode,
                const char *name, int name_len);
int btrfs_add_link(struct btrfs_trans_handle *trans,
                struct inode *parent_inode, struct inode *inode,
                const char *name, int name_len, int add_backref, u64 index);
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct inode *dir, u64 objectid,
                const char *name, int name_len);
int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
                int front);
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct inode *inode, u64 new_size,
                u32 min_type);

int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
                int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
                struct extent_state **cached_state);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
                struct btrfs_root *new_root,
                struct btrfs_root *parent_root,
                u64 new_dirid);
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
                size_t size, struct bio *bio,
                unsigned long bio_flags);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int btrfs_init_cachep(void);
void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
                size_t pg_offset, u64 start, u64 end,
                int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
                struct inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
                u64 start, u64 num_bytes, u64 min_size,
                loff_t actual_len, u64 *alloc_hint);
int btrfs_prealloc_file_range_trans(struct inode *inode,
                struct btrfs_trans_handle *trans, int mode,
                u64 start, u64 num_bytes, u64 min_size,
                loff_t actual_len, u64 *alloc_hint);
int btrfs_inode_check_errors(struct inode *inode);
extern const struct dentry_operations btrfs_dentry_operations;
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode);
#endif

/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
void btrfs_update_iflags(struct inode *inode);
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
int btrfs_is_empty_uuid(u8 *uuid);
int btrfs_defrag_file(struct inode *inode, struct file *file,
                struct btrfs_ioctl_defrag_range_args *range,
                u64 newer_than, unsigned long max_pages);
void btrfs_get_block_group_info(struct list_head *groups_list,
                struct btrfs_ioctl_space_info *space);
void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
                struct btrfs_ioctl_balance_args *bargs);

/* file.c */
int btrfs_auto_defrag_init(void);
void btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                struct inode *inode);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                int skip_pinned);
extern const struct file_operations btrfs_file_operations;
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct inode *inode,
                struct btrfs_path *path, u64 start, u64 end,
                u64 *drop_end, int drop_cache,
                int replace_extent,
                u32 extent_item_size,
                int *key_inserted);
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct inode *inode, u64 start,
                u64 end, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                struct page **pages, size_t num_pages,
                loff_t pos, size_t write_bytes,
                struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);

/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);

/* sysfs.c */
int btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info);

/* xattr.c */
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);

/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options);
int btrfs_sync_fs(struct super_block *sb, int wait);

#ifdef CONFIG_PRINTK
__printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
#else
static inline __printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
}
#endif

#define btrfs_emerg(fs_info, fmt, args...) \
        btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert(fs_info, fmt, args...) \
        btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit(fs_info, fmt, args...) \
        btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err(fs_info, fmt, args...) \
        btrfs_printk(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn(fs_info, fmt, args...) \
        btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice(fs_info, fmt, args...) \
        btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info(fs_info, fmt, args...) \
        btrfs_printk(fs_info, KERN_INFO fmt, ##args)

#ifdef DEBUG
#define btrfs_debug(fs_info, fmt, args...) \
        btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
#else
#define btrfs_debug(fs_info, fmt, args...) \
        no_printk(KERN_DEBUG fmt, ##args)
#endif
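/*
 * Usage note (editor's example, not part of the original header): these
 * wrappers take the fs_info so the message can be tagged with the filesystem
 * it refers to; the format string follows ordinary printk rules. The message
 * text and the bytenr/ret variables are placeholders:
 *
 *      btrfs_info(fs_info, "resuming balance");
 *      btrfs_warn(fs_info, "failed to trim block group %llu, error %d",
 *                 bytenr, ret);
 */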
#ifdef CONFIG_BTRFS_ASSERT

static inline void assfail(char *expr, char *file, int line)
{
        pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
               expr, file, line);
        BUG();
}

#define ASSERT(expr)    \
        (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#else
#define ASSERT(expr)    ((void)0)
#endif

#define btrfs_assert()
__printf(5, 6)
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
                unsigned int line, int errno, const char *fmt, ...);

void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, const char *function,
                unsigned int line, int errno);

#define btrfs_set_fs_incompat(__fs_info, opt) \
        __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
                u64 flag)
{
        struct btrfs_super_block *disk_super;
        u64 features;

        disk_super = fs_info->super_copy;
        features = btrfs_super_incompat_flags(disk_super);
        if (!(features & flag)) {
                spin_lock(&fs_info->super_lock);
                features = btrfs_super_incompat_flags(disk_super);
                if (!(features & flag)) {
                        features |= flag;
                        btrfs_set_super_incompat_flags(disk_super, features);
                        btrfs_info(fs_info, "setting %llu feature flag",
                                   flag);
                }
                spin_unlock(&fs_info->super_lock);
        }
}

#define btrfs_fs_incompat(fs_info, opt) \
        __btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
{
        struct btrfs_super_block *disk_super;
        disk_super = fs_info->super_copy;
        return !!(btrfs_super_incompat_flags(disk_super) & flag);
}

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, that way the exact line number is reported.
 */

#define btrfs_abort_transaction(trans, root, errno)             \
do {                                                            \
        __btrfs_abort_transaction(trans, root, __func__,        \
                                  __LINE__, errno);             \
} while (0)
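/*
 * Error-handling sketch (editor's example, not part of the original header):
 * a caller holding a transaction handle propagates an unexpected failure by
 * aborting the transaction at the point where it is detected, e.g.:
 *
 *      ret = btrfs_insert_empty_item(trans, root, path, &key, item_size);
 *      if (ret) {
 *              btrfs_abort_transaction(trans, root, ret);
 *              goto out;
 *      }
 */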
#define btrfs_std_error(fs_info, errno)                         \
do {                                                            \
        if ((errno))                                            \
                __btrfs_std_error((fs_info), __func__,          \
                                  __LINE__, (errno), NULL);     \
} while (0)

#define btrfs_error(fs_info, errno, fmt, args...)               \
do {                                                            \
        __btrfs_std_error((fs_info), __func__, __LINE__,        \
                          (errno), fmt, ##args);                \
} while (0)

__printf(5, 6)
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
                unsigned int line, int errno, const char *fmt, ...);

/*
 * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
 * will panic(). Otherwise we BUG() here.
 */
#define btrfs_panic(fs_info, errno, fmt, args...)                       \
do {                                                                    \
        __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
        BUG();                                                          \
} while (0)

/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
int btrfs_init_acl(struct btrfs_trans_handle *trans,
                struct inode *inode, struct inode *dir);
#else
#define btrfs_get_acl NULL
#define btrfs_set_acl NULL
static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
                struct inode *inode, struct inode *dir)
{
        return 0;
}
#endif

/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
                struct btrfs_root *root);
int btrfs_recover_relocation(struct btrfs_root *root);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct extent_buffer *buf,
                struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
                struct btrfs_pending_snapshot *pending,
                u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
                struct btrfs_pending_snapshot *pending);

/* scrub.c */
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                u64 end, struct btrfs_scrub_progress *progress,
                int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_root *root);
void btrfs_scrub_continue(struct btrfs_root *root);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
                struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
                struct btrfs_scrub_progress *progress);

/* dev-replace.c */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);

static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
{
        btrfs_bio_counter_sub(fs_info, 1);
}

/* reada.c */
struct reada_control {
        struct btrfs_root       *root;          /* tree to prefetch */
        struct btrfs_key        key_start;
        struct btrfs_key        key_end;        /* exclusive */
        atomic_t                elems;
        struct kref             refcnt;
        wait_queue_head_t       wait;
};
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
                struct btrfs_key *start, struct btrfs_key *end);
int btrfs_reada_wait(void *handle);
void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
                u64 start, int err);
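/*
 * Usage sketch (editor's example, not part of the original header):
 * readahead of a key range is started with btrfs_reada_add() and, if the
 * caller wants to block until it finishes, waited for. key_start and
 * key_end are placeholders the caller fills in; IS_ERR() is from
 * <linux/err.h>:
 *
 *      struct reada_control *rc;
 *
 *      rc = btrfs_reada_add(root, &key_start, &key_end);
 *      if (!IS_ERR(rc))
 *              btrfs_reada_wait(rc);
 */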
static inline int is_fstree(u64 rootid)
{
        if (rootid == BTRFS_FS_TREE_OBJECTID ||
            (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
                return 1;
        return 0;
}

static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
{
        return signal_pending(current);
}

/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode);
#endif

static inline int btrfs_test_is_dummy_root(struct btrfs_root *root)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
        if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
                return 1;
#endif
        return 0;
}

#endif