1 /* 2 * Copyright (C) 2007 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #ifndef __BTRFS_CTREE__ 20 #define __BTRFS_CTREE__ 21 22 #include <linux/mm.h> 23 #include <linux/highmem.h> 24 #include <linux/fs.h> 25 #include <linux/rwsem.h> 26 #include <linux/semaphore.h> 27 #include <linux/completion.h> 28 #include <linux/backing-dev.h> 29 #include <linux/wait.h> 30 #include <linux/slab.h> 31 #include <linux/kobject.h> 32 #include <trace/events/btrfs.h> 33 #include <asm/kmap_types.h> 34 #include <linux/pagemap.h> 35 #include <linux/btrfs.h> 36 #include <linux/workqueue.h> 37 #include <linux/security.h> 38 #include "extent_io.h" 39 #include "extent_map.h" 40 #include "async-thread.h" 41 42 struct btrfs_trans_handle; 43 struct btrfs_transaction; 44 struct btrfs_pending_snapshot; 45 extern struct kmem_cache *btrfs_trans_handle_cachep; 46 extern struct kmem_cache *btrfs_transaction_cachep; 47 extern struct kmem_cache *btrfs_bit_radix_cachep; 48 extern struct kmem_cache *btrfs_path_cachep; 49 extern struct kmem_cache *btrfs_free_space_cachep; 50 struct btrfs_ordered_sum; 51 52 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 53 #define STATIC noinline 54 #else 55 #define STATIC static noinline 56 #endif 57 58 #define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */ 59 60 #define BTRFS_MAX_MIRRORS 3 61 62 #define BTRFS_MAX_LEVEL 8 63 64 #define BTRFS_COMPAT_EXTENT_TREE_V0 65 66 /* holds pointers to all of the tree roots */ 67 #define BTRFS_ROOT_TREE_OBJECTID 1ULL 68 69 /* stores information about which extents are in use, and reference counts */ 70 #define BTRFS_EXTENT_TREE_OBJECTID 2ULL 71 72 /* 73 * chunk tree stores translations from logical -> physical block numbering 74 * the super block points to the chunk tree 75 */ 76 #define BTRFS_CHUNK_TREE_OBJECTID 3ULL 77 78 /* 79 * stores information about which areas of a given device are in use. 80 * one per device. 
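 *
 * Illustrative sketch (not part of the original header): the root item for
 * each of the permanent trees above lives in the tree of tree roots and is
 * looked up with a key built from its objectid; BTRFS_ROOT_ITEM_KEY and
 * struct btrfs_key are defined further down in this file. For the extent
 * tree, for example:
 *
 *	struct btrfs_key key = {
 *		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
 *		.type = BTRFS_ROOT_ITEM_KEY,
 *		.offset = 0,
 *	};
 *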
The tree of tree roots points to the device tree 81 */ 82 #define BTRFS_DEV_TREE_OBJECTID 4ULL 83 84 /* one per subvolume, storing files and directories */ 85 #define BTRFS_FS_TREE_OBJECTID 5ULL 86 87 /* directory objectid inside the root tree */ 88 #define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL 89 90 /* holds checksums of all the data extents */ 91 #define BTRFS_CSUM_TREE_OBJECTID 7ULL 92 93 /* holds quota configuration and tracking */ 94 #define BTRFS_QUOTA_TREE_OBJECTID 8ULL 95 96 /* for storing items that use the BTRFS_UUID_KEY* types */ 97 #define BTRFS_UUID_TREE_OBJECTID 9ULL 98 99 /* for storing balance parameters in the root tree */ 100 #define BTRFS_BALANCE_OBJECTID -4ULL 101 102 /* orphan objectid for tracking unlinked/truncated files */ 103 #define BTRFS_ORPHAN_OBJECTID -5ULL 104 105 /* does write ahead logging to speed up fsyncs */ 106 #define BTRFS_TREE_LOG_OBJECTID -6ULL 107 #define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL 108 109 /* for space balancing */ 110 #define BTRFS_TREE_RELOC_OBJECTID -8ULL 111 #define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL 112 113 /* 114 * extent checksums all have this objectid 115 * this allows them to share the logging tree 116 * for fsyncs 117 */ 118 #define BTRFS_EXTENT_CSUM_OBJECTID -10ULL 119 120 /* For storing free space cache */ 121 #define BTRFS_FREE_SPACE_OBJECTID -11ULL 122 123 /* 124 * The inode number assigned to the special inode for storing 125 * free ino cache 126 */ 127 #define BTRFS_FREE_INO_OBJECTID -12ULL 128 129 /* dummy objectid represents multiple objectids */ 130 #define BTRFS_MULTIPLE_OBJECTIDS -255ULL 131 132 /* 133 * All files have objectids in this range. 134 */ 135 #define BTRFS_FIRST_FREE_OBJECTID 256ULL 136 #define BTRFS_LAST_FREE_OBJECTID -256ULL 137 #define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL 138 139 140 /* 141 * the device items go into the chunk tree. The key is in the form 142 * [ 1 BTRFS_DEV_ITEM_KEY device_id ] 143 */ 144 #define BTRFS_DEV_ITEMS_OBJECTID 1ULL 145 146 #define BTRFS_BTREE_INODE_OBJECTID 1 147 148 #define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2 149 150 #define BTRFS_DEV_REPLACE_DEVID 0ULL 151 152 /* 153 * the max metadata block size. This limit is somewhat artificial, 154 * but the memmove costs go through the roof for larger blocks. 155 */ 156 #define BTRFS_MAX_METADATA_BLOCKSIZE 65536 157 158 /* 159 * we can actually store much bigger names, but let's not confuse the rest 160 * of linux 161 */ 162 #define BTRFS_NAME_LEN 255 163 164 /* 165 * Theoretical limit is larger, but we keep this down to a sane 166 * value. That should limit greatly the possibility of collisions on 167 * inode ref items. 
168 */ 169 #define BTRFS_LINK_MAX 65535U 170 171 /* 32 bytes in various csum fields */ 172 #define BTRFS_CSUM_SIZE 32 173 174 /* csum types */ 175 #define BTRFS_CSUM_TYPE_CRC32 0 176 177 static int btrfs_csum_sizes[] = { 4 }; 178 179 /* four bytes for CRC32 */ 180 #define BTRFS_EMPTY_DIR_SIZE 0 181 182 /* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */ 183 #define REQ_GET_READ_MIRRORS (1 << 30) 184 185 #define BTRFS_FT_UNKNOWN 0 186 #define BTRFS_FT_REG_FILE 1 187 #define BTRFS_FT_DIR 2 188 #define BTRFS_FT_CHRDEV 3 189 #define BTRFS_FT_BLKDEV 4 190 #define BTRFS_FT_FIFO 5 191 #define BTRFS_FT_SOCK 6 192 #define BTRFS_FT_SYMLINK 7 193 #define BTRFS_FT_XATTR 8 194 #define BTRFS_FT_MAX 9 195 196 /* ioprio of readahead is set to idle */ 197 #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)) 198 199 #define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024) 200 201 #define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024) 202 203 /* 204 * The key defines the order in the tree, and so it also defines (optimal) 205 * block layout. 206 * 207 * objectid corresponds to the inode number. 208 * 209 * type tells us things about the object, and is a kind of stream selector. 210 * so for a given inode, keys with type of 1 might refer to the inode data, 211 * type of 2 may point to file data in the btree and type == 3 may point to 212 * extents. 213 * 214 * offset is the starting byte offset for this key in the stream. 215 * 216 * btrfs_disk_key is in disk byte order. struct btrfs_key is always 217 * in cpu native order. Otherwise they are identical and their sizes 218 * should be the same (ie both packed) 219 */ 220 struct btrfs_disk_key { 221 __le64 objectid; 222 u8 type; 223 __le64 offset; 224 } __attribute__ ((__packed__)); 225 226 struct btrfs_key { 227 u64 objectid; 228 u8 type; 229 u64 offset; 230 } __attribute__ ((__packed__)); 231 232 struct btrfs_mapping_tree { 233 struct extent_map_tree map_tree; 234 }; 235 236 struct btrfs_dev_item { 237 /* the internal btrfs device id */ 238 __le64 devid; 239 240 /* size of the device */ 241 __le64 total_bytes; 242 243 /* bytes used */ 244 __le64 bytes_used; 245 246 /* optimal io alignment for this device */ 247 __le32 io_align; 248 249 /* optimal io width for this device */ 250 __le32 io_width; 251 252 /* minimal io size for this device */ 253 __le32 sector_size; 254 255 /* type and info about this device */ 256 __le64 type; 257 258 /* expected generation for this device */ 259 __le64 generation; 260 261 /* 262 * starting byte of this partition on the device, 263 * to allow for stripe alignment in the future 264 */ 265 __le64 start_offset; 266 267 /* grouping information for allocation decisions */ 268 __le32 dev_group; 269 270 /* seek speed 0-100 where 100 is fastest */ 271 u8 seek_speed; 272 273 /* bandwidth 0-100 where 100 is fastest */ 274 u8 bandwidth; 275 276 /* btrfs generated uuid for this device */ 277 u8 uuid[BTRFS_UUID_SIZE]; 278 279 /* uuid of FS who owns this device */ 280 u8 fsid[BTRFS_UUID_SIZE]; 281 } __attribute__ ((__packed__)); 282 283 struct btrfs_stripe { 284 __le64 devid; 285 __le64 offset; 286 u8 dev_uuid[BTRFS_UUID_SIZE]; 287 } __attribute__ ((__packed__)); 288 289 struct btrfs_chunk { 290 /* size of this chunk in bytes */ 291 __le64 length; 292 293 /* objectid of the root referencing this chunk */ 294 __le64 owner; 295 296 __le64 stripe_len; 297 __le64 type; 298 299 /* optimal io alignment for this chunk */ 300 __le32 io_align; 301 302 /* optimal io width for this chunk */ 303 __le32 io_width; 304 
305 /* minimal io size for this chunk */ 306 __le32 sector_size; 307 308 /* 2^16 stripes is quite a lot, a second limit is the size of a single 309 * item in the btree 310 */ 311 __le16 num_stripes; 312 313 /* sub stripes only matter for raid10 */ 314 __le16 sub_stripes; 315 struct btrfs_stripe stripe; 316 /* additional stripes go here */ 317 } __attribute__ ((__packed__)); 318 319 #define BTRFS_FREE_SPACE_EXTENT 1 320 #define BTRFS_FREE_SPACE_BITMAP 2 321 322 struct btrfs_free_space_entry { 323 __le64 offset; 324 __le64 bytes; 325 u8 type; 326 } __attribute__ ((__packed__)); 327 328 struct btrfs_free_space_header { 329 struct btrfs_disk_key location; 330 __le64 generation; 331 __le64 num_entries; 332 __le64 num_bitmaps; 333 } __attribute__ ((__packed__)); 334 335 static inline unsigned long btrfs_chunk_item_size(int num_stripes) 336 { 337 BUG_ON(num_stripes == 0); 338 return sizeof(struct btrfs_chunk) + 339 sizeof(struct btrfs_stripe) * (num_stripes - 1); 340 } 341 342 #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) 343 #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) 344 345 /* 346 * File system states 347 */ 348 #define BTRFS_FS_STATE_ERROR 0 349 #define BTRFS_FS_STATE_REMOUNTING 1 350 #define BTRFS_FS_STATE_TRANS_ABORTED 2 351 #define BTRFS_FS_STATE_DEV_REPLACING 3 352 353 /* Super block flags */ 354 /* Errors detected */ 355 #define BTRFS_SUPER_FLAG_ERROR (1ULL << 2) 356 357 #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) 358 #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) 359 360 #define BTRFS_BACKREF_REV_MAX 256 361 #define BTRFS_BACKREF_REV_SHIFT 56 362 #define BTRFS_BACKREF_REV_MASK (((u64)BTRFS_BACKREF_REV_MAX - 1) << \ 363 BTRFS_BACKREF_REV_SHIFT) 364 365 #define BTRFS_OLD_BACKREF_REV 0 366 #define BTRFS_MIXED_BACKREF_REV 1 367 368 /* 369 * every tree block (leaf or node) starts with this header. 370 */ 371 struct btrfs_header { 372 /* these first four must match the super block */ 373 u8 csum[BTRFS_CSUM_SIZE]; 374 u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */ 375 __le64 bytenr; /* which block this node is supposed to live in */ 376 __le64 flags; 377 378 /* allowed to be different from the super from here on down */ 379 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 380 __le64 generation; 381 __le64 owner; 382 __le32 nritems; 383 u8 level; 384 } __attribute__ ((__packed__)); 385 386 #define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \ 387 sizeof(struct btrfs_header)) / \ 388 sizeof(struct btrfs_key_ptr)) 389 #define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header)) 390 #define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->nodesize)) 391 #define BTRFS_FILE_EXTENT_INLINE_DATA_START \ 392 (offsetof(struct btrfs_file_extent_item, disk_bytenr)) 393 #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ 394 sizeof(struct btrfs_item) - \ 395 BTRFS_FILE_EXTENT_INLINE_DATA_START) 396 #define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ 397 sizeof(struct btrfs_item) -\ 398 sizeof(struct btrfs_dir_item)) 399 400 401 /* 402 * this is a very generous portion of the super block, giving us 403 * room to translate 14 chunks with 3 stripes each. 404 */ 405 #define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048 406 #define BTRFS_LABEL_SIZE 256 407 408 /* 409 * just in case we somehow lose the roots and are not able to mount, 410 * we store an array of the roots from previous transactions 411 * in the super. 
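 *
 * Worked example for btrfs_chunk_item_size() above (illustrative, not part
 * of the original header): struct btrfs_chunk already embeds one
 * struct btrfs_stripe, so a chunk striped over two devices needs room for
 * exactly one more:
 *
 *	btrfs_chunk_item_size(2) == sizeof(struct btrfs_chunk) +
 *				    sizeof(struct btrfs_stripe);
 *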
412 */ 413 #define BTRFS_NUM_BACKUP_ROOTS 4 414 struct btrfs_root_backup { 415 __le64 tree_root; 416 __le64 tree_root_gen; 417 418 __le64 chunk_root; 419 __le64 chunk_root_gen; 420 421 __le64 extent_root; 422 __le64 extent_root_gen; 423 424 __le64 fs_root; 425 __le64 fs_root_gen; 426 427 __le64 dev_root; 428 __le64 dev_root_gen; 429 430 __le64 csum_root; 431 __le64 csum_root_gen; 432 433 __le64 total_bytes; 434 __le64 bytes_used; 435 __le64 num_devices; 436 /* future */ 437 __le64 unused_64[4]; 438 439 u8 tree_root_level; 440 u8 chunk_root_level; 441 u8 extent_root_level; 442 u8 fs_root_level; 443 u8 dev_root_level; 444 u8 csum_root_level; 445 /* future and to align */ 446 u8 unused_8[10]; 447 } __attribute__ ((__packed__)); 448 449 /* 450 * the super block basically lists the main trees of the FS 451 * it currently lacks any block count etc etc 452 */ 453 struct btrfs_super_block { 454 u8 csum[BTRFS_CSUM_SIZE]; 455 /* the first 4 fields must match struct btrfs_header */ 456 u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */ 457 __le64 bytenr; /* this block number */ 458 __le64 flags; 459 460 /* allowed to be different from the btrfs_header from here own down */ 461 __le64 magic; 462 __le64 generation; 463 __le64 root; 464 __le64 chunk_root; 465 __le64 log_root; 466 467 /* this will help find the new super based on the log root */ 468 __le64 log_root_transid; 469 __le64 total_bytes; 470 __le64 bytes_used; 471 __le64 root_dir_objectid; 472 __le64 num_devices; 473 __le32 sectorsize; 474 __le32 nodesize; 475 __le32 __unused_leafsize; 476 __le32 stripesize; 477 __le32 sys_chunk_array_size; 478 __le64 chunk_root_generation; 479 __le64 compat_flags; 480 __le64 compat_ro_flags; 481 __le64 incompat_flags; 482 __le16 csum_type; 483 u8 root_level; 484 u8 chunk_root_level; 485 u8 log_root_level; 486 struct btrfs_dev_item dev_item; 487 488 char label[BTRFS_LABEL_SIZE]; 489 490 __le64 cache_generation; 491 __le64 uuid_tree_generation; 492 493 /* future expansion */ 494 __le64 reserved[30]; 495 u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE]; 496 struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS]; 497 } __attribute__ ((__packed__)); 498 499 /* 500 * Compat flags that we support. If any incompat flags are set other than the 501 * ones specified below then we will fail to mount 502 */ 503 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) 504 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) 505 #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) 506 #define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) 507 /* 508 * some patches floated around with a second compression method 509 * lets save that incompat here for when they do get in 510 * Note we don't actually support it, we're just reserving the 511 * number 512 */ 513 #define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4) 514 515 /* 516 * older kernels tried to do bigger metadata blocks, but the 517 * code was pretty buggy. Lets not let them try anymore. 
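 *
 * Illustrative sketch (not part of the original header) of the kind of
 * superblock check these definitions support; the helper name is made up,
 * but struct btrfs_super_block is defined above, BTRFS_MAGIC and
 * BTRFS_CSUM_TYPE_CRC32 near the top of this file, and
 * BTRFS_FEATURE_INCOMPAT_SUPP just below:
 *
 *	static inline bool btrfs_super_usable(const struct btrfs_super_block *sb)
 *	{
 *		return le64_to_cpu(sb->magic) == BTRFS_MAGIC &&
 *		       le16_to_cpu(sb->csum_type) == BTRFS_CSUM_TYPE_CRC32 &&
 *		       !(le64_to_cpu(sb->incompat_flags) &
 *			 ~BTRFS_FEATURE_INCOMPAT_SUPP);
 *	}
 *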
518 */ 519 #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5) 520 521 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6) 522 #define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7) 523 #define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8) 524 #define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9) 525 526 #define BTRFS_FEATURE_COMPAT_SUPP 0ULL 527 #define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL 528 #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL 529 #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL 530 #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL 531 #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL 532 533 #define BTRFS_FEATURE_INCOMPAT_SUPP \ 534 (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ 535 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ 536 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ 537 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \ 538 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \ 539 BTRFS_FEATURE_INCOMPAT_RAID56 | \ 540 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \ 541 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \ 542 BTRFS_FEATURE_INCOMPAT_NO_HOLES) 543 544 #define BTRFS_FEATURE_INCOMPAT_SAFE_SET \ 545 (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) 546 #define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL 547 548 /* 549 * A leaf is full of items. offset and size tell us where to find 550 * the item in the leaf (relative to the start of the data area) 551 */ 552 struct btrfs_item { 553 struct btrfs_disk_key key; 554 __le32 offset; 555 __le32 size; 556 } __attribute__ ((__packed__)); 557 558 /* 559 * leaves have an item area and a data area: 560 * [item0, item1....itemN] [free space] [dataN...data1, data0] 561 * 562 * The data is separate from the items to get the keys closer together 563 * during searches. 564 */ 565 struct btrfs_leaf { 566 struct btrfs_header header; 567 struct btrfs_item items[]; 568 } __attribute__ ((__packed__)); 569 570 /* 571 * all non-leaf blocks are nodes, they hold only keys and pointers to 572 * other blocks 573 */ 574 struct btrfs_key_ptr { 575 struct btrfs_disk_key key; 576 __le64 blockptr; 577 __le64 generation; 578 } __attribute__ ((__packed__)); 579 580 struct btrfs_node { 581 struct btrfs_header header; 582 struct btrfs_key_ptr ptrs[]; 583 } __attribute__ ((__packed__)); 584 585 /* 586 * btrfs_paths remember the path taken from the root down to the leaf. 587 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point 588 * to any other levels that are present. 589 * 590 * The slots array records the index of the item or block pointer 591 * used while walking the tree. 
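 *
 * Illustrative sketch (not part of the original header), assuming the usual
 * btrfs_alloc_path()/btrfs_search_slot()/btrfs_free_path() helpers that are
 * declared later in this file:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *		int slot = path->slots[0];
 *		... read the item at (leaf, slot) ...
 *	}
 *	btrfs_free_path(path);
 *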
592 */ 593 struct btrfs_path { 594 struct extent_buffer *nodes[BTRFS_MAX_LEVEL]; 595 int slots[BTRFS_MAX_LEVEL]; 596 /* if there is real range locking, this locks field will change */ 597 int locks[BTRFS_MAX_LEVEL]; 598 int reada; 599 /* keep some upper locks as we walk down */ 600 int lowest_level; 601 602 /* 603 * set by btrfs_split_item, tells search_slot to keep all locks 604 * and to force calls to keep space in the nodes 605 */ 606 unsigned int search_for_split:1; 607 unsigned int keep_locks:1; 608 unsigned int skip_locking:1; 609 unsigned int leave_spinning:1; 610 unsigned int search_commit_root:1; 611 unsigned int need_commit_sem:1; 612 unsigned int skip_release_on_error:1; 613 }; 614 615 /* 616 * items in the extent btree are used to record the objectid of the 617 * owner of the block and the number of references 618 */ 619 620 struct btrfs_extent_item { 621 __le64 refs; 622 __le64 generation; 623 __le64 flags; 624 } __attribute__ ((__packed__)); 625 626 struct btrfs_extent_item_v0 { 627 __le32 refs; 628 } __attribute__ ((__packed__)); 629 630 #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \ 631 sizeof(struct btrfs_item)) 632 633 #define BTRFS_EXTENT_FLAG_DATA (1ULL << 0) 634 #define BTRFS_EXTENT_FLAG_TREE_BLOCK (1ULL << 1) 635 636 /* following flags only apply to tree blocks */ 637 638 /* use full backrefs for extent pointers in the block */ 639 #define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) 640 641 /* 642 * this flag is only used internally by scrub and may be changed at any time 643 * it is only declared here to avoid collisions 644 */ 645 #define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) 646 647 struct btrfs_tree_block_info { 648 struct btrfs_disk_key key; 649 u8 level; 650 } __attribute__ ((__packed__)); 651 652 struct btrfs_extent_data_ref { 653 __le64 root; 654 __le64 objectid; 655 __le64 offset; 656 __le32 count; 657 } __attribute__ ((__packed__)); 658 659 struct btrfs_shared_data_ref { 660 __le32 count; 661 } __attribute__ ((__packed__)); 662 663 struct btrfs_extent_inline_ref { 664 u8 type; 665 __le64 offset; 666 } __attribute__ ((__packed__)); 667 668 /* old style backrefs item */ 669 struct btrfs_extent_ref_v0 { 670 __le64 root; 671 __le64 generation; 672 __le64 objectid; 673 __le32 count; 674 } __attribute__ ((__packed__)); 675 676 677 /* dev extents record free space on individual devices. The owner 678 * field points back to the chunk allocation mapping tree that allocated 679 * the extent. 
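 *
 * Illustrative example (not part of the original comment): dev extent items
 * sit in the device tree, keyed by device id with the physical start offset
 * of the extent in the key offset; BTRFS_DEV_EXTENT_KEY is defined further
 * down in this file, and devid/physical_start are placeholders:
 *
 *	struct btrfs_key key = {
 *		.objectid = devid,
 *		.type = BTRFS_DEV_EXTENT_KEY,
 *		.offset = physical_start,
 *	};
 *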
The chunk tree uuid field is a way to double check the owner 680 */ 681 struct btrfs_dev_extent { 682 __le64 chunk_tree; 683 __le64 chunk_objectid; 684 __le64 chunk_offset; 685 __le64 length; 686 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 687 } __attribute__ ((__packed__)); 688 689 struct btrfs_inode_ref { 690 __le64 index; 691 __le16 name_len; 692 /* name goes here */ 693 } __attribute__ ((__packed__)); 694 695 struct btrfs_inode_extref { 696 __le64 parent_objectid; 697 __le64 index; 698 __le16 name_len; 699 __u8 name[0]; 700 /* name goes here */ 701 } __attribute__ ((__packed__)); 702 703 struct btrfs_timespec { 704 __le64 sec; 705 __le32 nsec; 706 } __attribute__ ((__packed__)); 707 708 enum btrfs_compression_type { 709 BTRFS_COMPRESS_NONE = 0, 710 BTRFS_COMPRESS_ZLIB = 1, 711 BTRFS_COMPRESS_LZO = 2, 712 BTRFS_COMPRESS_TYPES = 2, 713 BTRFS_COMPRESS_LAST = 3, 714 }; 715 716 struct btrfs_inode_item { 717 /* nfs style generation number */ 718 __le64 generation; 719 /* transid that last touched this inode */ 720 __le64 transid; 721 __le64 size; 722 __le64 nbytes; 723 __le64 block_group; 724 __le32 nlink; 725 __le32 uid; 726 __le32 gid; 727 __le32 mode; 728 __le64 rdev; 729 __le64 flags; 730 731 /* modification sequence number for NFS */ 732 __le64 sequence; 733 734 /* 735 * a little future expansion, for more than this we can 736 * just grow the inode item and version it 737 */ 738 __le64 reserved[4]; 739 struct btrfs_timespec atime; 740 struct btrfs_timespec ctime; 741 struct btrfs_timespec mtime; 742 struct btrfs_timespec otime; 743 } __attribute__ ((__packed__)); 744 745 struct btrfs_dir_log_item { 746 __le64 end; 747 } __attribute__ ((__packed__)); 748 749 struct btrfs_dir_item { 750 struct btrfs_disk_key location; 751 __le64 transid; 752 __le16 data_len; 753 __le16 name_len; 754 u8 type; 755 } __attribute__ ((__packed__)); 756 757 #define BTRFS_ROOT_SUBVOL_RDONLY (1ULL << 0) 758 759 /* 760 * Internal in-memory flag that a subvolume has been marked for deletion but 761 * still visible as a directory 762 */ 763 #define BTRFS_ROOT_SUBVOL_DEAD (1ULL << 48) 764 765 struct btrfs_root_item { 766 struct btrfs_inode_item inode; 767 __le64 generation; 768 __le64 root_dirid; 769 __le64 bytenr; 770 __le64 byte_limit; 771 __le64 bytes_used; 772 __le64 last_snapshot; 773 __le64 flags; 774 __le32 refs; 775 struct btrfs_disk_key drop_progress; 776 u8 drop_level; 777 u8 level; 778 779 /* 780 * The following fields appear after subvol_uuids+subvol_times 781 * were introduced. 782 */ 783 784 /* 785 * This generation number is used to test if the new fields are valid 786 * and up to date while reading the root item. Everytime the root item 787 * is written out, the "generation" field is copied into this field. If 788 * anyone ever mounted the fs with an older kernel, we will have 789 * mismatching generation values here and thus must invalidate the 790 * new fields. See btrfs_update_root and btrfs_find_last_root for 791 * details. 792 * the offset of generation_v2 is also used as the start for the memset 793 * when invalidating the fields. 794 */ 795 __le64 generation_v2; 796 u8 uuid[BTRFS_UUID_SIZE]; 797 u8 parent_uuid[BTRFS_UUID_SIZE]; 798 u8 received_uuid[BTRFS_UUID_SIZE]; 799 __le64 ctransid; /* updated when an inode changes */ 800 __le64 otransid; /* trans when created */ 801 __le64 stransid; /* trans when sent. non-zero for received subvol */ 802 __le64 rtransid; /* trans when received. 
non-zero for received subvol */ 803 struct btrfs_timespec ctime; 804 struct btrfs_timespec otime; 805 struct btrfs_timespec stime; 806 struct btrfs_timespec rtime; 807 __le64 reserved[8]; /* for future */ 808 } __attribute__ ((__packed__)); 809 810 /* 811 * this is used for both forward and backward root refs 812 */ 813 struct btrfs_root_ref { 814 __le64 dirid; 815 __le64 sequence; 816 __le16 name_len; 817 } __attribute__ ((__packed__)); 818 819 struct btrfs_disk_balance_args { 820 /* 821 * profiles to operate on, single is denoted by 822 * BTRFS_AVAIL_ALLOC_BIT_SINGLE 823 */ 824 __le64 profiles; 825 826 /* usage filter */ 827 __le64 usage; 828 829 /* devid filter */ 830 __le64 devid; 831 832 /* devid subset filter [pstart..pend) */ 833 __le64 pstart; 834 __le64 pend; 835 836 /* btrfs virtual address space subset filter [vstart..vend) */ 837 __le64 vstart; 838 __le64 vend; 839 840 /* 841 * profile to convert to, single is denoted by 842 * BTRFS_AVAIL_ALLOC_BIT_SINGLE 843 */ 844 __le64 target; 845 846 /* BTRFS_BALANCE_ARGS_* */ 847 __le64 flags; 848 849 /* BTRFS_BALANCE_ARGS_LIMIT value */ 850 __le64 limit; 851 852 __le64 unused[7]; 853 } __attribute__ ((__packed__)); 854 855 /* 856 * store balance parameters to disk so that balance can be properly 857 * resumed after crash or unmount 858 */ 859 struct btrfs_balance_item { 860 /* BTRFS_BALANCE_* */ 861 __le64 flags; 862 863 struct btrfs_disk_balance_args data; 864 struct btrfs_disk_balance_args meta; 865 struct btrfs_disk_balance_args sys; 866 867 __le64 unused[4]; 868 } __attribute__ ((__packed__)); 869 870 #define BTRFS_FILE_EXTENT_INLINE 0 871 #define BTRFS_FILE_EXTENT_REG 1 872 #define BTRFS_FILE_EXTENT_PREALLOC 2 873 874 struct btrfs_file_extent_item { 875 /* 876 * transaction id that created this extent 877 */ 878 __le64 generation; 879 /* 880 * max number of bytes to hold this extent in ram 881 * when we split a compressed extent we can't know how big 882 * each of the resulting pieces will be. So, this is 883 * an upper limit on the size of the extent in ram instead of 884 * an exact limit. 885 */ 886 __le64 ram_bytes; 887 888 /* 889 * 32 bits for the various ways we might encode the data, 890 * including compression and encryption. If any of these 891 * are set to something a given disk format doesn't understand 892 * it is treated like an incompat flag for reading and writing, 893 * but not for stat. 894 */ 895 u8 compression; 896 u8 encryption; 897 __le16 other_encoding; /* spare for later use */ 898 899 /* are we inline data or a real extent? */ 900 u8 type; 901 902 /* 903 * disk space consumed by the extent, checksum blocks are included 904 * in these numbers 905 * 906 * At this offset in the structure, the inline extent data start. 907 */ 908 __le64 disk_bytenr; 909 __le64 disk_num_bytes; 910 /* 911 * the logical offset in file blocks (no csums) 912 * this extent record is for. This allows a file extent to point 913 * into the middle of an existing extent on disk, sharing it 914 * between two snapshots (useful if some bytes in the middle of the 915 * extent have changed 916 */ 917 __le64 offset; 918 /* 919 * the logical number of file blocks (no csums included). This 920 * always reflects the size uncompressed and without encoding. 
921 */ 922 __le64 num_bytes; 923 924 } __attribute__ ((__packed__)); 925 926 struct btrfs_csum_item { 927 u8 csum; 928 } __attribute__ ((__packed__)); 929 930 struct btrfs_dev_stats_item { 931 /* 932 * grow this item struct at the end for future enhancements and keep 933 * the existing values unchanged 934 */ 935 __le64 values[BTRFS_DEV_STAT_VALUES_MAX]; 936 } __attribute__ ((__packed__)); 937 938 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0 939 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1 940 #define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0 941 #define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1 942 #define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 943 #define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 944 #define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 945 946 struct btrfs_dev_replace { 947 u64 replace_state; /* see #define above */ 948 u64 time_started; /* seconds since 1-Jan-1970 */ 949 u64 time_stopped; /* seconds since 1-Jan-1970 */ 950 atomic64_t num_write_errors; 951 atomic64_t num_uncorrectable_read_errors; 952 953 u64 cursor_left; 954 u64 committed_cursor_left; 955 u64 cursor_left_last_write_of_item; 956 u64 cursor_right; 957 958 u64 cont_reading_from_srcdev_mode; /* see #define above */ 959 960 int is_valid; 961 int item_needs_writeback; 962 struct btrfs_device *srcdev; 963 struct btrfs_device *tgtdev; 964 965 pid_t lock_owner; 966 atomic_t nesting_level; 967 struct mutex lock_finishing_cancel_unmount; 968 struct mutex lock_management_lock; 969 struct mutex lock; 970 971 struct btrfs_scrub_progress scrub_progress; 972 }; 973 974 struct btrfs_dev_replace_item { 975 /* 976 * grow this item struct at the end for future enhancements and keep 977 * the existing values unchanged 978 */ 979 __le64 src_devid; 980 __le64 cursor_left; 981 __le64 cursor_right; 982 __le64 cont_reading_from_srcdev_mode; 983 984 __le64 replace_state; 985 __le64 time_started; 986 __le64 time_stopped; 987 __le64 num_write_errors; 988 __le64 num_uncorrectable_read_errors; 989 } __attribute__ ((__packed__)); 990 991 /* different types of block groups (and chunks) */ 992 #define BTRFS_BLOCK_GROUP_DATA (1ULL << 0) 993 #define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1) 994 #define BTRFS_BLOCK_GROUP_METADATA (1ULL << 2) 995 #define BTRFS_BLOCK_GROUP_RAID0 (1ULL << 3) 996 #define BTRFS_BLOCK_GROUP_RAID1 (1ULL << 4) 997 #define BTRFS_BLOCK_GROUP_DUP (1ULL << 5) 998 #define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6) 999 #define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7) 1000 #define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8) 1001 #define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \ 1002 BTRFS_SPACE_INFO_GLOBAL_RSV) 1003 1004 enum btrfs_raid_types { 1005 BTRFS_RAID_RAID10, 1006 BTRFS_RAID_RAID1, 1007 BTRFS_RAID_DUP, 1008 BTRFS_RAID_RAID0, 1009 BTRFS_RAID_SINGLE, 1010 BTRFS_RAID_RAID5, 1011 BTRFS_RAID_RAID6, 1012 BTRFS_NR_RAID_TYPES 1013 }; 1014 1015 #define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \ 1016 BTRFS_BLOCK_GROUP_SYSTEM | \ 1017 BTRFS_BLOCK_GROUP_METADATA) 1018 1019 #define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ 1020 BTRFS_BLOCK_GROUP_RAID1 | \ 1021 BTRFS_BLOCK_GROUP_RAID5 | \ 1022 BTRFS_BLOCK_GROUP_RAID6 | \ 1023 BTRFS_BLOCK_GROUP_DUP | \ 1024 BTRFS_BLOCK_GROUP_RAID10) 1025 #define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \ 1026 BTRFS_BLOCK_GROUP_RAID6) 1027 1028 /* 1029 * We need a bit for restriper to be able to tell when chunks of type 1030 * SINGLE are available. 
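 *
 * Illustrative example (not part of the original comment): a plain single
 * data chunk has none of the BTRFS_BLOCK_GROUP_PROFILE_MASK bits set in its
 * on-disk type, so chunk_to_extended() below adds the in-memory SINGLE bit
 * and extended_to_chunk() strips it again:
 *
 *	chunk_to_extended(BTRFS_BLOCK_GROUP_DATA) ==
 *		(BTRFS_BLOCK_GROUP_DATA | BTRFS_AVAIL_ALLOC_BIT_SINGLE);
 *	extended_to_chunk(BTRFS_BLOCK_GROUP_DATA |
 *			  BTRFS_AVAIL_ALLOC_BIT_SINGLE) == BTRFS_BLOCK_GROUP_DATA;
 *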
This "extended" profile format is used in 1031 * fs_info->avail_*_alloc_bits (in-memory) and balance item fields 1032 * (on-disk). The corresponding on-disk bit in chunk.type is reserved 1033 * to avoid remappings between two formats in future. 1034 */ 1035 #define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48) 1036 1037 /* 1038 * A fake block group type that is used to communicate global block reserve 1039 * size to userspace via the SPACE_INFO ioctl. 1040 */ 1041 #define BTRFS_SPACE_INFO_GLOBAL_RSV (1ULL << 49) 1042 1043 #define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \ 1044 BTRFS_AVAIL_ALLOC_BIT_SINGLE) 1045 1046 static inline u64 chunk_to_extended(u64 flags) 1047 { 1048 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0) 1049 flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE; 1050 1051 return flags; 1052 } 1053 static inline u64 extended_to_chunk(u64 flags) 1054 { 1055 return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE; 1056 } 1057 1058 struct btrfs_block_group_item { 1059 __le64 used; 1060 __le64 chunk_objectid; 1061 __le64 flags; 1062 } __attribute__ ((__packed__)); 1063 1064 #define BTRFS_QGROUP_LEVEL_SHIFT 48 1065 static inline u64 btrfs_qgroup_level(u64 qgroupid) 1066 { 1067 return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT; 1068 } 1069 1070 /* 1071 * is subvolume quota turned on? 1072 */ 1073 #define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0) 1074 /* 1075 * RESCAN is set during the initialization phase 1076 */ 1077 #define BTRFS_QGROUP_STATUS_FLAG_RESCAN (1ULL << 1) 1078 /* 1079 * Some qgroup entries are known to be out of date, 1080 * either because the configuration has changed in a way that 1081 * makes a rescan necessary, or because the fs has been mounted 1082 * with a non-qgroup-aware version. 1083 * Turning qouta off and on again makes it inconsistent, too. 1084 */ 1085 #define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2) 1086 1087 #define BTRFS_QGROUP_STATUS_VERSION 1 1088 1089 struct btrfs_qgroup_status_item { 1090 __le64 version; 1091 /* 1092 * the generation is updated during every commit. As older 1093 * versions of btrfs are not aware of qgroups, it will be 1094 * possible to detect inconsistencies by checking the 1095 * generation on mount time 1096 */ 1097 __le64 generation; 1098 1099 /* flag definitions see above */ 1100 __le64 flags; 1101 1102 /* 1103 * only used during scanning to record the progress 1104 * of the scan. 
It contains a logical address 1105 */ 1106 __le64 rescan; 1107 } __attribute__ ((__packed__)); 1108 1109 struct btrfs_qgroup_info_item { 1110 __le64 generation; 1111 __le64 rfer; 1112 __le64 rfer_cmpr; 1113 __le64 excl; 1114 __le64 excl_cmpr; 1115 } __attribute__ ((__packed__)); 1116 1117 /* flags definition for qgroup limits */ 1118 #define BTRFS_QGROUP_LIMIT_MAX_RFER (1ULL << 0) 1119 #define BTRFS_QGROUP_LIMIT_MAX_EXCL (1ULL << 1) 1120 #define BTRFS_QGROUP_LIMIT_RSV_RFER (1ULL << 2) 1121 #define BTRFS_QGROUP_LIMIT_RSV_EXCL (1ULL << 3) 1122 #define BTRFS_QGROUP_LIMIT_RFER_CMPR (1ULL << 4) 1123 #define BTRFS_QGROUP_LIMIT_EXCL_CMPR (1ULL << 5) 1124 1125 struct btrfs_qgroup_limit_item { 1126 /* 1127 * only updated when any of the other values change 1128 */ 1129 __le64 flags; 1130 __le64 max_rfer; 1131 __le64 max_excl; 1132 __le64 rsv_rfer; 1133 __le64 rsv_excl; 1134 } __attribute__ ((__packed__)); 1135 1136 /* For raid type sysfs entries */ 1137 struct raid_kobject { 1138 int raid_type; 1139 struct kobject kobj; 1140 }; 1141 1142 struct btrfs_space_info { 1143 spinlock_t lock; 1144 1145 u64 total_bytes; /* total bytes in the space, 1146 this doesn't take mirrors into account */ 1147 u64 bytes_used; /* total bytes used, 1148 this doesn't take mirrors into account */ 1149 u64 bytes_pinned; /* total bytes pinned, will be freed when the 1150 transaction finishes */ 1151 u64 bytes_reserved; /* total bytes the allocator has reserved for 1152 current allocations */ 1153 u64 bytes_may_use; /* number of bytes that may be used for 1154 delalloc/allocations */ 1155 u64 bytes_readonly; /* total bytes that are read only */ 1156 1157 unsigned int full:1; /* indicates that we cannot allocate any more 1158 chunks for this space */ 1159 unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ 1160 1161 unsigned int flush:1; /* set if we are trying to make space */ 1162 1163 unsigned int force_alloc; /* set if we need to force a chunk 1164 alloc for this space */ 1165 1166 u64 disk_used; /* total bytes used on disk */ 1167 u64 disk_total; /* total bytes on disk, takes mirrors into 1168 account */ 1169 1170 u64 flags; 1171 1172 /* 1173 * bytes_pinned is kept in line with what is actually pinned, as in 1174 * we've called update_block_group and dropped the bytes_used counter 1175 * and increased the bytes_pinned counter. However this means that 1176 * bytes_pinned does not reflect the bytes that will be pinned once the 1177 * delayed refs are flushed, so this counter is inc'ed everytime we call 1178 * btrfs_free_extent so it is a realtime count of what will be freed 1179 * once the transaction is committed. It will be zero'ed everytime the 1180 * transaction commits. 1181 */ 1182 struct percpu_counter total_bytes_pinned; 1183 1184 struct list_head list; 1185 /* Protected by the spinlock 'lock'. 
*/ 1186 struct list_head ro_bgs; 1187 1188 struct rw_semaphore groups_sem; 1189 /* for block groups in our same type */ 1190 struct list_head block_groups[BTRFS_NR_RAID_TYPES]; 1191 wait_queue_head_t wait; 1192 1193 struct kobject kobj; 1194 struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES]; 1195 }; 1196 1197 #define BTRFS_BLOCK_RSV_GLOBAL 1 1198 #define BTRFS_BLOCK_RSV_DELALLOC 2 1199 #define BTRFS_BLOCK_RSV_TRANS 3 1200 #define BTRFS_BLOCK_RSV_CHUNK 4 1201 #define BTRFS_BLOCK_RSV_DELOPS 5 1202 #define BTRFS_BLOCK_RSV_EMPTY 6 1203 #define BTRFS_BLOCK_RSV_TEMP 7 1204 1205 struct btrfs_block_rsv { 1206 u64 size; 1207 u64 reserved; 1208 struct btrfs_space_info *space_info; 1209 spinlock_t lock; 1210 unsigned short full; 1211 unsigned short type; 1212 unsigned short failfast; 1213 }; 1214 1215 /* 1216 * free clusters are used to claim free space in relatively large chunks, 1217 * allowing us to do less seeky writes. They are used for all metadata 1218 * allocations and data allocations in ssd mode. 1219 */ 1220 struct btrfs_free_cluster { 1221 spinlock_t lock; 1222 spinlock_t refill_lock; 1223 struct rb_root root; 1224 1225 /* largest extent in this cluster */ 1226 u64 max_size; 1227 1228 /* first extent starting offset */ 1229 u64 window_start; 1230 1231 struct btrfs_block_group_cache *block_group; 1232 /* 1233 * when a cluster is allocated from a block group, we put the 1234 * cluster onto a list in the block group so that it can 1235 * be freed before the block group is freed. 1236 */ 1237 struct list_head block_group_list; 1238 }; 1239 1240 enum btrfs_caching_type { 1241 BTRFS_CACHE_NO = 0, 1242 BTRFS_CACHE_STARTED = 1, 1243 BTRFS_CACHE_FAST = 2, 1244 BTRFS_CACHE_FINISHED = 3, 1245 BTRFS_CACHE_ERROR = 4, 1246 }; 1247 1248 enum btrfs_disk_cache_state { 1249 BTRFS_DC_WRITTEN = 0, 1250 BTRFS_DC_ERROR = 1, 1251 BTRFS_DC_CLEAR = 2, 1252 BTRFS_DC_SETUP = 3, 1253 }; 1254 1255 struct btrfs_caching_control { 1256 struct list_head list; 1257 struct mutex mutex; 1258 wait_queue_head_t wait; 1259 struct btrfs_work work; 1260 struct btrfs_block_group_cache *block_group; 1261 u64 progress; 1262 atomic_t count; 1263 }; 1264 1265 struct btrfs_io_ctl { 1266 void *cur, *orig; 1267 struct page *page; 1268 struct page **pages; 1269 struct btrfs_root *root; 1270 struct inode *inode; 1271 unsigned long size; 1272 int index; 1273 int num_pages; 1274 int entries; 1275 int bitmaps; 1276 unsigned check_crcs:1; 1277 }; 1278 1279 struct btrfs_block_group_cache { 1280 struct btrfs_key key; 1281 struct btrfs_block_group_item item; 1282 struct btrfs_fs_info *fs_info; 1283 struct inode *inode; 1284 spinlock_t lock; 1285 u64 pinned; 1286 u64 reserved; 1287 u64 delalloc_bytes; 1288 u64 bytes_super; 1289 u64 flags; 1290 u64 sectorsize; 1291 u64 cache_generation; 1292 1293 /* 1294 * It is just used for the delayed data space allocation because 1295 * only the data space allocation and the relative metadata update 1296 * can be done cross the transaction. 
1297 */ 1298 struct rw_semaphore data_rwsem; 1299 1300 /* for raid56, this is a full stripe, without parity */ 1301 unsigned long full_stripe_len; 1302 1303 unsigned int ro; 1304 unsigned int iref:1; 1305 unsigned int has_caching_ctl:1; 1306 unsigned int removed:1; 1307 1308 int disk_cache_state; 1309 1310 /* cache tracking stuff */ 1311 int cached; 1312 struct btrfs_caching_control *caching_ctl; 1313 u64 last_byte_to_unpin; 1314 1315 struct btrfs_space_info *space_info; 1316 1317 /* free space cache stuff */ 1318 struct btrfs_free_space_ctl *free_space_ctl; 1319 1320 /* block group cache stuff */ 1321 struct rb_node cache_node; 1322 1323 /* for block groups in the same raid type */ 1324 struct list_head list; 1325 1326 /* usage count */ 1327 atomic_t count; 1328 1329 /* List of struct btrfs_free_clusters for this block group. 1330 * Today it will only have one thing on it, but that may change 1331 */ 1332 struct list_head cluster_list; 1333 1334 /* For delayed block group creation or deletion of empty block groups */ 1335 struct list_head bg_list; 1336 1337 /* For read-only block groups */ 1338 struct list_head ro_list; 1339 1340 atomic_t trimming; 1341 1342 /* For dirty block groups */ 1343 struct list_head dirty_list; 1344 struct list_head io_list; 1345 1346 struct btrfs_io_ctl io_ctl; 1347 }; 1348 1349 /* delayed seq elem */ 1350 struct seq_list { 1351 struct list_head list; 1352 u64 seq; 1353 }; 1354 1355 #define SEQ_LIST_INIT(name) { .list = LIST_HEAD_INIT((name).list), .seq = 0 } 1356 1357 enum btrfs_orphan_cleanup_state { 1358 ORPHAN_CLEANUP_STARTED = 1, 1359 ORPHAN_CLEANUP_DONE = 2, 1360 }; 1361 1362 /* used by the raid56 code to lock stripes for read/modify/write */ 1363 struct btrfs_stripe_hash { 1364 struct list_head hash_list; 1365 wait_queue_head_t wait; 1366 spinlock_t lock; 1367 }; 1368 1369 /* used by the raid56 code to lock stripes for read/modify/write */ 1370 struct btrfs_stripe_hash_table { 1371 struct list_head stripe_cache; 1372 spinlock_t cache_lock; 1373 int cache_size; 1374 struct btrfs_stripe_hash table[]; 1375 }; 1376 1377 #define BTRFS_STRIPE_HASH_TABLE_BITS 11 1378 1379 void btrfs_init_async_reclaim_work(struct work_struct *work); 1380 1381 /* fs_info */ 1382 struct reloc_control; 1383 struct btrfs_device; 1384 struct btrfs_fs_devices; 1385 struct btrfs_balance_control; 1386 struct btrfs_delayed_root; 1387 struct btrfs_fs_info { 1388 u8 fsid[BTRFS_FSID_SIZE]; 1389 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 1390 struct btrfs_root *extent_root; 1391 struct btrfs_root *tree_root; 1392 struct btrfs_root *chunk_root; 1393 struct btrfs_root *dev_root; 1394 struct btrfs_root *fs_root; 1395 struct btrfs_root *csum_root; 1396 struct btrfs_root *quota_root; 1397 struct btrfs_root *uuid_root; 1398 1399 /* the log root tree is a directory of all the other log roots */ 1400 struct btrfs_root *log_root_tree; 1401 1402 spinlock_t fs_roots_radix_lock; 1403 struct radix_tree_root fs_roots_radix; 1404 1405 /* block group cache stuff */ 1406 spinlock_t block_group_cache_lock; 1407 u64 first_logical_byte; 1408 struct rb_root block_group_cache_tree; 1409 1410 /* keep track of unallocated space */ 1411 spinlock_t free_chunk_lock; 1412 u64 free_chunk_space; 1413 1414 struct extent_io_tree freed_extents[2]; 1415 struct extent_io_tree *pinned_extents; 1416 1417 /* logical->physical extent mapping */ 1418 struct btrfs_mapping_tree mapping_tree; 1419 1420 /* 1421 * block reservation for extent, checksum, root tree and 1422 * delayed dir index item 1423 */ 1424 struct btrfs_block_rsv 
global_block_rsv; 1425 /* block reservation for delay allocation */ 1426 struct btrfs_block_rsv delalloc_block_rsv; 1427 /* block reservation for metadata operations */ 1428 struct btrfs_block_rsv trans_block_rsv; 1429 /* block reservation for chunk tree */ 1430 struct btrfs_block_rsv chunk_block_rsv; 1431 /* block reservation for delayed operations */ 1432 struct btrfs_block_rsv delayed_block_rsv; 1433 1434 struct btrfs_block_rsv empty_block_rsv; 1435 1436 u64 generation; 1437 u64 last_trans_committed; 1438 u64 avg_delayed_ref_runtime; 1439 1440 /* 1441 * this is updated to the current trans every time a full commit 1442 * is required instead of the faster short fsync log commits 1443 */ 1444 u64 last_trans_log_full_commit; 1445 unsigned long mount_opt; 1446 /* 1447 * Track requests for actions that need to be done during transaction 1448 * commit (like for some mount options). 1449 */ 1450 unsigned long pending_changes; 1451 unsigned long compress_type:4; 1452 int commit_interval; 1453 /* 1454 * It is a suggestive number, the read side is safe even it gets a 1455 * wrong number because we will write out the data into a regular 1456 * extent. The write side(mount/remount) is under ->s_umount lock, 1457 * so it is also safe. 1458 */ 1459 u64 max_inline; 1460 /* 1461 * Protected by ->chunk_mutex and sb->s_umount. 1462 * 1463 * The reason that we use two lock to protect it is because only 1464 * remount and mount operations can change it and these two operations 1465 * are under sb->s_umount, but the read side (chunk allocation) can not 1466 * acquire sb->s_umount or the deadlock would happen. So we use two 1467 * locks to protect it. On the write side, we must acquire two locks, 1468 * and on the read side, we just need acquire one of them. 1469 */ 1470 u64 alloc_start; 1471 struct btrfs_transaction *running_transaction; 1472 wait_queue_head_t transaction_throttle; 1473 wait_queue_head_t transaction_wait; 1474 wait_queue_head_t transaction_blocked_wait; 1475 wait_queue_head_t async_submit_wait; 1476 1477 /* 1478 * Used to protect the incompat_flags, compat_flags, compat_ro_flags 1479 * when they are updated. 1480 * 1481 * Because we do not clear the flags for ever, so we needn't use 1482 * the lock on the read side. 1483 * 1484 * We also needn't use the lock when we mount the fs, because 1485 * there is no other task which will update the flag. 1486 */ 1487 spinlock_t super_lock; 1488 struct btrfs_super_block *super_copy; 1489 struct btrfs_super_block *super_for_commit; 1490 struct block_device *__bdev; 1491 struct super_block *sb; 1492 struct inode *btree_inode; 1493 struct backing_dev_info bdi; 1494 struct mutex tree_log_mutex; 1495 struct mutex transaction_kthread_mutex; 1496 struct mutex cleaner_mutex; 1497 struct mutex chunk_mutex; 1498 struct mutex volume_mutex; 1499 1500 /* 1501 * this is taken to make sure we don't set block groups ro after 1502 * the free space cache has been allocated on them 1503 */ 1504 struct mutex ro_block_group_mutex; 1505 1506 /* this is used during read/modify/write to make sure 1507 * no two ios are trying to mod the same stripe at the same 1508 * time 1509 */ 1510 struct btrfs_stripe_hash_table *stripe_hash_table; 1511 1512 /* 1513 * this protects the ordered operations list only while we are 1514 * processing all of the entries on it. This way we make 1515 * sure the commit code doesn't find the list temporarily empty 1516 * because another function happens to be doing non-waiting preflush 1517 * before jumping into the main commit. 
1518 */ 1519 struct mutex ordered_operations_mutex; 1520 1521 struct rw_semaphore commit_root_sem; 1522 1523 struct rw_semaphore cleanup_work_sem; 1524 1525 struct rw_semaphore subvol_sem; 1526 struct srcu_struct subvol_srcu; 1527 1528 spinlock_t trans_lock; 1529 /* 1530 * the reloc mutex goes with the trans lock, it is taken 1531 * during commit to protect us from the relocation code 1532 */ 1533 struct mutex reloc_mutex; 1534 1535 struct list_head trans_list; 1536 struct list_head dead_roots; 1537 struct list_head caching_block_groups; 1538 1539 spinlock_t delayed_iput_lock; 1540 struct list_head delayed_iputs; 1541 struct rw_semaphore delayed_iput_sem; 1542 1543 /* this protects tree_mod_seq_list */ 1544 spinlock_t tree_mod_seq_lock; 1545 atomic64_t tree_mod_seq; 1546 struct list_head tree_mod_seq_list; 1547 1548 /* this protects tree_mod_log */ 1549 rwlock_t tree_mod_log_lock; 1550 struct rb_root tree_mod_log; 1551 1552 atomic_t nr_async_submits; 1553 atomic_t async_submit_draining; 1554 atomic_t nr_async_bios; 1555 atomic_t async_delalloc_pages; 1556 atomic_t open_ioctl_trans; 1557 1558 /* 1559 * this is used to protect the following list -- ordered_roots. 1560 */ 1561 spinlock_t ordered_root_lock; 1562 1563 /* 1564 * all fs/file tree roots in which there are data=ordered extents 1565 * pending writeback are added into this list. 1566 * 1567 * these can span multiple transactions and basically include 1568 * every dirty data page that isn't from nodatacow 1569 */ 1570 struct list_head ordered_roots; 1571 1572 struct mutex delalloc_root_mutex; 1573 spinlock_t delalloc_root_lock; 1574 /* all fs/file tree roots that have delalloc inodes. */ 1575 struct list_head delalloc_roots; 1576 1577 /* 1578 * there is a pool of worker threads for checksumming during writes 1579 * and a pool for checksumming after reads. This is because readers 1580 * can run with FS locks held, and the writers may be waiting for 1581 * those locks. We don't want ordering in the pending list to cause 1582 * deadlocks, and so the two are serviced separately. 1583 * 1584 * A third pool does submit_bio to avoid deadlocking with the other 1585 * two 1586 */ 1587 struct btrfs_workqueue *workers; 1588 struct btrfs_workqueue *delalloc_workers; 1589 struct btrfs_workqueue *flush_workers; 1590 struct btrfs_workqueue *endio_workers; 1591 struct btrfs_workqueue *endio_meta_workers; 1592 struct btrfs_workqueue *endio_raid56_workers; 1593 struct btrfs_workqueue *endio_repair_workers; 1594 struct btrfs_workqueue *rmw_workers; 1595 struct btrfs_workqueue *endio_meta_write_workers; 1596 struct btrfs_workqueue *endio_write_workers; 1597 struct btrfs_workqueue *endio_freespace_worker; 1598 struct btrfs_workqueue *submit_workers; 1599 struct btrfs_workqueue *caching_workers; 1600 struct btrfs_workqueue *readahead_workers; 1601 1602 /* 1603 * fixup workers take dirty pages that didn't properly go through 1604 * the cow mechanism and make them safe to write. 
It happens 1605 * for the sys_munmap function call path 1606 */ 1607 struct btrfs_workqueue *fixup_workers; 1608 struct btrfs_workqueue *delayed_workers; 1609 1610 /* the extent workers do delayed refs on the extent allocation tree */ 1611 struct btrfs_workqueue *extent_workers; 1612 struct task_struct *transaction_kthread; 1613 struct task_struct *cleaner_kthread; 1614 int thread_pool_size; 1615 1616 struct kobject *space_info_kobj; 1617 int do_barriers; 1618 int closing; 1619 int log_root_recovering; 1620 int open; 1621 1622 u64 total_pinned; 1623 1624 /* used to keep from writing metadata until there is a nice batch */ 1625 struct percpu_counter dirty_metadata_bytes; 1626 struct percpu_counter delalloc_bytes; 1627 s32 dirty_metadata_batch; 1628 s32 delalloc_batch; 1629 1630 struct list_head dirty_cowonly_roots; 1631 1632 struct btrfs_fs_devices *fs_devices; 1633 1634 /* 1635 * the space_info list is almost entirely read only. It only changes 1636 * when we add a new raid type to the FS, and that happens 1637 * very rarely. RCU is used to protect it. 1638 */ 1639 struct list_head space_info; 1640 1641 struct btrfs_space_info *data_sinfo; 1642 1643 struct reloc_control *reloc_ctl; 1644 1645 /* data_alloc_cluster is only used in ssd mode */ 1646 struct btrfs_free_cluster data_alloc_cluster; 1647 1648 /* all metadata allocations go through this cluster */ 1649 struct btrfs_free_cluster meta_alloc_cluster; 1650 1651 /* auto defrag inodes go here */ 1652 spinlock_t defrag_inodes_lock; 1653 struct rb_root defrag_inodes; 1654 atomic_t defrag_running; 1655 1656 /* Used to protect avail_{data, metadata, system}_alloc_bits */ 1657 seqlock_t profiles_lock; 1658 /* 1659 * these three are in extended format (availability of single 1660 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other 1661 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits) 1662 */ 1663 u64 avail_data_alloc_bits; 1664 u64 avail_metadata_alloc_bits; 1665 u64 avail_system_alloc_bits; 1666 1667 /* restriper state */ 1668 spinlock_t balance_lock; 1669 struct mutex balance_mutex; 1670 atomic_t balance_running; 1671 atomic_t balance_pause_req; 1672 atomic_t balance_cancel_req; 1673 struct btrfs_balance_control *balance_ctl; 1674 wait_queue_head_t balance_wait_q; 1675 1676 unsigned data_chunk_allocations; 1677 unsigned metadata_ratio; 1678 1679 void *bdev_holder; 1680 1681 /* private scrub information */ 1682 struct mutex scrub_lock; 1683 atomic_t scrubs_running; 1684 atomic_t scrub_pause_req; 1685 atomic_t scrubs_paused; 1686 atomic_t scrub_cancel_req; 1687 wait_queue_head_t scrub_pause_wait; 1688 int scrub_workers_refcnt; 1689 struct btrfs_workqueue *scrub_workers; 1690 struct btrfs_workqueue *scrub_wr_completion_workers; 1691 struct btrfs_workqueue *scrub_nocow_workers; 1692 struct btrfs_workqueue *scrub_parity_workers; 1693 1694 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 1695 u32 check_integrity_print_mask; 1696 #endif 1697 /* 1698 * quota information 1699 */ 1700 unsigned int quota_enabled:1; 1701 1702 /* 1703 * quota_enabled only changes state after a commit. This holds the 1704 * next state. 1705 */ 1706 unsigned int pending_quota_state:1; 1707 1708 /* is qgroup tracking in a consistent state? */ 1709 u64 qgroup_flags; 1710 1711 /* holds configuration and tracking. 
Protected by qgroup_lock */ 1712 struct rb_root qgroup_tree; 1713 struct rb_root qgroup_op_tree; 1714 spinlock_t qgroup_lock; 1715 spinlock_t qgroup_op_lock; 1716 atomic_t qgroup_op_seq; 1717 1718 /* 1719 * used to avoid frequently calling ulist_alloc()/ulist_free() 1720 * when doing qgroup accounting, it must be protected by qgroup_lock. 1721 */ 1722 struct ulist *qgroup_ulist; 1723 1724 /* protect user change for quota operations */ 1725 struct mutex qgroup_ioctl_lock; 1726 1727 /* list of dirty qgroups to be written at next commit */ 1728 struct list_head dirty_qgroups; 1729 1730 /* used by qgroup for an efficient tree traversal */ 1731 u64 qgroup_seq; 1732 1733 /* qgroup rescan items */ 1734 struct mutex qgroup_rescan_lock; /* protects the progress item */ 1735 struct btrfs_key qgroup_rescan_progress; 1736 struct btrfs_workqueue *qgroup_rescan_workers; 1737 struct completion qgroup_rescan_completion; 1738 struct btrfs_work qgroup_rescan_work; 1739 1740 /* filesystem state */ 1741 unsigned long fs_state; 1742 1743 struct btrfs_delayed_root *delayed_root; 1744 1745 /* readahead tree */ 1746 spinlock_t reada_lock; 1747 struct radix_tree_root reada_tree; 1748 1749 /* Extent buffer radix tree */ 1750 spinlock_t buffer_lock; 1751 struct radix_tree_root buffer_radix; 1752 1753 /* next backup root to be overwritten */ 1754 int backup_root_index; 1755 1756 int num_tolerated_disk_barrier_failures; 1757 1758 /* device replace state */ 1759 struct btrfs_dev_replace dev_replace; 1760 1761 atomic_t mutually_exclusive_operation_running; 1762 1763 struct percpu_counter bio_counter; 1764 wait_queue_head_t replace_wait; 1765 1766 struct semaphore uuid_tree_rescan_sem; 1767 unsigned int update_uuid_tree_gen:1; 1768 1769 /* Used to reclaim the metadata space in the background. */ 1770 struct work_struct async_reclaim_work; 1771 1772 spinlock_t unused_bgs_lock; 1773 struct list_head unused_bgs; 1774 struct mutex unused_bg_unpin_mutex; 1775 struct mutex delete_unused_bgs_mutex; 1776 1777 /* For btrfs to record security options */ 1778 struct security_mnt_opts security_opts; 1779 1780 /* 1781 * Chunks that can't be freed yet (under a trim/discard operation) 1782 * and will be latter freed. Protected by fs_info->chunk_mutex. 1783 */ 1784 struct list_head pinned_chunks; 1785 }; 1786 1787 struct btrfs_subvolume_writers { 1788 struct percpu_counter counter; 1789 wait_queue_head_t wait; 1790 }; 1791 1792 /* 1793 * The state of btrfs root 1794 */ 1795 /* 1796 * btrfs_record_root_in_trans is a multi-step process, 1797 * and it can race with the balancing code. But the 1798 * race is very small, and only the first time the root 1799 * is added to each transaction. So IN_TRANS_SETUP 1800 * is used to tell us when more checks are required 1801 */ 1802 #define BTRFS_ROOT_IN_TRANS_SETUP 0 1803 #define BTRFS_ROOT_REF_COWS 1 1804 #define BTRFS_ROOT_TRACK_DIRTY 2 1805 #define BTRFS_ROOT_IN_RADIX 3 1806 #define BTRFS_ROOT_DUMMY_ROOT 4 1807 #define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 5 1808 #define BTRFS_ROOT_DEFRAG_RUNNING 6 1809 #define BTRFS_ROOT_FORCE_COW 7 1810 #define BTRFS_ROOT_MULTI_LOG_TASKS 8 1811 #define BTRFS_ROOT_DIRTY 9 1812 1813 /* 1814 * in ram representation of the tree. extent_root is used for all allocations 1815 * and for the extent tree extent_root root. 
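 *
 * Illustrative note (not part of the original header): the BTRFS_ROOT_*
 * state values above are bit numbers intended for the generic bitops on the
 * 'state' member of struct btrfs_root below, e.g.:
 *
 *	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
 *		...;
 *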
1816 */ 1817 struct btrfs_root { 1818 struct extent_buffer *node; 1819 1820 struct extent_buffer *commit_root; 1821 struct btrfs_root *log_root; 1822 struct btrfs_root *reloc_root; 1823 1824 unsigned long state; 1825 struct btrfs_root_item root_item; 1826 struct btrfs_key root_key; 1827 struct btrfs_fs_info *fs_info; 1828 struct extent_io_tree dirty_log_pages; 1829 1830 struct mutex objectid_mutex; 1831 1832 spinlock_t accounting_lock; 1833 struct btrfs_block_rsv *block_rsv; 1834 1835 /* free ino cache stuff */ 1836 struct btrfs_free_space_ctl *free_ino_ctl; 1837 enum btrfs_caching_type ino_cache_state; 1838 spinlock_t ino_cache_lock; 1839 wait_queue_head_t ino_cache_wait; 1840 struct btrfs_free_space_ctl *free_ino_pinned; 1841 u64 ino_cache_progress; 1842 struct inode *ino_cache_inode; 1843 1844 struct mutex log_mutex; 1845 wait_queue_head_t log_writer_wait; 1846 wait_queue_head_t log_commit_wait[2]; 1847 struct list_head log_ctxs[2]; 1848 atomic_t log_writers; 1849 atomic_t log_commit[2]; 1850 atomic_t log_batch; 1851 int log_transid; 1852 /* No matter the commit succeeds or not*/ 1853 int log_transid_committed; 1854 /* Just be updated when the commit succeeds. */ 1855 int last_log_commit; 1856 pid_t log_start_pid; 1857 1858 u64 objectid; 1859 u64 last_trans; 1860 1861 /* data allocations are done in sectorsize units */ 1862 u32 sectorsize; 1863 1864 /* node allocations are done in nodesize units */ 1865 u32 nodesize; 1866 1867 u32 stripesize; 1868 1869 u32 type; 1870 1871 u64 highest_objectid; 1872 1873 /* only used with CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */ 1874 u64 alloc_bytenr; 1875 1876 u64 defrag_trans_start; 1877 struct btrfs_key defrag_progress; 1878 struct btrfs_key defrag_max; 1879 char *name; 1880 1881 /* the dirty list is only used by non-reference counted roots */ 1882 struct list_head dirty_list; 1883 1884 struct list_head root_list; 1885 1886 spinlock_t log_extents_lock[2]; 1887 struct list_head logged_list[2]; 1888 1889 spinlock_t orphan_lock; 1890 atomic_t orphan_inodes; 1891 struct btrfs_block_rsv *orphan_block_rsv; 1892 int orphan_cleanup_state; 1893 1894 spinlock_t inode_lock; 1895 /* red-black tree that keeps track of in-memory inodes */ 1896 struct rb_root inode_tree; 1897 1898 /* 1899 * radix tree that keeps track of delayed nodes of every inode, 1900 * protected by inode_lock 1901 */ 1902 struct radix_tree_root delayed_nodes_tree; 1903 /* 1904 * right now this just gets used so that a root has its own devid 1905 * for stat. It may be used for more later 1906 */ 1907 dev_t anon_dev; 1908 1909 spinlock_t root_item_lock; 1910 atomic_t refs; 1911 1912 struct mutex delalloc_mutex; 1913 spinlock_t delalloc_lock; 1914 /* 1915 * all of the inodes that have delalloc bytes. It is possible for 1916 * this list to be empty even when there is still dirty data=ordered 1917 * extents waiting to finish IO. 
struct btrfs_ioctl_defrag_range_args {
        /* start of the defrag operation */
        __u64 start;

        /* number of bytes to defrag, use (u64)-1 to say all */
        __u64 len;

        /*
         * flags for the operation, which can include turning
         * on compression for this one defrag
         */
        __u64 flags;

        /*
         * any extent bigger than this will be considered
         * already defragged. Use 0 to take the kernel default.
         * Use 1 to say every single extent must be rewritten
         */
        __u32 extent_thresh;

        /*
         * which compression method to use if turning on compression
         * for this defrag operation. If unspecified, zlib will
         * be used
         */
        __u32 compress_type;

        /* spare for later */
        __u32 unused[4];
};

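/*
 * Illustrative sketch of how the structure above is typically filled for
 * the defrag-range ioctl. BTRFS_IOC_DEFRAG_RANGE and the
 * BTRFS_DEFRAG_RANGE_* flags are assumed to come from the uapi header
 * (include/uapi/linux/btrfs.h); this is an example, not a definition:
 *
 *	struct btrfs_ioctl_defrag_range_args range;
 *
 *	memset(&range, 0, sizeof(range));
 *	range.start = 0;
 *	range.len = (u64)-1;                        [whole file]
 *	range.extent_thresh = 256 * 1024;           [skip extents >= 256K]
 *	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;  [recompress as we go]
 *	ret = ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
 */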

/*
 * inode items have the data typically returned from stat and store other
 * info about object characteristics. There is one for every file and dir
 * in the FS
 */
#define BTRFS_INODE_ITEM_KEY		1
#define BTRFS_INODE_REF_KEY		12
#define BTRFS_INODE_EXTREF_KEY		13
#define BTRFS_XATTR_ITEM_KEY		24
#define BTRFS_ORPHAN_ITEM_KEY		48
/* reserve 2-15 close to the inode for later flexibility */

/*
 * dir items are the name -> inode pointers in a directory. There is one
 * for every name in a directory.
 */
#define BTRFS_DIR_LOG_ITEM_KEY	60
#define BTRFS_DIR_LOG_INDEX_KEY	72
#define BTRFS_DIR_ITEM_KEY	84
#define BTRFS_DIR_INDEX_KEY	96
/*
 * extent data is for file data
 */
#define BTRFS_EXTENT_DATA_KEY	108

/*
 * extent csums are stored in a separate tree and hold csums for
 * an entire extent on disk.
 */
#define BTRFS_EXTENT_CSUM_KEY	128

/*
 * root items point to tree roots. They are typically in the root
 * tree used by the super block to find all the other trees
 */
#define BTRFS_ROOT_ITEM_KEY	132

/*
 * root backrefs tie subvols and snapshots to the directory entries that
 * reference them
 */
#define BTRFS_ROOT_BACKREF_KEY	144

/*
 * root refs make a fast index for listing all of the snapshots and
 * subvolumes referenced by a given root. They point directly to the
 * directory item in the root that references the subvol
 */
#define BTRFS_ROOT_REF_KEY	156

/*
 * extent items are in the extent map tree. These record which blocks
 * are used, and how many references there are to each block
 */
#define BTRFS_EXTENT_ITEM_KEY	168

/*
 * The same as BTRFS_EXTENT_ITEM_KEY, except it is for metadata, where we
 * already know the length, so we save the level in key->offset instead of
 * the length.
 */
#define BTRFS_METADATA_ITEM_KEY	169

#define BTRFS_TREE_BLOCK_REF_KEY	176

#define BTRFS_EXTENT_DATA_REF_KEY	178

#define BTRFS_EXTENT_REF_V0_KEY		180

#define BTRFS_SHARED_BLOCK_REF_KEY	182

#define BTRFS_SHARED_DATA_REF_KEY	184

/*
 * block groups give us hints into the extent allocation trees:
 * which blocks are free, etc.
 */
#define BTRFS_BLOCK_GROUP_ITEM_KEY	192

#define BTRFS_DEV_EXTENT_KEY	204
#define BTRFS_DEV_ITEM_KEY	216
#define BTRFS_CHUNK_ITEM_KEY	228

/*
 * Records the overall state of the qgroups.
 * There's only one instance of this key present,
 * (0, BTRFS_QGROUP_STATUS_KEY, 0)
 */
#define BTRFS_QGROUP_STATUS_KEY		240
/*
 * Records the currently used space of the qgroup.
 * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid).
 */
#define BTRFS_QGROUP_INFO_KEY		242
/*
 * Contains the user configured limits for the qgroup.
 * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid).
 */
#define BTRFS_QGROUP_LIMIT_KEY		244
/*
 * Records the child-parent relationship of qgroups. For
 * each relation, 2 keys are present:
 * (childid, BTRFS_QGROUP_RELATION_KEY, parentid)
 * (parentid, BTRFS_QGROUP_RELATION_KEY, childid)
 */
#define BTRFS_QGROUP_RELATION_KEY	246

#define BTRFS_BALANCE_ITEM_KEY	248

/*
 * Persistently stores the io stats in the device tree.
 * One key for all stats, (0, BTRFS_DEV_STATS_KEY, devid).
 */
#define BTRFS_DEV_STATS_KEY	249

/*
 * Persistently stores the device replace state in the device tree.
 * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0).
 */
#define BTRFS_DEV_REPLACE_KEY	250

/*
 * Stores items that allow quick mapping of UUIDs to something else.
 * These items are part of the filesystem UUID tree.
 * The key is built like this:
 * (UUID_upper_64_bits, BTRFS_UUID_KEY*, UUID_lower_64_bits).
 */
#if BTRFS_UUID_SIZE != 16
#error "UUID items require BTRFS_UUID_SIZE == 16!"
#endif
#define BTRFS_UUID_KEY_SUBVOL	251	/* for UUIDs assigned to subvols */
#define BTRFS_UUID_KEY_RECEIVED_SUBVOL	252	/* for UUIDs assigned to
						 * received subvols */

/*
 * string items are for debugging. They just store a short string of
 * data in the FS
 */
#define BTRFS_STRING_ITEM_KEY	253

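/*
 * Illustrative sketch (assumed typical usage, not a definition made by
 * this header): every item is addressed by an (objectid, type, offset)
 * triple, so looking up e.g. the inode item of a file usually looks like:
 *
 *	struct btrfs_key key;
 *
 *	key.objectid = inode_number;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 */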
/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
#define BTRFS_MOUNT_NODATASUM		(1 << 0)
#define BTRFS_MOUNT_NODATACOW		(1 << 1)
#define BTRFS_MOUNT_NOBARRIER		(1 << 2)
#define BTRFS_MOUNT_SSD			(1 << 3)
#define BTRFS_MOUNT_DEGRADED		(1 << 4)
#define BTRFS_MOUNT_COMPRESS		(1 << 5)
#define BTRFS_MOUNT_NOTREELOG		(1 << 6)
#define BTRFS_MOUNT_FLUSHONCOMMIT	(1 << 7)
#define BTRFS_MOUNT_SSD_SPREAD		(1 << 8)
#define BTRFS_MOUNT_NOSSD		(1 << 9)
#define BTRFS_MOUNT_DISCARD		(1 << 10)
#define BTRFS_MOUNT_FORCE_COMPRESS	(1 << 11)
#define BTRFS_MOUNT_SPACE_CACHE		(1 << 12)
#define BTRFS_MOUNT_CLEAR_CACHE		(1 << 13)
#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED	(1 << 14)
#define BTRFS_MOUNT_ENOSPC_DEBUG	(1 << 15)
#define BTRFS_MOUNT_AUTO_DEFRAG		(1 << 16)
#define BTRFS_MOUNT_INODE_MAP_CACHE	(1 << 17)
#define BTRFS_MOUNT_RECOVERY		(1 << 18)
#define BTRFS_MOUNT_SKIP_BALANCE	(1 << 19)
#define BTRFS_MOUNT_CHECK_INTEGRITY	(1 << 20)
#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA	(1 << 21)
#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	(1 << 22)
#define BTRFS_MOUNT_RESCAN_UUID_TREE	(1 << 23)

#define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
#define BTRFS_DEFAULT_MAX_INLINE	(8192)

#define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(root, opt)	((root)->fs_info->mount_opt & \
					 BTRFS_MOUNT_##opt)

#define btrfs_set_and_info(root, opt, fmt, args...)			\
{									\
	if (!btrfs_test_opt(root, opt))					\
		btrfs_info(root->fs_info, fmt, ##args);			\
	btrfs_set_opt(root->fs_info->mount_opt, opt);			\
}

#define btrfs_clear_and_info(root, opt, fmt, args...)			\
{									\
	if (btrfs_test_opt(root, opt))					\
		btrfs_info(root->fs_info, fmt, ##args);			\
	btrfs_clear_opt(root->fs_info->mount_opt, opt);			\
}

/*
 * Requests for changes that need to be done during transaction commit.
 *
 * Internal mount options that are used for special handling of the real
 * mount options (eg. cannot be set during remount and have to be set during
 * transaction commit)
 */

#define BTRFS_PENDING_SET_INODE_MAP_CACHE	(0)
#define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE	(1)
#define BTRFS_PENDING_COMMIT			(2)

#define btrfs_test_pending(info, opt)	\
	test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
#define btrfs_set_pending(info, opt)	\
	set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
#define btrfs_clear_pending(info, opt)	\
	clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)

/*
 * Helpers for setting pending mount option changes.
 *
 * Expects corresponding macros
 * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name
 */
#define btrfs_set_pending_and_info(info, opt, fmt, args...)		\
do {									\
	if (!btrfs_raw_test_opt((info)->mount_opt, opt)) {		\
		btrfs_info((info), fmt, ##args);			\
		btrfs_set_pending((info), SET_##opt);			\
		btrfs_clear_pending((info), CLEAR_##opt);		\
	}								\
} while(0)

#define btrfs_clear_pending_and_info(info, opt, fmt, args...)
\ 2207 do { \ 2208 if (btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 2209 btrfs_info((info), fmt, ##args); \ 2210 btrfs_set_pending((info), CLEAR_##opt); \ 2211 btrfs_clear_pending((info), SET_##opt); \ 2212 } \ 2213 } while(0) 2214 2215 /* 2216 * Inode flags 2217 */ 2218 #define BTRFS_INODE_NODATASUM (1 << 0) 2219 #define BTRFS_INODE_NODATACOW (1 << 1) 2220 #define BTRFS_INODE_READONLY (1 << 2) 2221 #define BTRFS_INODE_NOCOMPRESS (1 << 3) 2222 #define BTRFS_INODE_PREALLOC (1 << 4) 2223 #define BTRFS_INODE_SYNC (1 << 5) 2224 #define BTRFS_INODE_IMMUTABLE (1 << 6) 2225 #define BTRFS_INODE_APPEND (1 << 7) 2226 #define BTRFS_INODE_NODUMP (1 << 8) 2227 #define BTRFS_INODE_NOATIME (1 << 9) 2228 #define BTRFS_INODE_DIRSYNC (1 << 10) 2229 #define BTRFS_INODE_COMPRESS (1 << 11) 2230 2231 #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) 2232 2233 struct btrfs_map_token { 2234 struct extent_buffer *eb; 2235 char *kaddr; 2236 unsigned long offset; 2237 }; 2238 2239 static inline void btrfs_init_map_token (struct btrfs_map_token *token) 2240 { 2241 token->kaddr = NULL; 2242 } 2243 2244 /* some macros to generate set/get funcs for the struct fields. This 2245 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 2246 * one for u8: 2247 */ 2248 #define le8_to_cpu(v) (v) 2249 #define cpu_to_le8(v) (v) 2250 #define __le8 u8 2251 2252 #define read_eb_member(eb, ptr, type, member, result) ( \ 2253 read_extent_buffer(eb, (char *)(result), \ 2254 ((unsigned long)(ptr)) + \ 2255 offsetof(type, member), \ 2256 sizeof(((type *)0)->member))) 2257 2258 #define write_eb_member(eb, ptr, type, member, result) ( \ 2259 write_extent_buffer(eb, (char *)(result), \ 2260 ((unsigned long)(ptr)) + \ 2261 offsetof(type, member), \ 2262 sizeof(((type *)0)->member))) 2263 2264 #define DECLARE_BTRFS_SETGET_BITS(bits) \ 2265 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ 2266 unsigned long off, \ 2267 struct btrfs_map_token *token); \ 2268 void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \ 2269 unsigned long off, u##bits val, \ 2270 struct btrfs_map_token *token); \ 2271 static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \ 2272 unsigned long off) \ 2273 { \ 2274 return btrfs_get_token_##bits(eb, ptr, off, NULL); \ 2275 } \ 2276 static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ 2277 unsigned long off, u##bits val) \ 2278 { \ 2279 btrfs_set_token_##bits(eb, ptr, off, val, NULL); \ 2280 } 2281 2282 DECLARE_BTRFS_SETGET_BITS(8) 2283 DECLARE_BTRFS_SETGET_BITS(16) 2284 DECLARE_BTRFS_SETGET_BITS(32) 2285 DECLARE_BTRFS_SETGET_BITS(64) 2286 2287 #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ 2288 static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \ 2289 { \ 2290 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2291 return btrfs_get_##bits(eb, s, offsetof(type, member)); \ 2292 } \ 2293 static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \ 2294 u##bits val) \ 2295 { \ 2296 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2297 btrfs_set_##bits(eb, s, offsetof(type, member), val); \ 2298 } \ 2299 static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \ 2300 struct btrfs_map_token *token) \ 2301 { \ 2302 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2303 return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \ 2304 } \ 2305 static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ 2306 type *s, 
u##bits val, \ 2307 struct btrfs_map_token *token) \ 2308 { \ 2309 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2310 btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \ 2311 } 2312 2313 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ 2314 static inline u##bits btrfs_##name(struct extent_buffer *eb) \ 2315 { \ 2316 type *p = page_address(eb->pages[0]); \ 2317 u##bits res = le##bits##_to_cpu(p->member); \ 2318 return res; \ 2319 } \ 2320 static inline void btrfs_set_##name(struct extent_buffer *eb, \ 2321 u##bits val) \ 2322 { \ 2323 type *p = page_address(eb->pages[0]); \ 2324 p->member = cpu_to_le##bits(val); \ 2325 } 2326 2327 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ 2328 static inline u##bits btrfs_##name(type *s) \ 2329 { \ 2330 return le##bits##_to_cpu(s->member); \ 2331 } \ 2332 static inline void btrfs_set_##name(type *s, u##bits val) \ 2333 { \ 2334 s->member = cpu_to_le##bits(val); \ 2335 } 2336 2337 BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64); 2338 BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64); 2339 BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64); 2340 BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32); 2341 BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32); 2342 BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item, 2343 start_offset, 64); 2344 BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32); 2345 BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64); 2346 BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32); 2347 BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8); 2348 BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8); 2349 BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64); 2350 2351 BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64); 2352 BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item, 2353 total_bytes, 64); 2354 BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item, 2355 bytes_used, 64); 2356 BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item, 2357 io_align, 32); 2358 BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item, 2359 io_width, 32); 2360 BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item, 2361 sector_size, 32); 2362 BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64); 2363 BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item, 2364 dev_group, 32); 2365 BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item, 2366 seek_speed, 8); 2367 BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item, 2368 bandwidth, 8); 2369 BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item, 2370 generation, 64); 2371 2372 static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d) 2373 { 2374 return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid); 2375 } 2376 2377 static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d) 2378 { 2379 return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid); 2380 } 2381 2382 BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64); 2383 BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64); 2384 BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, 
stripe_len, 64); 2385 BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32); 2386 BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32); 2387 BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32); 2388 BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64); 2389 BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16); 2390 BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16); 2391 BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64); 2392 BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64); 2393 2394 static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s) 2395 { 2396 return (char *)s + offsetof(struct btrfs_stripe, dev_uuid); 2397 } 2398 2399 BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64); 2400 BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64); 2401 BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk, 2402 stripe_len, 64); 2403 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, 2404 io_align, 32); 2405 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, 2406 io_width, 32); 2407 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk, 2408 sector_size, 32); 2409 BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64); 2410 BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk, 2411 num_stripes, 16); 2412 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk, 2413 sub_stripes, 16); 2414 BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64); 2415 BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64); 2416 2417 static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, 2418 int nr) 2419 { 2420 unsigned long offset = (unsigned long)c; 2421 offset += offsetof(struct btrfs_chunk, stripe); 2422 offset += nr * sizeof(struct btrfs_stripe); 2423 return (struct btrfs_stripe *)offset; 2424 } 2425 2426 static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) 2427 { 2428 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); 2429 } 2430 2431 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, 2432 struct btrfs_chunk *c, int nr) 2433 { 2434 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); 2435 } 2436 2437 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, 2438 struct btrfs_chunk *c, int nr) 2439 { 2440 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); 2441 } 2442 2443 /* struct btrfs_block_group_item */ 2444 BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, 2445 used, 64); 2446 BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, 2447 used, 64); 2448 BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, 2449 struct btrfs_block_group_item, chunk_objectid, 64); 2450 2451 BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, 2452 struct btrfs_block_group_item, chunk_objectid, 64); 2453 BTRFS_SETGET_FUNCS(disk_block_group_flags, 2454 struct btrfs_block_group_item, flags, 64); 2455 BTRFS_SETGET_STACK_FUNCS(block_group_flags, 2456 struct btrfs_block_group_item, flags, 64); 2457 2458 /* struct btrfs_inode_ref */ 2459 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); 2460 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); 2461 2462 /* struct btrfs_inode_extref */ 2463 BTRFS_SETGET_FUNCS(inode_extref_parent, 
struct btrfs_inode_extref, 2464 parent_objectid, 64); 2465 BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref, 2466 name_len, 16); 2467 BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64); 2468 2469 /* struct btrfs_inode_item */ 2470 BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); 2471 BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); 2472 BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64); 2473 BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64); 2474 BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64); 2475 BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64); 2476 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); 2477 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); 2478 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); 2479 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); 2480 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); 2481 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); 2482 BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, 2483 generation, 64); 2484 BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, 2485 sequence, 64); 2486 BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, 2487 transid, 64); 2488 BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); 2489 BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, 2490 nbytes, 64); 2491 BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, 2492 block_group, 64); 2493 BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); 2494 BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); 2495 BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); 2496 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); 2497 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); 2498 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); 2499 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); 2500 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); 2501 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); 2502 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); 2503 2504 /* struct btrfs_dev_extent */ 2505 BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, 2506 chunk_tree, 64); 2507 BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, 2508 chunk_objectid, 64); 2509 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, 2510 chunk_offset, 64); 2511 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); 2512 2513 static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) 2514 { 2515 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); 2516 return (unsigned long)dev + ptr; 2517 } 2518 2519 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); 2520 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, 2521 generation, 64); 2522 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); 2523 2524 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); 2525 2526 
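/*
 * Illustrative sketch (assumed usage, not a definition): the accessors
 * generated by BTRFS_SETGET_FUNCS above read and write fields of an
 * on-disk item in place in an extent buffer, e.g. bumping the reference
 * count of an extent item found at a given leaf slot:
 *
 *	struct btrfs_extent_item *ei;
 *	u64 refs;
 *
 *	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 *	refs = btrfs_extent_refs(leaf, ei);
 *	btrfs_set_extent_refs(leaf, ei, refs + 1);
 */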
2527 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); 2528 2529 static inline void btrfs_tree_block_key(struct extent_buffer *eb, 2530 struct btrfs_tree_block_info *item, 2531 struct btrfs_disk_key *key) 2532 { 2533 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 2534 } 2535 2536 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, 2537 struct btrfs_tree_block_info *item, 2538 struct btrfs_disk_key *key) 2539 { 2540 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 2541 } 2542 2543 BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, 2544 root, 64); 2545 BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref, 2546 objectid, 64); 2547 BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref, 2548 offset, 64); 2549 BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, 2550 count, 32); 2551 2552 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, 2553 count, 32); 2554 2555 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref, 2556 type, 8); 2557 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref, 2558 offset, 64); 2559 2560 static inline u32 btrfs_extent_inline_ref_size(int type) 2561 { 2562 if (type == BTRFS_TREE_BLOCK_REF_KEY || 2563 type == BTRFS_SHARED_BLOCK_REF_KEY) 2564 return sizeof(struct btrfs_extent_inline_ref); 2565 if (type == BTRFS_SHARED_DATA_REF_KEY) 2566 return sizeof(struct btrfs_shared_data_ref) + 2567 sizeof(struct btrfs_extent_inline_ref); 2568 if (type == BTRFS_EXTENT_DATA_REF_KEY) 2569 return sizeof(struct btrfs_extent_data_ref) + 2570 offsetof(struct btrfs_extent_inline_ref, offset); 2571 BUG(); 2572 return 0; 2573 } 2574 2575 BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); 2576 BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, 2577 generation, 64); 2578 BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); 2579 BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); 2580 2581 /* struct btrfs_node */ 2582 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); 2583 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); 2584 BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, 2585 blockptr, 64); 2586 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, 2587 generation, 64); 2588 2589 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) 2590 { 2591 unsigned long ptr; 2592 ptr = offsetof(struct btrfs_node, ptrs) + 2593 sizeof(struct btrfs_key_ptr) * nr; 2594 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); 2595 } 2596 2597 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, 2598 int nr, u64 val) 2599 { 2600 unsigned long ptr; 2601 ptr = offsetof(struct btrfs_node, ptrs) + 2602 sizeof(struct btrfs_key_ptr) * nr; 2603 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); 2604 } 2605 2606 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) 2607 { 2608 unsigned long ptr; 2609 ptr = offsetof(struct btrfs_node, ptrs) + 2610 sizeof(struct btrfs_key_ptr) * nr; 2611 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); 2612 } 2613 2614 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, 2615 int nr, u64 val) 2616 { 2617 unsigned long ptr; 2618 ptr = offsetof(struct btrfs_node, ptrs) + 2619 sizeof(struct 
btrfs_key_ptr) * nr; 2620 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); 2621 } 2622 2623 static inline unsigned long btrfs_node_key_ptr_offset(int nr) 2624 { 2625 return offsetof(struct btrfs_node, ptrs) + 2626 sizeof(struct btrfs_key_ptr) * nr; 2627 } 2628 2629 void btrfs_node_key(struct extent_buffer *eb, 2630 struct btrfs_disk_key *disk_key, int nr); 2631 2632 static inline void btrfs_set_node_key(struct extent_buffer *eb, 2633 struct btrfs_disk_key *disk_key, int nr) 2634 { 2635 unsigned long ptr; 2636 ptr = btrfs_node_key_ptr_offset(nr); 2637 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, 2638 struct btrfs_key_ptr, key, disk_key); 2639 } 2640 2641 /* struct btrfs_item */ 2642 BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32); 2643 BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32); 2644 BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32); 2645 BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32); 2646 2647 static inline unsigned long btrfs_item_nr_offset(int nr) 2648 { 2649 return offsetof(struct btrfs_leaf, items) + 2650 sizeof(struct btrfs_item) * nr; 2651 } 2652 2653 static inline struct btrfs_item *btrfs_item_nr(int nr) 2654 { 2655 return (struct btrfs_item *)btrfs_item_nr_offset(nr); 2656 } 2657 2658 static inline u32 btrfs_item_end(struct extent_buffer *eb, 2659 struct btrfs_item *item) 2660 { 2661 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); 2662 } 2663 2664 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) 2665 { 2666 return btrfs_item_end(eb, btrfs_item_nr(nr)); 2667 } 2668 2669 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) 2670 { 2671 return btrfs_item_offset(eb, btrfs_item_nr(nr)); 2672 } 2673 2674 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) 2675 { 2676 return btrfs_item_size(eb, btrfs_item_nr(nr)); 2677 } 2678 2679 static inline void btrfs_item_key(struct extent_buffer *eb, 2680 struct btrfs_disk_key *disk_key, int nr) 2681 { 2682 struct btrfs_item *item = btrfs_item_nr(nr); 2683 read_eb_member(eb, item, struct btrfs_item, key, disk_key); 2684 } 2685 2686 static inline void btrfs_set_item_key(struct extent_buffer *eb, 2687 struct btrfs_disk_key *disk_key, int nr) 2688 { 2689 struct btrfs_item *item = btrfs_item_nr(nr); 2690 write_eb_member(eb, item, struct btrfs_item, key, disk_key); 2691 } 2692 2693 BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64); 2694 2695 /* 2696 * struct btrfs_root_ref 2697 */ 2698 BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64); 2699 BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64); 2700 BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16); 2701 2702 /* struct btrfs_dir_item */ 2703 BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16); 2704 BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8); 2705 BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16); 2706 BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64); 2707 BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8); 2708 BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, 2709 data_len, 16); 2710 BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, 2711 name_len, 16); 2712 BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, 2713 transid, 64); 2714 2715 static inline void btrfs_dir_item_key(struct extent_buffer *eb, 2716 
struct btrfs_dir_item *item, 2717 struct btrfs_disk_key *key) 2718 { 2719 read_eb_member(eb, item, struct btrfs_dir_item, location, key); 2720 } 2721 2722 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, 2723 struct btrfs_dir_item *item, 2724 struct btrfs_disk_key *key) 2725 { 2726 write_eb_member(eb, item, struct btrfs_dir_item, location, key); 2727 } 2728 2729 BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, 2730 num_entries, 64); 2731 BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, 2732 num_bitmaps, 64); 2733 BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, 2734 generation, 64); 2735 2736 static inline void btrfs_free_space_key(struct extent_buffer *eb, 2737 struct btrfs_free_space_header *h, 2738 struct btrfs_disk_key *key) 2739 { 2740 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); 2741 } 2742 2743 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, 2744 struct btrfs_free_space_header *h, 2745 struct btrfs_disk_key *key) 2746 { 2747 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); 2748 } 2749 2750 /* struct btrfs_disk_key */ 2751 BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, 2752 objectid, 64); 2753 BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); 2754 BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); 2755 2756 static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, 2757 struct btrfs_disk_key *disk) 2758 { 2759 cpu->offset = le64_to_cpu(disk->offset); 2760 cpu->type = disk->type; 2761 cpu->objectid = le64_to_cpu(disk->objectid); 2762 } 2763 2764 static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, 2765 struct btrfs_key *cpu) 2766 { 2767 disk->offset = cpu_to_le64(cpu->offset); 2768 disk->type = cpu->type; 2769 disk->objectid = cpu_to_le64(cpu->objectid); 2770 } 2771 2772 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, 2773 struct btrfs_key *key, int nr) 2774 { 2775 struct btrfs_disk_key disk_key; 2776 btrfs_node_key(eb, &disk_key, nr); 2777 btrfs_disk_key_to_cpu(key, &disk_key); 2778 } 2779 2780 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, 2781 struct btrfs_key *key, int nr) 2782 { 2783 struct btrfs_disk_key disk_key; 2784 btrfs_item_key(eb, &disk_key, nr); 2785 btrfs_disk_key_to_cpu(key, &disk_key); 2786 } 2787 2788 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, 2789 struct btrfs_dir_item *item, 2790 struct btrfs_key *key) 2791 { 2792 struct btrfs_disk_key disk_key; 2793 btrfs_dir_item_key(eb, item, &disk_key); 2794 btrfs_disk_key_to_cpu(key, &disk_key); 2795 } 2796 2797 2798 static inline u8 btrfs_key_type(struct btrfs_key *key) 2799 { 2800 return key->type; 2801 } 2802 2803 static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val) 2804 { 2805 key->type = val; 2806 } 2807 2808 /* struct btrfs_header */ 2809 BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); 2810 BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, 2811 generation, 64); 2812 BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64); 2813 BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32); 2814 BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64); 2815 BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8); 2816 BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header, 2817 
generation, 64); 2818 BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64); 2819 BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, 2820 nritems, 32); 2821 BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); 2822 2823 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) 2824 { 2825 return (btrfs_header_flags(eb) & flag) == flag; 2826 } 2827 2828 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) 2829 { 2830 u64 flags = btrfs_header_flags(eb); 2831 btrfs_set_header_flags(eb, flags | flag); 2832 return (flags & flag) == flag; 2833 } 2834 2835 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) 2836 { 2837 u64 flags = btrfs_header_flags(eb); 2838 btrfs_set_header_flags(eb, flags & ~flag); 2839 return (flags & flag) == flag; 2840 } 2841 2842 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) 2843 { 2844 u64 flags = btrfs_header_flags(eb); 2845 return flags >> BTRFS_BACKREF_REV_SHIFT; 2846 } 2847 2848 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, 2849 int rev) 2850 { 2851 u64 flags = btrfs_header_flags(eb); 2852 flags &= ~BTRFS_BACKREF_REV_MASK; 2853 flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT; 2854 btrfs_set_header_flags(eb, flags); 2855 } 2856 2857 static inline unsigned long btrfs_header_fsid(void) 2858 { 2859 return offsetof(struct btrfs_header, fsid); 2860 } 2861 2862 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) 2863 { 2864 return offsetof(struct btrfs_header, chunk_tree_uuid); 2865 } 2866 2867 static inline int btrfs_is_leaf(struct extent_buffer *eb) 2868 { 2869 return btrfs_header_level(eb) == 0; 2870 } 2871 2872 /* struct btrfs_root_item */ 2873 BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, 2874 generation, 64); 2875 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); 2876 BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64); 2877 BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8); 2878 2879 BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, 2880 generation, 64); 2881 BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64); 2882 BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8); 2883 BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64); 2884 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); 2885 BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64); 2886 BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64); 2887 BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); 2888 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, 2889 last_snapshot, 64); 2890 BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item, 2891 generation_v2, 64); 2892 BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, 2893 ctransid, 64); 2894 BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, 2895 otransid, 64); 2896 BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, 2897 stransid, 64); 2898 BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, 2899 rtransid, 64); 2900 2901 static inline bool btrfs_root_readonly(struct btrfs_root *root) 2902 { 2903 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; 2904 } 2905 2906 static inline bool btrfs_root_dead(struct 
btrfs_root *root) 2907 { 2908 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; 2909 } 2910 2911 /* struct btrfs_root_backup */ 2912 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, 2913 tree_root, 64); 2914 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup, 2915 tree_root_gen, 64); 2916 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup, 2917 tree_root_level, 8); 2918 2919 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup, 2920 chunk_root, 64); 2921 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup, 2922 chunk_root_gen, 64); 2923 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup, 2924 chunk_root_level, 8); 2925 2926 BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup, 2927 extent_root, 64); 2928 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup, 2929 extent_root_gen, 64); 2930 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup, 2931 extent_root_level, 8); 2932 2933 BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup, 2934 fs_root, 64); 2935 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup, 2936 fs_root_gen, 64); 2937 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup, 2938 fs_root_level, 8); 2939 2940 BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup, 2941 dev_root, 64); 2942 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup, 2943 dev_root_gen, 64); 2944 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup, 2945 dev_root_level, 8); 2946 2947 BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup, 2948 csum_root, 64); 2949 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup, 2950 csum_root_gen, 64); 2951 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup, 2952 csum_root_level, 8); 2953 BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup, 2954 total_bytes, 64); 2955 BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, 2956 bytes_used, 64); 2957 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, 2958 num_devices, 64); 2959 2960 /* struct btrfs_balance_item */ 2961 BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); 2962 2963 static inline void btrfs_balance_data(struct extent_buffer *eb, 2964 struct btrfs_balance_item *bi, 2965 struct btrfs_disk_balance_args *ba) 2966 { 2967 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2968 } 2969 2970 static inline void btrfs_set_balance_data(struct extent_buffer *eb, 2971 struct btrfs_balance_item *bi, 2972 struct btrfs_disk_balance_args *ba) 2973 { 2974 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2975 } 2976 2977 static inline void btrfs_balance_meta(struct extent_buffer *eb, 2978 struct btrfs_balance_item *bi, 2979 struct btrfs_disk_balance_args *ba) 2980 { 2981 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2982 } 2983 2984 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, 2985 struct btrfs_balance_item *bi, 2986 struct btrfs_disk_balance_args *ba) 2987 { 2988 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2989 } 2990 2991 static inline void btrfs_balance_sys(struct extent_buffer *eb, 2992 struct btrfs_balance_item *bi, 2993 struct btrfs_disk_balance_args *ba) 2994 { 2995 read_eb_member(eb, bi, struct 
btrfs_balance_item, sys, ba); 2996 } 2997 2998 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, 2999 struct btrfs_balance_item *bi, 3000 struct btrfs_disk_balance_args *ba) 3001 { 3002 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 3003 } 3004 3005 static inline void 3006 btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 3007 struct btrfs_disk_balance_args *disk) 3008 { 3009 memset(cpu, 0, sizeof(*cpu)); 3010 3011 cpu->profiles = le64_to_cpu(disk->profiles); 3012 cpu->usage = le64_to_cpu(disk->usage); 3013 cpu->devid = le64_to_cpu(disk->devid); 3014 cpu->pstart = le64_to_cpu(disk->pstart); 3015 cpu->pend = le64_to_cpu(disk->pend); 3016 cpu->vstart = le64_to_cpu(disk->vstart); 3017 cpu->vend = le64_to_cpu(disk->vend); 3018 cpu->target = le64_to_cpu(disk->target); 3019 cpu->flags = le64_to_cpu(disk->flags); 3020 cpu->limit = le64_to_cpu(disk->limit); 3021 } 3022 3023 static inline void 3024 btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 3025 struct btrfs_balance_args *cpu) 3026 { 3027 memset(disk, 0, sizeof(*disk)); 3028 3029 disk->profiles = cpu_to_le64(cpu->profiles); 3030 disk->usage = cpu_to_le64(cpu->usage); 3031 disk->devid = cpu_to_le64(cpu->devid); 3032 disk->pstart = cpu_to_le64(cpu->pstart); 3033 disk->pend = cpu_to_le64(cpu->pend); 3034 disk->vstart = cpu_to_le64(cpu->vstart); 3035 disk->vend = cpu_to_le64(cpu->vend); 3036 disk->target = cpu_to_le64(cpu->target); 3037 disk->flags = cpu_to_le64(cpu->flags); 3038 disk->limit = cpu_to_le64(cpu->limit); 3039 } 3040 3041 /* struct btrfs_super_block */ 3042 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); 3043 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64); 3044 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, 3045 generation, 64); 3046 BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); 3047 BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, 3048 struct btrfs_super_block, sys_chunk_array_size, 32); 3049 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation, 3050 struct btrfs_super_block, chunk_root_generation, 64); 3051 BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, 3052 root_level, 8); 3053 BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, 3054 chunk_root, 64); 3055 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, 3056 chunk_root_level, 8); 3057 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, 3058 log_root, 64); 3059 BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block, 3060 log_root_transid, 64); 3061 BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block, 3062 log_root_level, 8); 3063 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block, 3064 total_bytes, 64); 3065 BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block, 3066 bytes_used, 64); 3067 BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block, 3068 sectorsize, 32); 3069 BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block, 3070 nodesize, 32); 3071 BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block, 3072 stripesize, 32); 3073 BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block, 3074 root_dir_objectid, 64); 3075 BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, 3076 num_devices, 64); 3077 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, 3078 compat_flags, 64); 3079 
BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, 3080 compat_ro_flags, 64); 3081 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, 3082 incompat_flags, 64); 3083 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, 3084 csum_type, 16); 3085 BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, 3086 cache_generation, 64); 3087 BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); 3088 BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, 3089 uuid_tree_generation, 64); 3090 3091 static inline int btrfs_super_csum_size(struct btrfs_super_block *s) 3092 { 3093 u16 t = btrfs_super_csum_type(s); 3094 /* 3095 * csum type is validated at mount time 3096 */ 3097 return btrfs_csum_sizes[t]; 3098 } 3099 3100 static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) 3101 { 3102 return offsetof(struct btrfs_leaf, items); 3103 } 3104 3105 /* struct btrfs_file_extent_item */ 3106 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); 3107 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, 3108 struct btrfs_file_extent_item, disk_bytenr, 64); 3109 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset, 3110 struct btrfs_file_extent_item, offset, 64); 3111 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation, 3112 struct btrfs_file_extent_item, generation, 64); 3113 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes, 3114 struct btrfs_file_extent_item, num_bytes, 64); 3115 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes, 3116 struct btrfs_file_extent_item, disk_num_bytes, 64); 3117 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, 3118 struct btrfs_file_extent_item, compression, 8); 3119 3120 static inline unsigned long 3121 btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) 3122 { 3123 return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START; 3124 } 3125 3126 static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) 3127 { 3128 return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize; 3129 } 3130 3131 BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, 3132 disk_bytenr, 64); 3133 BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, 3134 generation, 64); 3135 BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item, 3136 disk_num_bytes, 64); 3137 BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, 3138 offset, 64); 3139 BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, 3140 num_bytes, 64); 3141 BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item, 3142 ram_bytes, 64); 3143 BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item, 3144 compression, 8); 3145 BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item, 3146 encryption, 8); 3147 BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, 3148 other_encoding, 16); 3149 3150 /* 3151 * this returns the number of bytes used by the item on disk, minus the 3152 * size of any extent headers. If a file is compressed on disk, this is 3153 * the compressed size 3154 */ 3155 static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, 3156 struct btrfs_item *e) 3157 { 3158 return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; 3159 } 3160 3161 /* this returns the number of file bytes represented by the inline item. 
3162 * If an item is compressed, this is the uncompressed size 3163 */ 3164 static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, 3165 int slot, 3166 struct btrfs_file_extent_item *fi) 3167 { 3168 struct btrfs_map_token token; 3169 3170 btrfs_init_map_token(&token); 3171 /* 3172 * return the space used on disk if this item isn't 3173 * compressed or encoded 3174 */ 3175 if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 && 3176 btrfs_token_file_extent_encryption(eb, fi, &token) == 0 && 3177 btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) { 3178 return btrfs_file_extent_inline_item_len(eb, 3179 btrfs_item_nr(slot)); 3180 } 3181 3182 /* otherwise use the ram bytes field */ 3183 return btrfs_token_file_extent_ram_bytes(eb, fi, &token); 3184 } 3185 3186 3187 /* btrfs_dev_stats_item */ 3188 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, 3189 struct btrfs_dev_stats_item *ptr, 3190 int index) 3191 { 3192 u64 val; 3193 3194 read_extent_buffer(eb, &val, 3195 offsetof(struct btrfs_dev_stats_item, values) + 3196 ((unsigned long)ptr) + (index * sizeof(u64)), 3197 sizeof(val)); 3198 return val; 3199 } 3200 3201 static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, 3202 struct btrfs_dev_stats_item *ptr, 3203 int index, u64 val) 3204 { 3205 write_extent_buffer(eb, &val, 3206 offsetof(struct btrfs_dev_stats_item, values) + 3207 ((unsigned long)ptr) + (index * sizeof(u64)), 3208 sizeof(val)); 3209 } 3210 3211 /* btrfs_qgroup_status_item */ 3212 BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item, 3213 generation, 64); 3214 BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item, 3215 version, 64); 3216 BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item, 3217 flags, 64); 3218 BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item, 3219 rescan, 64); 3220 3221 /* btrfs_qgroup_info_item */ 3222 BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item, 3223 generation, 64); 3224 BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64); 3225 BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item, 3226 rfer_cmpr, 64); 3227 BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64); 3228 BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item, 3229 excl_cmpr, 64); 3230 3231 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation, 3232 struct btrfs_qgroup_info_item, generation, 64); 3233 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item, 3234 rfer, 64); 3235 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr, 3236 struct btrfs_qgroup_info_item, rfer_cmpr, 64); 3237 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item, 3238 excl, 64); 3239 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr, 3240 struct btrfs_qgroup_info_item, excl_cmpr, 64); 3241 3242 /* btrfs_qgroup_limit_item */ 3243 BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, 3244 flags, 64); 3245 BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item, 3246 max_rfer, 64); 3247 BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item, 3248 max_excl, 64); 3249 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item, 3250 rsv_rfer, 64); 3251 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item, 3252 rsv_excl, 64); 3253 3254 /* btrfs_dev_replace_item */ 3255 
BTRFS_SETGET_FUNCS(dev_replace_src_devid, 3256 struct btrfs_dev_replace_item, src_devid, 64); 3257 BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode, 3258 struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode, 3259 64); 3260 BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item, 3261 replace_state, 64); 3262 BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item, 3263 time_started, 64); 3264 BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item, 3265 time_stopped, 64); 3266 BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item, 3267 num_write_errors, 64); 3268 BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors, 3269 struct btrfs_dev_replace_item, num_uncorrectable_read_errors, 3270 64); 3271 BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item, 3272 cursor_left, 64); 3273 BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item, 3274 cursor_right, 64); 3275 3276 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid, 3277 struct btrfs_dev_replace_item, src_devid, 64); 3278 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode, 3279 struct btrfs_dev_replace_item, 3280 cont_reading_from_srcdev_mode, 64); 3281 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state, 3282 struct btrfs_dev_replace_item, replace_state, 64); 3283 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started, 3284 struct btrfs_dev_replace_item, time_started, 64); 3285 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped, 3286 struct btrfs_dev_replace_item, time_stopped, 64); 3287 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors, 3288 struct btrfs_dev_replace_item, num_write_errors, 64); 3289 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors, 3290 struct btrfs_dev_replace_item, 3291 num_uncorrectable_read_errors, 64); 3292 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left, 3293 struct btrfs_dev_replace_item, cursor_left, 64); 3294 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right, 3295 struct btrfs_dev_replace_item, cursor_right, 64); 3296 3297 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) 3298 { 3299 return sb->s_fs_info; 3300 } 3301 3302 /* helper function to cast into the data area of the leaf. */ 3303 #define btrfs_item_ptr(leaf, slot, type) \ 3304 ((type *)(btrfs_leaf_data(leaf) + \ 3305 btrfs_item_offset_nr(leaf, slot))) 3306 3307 #define btrfs_item_ptr_offset(leaf, slot) \ 3308 ((unsigned long)(btrfs_leaf_data(leaf) + \ 3309 btrfs_item_offset_nr(leaf, slot))) 3310 3311 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) 3312 { 3313 return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && 3314 (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); 3315 } 3316 3317 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) 3318 { 3319 return mapping_gfp_mask(mapping) & ~__GFP_FS; 3320 } 3321 3322 /* extent-tree.c */ 3323 3324 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes); 3325 3326 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, 3327 unsigned num_items) 3328 { 3329 return (root->nodesize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * 3330 2 * num_items; 3331 } 3332 3333 /* 3334 * Doing a truncate won't result in new nodes or leaves, just what we need for 3335 * COW. 
3336 */ 3337 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root, 3338 unsigned num_items) 3339 { 3340 return root->nodesize * BTRFS_MAX_LEVEL * num_items; 3341 } 3342 3343 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 3344 struct btrfs_root *root); 3345 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, 3346 struct btrfs_root *root); 3347 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3348 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 3349 struct btrfs_root *root, unsigned long count); 3350 int btrfs_async_run_delayed_refs(struct btrfs_root *root, 3351 unsigned long count, int wait); 3352 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); 3353 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 3354 struct btrfs_root *root, u64 bytenr, 3355 u64 offset, int metadata, u64 *refs, u64 *flags); 3356 int btrfs_pin_extent(struct btrfs_root *root, 3357 u64 bytenr, u64 num, int reserved); 3358 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root, 3359 u64 bytenr, u64 num_bytes); 3360 int btrfs_exclude_logged_extents(struct btrfs_root *root, 3361 struct extent_buffer *eb); 3362 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 3363 struct btrfs_root *root, 3364 u64 objectid, u64 offset, u64 bytenr); 3365 struct btrfs_block_group_cache *btrfs_lookup_block_group( 3366 struct btrfs_fs_info *info, 3367 u64 bytenr); 3368 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3369 int get_block_group_index(struct btrfs_block_group_cache *cache); 3370 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 3371 struct btrfs_root *root, u64 parent, 3372 u64 root_objectid, 3373 struct btrfs_disk_key *key, int level, 3374 u64 hint, u64 empty_size); 3375 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3376 struct btrfs_root *root, 3377 struct extent_buffer *buf, 3378 u64 parent, int last_ref); 3379 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 3380 struct btrfs_root *root, 3381 u64 root_objectid, u64 owner, 3382 u64 offset, struct btrfs_key *ins); 3383 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 3384 struct btrfs_root *root, 3385 u64 root_objectid, u64 owner, u64 offset, 3386 struct btrfs_key *ins); 3387 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes, 3388 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 3389 struct btrfs_key *ins, int is_data, int delalloc); 3390 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3391 struct extent_buffer *buf, int full_backref); 3392 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3393 struct extent_buffer *buf, int full_backref); 3394 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 3395 struct btrfs_root *root, 3396 u64 bytenr, u64 num_bytes, u64 flags, 3397 int level, int is_data); 3398 int btrfs_free_extent(struct btrfs_trans_handle *trans, 3399 struct btrfs_root *root, 3400 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 3401 u64 owner, u64 offset, int no_quota); 3402 3403 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len, 3404 int delalloc); 3405 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 3406 u64 start, u64 len); 3407 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 3408 struct btrfs_root *root); 3409 int btrfs_finish_extent_commit(struct 
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset,
			 int no_quota);

int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_root *root);
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start,
			     struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);

enum btrfs_reserve_flush_enum {
	/* If we are in the transaction, we can't flush anything. */
	BTRFS_RESERVE_NO_FLUSH,
	/*
	 * Flushing delalloc may cause a deadlock somewhere, in that
	 * case use BTRFS_RESERVE_FLUSH_LIMIT.
	 */
	BTRFS_RESERVE_FLUSH_LIMIT,
	BTRFS_RESERVE_FLUSH_ALL,
};

int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode);
void btrfs_orphan_release_metadata(struct inode *inode);
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int nitems,
				     u64 *qgroup_reserved, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
					      unsigned short type);
void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv);
void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
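/*
 * Example: callers pick the flush mode based on how much reclaim their
 * context can tolerate. An illustrative sketch (rsv and num_bytes stand in
 * for the caller's reservation and size, error handling trimmed):
 *
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_LIMIT);
 *	if (ret)
 *		return ret;	(space could not be reserved)
 *	... use the reservation ...
 *	btrfs_block_rsv_release(root, rsv, num_bytes);
 */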
int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor);
void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes);
int btrfs_inc_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
				   u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type);
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);

int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info);
int __get_raid_index(u64 flags);
int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
void check_system_chunk(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			const u64 type);

/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot);
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
	BTRFS_COMPARE_TREE_DELETED,
	BTRFS_COMPARE_TREE_CHANGED,
	BTRFS_COMPARE_TREE_SAME,
};
typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root,
				  struct btrfs_root *right_root,
				  struct btrfs_path *left_path,
				  struct btrfs_path *right_path,
				  struct btrfs_key *key,
				  enum btrfs_compare_tree_result result,
				  void *ctx);
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t cb, void *ctx);
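/*
 * Example: btrfs_compare_trees() walks two trees and reports each differing
 * key to the btrfs_changed_cb_t callback. A minimal, illustrative callback
 * (names and the pr_debug message are made up) might look like:
 *
 *	static int changed_cb(struct btrfs_root *left_root,
 *			      struct btrfs_root *right_root,
 *			      struct btrfs_path *left_path,
 *			      struct btrfs_path *right_path,
 *			      struct btrfs_key *key,
 *			      enum btrfs_compare_tree_result result,
 *			      void *ctx)
 *	{
 *		if (result == BTRFS_COMPARE_TREE_NEW)
 *			pr_debug("new key %llu %u %llu\n", key->objectid,
 *				 key->type, key->offset);
 *		return 0;
 *	}
 *
 *	ret = btrfs_compare_trees(left_root, right_root, changed_cb, ctx);
 */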
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf);
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size);
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
			 u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       struct btrfs_key *key, struct btrfs_path *p,
			       int find_higher, int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_clear_path_blocking(struct btrfs_path *p,
			       struct extent_buffer *held, int held_rw);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}

void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size, int nr);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_key *key,
					  u32 data_size)
{
	return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1);
}

int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);
static inline int btrfs_next_old_item(struct btrfs_root *root,
				      struct btrfs_path *p, u64 time_seq)
{
	++p->slots[0];
	if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
		return btrfs_next_old_leaf(root, p, time_seq);
	return 0;
}
static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
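/*
 * Example: the usual read-only lookup pattern with these helpers, as an
 * illustrative sketch (key values made up, error handling trimmed).
 * btrfs_search_slot() positions path->slots[0] at the requested key,
 * btrfs_item_ptr() casts the item data, and btrfs_next_item() /
 * btrfs_next_leaf() advance the cursor:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		struct btrfs_inode_item *ii;
 *
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *		... read fields via the btrfs_inode_*() accessors ...
 *	}
 *	btrfs_release_path(path);
 *	btrfs_free_path(path);
 */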
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
				     struct btrfs_block_rsv *block_rsv,
				     int update_ref, int for_reloc);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent);
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
	/*
	 * Get synced with close_ctree()
	 */
	smp_mb();
	return fs_info->closing;
}

/*
 * If we remount the fs read-only or unmount it, the cleaner needn't do
 * anything except sleep. This function checks for that state.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
{
	return (root->fs_info->sb->s_flags & MS_RDONLY ||
		btrfs_fs_closing(root->fs_info));
}

static inline void free_fs_info(struct btrfs_fs_info *fs_info)
{
	kfree(fs_info->balance_ctl);
	kfree(fs_info->delayed_root);
	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	kfree(fs_info->csum_root);
	kfree(fs_info->quota_root);
	kfree(fs_info->uuid_root);
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	security_free_mnt_opts(&fs_info->security_opts);
	kfree(fs_info);
}

/* tree mod log functions from ctree.c */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem);
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem);
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);

/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
			struct btrfs_path *path,
			u64 root_id, u64 ref_id);
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
		       struct btrfs_root *tree_root,
		       u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
		       const char *name, int name_len);
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
		       struct btrfs_root *tree_root,
		       u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
		       const char *name, int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_key *key);
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_root_item
		      *item);
int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key,
				   struct btrfs_root_item *item);
int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
		    struct btrfs_path *path, struct btrfs_root_item *root_item,
		    struct btrfs_key *root_key);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
void btrfs_set_root_node(struct btrfs_root_item *item,
			 struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
void btrfs_update_root_times(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root);

/* uuid-tree.c */
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
			u64 subid);
int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
			u64 subid);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
			    int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
					      u64));

/* dir-item.c */
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
				   const char *name, int name_len);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, const char *name,
			  int name_len, struct inode *dir,
			  struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path, u64 dir,
					     const char *name, int name_len,
					     int mod);
struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path, u64 dir,
			    u64 objectid, const char *name, int name_len,
			    int mod);
struct btrfs_dir_item *
btrfs_search_dir_index_item(struct btrfs_root *root,
			    struct btrfs_path *path, u64 dirid,
			    const char *name, int name_len);
int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path,
			      struct btrfs_dir_item *di);
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path, u64 objectid,
			    const char *name, u16 name_len,
			    const void *data, u16 data_len);
struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 dir,
					  const char *name, u16 name_len,
					  int mod);
int verify_dir_item(struct btrfs_root *root,
		    struct extent_buffer *leaf,
		    struct btrfs_dir_item *dir_item);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
						 struct btrfs_path *path,
						 const char *name,
						 int name_len);

/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 offset);
int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 offset);
int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);

/* inode-item.c */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   const char *name, int name_len,
			   u64 inode_objectid, u64 ref_objectid, u64 index);
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			const char *name, int name_len,
			u64 inode_objectid, u64 ref_objectid, u64 *index);
int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
		       *root, struct btrfs_path *path,
		       struct btrfs_key *location, int mod);

struct btrfs_inode_extref *
btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path,
			  const char *name, int name_len,
			  u64 inode_objectid, u64 ref_objectid, int ins_len,
			  int cow);

int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
				   u64 ref_objectid, const char *name,
				   int name_len,
				   struct btrfs_inode_extref **extref_ret);

/* file-item.c */
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len);
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct bio *bio, u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding);
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 bytenr, int mod);
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit);
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em);

/* inode.c */
struct btrfs_delalloc_work {
	struct inode *inode;
	int wait;
	int delay_iput;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};

struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
						      int wait, int delay_iput);
void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);
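/*
 * Example: each btrfs_delalloc_work describes one inode whose delalloc pages
 * should be flushed. A simplified, illustrative sketch of the usual pattern
 * (btrfs_queue_work() and the fs_info->flush_workers queue are assumed from
 * async-thread.h and struct btrfs_fs_info; error handling trimmed):
 *
 *	work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
 *	if (!work)
 *		return -ENOMEM;
 *	list_add_tail(&work->list, &works);
 *	btrfs_queue_work(root->fs_info->flush_workers, &work->work);
 *	...
 *	btrfs_wait_and_free_delalloc_work(work);   (waits on ->completion)
 */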
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes);

/* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
#if defined(ClearPageFsMisc) && !defined(ClearPageChecked)
#define ClearPageChecked ClearPageFsMisc
#define SetPageChecked SetPageFsMisc
#define PageChecked PageFsMisc
#endif

/* This forces readahead on a given range of bytes in an inode */
static inline void btrfs_force_ra(struct address_space *mapping,
				  struct file_ra_state *ra, struct file *file,
				  pgoff_t offset, unsigned long req_size)
{
	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
}

struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len);
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index);
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len);
int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
			int front);
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode, u64 new_size,
			       u32 min_type);

int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
			       int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     struct btrfs_root *parent_root,
			     u64 new_dirid);
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int btrfs_init_cachep(void);
void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 end,
				    int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint);
int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint);
int btrfs_inode_check_errors(struct inode *inode);
extern const struct dentry_operations btrfs_dentry_operations;
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode);
#endif

/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
void btrfs_update_iflags(struct inode *inode);
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
int btrfs_is_empty_uuid(u8 *uuid);
int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_pages);
void btrfs_get_block_group_info(struct list_head *groups_list,
				struct btrfs_ioctl_space_info *space);
void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
			       struct btrfs_ioctl_balance_args *bargs);

/* file.c */
int btrfs_auto_defrag_init(void);
void btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned);
extern const struct file_operations btrfs_file_operations;
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted);
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);

/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root);

/* sysfs.c */
int btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info);

/* xattr.c */
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);

/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options);
int btrfs_sync_fs(struct super_block *sb, int wait);

#ifdef CONFIG_PRINTK
__printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
#else
static inline __printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
}
#endif

#define btrfs_emerg(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_INFO fmt, ##args)
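/*
 * Example: the wrappers above just prepend the printk level to the format
 * string, so callers pass fs_info plus an ordinary format (the message text
 * below is illustrative only):
 *
 *	btrfs_info(fs_info, "found %llu extents", nr_extents);
 *	btrfs_warn(fs_info, "csum failed ino %llu off %llu", ino, off);
 */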
#ifdef DEBUG
#define btrfs_debug(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
#else
#define btrfs_debug(fs_info, fmt, args...) \
	no_printk(KERN_DEBUG fmt, ##args)
#endif

#ifdef CONFIG_BTRFS_ASSERT

__cold
static inline void assfail(char *expr, char *file, int line)
{
	pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
	       expr, file, line);
	BUG();
}

#define ASSERT(expr)	\
	(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#else
#define ASSERT(expr)	((void)0)
#endif

#define btrfs_assert()
__printf(5, 6)
__cold
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
		       unsigned int line, int errno, const char *fmt, ...);

const char *btrfs_decode_error(int errno);

__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, const char *function,
			       unsigned int line, int errno);

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
					   u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info, "setting %llu feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
{
	struct btrfs_super_block *disk_super;
	disk_super = fs_info->super_copy;
	return !!(btrfs_super_incompat_flags(disk_super) & flag);
}

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, that way the exact line number is reported.
 */
#define btrfs_abort_transaction(trans, root, errno)		\
do {								\
	/* Report first abort since mount */			\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
			&((root)->fs_info->fs_state))) {	\
		WARN(1, KERN_DEBUG				\
		"BTRFS: Transaction aborted (error %d)\n",	\
		(errno));					\
	}							\
	__btrfs_abort_transaction((trans), (root), __func__,	\
				  __LINE__, (errno));		\
} while (0)

#define btrfs_std_error(fs_info, errno)				\
do {								\
	if ((errno))						\
		__btrfs_std_error((fs_info), __func__,		\
				  __LINE__, (errno), NULL);	\
} while (0)

#define btrfs_error(fs_info, errno, fmt, args...)		\
do {								\
	__btrfs_std_error((fs_info), __func__, __LINE__,	\
			  (errno), fmt, ##args);		\
} while (0)
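/*
 * Example: a typical error path using the helpers above, as an illustrative
 * sketch (the update call is just a stand-in for whatever operation failed
 * inside the transaction):
 *
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, root, ret);
 *		goto out;
 *	}
 */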
__printf(5, 6)
__cold
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
		   unsigned int line, int errno, const char *fmt, ...);

/*
 * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
 * will panic(). Otherwise we BUG() here.
 */
#define btrfs_panic(fs_info, errno, fmt, args...)			\
do {									\
	__btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args);	\
	BUG();								\
} while (0)

/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
int btrfs_init_acl(struct btrfs_trans_handle *trans,
		   struct inode *inode, struct inode *dir);
#else
#define btrfs_get_acl NULL
#define btrfs_set_acl NULL
static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
				 struct inode *inode, struct inode *dir)
{
	return 0;
}
#endif

/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
int btrfs_recover_relocation(struct btrfs_root *root);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending);

/* scrub.c */
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_root *root);
void btrfs_scrub_continue(struct btrfs_root *root);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
			   struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress);

/* dev-replace.c */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);

static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
{
	btrfs_bio_counter_sub(fs_info, 1);
}

/* reada.c */
struct reada_control {
	struct btrfs_root	*root;		/* tree to prefetch */
	struct btrfs_key	key_start;
	struct btrfs_key	key_end;	/* exclusive */
	atomic_t		elems;
	struct kref		refcnt;
	wait_queue_head_t	wait;
};
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			      struct btrfs_key *start, struct btrfs_key *end);
int btrfs_reada_wait(void *handle);
void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err);
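/*
 * Example: readahead over a key range is fire-and-forget; the caller either
 * waits for the prefetch to finish or detaches from it. Illustrative sketch
 * (the key range below is made up):
 *
 *	struct reada_control *rc;
 *	struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key end = { .objectid = (u64)-1, .type = (u8)-1,
 *				 .offset = (u64)-1 };
 *
 *	rc = btrfs_reada_add(root, &start, &end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 */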
static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	     !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}

static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
{
	return signal_pending(current);
}

/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode);
#endif

static inline int btrfs_test_is_dummy_root(struct btrfs_root *root)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
		return 1;
#endif
	return 0;
}

#endif