/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H

/*
 * bcachefs on disk data structures
 *
 * OVERVIEW:
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 *  - superblock
 *  - journal
 *  - btree
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem or prior to reading the journal/btree
 * roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the
 * first entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length
 * (and keys effectively are variable length too, due to packing) we can't do
 * random access without building up additional in memory tables in the btree
 * node read path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case,
 * the bkey_format in that node is used to unpack it. Packed bkeys mean that
 * we can be generous with field sizes in the common part of the key format
 * (64 bit inode number, 64 bit offset, 96 bit version field, etc.) for
 * negligible cost.
 */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#include "vstructs.h"

#ifdef __KERNEL__
typedef uuid_t __uuid_t;
#endif

#define BITMASK(name, type, field, offset, end)				\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
									\
static inline __u64 name(const type *k)					\
{									\
	return (k->field >> offset) & ~(~0ULL << (end - offset));	\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;		\
}

#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
static const __maybe_unused __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;\
									\
static inline __u64 name(const type *k)					\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
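/*
 * Illustrative expansion (not part of the format): given a declaration such
 * as
 *
 *	LE64_BITMASK(BCH_SB_CSUM_TYPE, struct bch_sb, flags[0], 2, 8)
 *
 * the macro generates a getter and setter operating on bits [2, 8) of the
 * little-endian field:
 *
 *	static inline __u64 BCH_SB_CSUM_TYPE(const struct bch_sb *k);
 *	static inline void SET_BCH_SB_CSUM_TYPE(struct bch_sb *k, __u64 v);
 *
 * plus BCH_SB_CSUM_TYPE_OFFSET (2), BCH_SB_CSUM_TYPE_BITS (6) and
 * BCH_SB_CSUM_TYPE_MAX (63); e.g. SET_BCH_SB_CSUM_TYPE(sb, 5) packs the
 * value 5 into those bits without disturbing the rest of flags[0].
 */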
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};

/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__aligned(4)
#endif
;

#define KEY_INODE_MAX		((__u64)~0ULL)
#define KEY_OFFSET_MAX		((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX	((__u32)~0U)
#define KEY_SIZE_MAX		((__u32)~0U)

static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
	return (struct bpos) {
		.inode		= inode,
		.offset		= offset,
		.snapshot	= snapshot,
	};
}

#define POS_MIN			SPOS(0, 0, 0)
#define POS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX		SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)	SPOS(_inode, _offset, 0)

/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __packed __aligned(4);

struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __packed __aligned(8);
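/*
 * Illustrative sketch (an assumption, not code from this file; the real
 * helpers live in bkey.h): because word order matches byte order, comparing
 * two bpos as one large integer is equivalent to comparing the fields
 * most-significant first:
 *
 *	static inline int bpos_cmp_sketch(struct bpos l, struct bpos r)
 *	{
 *		if (l.inode != r.inode)
 *			return l.inode < r.inode ? -1 : 1;
 *		if (l.offset != r.offset)
 *			return l.offset < r.offset ? -1 : 1;
 *		if (l.snapshot != r.snapshot)
 *			return l.snapshot < r.snapshot ? -1 : 1;
 *		return 0;
 *	}
 */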
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __packed __aligned(8);

typedef struct {
	__le64		lo;
	__le64		hi;
} bch_le128;

#define BKEY_U64s		(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX		U8_MAX
#define BKEY_VAL_U64s_MAX	(BKEY_U64s_MAX - BKEY_U64s)

#define KEY_PACKED_BITS_START	24

#define KEY_FORMAT_LOCAL_BTREE	0
#define KEY_FORMAT_CURRENT	1

enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};

#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})

/* bkey with inline value */
struct bkey_i {
	__u64		_data[0];

	struct bkey	k;
	struct bch_val	v;
};

#define POS_KEY(_pos)							\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= _pos,						\
})

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)						\
	struct bkey_i key; __u64 key ## _pad[pad]
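/*
 * Illustrative usage (a sketch, not code from this file): __BKEY_PADDED
 * reserves value space after the bkey_i, which is how a key with an inline
 * value is typically built on the stack:
 *
 *	struct {
 *		__BKEY_PADDED(k, 8);	// room for 8 u64s of value
 *	} tmp;
 *
 *	bkey_init(&tmp.k.k);
 *	tmp.k.k.p = POS(inode_nr, offset);	// inode_nr/offset hypothetical
 */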
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order.  Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()			\
	x(deleted,		0)		\
	x(whiteout,		1)		\
	x(error,		2)		\
	x(cookie,		3)		\
	x(hash_whiteout,	4)		\
	x(btree_ptr,		5)		\
	x(extent,		6)		\
	x(reservation,		7)		\
	x(inode,		8)		\
	x(inode_generation,	9)		\
	x(dirent,		10)		\
	x(xattr,		11)		\
	x(alloc,		12)		\
	x(quota,		13)		\
	x(stripe,		14)		\
	x(reflink_p,		15)		\
	x(reflink_v,		16)		\
	x(inline_data,		17)		\
	x(btree_ptr_v2,		18)		\
	x(indirect_inline_data,	19)		\
	x(alloc_v2,		20)		\
	x(subvolume,		21)		\
	x(snapshot,		22)		\
	x(inode_v2,		23)		\
	x(alloc_v3,		24)		\
	x(set,			25)		\
	x(lru,			26)		\
	x(alloc_v4,		27)		\
	x(backpointer,		28)		\
	x(inode_v3,		29)		\
	x(bucket_gens,		30)		\
	x(snapshot_tree,	31)		\
	x(logged_op_truncate,	32)		\
	x(logged_op_finsert,	33)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name	= nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};

struct bch_deleted {
	struct bch_val	v;
};

struct bch_whiteout {
	struct bch_val	v;
};

struct bch_error {
	struct bch_val	v;
};

struct bch_cookie {
	struct bch_val	v;
	__le64		cookie;
};

struct bch_hash_whiteout {
	struct bch_val	v;
};

struct bch_set {
	struct bch_val	v;
};
/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later
 * (or tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_crc32	- 0b1
 * bch_extent_ptr	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
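/*
 * Illustrative sketch (an assumption about the decode helper, not code from
 * this file): because the type is encoded in the position of the first set
 * bit, decoding is a find-first-set on the entry's low bits:
 *
 *	static inline unsigned extent_entry_type_sketch(__u64 low_bits)
 *	{
 *		return __builtin_ctzll(low_bits);	// index of lowest set bit
 *	}
 *
 * e.g. low bits of 0b100 decode to 2, BCH_EXTENT_ENTRY_crc64 in the enum
 * below.
 */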
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64		lo;
	__le64		hi;
} __packed __aligned(8);

#define BCH_EXTENT_ENTRY_TYPES()	\
	x(ptr,		0)		\
	x(crc32,	1)		\
	x(crc64,	2)		\
	x(crc128,	3)		\
	x(stripe_ptr,	4)		\
	x(rebalance,	5)
#define BCH_EXTENT_ENTRY_MAX	6

enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};

/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32		type:2,
			_compressed_size:7,
			_uncompressed_size:7,
			offset:7,
			_unused:1,
			csum_type:4,
			compression_type:4;
	__u32		csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32		csum;
	__u32		compression_type:4,
			csum_type:4,
			_unused:1,
			offset:7,
			_uncompressed_size:7,
			_compressed_size:7,
			type:2;
#endif
} __packed __aligned(8);

#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0

struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:3,
			_compressed_size:9,
			_uncompressed_size:9,
			offset:9,
			nonce:10,
			csum_type:4,
			compression_type:4,
			csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		csum_hi:16,
			compression_type:4,
			csum_type:4,
			nonce:10,
			offset:9,
			_uncompressed_size:9,
			_compressed_size:9,
			type:3;
#endif
	__u64		csum_lo;
} __packed __aligned(8);

#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)

struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:4,
			_compressed_size:13,
			_uncompressed_size:13,
			offset:13,
			nonce:13,
			csum_type:4,
			compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		compression_type:4,
			csum_type:4,
			nonce:13,
			offset:13,
			_uncompressed_size:13,
			_compressed_size:13,
			type:4;
#endif
	struct bch_csum	csum;
} __packed __aligned(8);

#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
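/*
 * Worked example (illustrative): sizes are stored biased by 1 so the 7/9/13
 * bit fields can represent 1..CRC*_SIZE_MAX sectors rather than 0..MAX-1;
 * an accessor would undo the bias on read - a sketch, assuming this is how
 * the unpacking helpers treat the bias:
 *
 *	static inline unsigned crc32_compressed_size(const struct bch_extent_crc32 *crc)
 *	{
 *		return crc->_compressed_size + 1;
 *	}
 */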
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:1,
			cached:1,
			unused:1,
			unwritten:1,
			offset:44, /* 8 petabytes */
			dev:8,
			gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		gen:8,
			dev:8,
			offset:44,
			unwritten:1,
			unused:1,
			cached:1,
			type:1;
#endif
} __packed __aligned(8);

struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:5,
			block:8,
			redundancy:4,
			idx:47;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		idx:47,
			redundancy:4,
			block:8,
			type:5;
#endif
};

struct bch_extent_rebalance {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:6,
			unused:34,
			compression:8, /* enum bch_compression_opt */
			target:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		target:16,
			compression:8,
			unused:34,
			type:6;
#endif
};

union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long	pad;
		unsigned long	type;
	};
#else
#error edit for your odd byteorder.
#endif

#define x(f, n) struct bch_extent_##f	f;
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};

struct bch_btree_ptr {
	struct bch_val		v;

	__u64			_data[0];
	struct bch_extent_ptr	start[];
} __packed __aligned(8);

struct bch_btree_ptr_v2 {
	struct bch_val		v;

	__u64			mem_ptr;
	__le64			seq;
	__le16			sectors_written;
	__le16			flags;
	struct bpos		min_key;
	__u64			_data[0];
	struct bch_extent_ptr	start[];
} __packed __aligned(8);

LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,	struct bch_btree_ptr_v2, flags, 0, 1);

struct bch_extent {
	struct bch_val		v;

	__u64			_data[0];
	union bch_extent_entry	start[];
} __packed __aligned(8);

struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			nr_replicas;
	__u8			pad[3];
} __packed __aligned(8);

/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
	((sizeof(struct bch_extent_crc128) +				\
	  sizeof(struct bch_extent_ptr)) / sizeof(__u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_btree_ptr_v2) +			\
	  sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
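/*
 * Worked example (arithmetic only): struct bch_extent_crc128 is 24 bytes
 * (3 u64s) and struct bch_extent_ptr is 8 bytes (1 u64), so
 * BKEY_EXTENT_PTR_U64s_MAX is 4. With BCH_REPLICAS_MAX == 4 that makes
 * BKEY_EXTENT_VAL_U64s_MAX = 1 + 4 * 5 = 21 u64s, and since struct bkey is
 * 40 bytes (BKEY_U64s == 5), BKEY_EXTENT_U64s_MAX works out to 26 u64s.
 */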
/* Inodes */

#define BLOCKDEV_INODE_MAX	4096

#define BCACHEFS_ROOT_INO	4096

struct bch_inode {
	struct bch_val	v;

	__le64		bi_hash_seed;
	__le32		bi_flags;
	__le16		bi_mode;
	__u8		fields[];
} __packed __aligned(8);

struct bch_inode_v2 {
	struct bch_val	v;

	__le64		bi_journal_seq;
	__le64		bi_hash_seed;
	__le64		bi_flags;
	__le16		bi_mode;
	__u8		fields[];
} __packed __aligned(8);

struct bch_inode_v3 {
	struct bch_val	v;

	__le64		bi_journal_seq;
	__le64		bi_hash_seed;
	__le64		bi_flags;
	__le64		bi_sectors;
	__le64		bi_size;
	__le64		bi_version;
	__u8		fields[];
} __packed __aligned(8);

#define INODEv3_FIELDS_START_INITIAL	6
#define INODEv3_FIELDS_START_CUR	(offsetof(struct bch_inode_v3, fields) / sizeof(__u64))

struct bch_inode_generation {
	struct bch_val	v;

	__le32		bi_generation;
	__le32		pad;
} __packed __aligned(8);

/*
 * bi_subvol and bi_parent_subvol are only set for subvolume roots:
 */

#define BCH_INODE_FIELDS_v2()			\
	x(bi_atime,			96)	\
	x(bi_ctime,			96)	\
	x(bi_mtime,			96)	\
	x(bi_otime,			96)	\
	x(bi_size,			64)	\
	x(bi_sectors,			64)	\
	x(bi_uid,			32)	\
	x(bi_gid,			32)	\
	x(bi_nlink,			32)	\
	x(bi_generation,		32)	\
	x(bi_dev,			32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_project,			32)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)	\
	x(bi_fields_set,		16)	\
	x(bi_dir,			64)	\
	x(bi_dir_offset,		64)	\
	x(bi_subvol,			32)	\
	x(bi_parent_subvol,		32)

#define BCH_INODE_FIELDS_v3()			\
	x(bi_atime,			96)	\
	x(bi_ctime,			96)	\
	x(bi_mtime,			96)	\
	x(bi_otime,			96)	\
	x(bi_uid,			32)	\
	x(bi_gid,			32)	\
	x(bi_nlink,			32)	\
	x(bi_generation,		32)	\
	x(bi_dev,			32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_project,			32)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)	\
	x(bi_fields_set,		16)	\
	x(bi_dir,			64)	\
	x(bi_dir_offset,		64)	\
	x(bi_subvol,			32)	\
	x(bi_parent_subvol,		32)	\
	x(bi_nocow,			8)

/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS()			\
	x(data_checksum,		8)	\
	x(compression,			8)	\
	x(project,			32)	\
	x(background_compression,	8)	\
	x(data_replicas,		8)	\
	x(promote_target,		16)	\
	x(foreground_target,		16)	\
	x(background_target,		16)	\
	x(erasure_code,			16)	\
	x(nocow,			8)

enum inode_opt_id {
#define x(name, ...)			\
	Inode_opt_##name,
	BCH_INODE_OPTS()
#undef x
	Inode_opt_nr,
};

#define BCH_INODE_FLAGS()		\
	x(sync,			0)	\
	x(immutable,		1)	\
	x(append,		2)	\
	x(nodump,		3)	\
	x(noatime,		4)	\
	x(i_size_dirty,		5)	\
	x(i_sectors_dirty,	6)	\
	x(unlinked,		7)	\
	x(backptr_untrusted,	8)

/* bits 20+ reserved for packed fields below: */

enum bch_inode_flags {
#define x(t, n)	BCH_INODE_##t = 1U << n,
	BCH_INODE_FLAGS()
#undef x
};

enum __bch_inode_flags {
#define x(t, n)	__BCH_INODE_##t = n,
	BCH_INODE_FLAGS()
#undef x
};

LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 31);
LE32_BITMASK(INODE_NEW_VARINT,	struct bch_inode, bi_flags, 31, 32);

LE64_BITMASK(INODEv2_STR_HASH,	struct bch_inode_v2, bi_flags, 20, 24);
LE64_BITMASK(INODEv2_NR_FIELDS,	struct bch_inode_v2, bi_flags, 24, 31);

LE64_BITMASK(INODEv3_STR_HASH,	struct bch_inode_v3, bi_flags, 20, 24);
LE64_BITMASK(INODEv3_NR_FIELDS,	struct bch_inode_v3, bi_flags, 24, 31);

LE64_BITMASK(INODEv3_FIELDS_START,
				struct bch_inode_v3, bi_flags, 31, 36);
LE64_BITMASK(INODEv3_MODE,	struct bch_inode_v3, bi_flags, 36, 52);
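/*
 * Illustrative usage (a sketch, not code from this file): the packed inode
 * fields are read and written through the generated accessors; e.g. the mode
 * occupies bits 36-52 of bi_flags in a v3 inode:
 *
 *	SET_INODEv3_MODE(&inode, 0100644);	// S_IFREG | 0644
 *	mode = INODEv3_MODE(&inode);
 *
 * while INODEv3_NR_FIELDS() says how many variable-length fields follow in
 * fields[].
 */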
/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This
 * also provides us with the readdir cookie posix requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */

struct bch_dirent {
	struct bch_val	v;

	/* Target inode number: */
	union {
		__le64		d_inum;
		struct {	/* DT_SUBVOL */
			__le32	d_child_subvol;
			__le32	d_parent_subvol;
		};
	};

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8		d_type;

	__u8		d_name[];
} __packed __aligned(8);

#define DT_SUBVOL	16
#define BCH_DT_MAX	17

#define BCH_NAME_MAX	512

/* Xattrs */

#define KEY_TYPE_XATTR_INDEX_USER		0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define KEY_TYPE_XATTR_INDEX_TRUSTED		3
#define KEY_TYPE_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val	v;
	__u8		x_type;
	__u8		x_name_len;
	__le16		x_val_len;
	__u8		x_name[];
} __packed __aligned(8);

/* Bucket/allocation information: */

struct bch_alloc {
	struct bch_val	v;
	__u8		fields;
	__u8		gen;
	__u8		data[];
} __packed __aligned(8);

#define BCH_ALLOC_FIELDS_V1()		\
	x(read_time,		16)	\
	x(write_time,		16)	\
	x(data_type,		8)	\
	x(dirty_sectors,	16)	\
	x(cached_sectors,	16)	\
	x(oldest_gen,		8)	\
	x(stripe,		32)	\
	x(stripe_redundancy,	8)

enum {
#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bch_alloc_v2 {
	struct bch_val	v;
	__u8		nr_fields;
	__u8		gen;
	__u8		oldest_gen;
	__u8		data_type;
	__u8		data[];
} __packed __aligned(8);

#define BCH_ALLOC_FIELDS_V2()		\
	x(read_time,		64)	\
	x(write_time,		64)	\
	x(dirty_sectors,	32)	\
	x(cached_sectors,	32)	\
	x(stripe,		32)	\
	x(stripe_redundancy,	8)

struct bch_alloc_v3 {
	struct bch_val	v;
	__le64		journal_seq;
	__le32		flags;
	__u8		nr_fields;
	__u8		gen;
	__u8		oldest_gen;
	__u8		data_type;
	__u8		data[];
} __packed __aligned(8);

LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)

struct bch_alloc_v4 {
	struct bch_val	v;
	__u64		journal_seq;
	__u32		flags;
	__u8		gen;
	__u8		oldest_gen;
	__u8		data_type;
	__u8		stripe_redundancy;
	__u32		dirty_sectors;
	__u32		cached_sectors;
	__u64		io_time[2];
	__u32		stripe;
	__u32		nr_external_backpointers;
	__u64		fragmentation_lru;
} __packed __aligned(8);

#define BCH_ALLOC_V4_U64s_V0	6
#define BCH_ALLOC_V4_U64s	(sizeof(struct bch_alloc_v4) / sizeof(__u64))

BITMASK(BCH_ALLOC_V4_NEED_DISCARD,	struct bch_alloc_v4, flags,  0,  1)
BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,	struct bch_alloc_v4, flags,  1,  2)
BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,	struct bch_alloc_v4, flags,  8, 14)

#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX	40

struct bch_backpointer {
	struct bch_val	v;
	__u8		btree_id;
	__u8		level;
	__u8		data_type;
	__u64		bucket_offset:40;
	__u32		bucket_len;
	struct bpos	pos;
} __packed __aligned(8);

#define KEY_TYPE_BUCKET_GENS_BITS	8
#define KEY_TYPE_BUCKET_GENS_NR		(1U << KEY_TYPE_BUCKET_GENS_BITS)
#define KEY_TYPE_BUCKET_GENS_MASK	(KEY_TYPE_BUCKET_GENS_NR - 1)

struct bch_bucket_gens {
	struct bch_val	v;
	u8		gens[KEY_TYPE_BUCKET_GENS_NR];
} __packed __aligned(8);
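/*
 * Worked example (a sketch, under the assumption that bucket_gens keys are
 * indexed by bucket number divided by the per-key count): each
 * bch_bucket_gens value packs the generation numbers of
 * KEY_TYPE_BUCKET_GENS_NR (256) consecutive buckets, so bucket b's gen would
 * be found at:
 *
 *	key offset:	b >> KEY_TYPE_BUCKET_GENS_BITS
 *	array index:	gens[b & KEY_TYPE_BUCKET_GENS_MASK]
 */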
/* Quotas: */

enum quota_types {
	QTYP_USR		= 0,
	QTYP_GRP		= 1,
	QTYP_PRJ		= 2,
	QTYP_NR			= 3,
};

enum quota_counters {
	Q_SPC			= 0,
	Q_INO			= 1,
	Q_COUNTERS		= 2,
};

struct bch_quota_counter {
	__le64			hardlimit;
	__le64			softlimit;
};

struct bch_quota {
	struct bch_val		v;
	struct bch_quota_counter c[Q_COUNTERS];
} __packed __aligned(8);

/* Erasure coding */

struct bch_stripe {
	struct bch_val		v;
	__le16			sectors;
	__u8			algorithm;
	__u8			nr_blocks;
	__u8			nr_redundant;

	__u8			csum_granularity_bits;
	__u8			csum_type;
	__u8			pad;

	struct bch_extent_ptr	ptrs[];
} __packed __aligned(8);

/* Reflink: */

struct bch_reflink_p {
	struct bch_val		v;
	__le64			idx;
	/*
	 * A reflink pointer might point to an indirect extent which is then
	 * later split (by copygc or rebalance). If we only pointed to part of
	 * the original indirect extent, and then one of the fragments is
	 * outside the range we point to, we'd leak a refcount: so when creating
	 * reflink pointers, we need to store pad values to remember the full
	 * range we were taking a reference on.
	 */
	__le32			front_pad;
	__le32			back_pad;
} __packed __aligned(8);
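/*
 * Illustrative scenario (an assumption about how the pads are used, hedged):
 * suppose a reflink pointer took a reference on indirect extent space
 * [idx - 16, idx + size + 8) but its live range has since been trimmed to
 * [idx, idx + size). front_pad = 16 and back_pad = 8 would record the
 * trimmed-off portions, so dropping the reflink_p can still release the
 * refcounts on the full original range even after the indirect extent has
 * been split.
 */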
struct bch_reflink_v {
	struct bch_val		v;
	__le64			refcount;
	union bch_extent_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);

struct bch_indirect_inline_data {
	struct bch_val		v;
	__le64			refcount;
	u8			data[];
};

/* Inline data */

struct bch_inline_data {
	struct bch_val		v;
	u8			data[];
};

/* Subvolumes: */

#define SUBVOL_POS_MIN		POS(0, 1)
#define SUBVOL_POS_MAX		POS(0, S32_MAX)
#define BCACHEFS_ROOT_SUBVOL	1

struct bch_subvolume {
	struct bch_val		v;
	__le32			flags;
	__le32			snapshot;
	__le64			inode;
	/*
	 * Snapshot subvolumes form a tree, separate from the snapshot nodes
	 * tree - if this subvolume is a snapshot, this is the ID of the
	 * subvolume it was created from:
	 */
	__le32			parent;
	__le32			pad;
	bch_le128		otime;
};

LE32_BITMASK(BCH_SUBVOLUME_RO,		struct bch_subvolume, flags,  0,  1)
/*
 * We need to know whether a subvolume is a snapshot so we can know whether we
 * can delete it (or whether it should just be rm -rf'd)
 */
LE32_BITMASK(BCH_SUBVOLUME_SNAP,	struct bch_subvolume, flags,  1,  2)
LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,	struct bch_subvolume, flags,  2,  3)

/* Snapshots */

struct bch_snapshot {
	struct bch_val		v;
	__le32			flags;
	__le32			parent;
	__le32			children[2];
	__le32			subvol;
	/* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
	__le32			tree;
	__le32			depth;
	__le32			skip[3];
};

LE32_BITMASK(BCH_SNAPSHOT_DELETED,	struct bch_snapshot, flags,  0,  1)

/* True if a subvolume points to this snapshot node: */
LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,	struct bch_snapshot, flags,  1,  2)

/*
 * Snapshot trees:
 *
 * The snapshot_trees btree gives us a persistent identifier for each tree of
 * bch_snapshot nodes, and allows us to record and easily find the root/master
 * subvolume that other snapshots were created from:
 */
struct bch_snapshot_tree {
	struct bch_val		v;
	__le32			master_subvol;
	__le32			root_snapshot;
};

/* LRU btree: */

struct bch_lru {
	struct bch_val		v;
	__le64			idx;
} __packed __aligned(8);

#define LRU_ID_STRIPES		(1U << 16)

/* Logged operations btree: */

struct bch_logged_op_truncate {
	struct bch_val		v;
	__le32			subvol;
	__le32			pad;
	__le64			inum;
	__le64			new_i_size;
};

enum logged_op_finsert_state {
	LOGGED_OP_FINSERT_start,
	LOGGED_OP_FINSERT_shift_extents,
	LOGGED_OP_FINSERT_finish,
};

struct bch_logged_op_finsert {
	struct bch_val		v;
	__u8			state;
	__u8			pad[3];
	__le32			subvol;
	__le64			inum;
	__le64			dst_offset;
	__le64			src_offset;
	__le64			pos;
};

/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

#define BCH_SB_FIELDS()				\
	x(journal,			0)	\
	x(members_v1,			1)	\
	x(crypt,			2)	\
	x(replicas_v0,			3)	\
	x(quota,			4)	\
	x(disk_groups,			5)	\
	x(clean,			6)	\
	x(replicas,			7)	\
	x(journal_seq_blacklist,	8)	\
	x(journal_v2,			9)	\
	x(counters,			10)	\
	x(members_v2,			11)	\
	x(errors,			12)	\
	x(ext,				13)	\
	x(downgrade,			14)

enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};

/*
 * Most superblock fields are replicated in all devices' superblocks - a few
 * are not:
 */
#define BCH_SINGLE_DEVICE_SB_FIELDS		\
	((1U << BCH_SB_FIELD_journal)|		\
	 (1U << BCH_SB_FIELD_journal_v2))
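/*
 * Illustrative sketch (hedged; the real iteration helpers are the vstruct
 * macros in vstructs.h): a field's u64s count covers the 8-byte header plus
 * its payload, so walking the variable-length fields is plain pointer
 * arithmetic:
 *
 *	static inline struct bch_sb_field *
 *	sb_field_next_sketch(struct bch_sb_field *f)
 *	{
 *		return (void *) f + __le32_to_cpu(f->u64s) * sizeof(__u64);
 *	}
 */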
/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[];
};

struct bch_sb_field_journal_v2 {
	struct bch_sb_field	field;

	struct bch_sb_field_journal_v2_entry {
		__le64		start;
		__le64		nr;
	}			d[];
};

/* BCH_SB_FIELD_members_v1: */

#define BCH_MIN_NR_NBUCKETS	(1 << 6)

#define BCH_IOPS_MEASUREMENTS()		\
	x(seqread,	0)		\
	x(seqwrite,	1)		\
	x(randread,	2)		\
	x(randwrite,	3)

enum bch_iops_measurement {
#define x(t, n) BCH_IOPS_##t = n,
	BCH_IOPS_MEASUREMENTS()
#undef x
	BCH_IOPS_NR
};

#define BCH_MEMBER_ERROR_TYPES()	\
	x(read,		0)		\
	x(write,	1)		\
	x(checksum,	2)

enum bch_member_error_type {
#define x(t, n) BCH_MEMBER_ERROR_##t = n,
	BCH_MEMBER_ERROR_TYPES()
#undef x
	BCH_MEMBER_ERROR_NR
};

struct bch_member {
	__uuid_t		uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags;
	__le32			iops[4];
	__le64			errors[BCH_MEMBER_ERROR_NR];
	__le64			errors_at_reset[BCH_MEMBER_ERROR_NR];
	__le64			errors_reset_time;
	__le64			seq;
};

#define BCH_MEMBER_V1_BYTES	56

LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags,  0,  4)
/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags, 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags, 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags, 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags, 28, 30)
LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
					struct bch_member, flags, 30, 31)

#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1], 0,  20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif

#define BCH_MEMBER_STATES()		\
	x(rw,		0)		\
	x(ro,		1)		\
	x(failed,	2)		\
	x(spare,	3)

enum bch_member_state {
#define x(t, n) BCH_MEMBER_STATE_##t = n,
	BCH_MEMBER_STATES()
#undef x
	BCH_MEMBER_STATE_NR
};

struct bch_sb_field_members_v1 {
	struct bch_sb_field	field;
	struct bch_member	_members[]; /* members are now variable size */
};

struct bch_sb_field_members_v2 {
	struct bch_sb_field	field;
	__le16			member_bytes; /* size of single member entry */
	u8			pad[6];
	struct bch_member	_members[];
};
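/*
 * Illustrative sketch (an assumption about how variable-size members are
 * addressed; the real accessor lives in the members code): because
 * member_bytes records the on-disk size of one entry, _members[] must be
 * indexed by byte offset rather than by array subscript:
 *
 *	static inline struct bch_member *
 *	members_v2_get_sketch(struct bch_sb_field_members_v2 *mi, unsigned i)
 *	{
 *		return (void *) mi->_members +
 *			i * __le16_to_cpu(mi->member_bytes);
 *	}
 */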
/* BCH_SB_FIELD_crypt: */

struct nonce {
	__le32			d[4];
};

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|	\
	 ((__u64) 'h' << 16)|((__u64) '*' << 24)|	\
	 ((__u64) '*' << 32)|((__u64) 'k' << 40)|	\
	 ((__u64) 'e' << 48)|((__u64) 'y' << 56))

struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};

/*
 * If this field is present in the superblock, it stores an encryption key
 * which is used to encrypt all other data/metadata. The key will normally be
 * encrypted with the key userspace provides, but if encryption has been turned
 * off we'll just store the master key unencrypted in the superblock so we can
 * access the previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};

LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);

/* BCH_SB_FIELD_replicas: */

#define BCH_DATA_TYPES()		\
	x(free,		0)		\
	x(sb,		1)		\
	x(journal,	2)		\
	x(btree,	3)		\
	x(user,		4)		\
	x(cached,	5)		\
	x(parity,	6)		\
	x(stripe,	7)		\
	x(need_gc_gens,	8)		\
	x(need_discard,	9)

enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
#undef x
	BCH_DATA_NR
};

static inline bool data_type_is_empty(enum bch_data_type type)
{
	switch (type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		return true;
	default:
		return false;
	}
}

static inline bool data_type_is_hidden(enum bch_data_type type)
{
	switch (type) {
	case BCH_DATA_sb:
	case BCH_DATA_journal:
		return true;
	default:
		return false;
	}
}

struct bch_replicas_entry_v0 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			devs[];
} __packed;

struct bch_sb_field_replicas_v0 {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v0 entries[];
} __packed __aligned(8);

struct bch_replicas_entry_v1 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			nr_required;
	__u8			devs[];
} __packed;

#define replicas_entry_bytes(_i)					\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)

struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v1 entries[];
} __packed __aligned(8);

/* BCH_SB_FIELD_quota: */

struct bch_sb_quota_counter {
	__le32			timelimit;
	__le32			warnlimit;
};

struct bch_sb_quota_type {
	__le64			flags;
	struct bch_sb_quota_counter c[Q_COUNTERS];
};

struct bch_sb_field_quota {
	struct bch_sb_field	field;
	struct bch_sb_quota_type q[QTYP_NR];
} __packed __aligned(8);

/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE	32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __packed __aligned(8);

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[];
} __packed __aligned(8);
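/*
 * Worked example (arithmetic only): replicas_entry_bytes() is the header
 * size plus one byte per device, so a v1 entry describing user data on
 * devices {0, 2, 3} with nr_required = 1 occupies
 * offsetof(struct bch_replicas_entry_v1, devs) + 3 = 3 + 3 = 6 bytes;
 * successive entries in bch_sb_field_replicas follow back to back at that
 * byte granularity.
 */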
/* BCH_SB_FIELD_counters */

#define BCH_PERSISTENT_COUNTERS()				\
	x(io_read,					0)	\
	x(io_write,					1)	\
	x(io_move,					2)	\
	x(bucket_invalidate,				3)	\
	x(bucket_discard,				4)	\
	x(bucket_alloc,					5)	\
	x(bucket_alloc_fail,				6)	\
	x(btree_cache_scan,				7)	\
	x(btree_cache_reap,				8)	\
	x(btree_cache_cannibalize,			9)	\
	x(btree_cache_cannibalize_lock,			10)	\
	x(btree_cache_cannibalize_lock_fail,		11)	\
	x(btree_cache_cannibalize_unlock,		12)	\
	x(btree_node_write,				13)	\
	x(btree_node_read,				14)	\
	x(btree_node_compact,				15)	\
	x(btree_node_merge,				16)	\
	x(btree_node_split,				17)	\
	x(btree_node_rewrite,				18)	\
	x(btree_node_alloc,				19)	\
	x(btree_node_free,				20)	\
	x(btree_node_set_root,				21)	\
	x(btree_path_relock_fail,			22)	\
	x(btree_path_upgrade_fail,			23)	\
	x(btree_reserve_get_fail,			24)	\
	x(journal_entry_full,				25)	\
	x(journal_full,					26)	\
	x(journal_reclaim_finish,			27)	\
	x(journal_reclaim_start,			28)	\
	x(journal_write,				29)	\
	x(read_promote,					30)	\
	x(read_bounce,					31)	\
	x(read_split,					33)	\
	x(read_retry,					32)	\
	x(read_reuse_race,				34)	\
	x(move_extent_read,				35)	\
	x(move_extent_write,				36)	\
	x(move_extent_finish,				37)	\
	x(move_extent_fail,				38)	\
	x(move_extent_start_fail,			39)	\
	x(copygc,					40)	\
	x(copygc_wait,					41)	\
	x(gc_gens_end,					42)	\
	x(gc_gens_start,				43)	\
	x(trans_blocked_journal_reclaim,		44)	\
	x(trans_restart_btree_node_reused,		45)	\
	x(trans_restart_btree_node_split,		46)	\
	x(trans_restart_fault_inject,			47)	\
	x(trans_restart_iter_upgrade,			48)	\
	x(trans_restart_journal_preres_get,		49)	\
	x(trans_restart_journal_reclaim,		50)	\
	x(trans_restart_journal_res_get,		51)	\
	x(trans_restart_key_cache_key_realloced,	52)	\
	x(trans_restart_key_cache_raced,		53)	\
	x(trans_restart_mark_replicas,			54)	\
	x(trans_restart_mem_realloced,			55)	\
	x(trans_restart_memory_allocation_failure,	56)	\
	x(trans_restart_relock,				57)	\
	x(trans_restart_relock_after_fill,		58)	\
	x(trans_restart_relock_key_cache_fill,		59)	\
	x(trans_restart_relock_next_node,		60)	\
	x(trans_restart_relock_parent_for_fill,		61)	\
	x(trans_restart_relock_path,			62)	\
	x(trans_restart_relock_path_intent,		63)	\
	x(trans_restart_too_many_iters,			64)	\
	x(trans_restart_traverse,			65)	\
	x(trans_restart_upgrade,			66)	\
	x(trans_restart_would_deadlock,			67)	\
	x(trans_restart_would_deadlock_write,		68)	\
	x(trans_restart_injected,			69)	\
	x(trans_restart_key_cache_upgrade,		70)	\
	x(trans_traverse_all,				71)	\
	x(transaction_commit,				72)	\
	x(write_super,					73)	\
	x(trans_restart_would_deadlock_recursion_limit,	74)	\
	x(trans_restart_write_buffer_flush,		75)	\
	x(trans_restart_split_race,			76)	\
	x(write_buffer_flush_slowpath,			77)	\
	x(write_buffer_flush_sync,			78)
enum bch_persistent_counters {
#define x(t, n, ...) BCH_COUNTER_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	BCH_COUNTER_NR
};

struct bch_sb_field_counters {
	struct bch_sb_field	field;
	__le64			d[];
};

/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */
struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	struct bkey_i		start[0];
	__u64			_data[];
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;
	__le64			journal_seq;

	struct jset_entry	start[0];
	__u64			_data[];
};

struct journal_seq_blacklist_entry {
	__le64			start;
	__le64			end;
};

struct bch_sb_field_journal_seq_blacklist {
	struct bch_sb_field	field;
	struct journal_seq_blacklist_entry start[];
};

struct bch_sb_field_errors {
	struct bch_sb_field	field;
	struct bch_sb_field_error_entry {
		__le64		v;
		__le64		last_error_time;
	}			entries[];
};

LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,	struct bch_sb_field_error_entry, v,  0, 16);
LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,	struct bch_sb_field_error_entry, v, 16, 64);

struct bch_sb_field_ext {
	struct bch_sb_field	field;
	__le64			recovery_passes_required[2];
	__le64			errors_silent[8];
};

struct bch_sb_field_downgrade_entry {
	__le16			version;
	__le64			recovery_passes[2];
	__le16			nr_errors;
	__le16			errors[] __counted_by(nr_errors);
} __packed __aligned(2);

struct bch_sb_field_downgrade {
	struct bch_sb_field	field;
	struct bch_sb_field_downgrade_entry entries[];
};

/* Superblock: */

/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock,
 * btree nodes, journal entries
 */
#define BCH_VERSION_MAJOR(_v)		((__u16) ((_v) >> 10))
#define BCH_VERSION_MINOR(_v)		((__u16) ((_v) & ~(~0U << 10)))
#define BCH_VERSION(_major, _minor)	(((_major) << 10)|(_minor) << 0)

/*
 * field 1:	version name
 * field 2:	BCH_VERSION(major, minor)
 * field 3:	recovery passes required on upgrade
 */
#define BCH_METADATA_VERSIONS()					\
	x(bkey_renumber,		BCH_VERSION(0, 10))	\
	x(inode_btree_change,		BCH_VERSION(0, 11))	\
	x(snapshot,			BCH_VERSION(0, 12))	\
	x(inode_backpointers,		BCH_VERSION(0, 13))	\
	x(btree_ptr_sectors_written,	BCH_VERSION(0, 14))	\
	x(snapshot_2,			BCH_VERSION(0, 15))	\
	x(reflink_p_fix,		BCH_VERSION(0, 16))	\
	x(subvol_dirent,		BCH_VERSION(0, 17))	\
	x(inode_v2,			BCH_VERSION(0, 18))	\
	x(freespace,			BCH_VERSION(0, 19))	\
	x(alloc_v4,			BCH_VERSION(0, 20))	\
	x(new_data_types,		BCH_VERSION(0, 21))	\
	x(backpointers,			BCH_VERSION(0, 22))	\
	x(inode_v3,			BCH_VERSION(0, 23))	\
	x(unwritten_extents,		BCH_VERSION(0, 24))	\
	x(bucket_gens,			BCH_VERSION(0, 25))	\
	x(lru_v2,			BCH_VERSION(0, 26))	\
	x(fragmentation_lru,		BCH_VERSION(0, 27))	\
	x(no_bps_in_alloc_keys,		BCH_VERSION(0, 28))	\
	x(snapshot_trees,		BCH_VERSION(0, 29))	\
	x(major_minor,			BCH_VERSION(1, 0))	\
	x(snapshot_skiplists,		BCH_VERSION(1, 1))	\
	x(deleted_inodes,		BCH_VERSION(1, 2))	\
	x(rebalance_work,		BCH_VERSION(1, 3))	\
	x(member_seq,			BCH_VERSION(1, 4))
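/*
 * Worked example (arithmetic only): versions pack as major << 10 | minor,
 * so BCH_VERSION(1, 4) == 1028, BCH_VERSION_MAJOR(1028) == 1 and
 * BCH_VERSION_MINOR(1028) == 4; comparing version numbers as plain integers
 * therefore orders them by (major, minor).
 */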
enum bcachefs_metadata_version {
	bcachefs_metadata_version_min = 9,
#define x(t, n)	bcachefs_metadata_version_##t = n,
	BCH_METADATA_VERSIONS()
#undef x
	bcachefs_metadata_version_max
};

static const __maybe_unused
unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;

#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)

#define BCH_SB_SECTOR			8
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */

struct bch_sb_layout {
	__uuid_t		magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __packed __aligned(8);

#define BCH_SB_LAYOUT_SECTOR	7

/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd need it for
 * @magic	- identifies as a bcachefs superblock (BCHFS_MAGIC)
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	__uuid_t		magic;
	__uuid_t		uuid;
	__uuid_t		user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[7];
	__le64			write_time;
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	struct bch_sb_field	start[0];
	__le64			_data[];
} __packed __aligned(8);
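/*
 * Worked example (derived from the field comments above): sb_max_size_bits
 * is log2 of the superblock's maximum size in 512-byte sectors, so a value
 * of 11 allows superblocks up to 512 << 11 = 1 MiB; sb_offset[] then lists
 * where each of the nr_superblocks copies lives, which is how backup
 * superblocks are found without a fixed location.
 */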
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			   DATA/META_CSUM_TYPE. Also indicates encryption
 *			   algorithm in use, if/when we get more than one
 */

LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW,		struct bch_sb, flags[4], 33, 34);
LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE,	struct bch_sb, flags[4], 34, 54);
LE64_BITMASK(BCH_SB_VERSION_UPGRADE,	struct bch_sb, flags[4], 54, 56);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
					struct bch_sb, flags[4], 60, 64);

LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
					struct bch_sb, flags[5],  0, 16);

static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
{
	return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
}

static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
	SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
	SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
}

static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
{
	return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
		(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
}

static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
}

/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_siphash
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)

#define BCH_SB_FEATURES_ALWAYS				\
	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
	 (1ULL << BCH_FEATURE_alloc_v2)|\
	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))

#define BCH_SB_FEATURES_ALL				\
	(BCH_SB_FEATURES_ALWAYS|			\
	 (1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_new_varint)|		\
	 (1ULL << BCH_FEATURE_journal_no_flush))

enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};

#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};

/* options: */

#define BCH_VERSION_UPGRADE_OPTS()	\
	x(compatible,		0)	\
	x(incompatible,		1)	\
	x(none,			2)

enum bch_version_upgrade_opts {
#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
	BCH_VERSION_UPGRADE_OPTS()
#undef x
};

#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U

#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(ro,			1)	\
	x(panic,		2)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};
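/*
 * Illustrative sketch (hedged, not code from this file): feature bits index
 * into the 128-bit features[] array, so testing one - e.g. whether reflink
 * extents may be present - looks like:
 *
 *	static inline _Bool sb_has_feature_sketch(const struct bch_sb *sb,
 *						  unsigned f)
 *	{
 *		return __le64_to_cpu(sb->features[f / 64]) & (1ULL << (f % 64));
 *	}
 *
 *	// usage: sb_has_feature_sketch(sb, BCH_FEATURE_reflink)
 */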
#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};

#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

static const __maybe_unused unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,
	[BCH_CSUM_chacha20_poly1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128:
		return true;
	default:
		return false;
	}
}

#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,			0)	\
	x(lz4,			1)	\
	x(gzip,			2)	\
	x(zstd,			3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};

/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCHFS_MAGIC							\
	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)

#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}

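/*
 * Illustrative sketch, not part of this header: because each structure's
 * magic is its per-structure constant xored with the first 64 bits of the
 * superblock UUID, a journal entry's magic field identifies both the
 * structure type and the filesystem it belongs to - entries from a
 * different filesystem fail this check. The helper name is hypothetical:
 */
static inline __maybe_unused _Bool
bch2_jset_magic_valid_example(struct bch_sb *sb, __le64 magic)
{
	return __le64_to_cpu(magic) == __jset_magic(sb);
}
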
/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)		\
	x(log,			9)		\
	x(overwrite,		10)		\
	x(write_buffer_keys,	11)

enum {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};

static inline bool jset_entry_is_key(struct jset_entry *e)
{
	switch (e->type) {
	case BCH_JSET_ENTRY_btree_keys:
	case BCH_JSET_ENTRY_btree_root:
	case BCH_JSET_ENTRY_overwrite:
	case BCH_JSET_ENTRY_write_buffer_keys:
		return true;
	}

	return false;
}

/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than
 * what made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};

#define BCH_FS_USAGE_TYPES()			\
	x(reserved,		0)		\
	x(inodes,		1)		\
	x(key_version,		2)

enum {
#define x(f, nr)	BCH_FS_USAGE_##f	= nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __packed;

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry_v1 r;
} __packed;

struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __packed;

struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __packed;

struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;
	__u32			pad;

	__le64			_buckets_ec;		/* No longer used */
	__le64			_buckets_unavailable;	/* No longer used */

	struct jset_entry_dev_usage_type d[];
};

static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}

struct jset_entry_log {
	struct jset_entry	entry;
	u8			d[];
} __packed __aligned(8);

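/*
 * Illustrative sketch, not part of this header: the d[] array in
 * jset_entry_dev_usage is sized by the entry's overall vstruct length
 * rather than by a stored count, so walking it goes through
 * jset_entry_dev_usage_nr_types(). The function below is hypothetical:
 */
static inline __maybe_unused __u64
bch2_dev_usage_total_buckets_example(struct jset_entry_dev_usage *u)
{
	__u64 sum = 0;
	unsigned i;

	for (i = 0; i < jset_entry_dev_usage_nr_types(u); i++)
		sum += __le64_to_cpu(u->d[i].buckets);
	return sum;
}
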
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	struct jset_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8

/* Btree: */

enum btree_id_flags {
	BTREE_ID_EXTENTS	= BIT(0),
	BTREE_ID_SNAPSHOTS	= BIT(1),
	BTREE_ID_SNAPSHOT_FIELD	= BIT(2),
	BTREE_ID_DATA		= BIT(3),
};

#define BCH_BTREE_IDS()							\
	x(extents,	0,	BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
	  BIT_ULL(KEY_TYPE_whiteout)|					\
	  BIT_ULL(KEY_TYPE_error)|					\
	  BIT_ULL(KEY_TYPE_cookie)|					\
	  BIT_ULL(KEY_TYPE_extent)|					\
	  BIT_ULL(KEY_TYPE_reservation)|				\
	  BIT_ULL(KEY_TYPE_reflink_p)|					\
	  BIT_ULL(KEY_TYPE_inline_data))				\
	x(inodes,	1,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|					\
	  BIT_ULL(KEY_TYPE_inode)|					\
	  BIT_ULL(KEY_TYPE_inode_v2)|					\
	  BIT_ULL(KEY_TYPE_inode_v3)|					\
	  BIT_ULL(KEY_TYPE_inode_generation))				\
	x(dirents,	2,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|					\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|				\
	  BIT_ULL(KEY_TYPE_dirent))					\
	x(xattrs,	3,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|					\
	  BIT_ULL(KEY_TYPE_cookie)|					\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|				\
	  BIT_ULL(KEY_TYPE_xattr))					\
	x(alloc,	4,	0,					\
	  BIT_ULL(KEY_TYPE_alloc)|					\
	  BIT_ULL(KEY_TYPE_alloc_v2)|					\
	  BIT_ULL(KEY_TYPE_alloc_v3)|					\
	  BIT_ULL(KEY_TYPE_alloc_v4))					\
	x(quotas,	5,	0,					\
	  BIT_ULL(KEY_TYPE_quota))					\
	x(stripes,	6,	0,					\
	  BIT_ULL(KEY_TYPE_stripe))					\
	x(reflink,	7,	BTREE_ID_EXTENTS|BTREE_ID_DATA,		\
	  BIT_ULL(KEY_TYPE_reflink_v)|					\
	  BIT_ULL(KEY_TYPE_indirect_inline_data))			\
	x(subvolumes,	8,	0,					\
	  BIT_ULL(KEY_TYPE_subvolume))					\
	x(snapshots,	9,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot))					\
	x(lru,		10,	0,					\
	  BIT_ULL(KEY_TYPE_set))					\
	x(freespace,	11,	BTREE_ID_EXTENTS,			\
	  BIT_ULL(KEY_TYPE_set))					\
	x(need_discard,	12,	0,					\
	  BIT_ULL(KEY_TYPE_set))					\
	x(backpointers,	13,	0,					\
	  BIT_ULL(KEY_TYPE_backpointer))				\
	x(bucket_gens,	14,	0,					\
	  BIT_ULL(KEY_TYPE_bucket_gens))				\
	x(snapshot_trees, 15,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot_tree))				\
	x(deleted_inodes, 16,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_set))					\
	x(logged_ops,	17,	0,					\
	  BIT_ULL(KEY_TYPE_logged_op_truncate)|				\
	  BIT_ULL(KEY_TYPE_logged_op_finsert))				\
	x(rebalance_work, 18,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))

enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U

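/*
 * Illustrative sketch, not part of this header: BCH_BTREE_IDS() carries,
 * per btree, its id, a flags word, and a bitmask of the key types legal
 * in that btree. Re-expanding the list with a different x() definition
 * yields lookup tables indexed by btree id; this hypothetical table of
 * allowed key types shows the pattern:
 */
static const __maybe_unused __u64 bch2_btree_allowed_key_types_example[] = {
#define x(name, nr, flags, keys)	[nr] = keys,
	BCH_BTREE_IDS()
#undef x
};
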
/* Btree nodes */

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	struct bkey_packed	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);

struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);

LE64_BITMASK(BTREE_NODE_ID_LO,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_ID_HI,	struct btree_node, flags,  9, 25);
/* 25-32 unused */
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);

static inline __u64 BTREE_NODE_ID(struct btree_node *n)
{
	return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
}

static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
{
	SET_BTREE_NODE_ID_LO(n, v);
	SET_BTREE_NODE_ID_HI(n, v >> 4);
}

struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);

#endif /* _BCACHEFS_FORMAT_H */