xref: /linux/fs/bcachefs/bcachefs_format.h (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_FORMAT_H
3 #define _BCACHEFS_FORMAT_H
4 
5 /*
6  * bcachefs on disk data structures
7  *
8  * OVERVIEW:
9  *
10  * There are three main types of on disk data structures in bcachefs (this is
11  * reduced from 5 in bcache)
12  *
13  *  - superblock
14  *  - journal
15  *  - btree
16  *
17  * The btree is the primary structure; most metadata exists as keys in the
18  * various btrees. There are only a small number of btrees, they're not
19  * sharded - we have one btree for extents, another for inodes, et cetera.
20  *
21  * SUPERBLOCK:
22  *
23  * The superblock contains the location of the journal, the list of devices in
24  * the filesystem, and in general any metadata we need in order to decide
25  * whether we can start a filesystem or prior to reading the journal/btree
26  * roots.
27  *
28  * The superblock is extensible, and most of the contents of the superblock are
29  * in variable length, type tagged fields; see struct bch_sb_field.
30  *
31  * Backup superblocks do not reside in a fixed location; also, superblocks do
32  * not have a fixed size. To locate backup superblocks we have struct
33  * bch_sb_layout; we store a copy of this inside every superblock, and also
34  * before the first superblock.
35  *
36  * JOURNAL:
37  *
38  * The journal primarily records btree updates in the order they occurred;
39  * journal replay consists of just iterating over all the keys in the open
40  * journal entries and re-inserting them into the btrees.
41  *
42  * The journal also contains entry types for the btree roots, and blacklisted
43  * journal sequence numbers (see journal_seq_blacklist.c).
44  *
45  * BTREE:
46  *
47  * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
48  * 128k-256k) and log structured. We use struct btree_node for writing the first
49  * entry in a given node (offset 0), and struct btree_node_entry for all
50  * subsequent writes.
51  *
52  * After the header, btree node entries contain a list of keys in sorted order.
53  * Values are stored inline with the keys; since values are variable length (and
54  * keys effectively are variable length too, due to packing) we can't do random
55  * access without building up additional in memory tables in the btree node read
56  * path.
57  *
58  * BTREE KEYS (struct bkey):
59  *
60  * The various btrees share a common format for the key - so as to avoid
61  * switching in fastpath lookup/comparison code - but define their own
62  * structures for the key values.
63  *
64  * The size of a key/value pair is stored as a u8 in units of u64s, so the max
65  * size is just under 2k. The common part also contains a type tag for the
66  * value, and a format field indicating whether the key is packed or not (and
67  * also meant to allow adding new key fields in the future, if desired).
68  *
69  * bkeys, when stored within a btree node, may also be packed. In that case, the
70  * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
71  * be generous with field sizes in the common part of the key format (64 bit
72  * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
73  */
74 
75 #include <asm/types.h>
76 #include <asm/byteorder.h>
77 #include <linux/kernel.h>
78 #include <linux/uuid.h>
79 #include "vstructs.h"
80 
81 #ifdef __KERNEL__
82 typedef uuid_t __uuid_t;
83 #endif
84 
85 #define BITMASK(name, type, field, offset, end)				\
86 static const __maybe_unused unsigned	name##_OFFSET = offset;		\
87 static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
88 									\
89 static inline __u64 name(const type *k)					\
90 {									\
91 	return (k->field >> offset) & ~(~0ULL << (end - offset));	\
92 }									\
93 									\
94 static inline void SET_##name(type *k, __u64 v)				\
95 {									\
96 	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
97 	k->field |= (v & ~(~0ULL << (end - offset))) << offset;		\
98 }
99 
100 #define LE_BITMASK(_bits, name, type, field, offset, end)		\
101 static const __maybe_unused unsigned	name##_OFFSET = offset;		\
102 static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
103 static const __maybe_unused __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;\
104 									\
105 static inline __u64 name(const type *k)					\
106 {									\
107 	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
108 		~(~0ULL << (end - offset));				\
109 }									\
110 									\
111 static inline void SET_##name(type *k, __u64 v)				\
112 {									\
113 	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
114 									\
115 	new &= ~(~(~0ULL << (end - offset)) << offset);			\
116 	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
117 	k->field = __cpu_to_le##_bits(new);				\
118 }
119 
120 #define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
121 #define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
122 #define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
123 
124 struct bkey_format {
125 	__u8		key_u64s;
126 	__u8		nr_fields;
127 	/* One unused slot for now: */
128 	__u8		bits_per_field[6];
129 	__le64		field_offset[6];
130 };
131 
132 /* Btree keys - all units are in sectors */
133 
134 struct bpos {
135 	/*
136 	 * Word order matches machine byte order - btree code treats a bpos as a
137 	 * single large integer, for search/comparison purposes
138 	 *
139 	 * Note that wherever a bpos is embedded in another on disk data
140 	 * structure, it has to be byte swabbed when reading in metadata that
141 	 * wasn't written in native endian order:
142 	 */
143 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
144 	__u32		snapshot;
145 	__u64		offset;
146 	__u64		inode;
147 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
148 	__u64		inode;
149 	__u64		offset;		/* Points to end of extent - sectors */
150 	__u32		snapshot;
151 #else
152 #error edit for your odd byteorder.
153 #endif
154 } __packed
155 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
156 __aligned(4)
157 #endif
158 ;
159 
160 #define KEY_INODE_MAX			((__u64)~0ULL)
161 #define KEY_OFFSET_MAX			((__u64)~0ULL)
162 #define KEY_SNAPSHOT_MAX		((__u32)~0U)
163 #define KEY_SIZE_MAX			((__u32)~0U)
164 
165 static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
166 {
167 	return (struct bpos) {
168 		.inode		= inode,
169 		.offset		= offset,
170 		.snapshot	= snapshot,
171 	};
172 }
173 
174 #define POS_MIN				SPOS(0, 0, 0)
175 #define POS_MAX				SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
176 #define SPOS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
177 #define POS(_inode, _offset)		SPOS(_inode, _offset, 0)
178 
179 /* Empty placeholder struct, for container_of() */
180 struct bch_val {
181 	__u64		__nothing[0];
182 };
183 
184 struct bversion {
185 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
186 	__u64		lo;
187 	__u32		hi;
188 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
189 	__u32		hi;
190 	__u64		lo;
191 #endif
192 } __packed __aligned(4);
193 
194 struct bkey {
195 	/* Size of combined key and value, in u64s */
196 	__u8		u64s;
197 
198 	/* Format of key (0 for format local to btree node) */
199 #if defined(__LITTLE_ENDIAN_BITFIELD)
200 	__u8		format:7,
201 			needs_whiteout:1;
202 #elif defined (__BIG_ENDIAN_BITFIELD)
203 	__u8		needs_whiteout:1,
204 			format:7;
205 #else
206 #error edit for your odd byteorder.
207 #endif
208 
209 	/* Type of the value */
210 	__u8		type;
211 
212 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
213 	__u8		pad[1];
214 
215 	struct bversion	version;
216 	__u32		size;		/* extent size, in sectors */
217 	struct bpos	p;
218 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
219 	struct bpos	p;
220 	__u32		size;		/* extent size, in sectors */
221 	struct bversion	version;
222 
223 	__u8		pad[1];
224 #endif
225 } __packed __aligned(8);
226 
227 struct bkey_packed {
228 	__u64		_data[0];
229 
230 	/* Size of combined key and value, in u64s */
231 	__u8		u64s;
232 
233 	/* Format of key (0 for format local to btree node) */
234 
235 	/*
236 	 * XXX: next incompat on disk format change, switch format and
237 	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
238 	 * bits of the bitfield
239 	 */
240 #if defined(__LITTLE_ENDIAN_BITFIELD)
241 	__u8		format:7,
242 			needs_whiteout:1;
243 #elif defined (__BIG_ENDIAN_BITFIELD)
244 	__u8		needs_whiteout:1,
245 			format:7;
246 #endif
247 
248 	/* Type of the value */
249 	__u8		type;
250 	__u8		key_start[0];
251 
252 	/*
253 	 * We copy bkeys with struct assignment in various places, and while
254 	 * that shouldn't be done with packed bkeys we can't disallow it in C,
255 	 * and it's legal to cast a bkey to a bkey_packed  - so padding it out
256 	 * to the same size as struct bkey should hopefully be safest.
257 	 */
258 	__u8		pad[sizeof(struct bkey) - 3];
259 } __packed __aligned(8);
260 
261 typedef struct {
262 	__le64			lo;
263 	__le64			hi;
264 } bch_le128;
265 
266 #define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
267 #define BKEY_U64s_MAX			U8_MAX
268 #define BKEY_VAL_U64s_MAX		(BKEY_U64s_MAX - BKEY_U64s)
269 
270 #define KEY_PACKED_BITS_START		24
271 
272 #define KEY_FORMAT_LOCAL_BTREE		0
273 #define KEY_FORMAT_CURRENT		1
274 
275 enum bch_bkey_fields {
276 	BKEY_FIELD_INODE,
277 	BKEY_FIELD_OFFSET,
278 	BKEY_FIELD_SNAPSHOT,
279 	BKEY_FIELD_SIZE,
280 	BKEY_FIELD_VERSION_HI,
281 	BKEY_FIELD_VERSION_LO,
282 	BKEY_NR_FIELDS,
283 };
284 
285 #define bkey_format_field(name, field)					\
286 	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
287 
288 #define BKEY_FORMAT_CURRENT						\
289 ((struct bkey_format) {							\
290 	.key_u64s	= BKEY_U64s,					\
291 	.nr_fields	= BKEY_NR_FIELDS,				\
292 	.bits_per_field = {						\
293 		bkey_format_field(INODE,	p.inode),		\
294 		bkey_format_field(OFFSET,	p.offset),		\
295 		bkey_format_field(SNAPSHOT,	p.snapshot),		\
296 		bkey_format_field(SIZE,		size),			\
297 		bkey_format_field(VERSION_HI,	version.hi),		\
298 		bkey_format_field(VERSION_LO,	version.lo),		\
299 	},								\
300 })
301 
302 /* bkey with inline value */
303 struct bkey_i {
304 	__u64			_data[0];
305 
306 	struct bkey	k;
307 	struct bch_val	v;
308 };
309 
310 #define KEY(_inode, _offset, _size)					\
311 ((struct bkey) {							\
312 	.u64s		= BKEY_U64s,					\
313 	.format		= KEY_FORMAT_CURRENT,				\
314 	.p		= POS(_inode, _offset),				\
315 	.size		= _size,					\
316 })
317 
318 static inline void bkey_init(struct bkey *k)
319 {
320 	*k = KEY(0, 0, 0);
321 }
322 
323 #define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))
324 
325 #define __BKEY_PADDED(key, pad)					\
326 	struct bkey_i key; __u64 key ## _pad[pad]
327 
328 /*
329  * - DELETED keys are used internally to mark keys that should be ignored but
330  *   override keys in composition order.  Their version number is ignored.
331  *
332  * - DISCARDED keys indicate that the data is all 0s because it has been
333  *   discarded. DISCARDs may have a version; if the version is nonzero the key
334  *   will be persistent, otherwise the key will be dropped whenever the btree
335  *   node is rewritten (like DELETED keys).
336  *
337  * - ERROR: any read of the data returns a read error, as the data was lost due
338  *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
339  *   by new writes or cluster-wide GC. Node repair can also overwrite them with
340  *   the same or a more recent version number, but not with an older version
341  *   number.
342  *
343  * - WHITEOUT: for hash table btrees
344  */
345 #define BCH_BKEY_TYPES()				\
346 	x(deleted,		0)			\
347 	x(whiteout,		1)			\
348 	x(error,		2)			\
349 	x(cookie,		3)			\
350 	x(hash_whiteout,	4)			\
351 	x(btree_ptr,		5)			\
352 	x(extent,		6)			\
353 	x(reservation,		7)			\
354 	x(inode,		8)			\
355 	x(inode_generation,	9)			\
356 	x(dirent,		10)			\
357 	x(xattr,		11)			\
358 	x(alloc,		12)			\
359 	x(quota,		13)			\
360 	x(stripe,		14)			\
361 	x(reflink_p,		15)			\
362 	x(reflink_v,		16)			\
363 	x(inline_data,		17)			\
364 	x(btree_ptr_v2,		18)			\
365 	x(indirect_inline_data,	19)			\
366 	x(alloc_v2,		20)			\
367 	x(subvolume,		21)			\
368 	x(snapshot,		22)			\
369 	x(inode_v2,		23)			\
370 	x(alloc_v3,		24)			\
371 	x(set,			25)			\
372 	x(lru,			26)			\
373 	x(alloc_v4,		27)			\
374 	x(backpointer,		28)			\
375 	x(inode_v3,		29)			\
376 	x(bucket_gens,		30)			\
377 	x(snapshot_tree,	31)			\
378 	x(logged_op_truncate,	32)			\
379 	x(logged_op_finsert,	33)
380 
381 enum bch_bkey_type {
382 #define x(name, nr) KEY_TYPE_##name	= nr,
383 	BCH_BKEY_TYPES()
384 #undef x
385 	KEY_TYPE_MAX,
386 };
387 
388 struct bch_deleted {
389 	struct bch_val		v;
390 };
391 
392 struct bch_whiteout {
393 	struct bch_val		v;
394 };
395 
396 struct bch_error {
397 	struct bch_val		v;
398 };
399 
400 struct bch_cookie {
401 	struct bch_val		v;
402 	__le64			cookie;
403 };
404 
405 struct bch_hash_whiteout {
406 	struct bch_val		v;
407 };
408 
409 struct bch_set {
410 	struct bch_val		v;
411 };
412 
413 /* Extents */
414 
415 /*
416  * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
417  * preceded by checksum/compression information (bch_extent_crc32 or
418  * bch_extent_crc64).
419  *
420  * One major determining factor in the format of extents is how we handle and
421  * represent extents that have been partially overwritten and thus trimmed:
422  *
423  * If an extent is not checksummed or compressed, when the extent is trimmed we
424  * don't have to remember the extent we originally allocated and wrote: we can
425  * merely adjust ptr->offset to point to the start of the data that is currently
426  * live. The size field in struct bkey records the current (live) size of the
427  * extent, and is also used to mean "size of region on disk that we point to" in
428  * this case.
429  *
430  * Thus an extent that is not checksummed or compressed will consist only of a
431  * list of bch_extent_ptrs, with none of the fields in
432  * bch_extent_crc32/bch_extent_crc64.
433  *
434  * When an extent is checksummed or compressed, it's not possible to read only
435  * the data that is currently live: we have to read the entire extent that was
436  * originally written, and then return only the part of the extent that is
437  * currently live.
438  *
439  * Thus, in addition to the current size of the extent in struct bkey, we need
440  * to store the size of the originally allocated space - this is the
441  * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
442  * when the extent is trimmed, instead of modifying the offset field of the
443  * pointer, we keep a second smaller offset field - "offset into the original
444  * extent of the currently live region".
445  *
446  * The other major determining factor is replication and data migration:
447  *
448  * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
449  * write, we will initially write all the replicas in the same format, with the
450  * same checksum type and compression format - however, when copygc runs later (or
451  * tiering/cache promotion, anything that moves data), it is not in general
452  * going to rewrite all the pointers at once - one of the replicas may be in a
453  * bucket on one device that has very little fragmentation while another lives
454  * in a bucket that has become heavily fragmented, and thus is being rewritten
455  * sooner than the rest.
456  *
457  * Thus it will only move a subset of the pointers (or in the case of
458  * tiering/cache promotion perhaps add a single pointer without dropping any
459  * current pointers), and if the extent has been partially overwritten it must
460  * write only the currently live portion (or copygc would not be able to reduce
461  * fragmentation!) - which necessitates a different bch_extent_crc format for
462  * the new pointer.
463  *
464  * But in the interests of space efficiency, we don't want to store one
465  * bch_extent_crc for each pointer if we don't have to.
466  *
467  * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
468  * bch_extent_ptrs appended arbitrarily one after the other. We determine the
469  * type of a given entry with a scheme similar to utf8 (except we're encoding a
470  * type, not a size), encoding the type in the position of the first set bit:
471  *
472  * bch_extent_crc32	- 0b1
473  * bch_extent_ptr	- 0b10
474  * bch_extent_crc64	- 0b100
475  *
476  * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
477  * bch_extent_crc64 is the least constrained).
478  *
479  * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
480  * until the next bch_extent_crc32/64.
481  *
482  * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
483  * is neither checksummed nor compressed.
484  */
485 
486 /* 128 bits, sufficient for cryptographic MACs: */
487 struct bch_csum {
488 	__le64			lo;
489 	__le64			hi;
490 } __packed __aligned(8);
491 
492 #define BCH_EXTENT_ENTRY_TYPES()		\
493 	x(ptr,			0)		\
494 	x(crc32,		1)		\
495 	x(crc64,		2)		\
496 	x(crc128,		3)		\
497 	x(stripe_ptr,		4)		\
498 	x(rebalance,		5)
499 #define BCH_EXTENT_ENTRY_MAX	6
500 
501 enum bch_extent_entry_type {
502 #define x(f, n) BCH_EXTENT_ENTRY_##f = n,
503 	BCH_EXTENT_ENTRY_TYPES()
504 #undef x
505 };
506 
507 /* Compressed/uncompressed size are stored biased by 1: */
508 struct bch_extent_crc32 {
509 #if defined(__LITTLE_ENDIAN_BITFIELD)
510 	__u32			type:2,
511 				_compressed_size:7,
512 				_uncompressed_size:7,
513 				offset:7,
514 				_unused:1,
515 				csum_type:4,
516 				compression_type:4;
517 	__u32			csum;
518 #elif defined (__BIG_ENDIAN_BITFIELD)
519 	__u32			csum;
520 	__u32			compression_type:4,
521 				csum_type:4,
522 				_unused:1,
523 				offset:7,
524 				_uncompressed_size:7,
525 				_compressed_size:7,
526 				type:2;
527 #endif
528 } __packed __aligned(8);
529 
530 #define CRC32_SIZE_MAX		(1U << 7)
531 #define CRC32_NONCE_MAX		0
532 
533 struct bch_extent_crc64 {
534 #if defined(__LITTLE_ENDIAN_BITFIELD)
535 	__u64			type:3,
536 				_compressed_size:9,
537 				_uncompressed_size:9,
538 				offset:9,
539 				nonce:10,
540 				csum_type:4,
541 				compression_type:4,
542 				csum_hi:16;
543 #elif defined (__BIG_ENDIAN_BITFIELD)
544 	__u64			csum_hi:16,
545 				compression_type:4,
546 				csum_type:4,
547 				nonce:10,
548 				offset:9,
549 				_uncompressed_size:9,
550 				_compressed_size:9,
551 				type:3;
552 #endif
553 	__u64			csum_lo;
554 } __packed __aligned(8);
555 
556 #define CRC64_SIZE_MAX		(1U << 9)
557 #define CRC64_NONCE_MAX		((1U << 10) - 1)
558 
559 struct bch_extent_crc128 {
560 #if defined(__LITTLE_ENDIAN_BITFIELD)
561 	__u64			type:4,
562 				_compressed_size:13,
563 				_uncompressed_size:13,
564 				offset:13,
565 				nonce:13,
566 				csum_type:4,
567 				compression_type:4;
568 #elif defined (__BIG_ENDIAN_BITFIELD)
569 	__u64			compression_type:4,
570 				csum_type:4,
571 				nonce:13,
572 				offset:13,
573 				_uncompressed_size:13,
574 				_compressed_size:13,
575 				type:4;
576 #endif
577 	struct bch_csum		csum;
578 } __packed __aligned(8);
579 
580 #define CRC128_SIZE_MAX		(1U << 13)
581 #define CRC128_NONCE_MAX	((1U << 13) - 1)
582 
583 /*
584  * @reservation - pointer hasn't been written to, just reserved
585  */
586 struct bch_extent_ptr {
587 #if defined(__LITTLE_ENDIAN_BITFIELD)
588 	__u64			type:1,
589 				cached:1,
590 				unused:1,
591 				unwritten:1,
592 				offset:44, /* 8 petabytes */
593 				dev:8,
594 				gen:8;
595 #elif defined (__BIG_ENDIAN_BITFIELD)
596 	__u64			gen:8,
597 				dev:8,
598 				offset:44,
599 				unwritten:1,
600 				unused:1,
601 				cached:1,
602 				type:1;
603 #endif
604 } __packed __aligned(8);
605 
606 struct bch_extent_stripe_ptr {
607 #if defined(__LITTLE_ENDIAN_BITFIELD)
608 	__u64			type:5,
609 				block:8,
610 				redundancy:4,
611 				idx:47;
612 #elif defined (__BIG_ENDIAN_BITFIELD)
613 	__u64			idx:47,
614 				redundancy:4,
615 				block:8,
616 				type:5;
617 #endif
618 };
619 
620 struct bch_extent_rebalance {
621 #if defined(__LITTLE_ENDIAN_BITFIELD)
622 	__u64			type:6,
623 				unused:34,
624 				compression:8, /* enum bch_compression_opt */
625 				target:16;
626 #elif defined (__BIG_ENDIAN_BITFIELD)
627 	__u64			target:16,
628 				compression:8,
629 				unused:34,
630 				type:6;
631 #endif
632 };
633 
634 union bch_extent_entry {
635 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ||  __BITS_PER_LONG == 64
636 	unsigned long			type;
637 #elif __BITS_PER_LONG == 32
638 	struct {
639 		unsigned long		pad;
640 		unsigned long		type;
641 	};
642 #else
643 #error edit for your odd byteorder.
644 #endif
645 
646 #define x(f, n) struct bch_extent_##f	f;
647 	BCH_EXTENT_ENTRY_TYPES()
648 #undef x
649 };
650 
651 struct bch_btree_ptr {
652 	struct bch_val		v;
653 
654 	__u64			_data[0];
655 	struct bch_extent_ptr	start[];
656 } __packed __aligned(8);
657 
658 struct bch_btree_ptr_v2 {
659 	struct bch_val		v;
660 
661 	__u64			mem_ptr;
662 	__le64			seq;
663 	__le16			sectors_written;
664 	__le16			flags;
665 	struct bpos		min_key;
666 	__u64			_data[0];
667 	struct bch_extent_ptr	start[];
668 } __packed __aligned(8);
669 
670 LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,	struct bch_btree_ptr_v2, flags, 0, 1);
671 
672 struct bch_extent {
673 	struct bch_val		v;
674 
675 	__u64			_data[0];
676 	union bch_extent_entry	start[];
677 } __packed __aligned(8);
678 
679 struct bch_reservation {
680 	struct bch_val		v;
681 
682 	__le32			generation;
683 	__u8			nr_replicas;
684 	__u8			pad[3];
685 } __packed __aligned(8);
686 
687 /* Maximum size (in u64s) a single pointer could be: */
688 #define BKEY_EXTENT_PTR_U64s_MAX\
689 	((sizeof(struct bch_extent_crc128) +			\
690 	  sizeof(struct bch_extent_ptr)) / sizeof(__u64))
691 
692 /* Maximum possible size of an entire extent value: */
693 #define BKEY_EXTENT_VAL_U64s_MAX				\
694 	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
695 
696 /* * Maximum possible size of an entire extent, key + value: */
697 #define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
698 
699 /* Btree pointers don't carry around checksums: */
700 #define BKEY_BTREE_PTR_VAL_U64s_MAX				\
701 	((sizeof(struct bch_btree_ptr_v2) +			\
702 	  sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
703 #define BKEY_BTREE_PTR_U64s_MAX					\
704 	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
705 
706 /* Inodes */
707 
708 #define BLOCKDEV_INODE_MAX	4096
709 
710 #define BCACHEFS_ROOT_INO	4096
711 
712 struct bch_inode {
713 	struct bch_val		v;
714 
715 	__le64			bi_hash_seed;
716 	__le32			bi_flags;
717 	__le16			bi_mode;
718 	__u8			fields[];
719 } __packed __aligned(8);
720 
721 struct bch_inode_v2 {
722 	struct bch_val		v;
723 
724 	__le64			bi_journal_seq;
725 	__le64			bi_hash_seed;
726 	__le64			bi_flags;
727 	__le16			bi_mode;
728 	__u8			fields[];
729 } __packed __aligned(8);
730 
731 struct bch_inode_v3 {
732 	struct bch_val		v;
733 
734 	__le64			bi_journal_seq;
735 	__le64			bi_hash_seed;
736 	__le64			bi_flags;
737 	__le64			bi_sectors;
738 	__le64			bi_size;
739 	__le64			bi_version;
740 	__u8			fields[];
741 } __packed __aligned(8);
742 
743 #define INODEv3_FIELDS_START_INITIAL	6
744 #define INODEv3_FIELDS_START_CUR	(offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
745 
746 struct bch_inode_generation {
747 	struct bch_val		v;
748 
749 	__le32			bi_generation;
750 	__le32			pad;
751 } __packed __aligned(8);
752 
753 /*
754  * bi_subvol and bi_parent_subvol are only set for subvolume roots:
755  */
756 
757 #define BCH_INODE_FIELDS_v2()			\
758 	x(bi_atime,			96)	\
759 	x(bi_ctime,			96)	\
760 	x(bi_mtime,			96)	\
761 	x(bi_otime,			96)	\
762 	x(bi_size,			64)	\
763 	x(bi_sectors,			64)	\
764 	x(bi_uid,			32)	\
765 	x(bi_gid,			32)	\
766 	x(bi_nlink,			32)	\
767 	x(bi_generation,		32)	\
768 	x(bi_dev,			32)	\
769 	x(bi_data_checksum,		8)	\
770 	x(bi_compression,		8)	\
771 	x(bi_project,			32)	\
772 	x(bi_background_compression,	8)	\
773 	x(bi_data_replicas,		8)	\
774 	x(bi_promote_target,		16)	\
775 	x(bi_foreground_target,		16)	\
776 	x(bi_background_target,		16)	\
777 	x(bi_erasure_code,		16)	\
778 	x(bi_fields_set,		16)	\
779 	x(bi_dir,			64)	\
780 	x(bi_dir_offset,		64)	\
781 	x(bi_subvol,			32)	\
782 	x(bi_parent_subvol,		32)
783 
784 #define BCH_INODE_FIELDS_v3()			\
785 	x(bi_atime,			96)	\
786 	x(bi_ctime,			96)	\
787 	x(bi_mtime,			96)	\
788 	x(bi_otime,			96)	\
789 	x(bi_uid,			32)	\
790 	x(bi_gid,			32)	\
791 	x(bi_nlink,			32)	\
792 	x(bi_generation,		32)	\
793 	x(bi_dev,			32)	\
794 	x(bi_data_checksum,		8)	\
795 	x(bi_compression,		8)	\
796 	x(bi_project,			32)	\
797 	x(bi_background_compression,	8)	\
798 	x(bi_data_replicas,		8)	\
799 	x(bi_promote_target,		16)	\
800 	x(bi_foreground_target,		16)	\
801 	x(bi_background_target,		16)	\
802 	x(bi_erasure_code,		16)	\
803 	x(bi_fields_set,		16)	\
804 	x(bi_dir,			64)	\
805 	x(bi_dir_offset,		64)	\
806 	x(bi_subvol,			32)	\
807 	x(bi_parent_subvol,		32)	\
808 	x(bi_nocow,			8)
809 
810 /* subset of BCH_INODE_FIELDS */
811 #define BCH_INODE_OPTS()			\
812 	x(data_checksum,		8)	\
813 	x(compression,			8)	\
814 	x(project,			32)	\
815 	x(background_compression,	8)	\
816 	x(data_replicas,		8)	\
817 	x(promote_target,		16)	\
818 	x(foreground_target,		16)	\
819 	x(background_target,		16)	\
820 	x(erasure_code,			16)	\
821 	x(nocow,			8)
822 
823 enum inode_opt_id {
824 #define x(name, ...)				\
825 	Inode_opt_##name,
826 	BCH_INODE_OPTS()
827 #undef  x
828 	Inode_opt_nr,
829 };
830 
831 #define BCH_INODE_FLAGS()			\
832 	x(sync,				0)	\
833 	x(immutable,			1)	\
834 	x(append,			2)	\
835 	x(nodump,			3)	\
836 	x(noatime,			4)	\
837 	x(i_size_dirty,			5)	\
838 	x(i_sectors_dirty,		6)	\
839 	x(unlinked,			7)	\
840 	x(backptr_untrusted,		8)
841 
842 /* bits 20+ reserved for packed fields below: */
843 
844 enum bch_inode_flags {
845 #define x(t, n)	BCH_INODE_##t = 1U << n,
846 	BCH_INODE_FLAGS()
847 #undef x
848 };
849 
850 enum __bch_inode_flags {
851 #define x(t, n)	__BCH_INODE_##t = n,
852 	BCH_INODE_FLAGS()
853 #undef x
854 };
855 
856 LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
857 LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 31);
858 LE32_BITMASK(INODE_NEW_VARINT,	struct bch_inode, bi_flags, 31, 32);
859 
860 LE64_BITMASK(INODEv2_STR_HASH,	struct bch_inode_v2, bi_flags, 20, 24);
861 LE64_BITMASK(INODEv2_NR_FIELDS,	struct bch_inode_v2, bi_flags, 24, 31);
862 
863 LE64_BITMASK(INODEv3_STR_HASH,	struct bch_inode_v3, bi_flags, 20, 24);
864 LE64_BITMASK(INODEv3_NR_FIELDS,	struct bch_inode_v3, bi_flags, 24, 31);
865 
866 LE64_BITMASK(INODEv3_FIELDS_START,
867 				struct bch_inode_v3, bi_flags, 31, 36);
868 LE64_BITMASK(INODEv3_MODE,	struct bch_inode_v3, bi_flags, 36, 52);
869 
870 /* Dirents */
871 
872 /*
873  * Dirents (and xattrs) have to implement string lookups; since our b-tree
874  * doesn't support arbitrary length strings for the key, we instead index by a
875  * 64 bit hash (currently truncated sha1) of the string, stored in the offset
876  * field of the key - using linear probing to resolve hash collisions. This also
877  * provides us with the readdir cookie posix requires.
878  *
879  * Linear probing requires us to use whiteouts for deletions, in the event of a
880  * collision:
881  */
882 
883 struct bch_dirent {
884 	struct bch_val		v;
885 
886 	/* Target inode number: */
887 	union {
888 	__le64			d_inum;
889 	struct {		/* DT_SUBVOL */
890 	__le32			d_child_subvol;
891 	__le32			d_parent_subvol;
892 	};
893 	};
894 
895 	/*
896 	 * Copy of mode bits 12-15 from the target inode - so userspace can get
897 	 * the filetype without having to do a stat()
898 	 */
899 	__u8			d_type;
900 
901 	__u8			d_name[];
902 } __packed __aligned(8);
903 
904 #define DT_SUBVOL	16
905 #define BCH_DT_MAX	17
906 
907 #define BCH_NAME_MAX	512
908 
909 /* Xattrs */
910 
911 #define KEY_TYPE_XATTR_INDEX_USER			0
912 #define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS	1
913 #define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT	2
914 #define KEY_TYPE_XATTR_INDEX_TRUSTED			3
915 #define KEY_TYPE_XATTR_INDEX_SECURITY	        4
916 
917 struct bch_xattr {
918 	struct bch_val		v;
919 	__u8			x_type;
920 	__u8			x_name_len;
921 	__le16			x_val_len;
922 	__u8			x_name[];
923 } __packed __aligned(8);
924 
925 /* Bucket/allocation information: */
926 
927 struct bch_alloc {
928 	struct bch_val		v;
929 	__u8			fields;
930 	__u8			gen;
931 	__u8			data[];
932 } __packed __aligned(8);
933 
934 #define BCH_ALLOC_FIELDS_V1()			\
935 	x(read_time,		16)		\
936 	x(write_time,		16)		\
937 	x(data_type,		8)		\
938 	x(dirty_sectors,	16)		\
939 	x(cached_sectors,	16)		\
940 	x(oldest_gen,		8)		\
941 	x(stripe,		32)		\
942 	x(stripe_redundancy,	8)
943 
944 enum {
945 #define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
946 	BCH_ALLOC_FIELDS_V1()
947 #undef x
948 };
949 
950 struct bch_alloc_v2 {
951 	struct bch_val		v;
952 	__u8			nr_fields;
953 	__u8			gen;
954 	__u8			oldest_gen;
955 	__u8			data_type;
956 	__u8			data[];
957 } __packed __aligned(8);
958 
959 #define BCH_ALLOC_FIELDS_V2()			\
960 	x(read_time,		64)		\
961 	x(write_time,		64)		\
962 	x(dirty_sectors,	32)		\
963 	x(cached_sectors,	32)		\
964 	x(stripe,		32)		\
965 	x(stripe_redundancy,	8)
966 
967 struct bch_alloc_v3 {
968 	struct bch_val		v;
969 	__le64			journal_seq;
970 	__le32			flags;
971 	__u8			nr_fields;
972 	__u8			gen;
973 	__u8			oldest_gen;
974 	__u8			data_type;
975 	__u8			data[];
976 } __packed __aligned(8);
977 
978 LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
979 LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
980 
981 struct bch_alloc_v4 {
982 	struct bch_val		v;
983 	__u64			journal_seq;
984 	__u32			flags;
985 	__u8			gen;
986 	__u8			oldest_gen;
987 	__u8			data_type;
988 	__u8			stripe_redundancy;
989 	__u32			dirty_sectors;
990 	__u32			cached_sectors;
991 	__u64			io_time[2];
992 	__u32			stripe;
993 	__u32			nr_external_backpointers;
994 	__u64			fragmentation_lru;
995 } __packed __aligned(8);
996 
997 #define BCH_ALLOC_V4_U64s_V0	6
998 #define BCH_ALLOC_V4_U64s	(sizeof(struct bch_alloc_v4) / sizeof(__u64))
999 
1000 BITMASK(BCH_ALLOC_V4_NEED_DISCARD,	struct bch_alloc_v4, flags,  0,  1)
1001 BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,	struct bch_alloc_v4, flags,  1,  2)
1002 BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
1003 BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,	struct bch_alloc_v4, flags,  8,  14)
1004 
1005 #define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX	40
1006 
1007 struct bch_backpointer {
1008 	struct bch_val		v;
1009 	__u8			btree_id;
1010 	__u8			level;
1011 	__u8			data_type;
1012 	__u64			bucket_offset:40;
1013 	__u32			bucket_len;
1014 	struct bpos		pos;
1015 } __packed __aligned(8);
1016 
1017 #define KEY_TYPE_BUCKET_GENS_BITS	8
1018 #define KEY_TYPE_BUCKET_GENS_NR		(1U << KEY_TYPE_BUCKET_GENS_BITS)
1019 #define KEY_TYPE_BUCKET_GENS_MASK	(KEY_TYPE_BUCKET_GENS_NR - 1)
1020 
1021 struct bch_bucket_gens {
1022 	struct bch_val		v;
1023 	u8			gens[KEY_TYPE_BUCKET_GENS_NR];
1024 } __packed __aligned(8);
1025 
1026 /* Quotas: */
1027 
1028 enum quota_types {
1029 	QTYP_USR		= 0,
1030 	QTYP_GRP		= 1,
1031 	QTYP_PRJ		= 2,
1032 	QTYP_NR			= 3,
1033 };
1034 
1035 enum quota_counters {
1036 	Q_SPC			= 0,
1037 	Q_INO			= 1,
1038 	Q_COUNTERS		= 2,
1039 };
1040 
1041 struct bch_quota_counter {
1042 	__le64			hardlimit;
1043 	__le64			softlimit;
1044 };
1045 
1046 struct bch_quota {
1047 	struct bch_val		v;
1048 	struct bch_quota_counter c[Q_COUNTERS];
1049 } __packed __aligned(8);
1050 
1051 /* Erasure coding */
1052 
1053 struct bch_stripe {
1054 	struct bch_val		v;
1055 	__le16			sectors;
1056 	__u8			algorithm;
1057 	__u8			nr_blocks;
1058 	__u8			nr_redundant;
1059 
1060 	__u8			csum_granularity_bits;
1061 	__u8			csum_type;
1062 	__u8			pad;
1063 
1064 	struct bch_extent_ptr	ptrs[];
1065 } __packed __aligned(8);
1066 
1067 /* Reflink: */
1068 
1069 struct bch_reflink_p {
1070 	struct bch_val		v;
1071 	__le64			idx;
1072 	/*
1073 	 * A reflink pointer might point to an indirect extent which is then
1074 	 * later split (by copygc or rebalance). If we only pointed to part of
1075 	 * the original indirect extent, and then one of the fragments is
1076 	 * outside the range we point to, we'd leak a refcount: so when creating
1077 	 * reflink pointers, we need to store pad values to remember the full
1078 	 * range we were taking a reference on.
1079 	 */
1080 	__le32			front_pad;
1081 	__le32			back_pad;
1082 } __packed __aligned(8);
1083 
1084 struct bch_reflink_v {
1085 	struct bch_val		v;
1086 	__le64			refcount;
1087 	union bch_extent_entry	start[0];
1088 	__u64			_data[];
1089 } __packed __aligned(8);
1090 
1091 struct bch_indirect_inline_data {
1092 	struct bch_val		v;
1093 	__le64			refcount;
1094 	u8			data[];
1095 };
1096 
1097 /* Inline data */
1098 
1099 struct bch_inline_data {
1100 	struct bch_val		v;
1101 	u8			data[];
1102 };
1103 
1104 /* Subvolumes: */
1105 
1106 #define SUBVOL_POS_MIN		POS(0, 1)
1107 #define SUBVOL_POS_MAX		POS(0, S32_MAX)
1108 #define BCACHEFS_ROOT_SUBVOL	1
1109 
1110 struct bch_subvolume {
1111 	struct bch_val		v;
1112 	__le32			flags;
1113 	__le32			snapshot;
1114 	__le64			inode;
1115 	/*
1116 	 * Snapshot subvolumes form a tree, separate from the snapshot nodes
1117 	 * tree - if this subvolume is a snapshot, this is the ID of the
1118 	 * subvolume it was created from:
1119 	 */
1120 	__le32			parent;
1121 	__le32			pad;
1122 	bch_le128		otime;
1123 };
1124 
1125 LE32_BITMASK(BCH_SUBVOLUME_RO,		struct bch_subvolume, flags,  0,  1)
1126 /*
1127  * We need to know whether a subvolume is a snapshot so we can know whether we
1128  * can delete it (or whether it should just be rm -rf'd)
1129  */
1130 LE32_BITMASK(BCH_SUBVOLUME_SNAP,	struct bch_subvolume, flags,  1,  2)
1131 LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,	struct bch_subvolume, flags,  2,  3)
1132 
1133 /* Snapshots */
1134 
1135 struct bch_snapshot {
1136 	struct bch_val		v;
1137 	__le32			flags;
1138 	__le32			parent;
1139 	__le32			children[2];
1140 	__le32			subvol;
1141 	/* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
1142 	__le32			tree;
1143 	__le32			depth;
1144 	__le32			skip[3];
1145 };
1146 
1147 LE32_BITMASK(BCH_SNAPSHOT_DELETED,	struct bch_snapshot, flags,  0,  1)
1148 
1149 /* True if a subvolume points to this snapshot node: */
1150 LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,	struct bch_snapshot, flags,  1,  2)
1151 
1152 /*
1153  * Snapshot trees:
1154  *
1155  * The snapshot_trees btree gives us persistent indentifier for each tree of
1156  * bch_snapshot nodes, and allow us to record and easily find the root/master
1157  * subvolume that other snapshots were created from:
1158  */
1159 struct bch_snapshot_tree {
1160 	struct bch_val		v;
1161 	__le32			master_subvol;
1162 	__le32			root_snapshot;
1163 };
1164 
1165 /* LRU btree: */
1166 
1167 struct bch_lru {
1168 	struct bch_val		v;
1169 	__le64			idx;
1170 } __packed __aligned(8);
1171 
1172 #define LRU_ID_STRIPES		(1U << 16)
1173 
1174 /* Logged operations btree: */
1175 
1176 struct bch_logged_op_truncate {
1177 	struct bch_val		v;
1178 	__le32			subvol;
1179 	__le32			pad;
1180 	__le64			inum;
1181 	__le64			new_i_size;
1182 };
1183 
1184 enum logged_op_finsert_state {
1185 	LOGGED_OP_FINSERT_start,
1186 	LOGGED_OP_FINSERT_shift_extents,
1187 	LOGGED_OP_FINSERT_finish,
1188 };
1189 
1190 struct bch_logged_op_finsert {
1191 	struct bch_val		v;
1192 	__u8			state;
1193 	__u8			pad[3];
1194 	__le32			subvol;
1195 	__le64			inum;
1196 	__le64			dst_offset;
1197 	__le64			src_offset;
1198 	__le64			pos;
1199 };
1200 
1201 /* Optional/variable size superblock sections: */
1202 
1203 struct bch_sb_field {
1204 	__u64			_data[0];
1205 	__le32			u64s;
1206 	__le32			type;
1207 };
1208 
1209 #define BCH_SB_FIELDS()				\
1210 	x(journal,			0)	\
1211 	x(members_v1,			1)	\
1212 	x(crypt,			2)	\
1213 	x(replicas_v0,			3)	\
1214 	x(quota,			4)	\
1215 	x(disk_groups,			5)	\
1216 	x(clean,			6)	\
1217 	x(replicas,			7)	\
1218 	x(journal_seq_blacklist,	8)	\
1219 	x(journal_v2,			9)	\
1220 	x(counters,			10)	\
1221 	x(members_v2,			11)	\
1222 	x(errors,			12)	\
1223 	x(ext,				13)	\
1224 	x(downgrade,			14)
1225 
1226 enum bch_sb_field_type {
1227 #define x(f, nr)	BCH_SB_FIELD_##f = nr,
1228 	BCH_SB_FIELDS()
1229 #undef x
1230 	BCH_SB_FIELD_NR
1231 };
1232 
1233 /*
1234  * Most superblock fields are replicated in all device's superblocks - a few are
1235  * not:
1236  */
1237 #define BCH_SINGLE_DEVICE_SB_FIELDS		\
1238 	((1U << BCH_SB_FIELD_journal)|		\
1239 	 (1U << BCH_SB_FIELD_journal_v2))
1240 
1241 /* BCH_SB_FIELD_journal: */
1242 
1243 struct bch_sb_field_journal {
1244 	struct bch_sb_field	field;
1245 	__le64			buckets[];
1246 };
1247 
1248 struct bch_sb_field_journal_v2 {
1249 	struct bch_sb_field	field;
1250 
1251 	struct bch_sb_field_journal_v2_entry {
1252 		__le64		start;
1253 		__le64		nr;
1254 	}			d[];
1255 };
1256 
1257 /* BCH_SB_FIELD_members_v1: */
1258 
1259 #define BCH_MIN_NR_NBUCKETS	(1 << 6)
1260 
1261 #define BCH_IOPS_MEASUREMENTS()			\
1262 	x(seqread,	0)			\
1263 	x(seqwrite,	1)			\
1264 	x(randread,	2)			\
1265 	x(randwrite,	3)
1266 
1267 enum bch_iops_measurement {
1268 #define x(t, n) BCH_IOPS_##t = n,
1269 	BCH_IOPS_MEASUREMENTS()
1270 #undef x
1271 	BCH_IOPS_NR
1272 };
1273 
1274 #define BCH_MEMBER_ERROR_TYPES()		\
1275 	x(read,		0)			\
1276 	x(write,	1)			\
1277 	x(checksum,	2)
1278 
1279 enum bch_member_error_type {
1280 #define x(t, n) BCH_MEMBER_ERROR_##t = n,
1281 	BCH_MEMBER_ERROR_TYPES()
1282 #undef x
1283 	BCH_MEMBER_ERROR_NR
1284 };
1285 
1286 struct bch_member {
1287 	__uuid_t		uuid;
1288 	__le64			nbuckets;	/* device size */
1289 	__le16			first_bucket;   /* index of first bucket used */
1290 	__le16			bucket_size;	/* sectors */
1291 	__le32			pad;
1292 	__le64			last_mount;	/* time_t */
1293 
1294 	__le64			flags;
1295 	__le32			iops[4];
1296 	__le64			errors[BCH_MEMBER_ERROR_NR];
1297 	__le64			errors_at_reset[BCH_MEMBER_ERROR_NR];
1298 	__le64			errors_reset_time;
1299 };
1300 
1301 #define BCH_MEMBER_V1_BYTES	56
1302 
1303 LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags,  0,  4)
1304 /* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
1305 LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags, 14, 15)
1306 LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags, 15, 20)
1307 LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags, 20, 28)
1308 LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags, 28, 30)
1309 LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
1310 					struct bch_member, flags, 30, 31)
1311 
1312 #if 0
1313 LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1], 0,  20);
1314 LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
1315 #endif
1316 
1317 #define BCH_MEMBER_STATES()			\
1318 	x(rw,		0)			\
1319 	x(ro,		1)			\
1320 	x(failed,	2)			\
1321 	x(spare,	3)
1322 
1323 enum bch_member_state {
1324 #define x(t, n) BCH_MEMBER_STATE_##t = n,
1325 	BCH_MEMBER_STATES()
1326 #undef x
1327 	BCH_MEMBER_STATE_NR
1328 };
1329 
1330 struct bch_sb_field_members_v1 {
1331 	struct bch_sb_field	field;
1332 	struct bch_member	_members[]; //Members are now variable size
1333 };
1334 
1335 struct bch_sb_field_members_v2 {
1336 	struct bch_sb_field	field;
1337 	__le16			member_bytes; //size of single member entry
1338 	u8			pad[6];
1339 	struct bch_member	_members[];
1340 };
1341 
1342 /* BCH_SB_FIELD_crypt: */
1343 
1344 struct nonce {
1345 	__le32			d[4];
1346 };
1347 
1348 struct bch_key {
1349 	__le64			key[4];
1350 };
1351 
1352 #define BCH_KEY_MAGIC					\
1353 	(((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|		\
1354 	 ((__u64) 'h' << 16)|((__u64) '*' << 24)|		\
1355 	 ((__u64) '*' << 32)|((__u64) 'k' << 40)|		\
1356 	 ((__u64) 'e' << 48)|((__u64) 'y' << 56))
1357 
1358 struct bch_encrypted_key {
1359 	__le64			magic;
1360 	struct bch_key		key;
1361 };
1362 
1363 /*
1364  * If this field is present in the superblock, it stores an encryption key which
1365  * is used encrypt all other data/metadata. The key will normally be encrypted
1366  * with the key userspace provides, but if encryption has been turned off we'll
1367  * just store the master key unencrypted in the superblock so we can access the
1368  * previously encrypted data.
1369  */
1370 struct bch_sb_field_crypt {
1371 	struct bch_sb_field	field;
1372 
1373 	__le64			flags;
1374 	__le64			kdf_flags;
1375 	struct bch_encrypted_key key;
1376 };
1377 
1378 LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);
1379 
1380 enum bch_kdf_types {
1381 	BCH_KDF_SCRYPT		= 0,
1382 	BCH_KDF_NR		= 1,
1383 };
1384 
1385 /* stored as base 2 log of scrypt params: */
1386 LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
1387 LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
1388 LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
1389 
1390 /* BCH_SB_FIELD_replicas: */
1391 
1392 #define BCH_DATA_TYPES()		\
1393 	x(free,		0)		\
1394 	x(sb,		1)		\
1395 	x(journal,	2)		\
1396 	x(btree,	3)		\
1397 	x(user,		4)		\
1398 	x(cached,	5)		\
1399 	x(parity,	6)		\
1400 	x(stripe,	7)		\
1401 	x(need_gc_gens,	8)		\
1402 	x(need_discard,	9)
1403 
1404 enum bch_data_type {
1405 #define x(t, n) BCH_DATA_##t,
1406 	BCH_DATA_TYPES()
1407 #undef x
1408 	BCH_DATA_NR
1409 };
1410 
1411 static inline bool data_type_is_empty(enum bch_data_type type)
1412 {
1413 	switch (type) {
1414 	case BCH_DATA_free:
1415 	case BCH_DATA_need_gc_gens:
1416 	case BCH_DATA_need_discard:
1417 		return true;
1418 	default:
1419 		return false;
1420 	}
1421 }
1422 
1423 static inline bool data_type_is_hidden(enum bch_data_type type)
1424 {
1425 	switch (type) {
1426 	case BCH_DATA_sb:
1427 	case BCH_DATA_journal:
1428 		return true;
1429 	default:
1430 		return false;
1431 	}
1432 }
1433 
1434 struct bch_replicas_entry_v0 {
1435 	__u8			data_type;
1436 	__u8			nr_devs;
1437 	__u8			devs[];
1438 } __packed;
1439 
1440 struct bch_sb_field_replicas_v0 {
1441 	struct bch_sb_field	field;
1442 	struct bch_replicas_entry_v0 entries[];
1443 } __packed __aligned(8);
1444 
1445 struct bch_replicas_entry {
1446 	__u8			data_type;
1447 	__u8			nr_devs;
1448 	__u8			nr_required;
1449 	__u8			devs[];
1450 } __packed;
1451 
1452 #define replicas_entry_bytes(_i)					\
1453 	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
1454 
1455 struct bch_sb_field_replicas {
1456 	struct bch_sb_field	field;
1457 	struct bch_replicas_entry entries[];
1458 } __packed __aligned(8);
1459 
1460 /* BCH_SB_FIELD_quota: */
1461 
1462 struct bch_sb_quota_counter {
1463 	__le32				timelimit;
1464 	__le32				warnlimit;
1465 };
1466 
1467 struct bch_sb_quota_type {
1468 	__le64				flags;
1469 	struct bch_sb_quota_counter	c[Q_COUNTERS];
1470 };
1471 
1472 struct bch_sb_field_quota {
1473 	struct bch_sb_field		field;
1474 	struct bch_sb_quota_type	q[QTYP_NR];
1475 } __packed __aligned(8);
1476 
1477 /* BCH_SB_FIELD_disk_groups: */
1478 
1479 #define BCH_SB_LABEL_SIZE		32
1480 
1481 struct bch_disk_group {
1482 	__u8			label[BCH_SB_LABEL_SIZE];
1483 	__le64			flags[2];
1484 } __packed __aligned(8);
1485 
1486 LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
1487 LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
1488 LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)
1489 
1490 struct bch_sb_field_disk_groups {
1491 	struct bch_sb_field	field;
1492 	struct bch_disk_group	entries[];
1493 } __packed __aligned(8);
1494 
1495 /* BCH_SB_FIELD_counters */
1496 
1497 #define BCH_PERSISTENT_COUNTERS()				\
1498 	x(io_read,					0)	\
1499 	x(io_write,					1)	\
1500 	x(io_move,					2)	\
1501 	x(bucket_invalidate,				3)	\
1502 	x(bucket_discard,				4)	\
1503 	x(bucket_alloc,					5)	\
1504 	x(bucket_alloc_fail,				6)	\
1505 	x(btree_cache_scan,				7)	\
1506 	x(btree_cache_reap,				8)	\
1507 	x(btree_cache_cannibalize,			9)	\
1508 	x(btree_cache_cannibalize_lock,			10)	\
1509 	x(btree_cache_cannibalize_lock_fail,		11)	\
1510 	x(btree_cache_cannibalize_unlock,		12)	\
1511 	x(btree_node_write,				13)	\
1512 	x(btree_node_read,				14)	\
1513 	x(btree_node_compact,				15)	\
1514 	x(btree_node_merge,				16)	\
1515 	x(btree_node_split,				17)	\
1516 	x(btree_node_rewrite,				18)	\
1517 	x(btree_node_alloc,				19)	\
1518 	x(btree_node_free,				20)	\
1519 	x(btree_node_set_root,				21)	\
1520 	x(btree_path_relock_fail,			22)	\
1521 	x(btree_path_upgrade_fail,			23)	\
1522 	x(btree_reserve_get_fail,			24)	\
1523 	x(journal_entry_full,				25)	\
1524 	x(journal_full,					26)	\
1525 	x(journal_reclaim_finish,			27)	\
1526 	x(journal_reclaim_start,			28)	\
1527 	x(journal_write,				29)	\
1528 	x(read_promote,					30)	\
1529 	x(read_bounce,					31)	\
1530 	x(read_split,					33)	\
1531 	x(read_retry,					32)	\
1532 	x(read_reuse_race,				34)	\
1533 	x(move_extent_read,				35)	\
1534 	x(move_extent_write,				36)	\
1535 	x(move_extent_finish,				37)	\
1536 	x(move_extent_fail,				38)	\
1537 	x(move_extent_start_fail,			39)	\
1538 	x(copygc,					40)	\
1539 	x(copygc_wait,					41)	\
1540 	x(gc_gens_end,					42)	\
1541 	x(gc_gens_start,				43)	\
1542 	x(trans_blocked_journal_reclaim,		44)	\
1543 	x(trans_restart_btree_node_reused,		45)	\
1544 	x(trans_restart_btree_node_split,		46)	\
1545 	x(trans_restart_fault_inject,			47)	\
1546 	x(trans_restart_iter_upgrade,			48)	\
1547 	x(trans_restart_journal_preres_get,		49)	\
1548 	x(trans_restart_journal_reclaim,		50)	\
1549 	x(trans_restart_journal_res_get,		51)	\
1550 	x(trans_restart_key_cache_key_realloced,	52)	\
1551 	x(trans_restart_key_cache_raced,		53)	\
1552 	x(trans_restart_mark_replicas,			54)	\
1553 	x(trans_restart_mem_realloced,			55)	\
1554 	x(trans_restart_memory_allocation_failure,	56)	\
1555 	x(trans_restart_relock,				57)	\
1556 	x(trans_restart_relock_after_fill,		58)	\
1557 	x(trans_restart_relock_key_cache_fill,		59)	\
1558 	x(trans_restart_relock_next_node,		60)	\
1559 	x(trans_restart_relock_parent_for_fill,		61)	\
1560 	x(trans_restart_relock_path,			62)	\
1561 	x(trans_restart_relock_path_intent,		63)	\
1562 	x(trans_restart_too_many_iters,			64)	\
1563 	x(trans_restart_traverse,			65)	\
1564 	x(trans_restart_upgrade,			66)	\
1565 	x(trans_restart_would_deadlock,			67)	\
1566 	x(trans_restart_would_deadlock_write,		68)	\
1567 	x(trans_restart_injected,			69)	\
1568 	x(trans_restart_key_cache_upgrade,		70)	\
1569 	x(trans_traverse_all,				71)	\
1570 	x(transaction_commit,				72)	\
1571 	x(write_super,					73)	\
1572 	x(trans_restart_would_deadlock_recursion_limit,	74)	\
1573 	x(trans_restart_write_buffer_flush,		75)	\
1574 	x(trans_restart_split_race,			76)
1575 
1576 enum bch_persistent_counters {
1577 #define x(t, n, ...) BCH_COUNTER_##t,
1578 	BCH_PERSISTENT_COUNTERS()
1579 #undef x
1580 	BCH_COUNTER_NR
1581 };
1582 
1583 struct bch_sb_field_counters {
1584 	struct bch_sb_field	field;
1585 	__le64			d[];
1586 };
1587 
1588 /*
1589  * On clean shutdown, store btree roots and current journal sequence number in
1590  * the superblock:
1591  */
1592 struct jset_entry {
1593 	__le16			u64s;
1594 	__u8			btree_id;
1595 	__u8			level;
1596 	__u8			type; /* designates what this jset holds */
1597 	__u8			pad[3];
1598 
1599 	struct bkey_i		start[0];
1600 	__u64			_data[];
1601 };
1602 
1603 struct bch_sb_field_clean {
1604 	struct bch_sb_field	field;
1605 
1606 	__le32			flags;
1607 	__le16			_read_clock; /* no longer used */
1608 	__le16			_write_clock;
1609 	__le64			journal_seq;
1610 
1611 	struct jset_entry	start[0];
1612 	__u64			_data[];
1613 };
1614 
1615 struct journal_seq_blacklist_entry {
1616 	__le64			start;
1617 	__le64			end;
1618 };
1619 
1620 struct bch_sb_field_journal_seq_blacklist {
1621 	struct bch_sb_field	field;
1622 	struct journal_seq_blacklist_entry start[];
1623 };
1624 
1625 struct bch_sb_field_errors {
1626 	struct bch_sb_field	field;
1627 	struct bch_sb_field_error_entry {
1628 		__le64		v;
1629 		__le64		last_error_time;
1630 	}			entries[];
1631 };
1632 
1633 LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,	struct bch_sb_field_error_entry, v,  0, 16);
1634 LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,	struct bch_sb_field_error_entry, v, 16, 64);
1635 
1636 struct bch_sb_field_ext {
1637 	struct bch_sb_field	field;
1638 	__le64			recovery_passes_required[2];
1639 	__le64			errors_silent[8];
1640 };
1641 
1642 struct bch_sb_field_downgrade_entry {
1643 	__le16			version;
1644 	__le64			recovery_passes[2];
1645 	__le16			nr_errors;
1646 	__le16			errors[] __counted_by(nr_errors);
1647 } __packed __aligned(2);
1648 
1649 struct bch_sb_field_downgrade {
1650 	struct bch_sb_field	field;
1651 	struct bch_sb_field_downgrade_entry entries[];
1652 };
1653 
1654 /* Superblock: */
1655 
1656 /*
1657  * New versioning scheme:
1658  * One common version number for all on disk data structures - superblock, btree
1659  * nodes, journal entries
1660  */
1661 #define BCH_VERSION_MAJOR(_v)		((__u16) ((_v) >> 10))
1662 #define BCH_VERSION_MINOR(_v)		((__u16) ((_v) & ~(~0U << 10)))
1663 #define BCH_VERSION(_major, _minor)	(((_major) << 10)|(_minor) << 0)
1664 
1665 #define RECOVERY_PASS_ALL_FSCK		(1ULL << 63)
1666 
1667 /*
1668  * field 1:		version name
1669  * field 2:		BCH_VERSION(major, minor)
1670  * field 3:		recovery passess required on upgrade
1671  */
1672 #define BCH_METADATA_VERSIONS()						\
1673 	x(bkey_renumber,		BCH_VERSION(0, 10),		\
1674 	  RECOVERY_PASS_ALL_FSCK)					\
1675 	x(inode_btree_change,		BCH_VERSION(0, 11),		\
1676 	  RECOVERY_PASS_ALL_FSCK)					\
1677 	x(snapshot,			BCH_VERSION(0, 12),		\
1678 	  RECOVERY_PASS_ALL_FSCK)					\
1679 	x(inode_backpointers,		BCH_VERSION(0, 13),		\
1680 	  RECOVERY_PASS_ALL_FSCK)					\
1681 	x(btree_ptr_sectors_written,	BCH_VERSION(0, 14),		\
1682 	  RECOVERY_PASS_ALL_FSCK)					\
1683 	x(snapshot_2,			BCH_VERSION(0, 15),		\
1684 	  BIT_ULL(BCH_RECOVERY_PASS_fs_upgrade_for_subvolumes)|		\
1685 	  BIT_ULL(BCH_RECOVERY_PASS_initialize_subvolumes)|		\
1686 	  RECOVERY_PASS_ALL_FSCK)					\
1687 	x(reflink_p_fix,		BCH_VERSION(0, 16),		\
1688 	  BIT_ULL(BCH_RECOVERY_PASS_fix_reflink_p))			\
1689 	x(subvol_dirent,		BCH_VERSION(0, 17),		\
1690 	  RECOVERY_PASS_ALL_FSCK)					\
1691 	x(inode_v2,			BCH_VERSION(0, 18),		\
1692 	  RECOVERY_PASS_ALL_FSCK)					\
1693 	x(freespace,			BCH_VERSION(0, 19),		\
1694 	  RECOVERY_PASS_ALL_FSCK)					\
1695 	x(alloc_v4,			BCH_VERSION(0, 20),		\
1696 	  RECOVERY_PASS_ALL_FSCK)					\
1697 	x(new_data_types,		BCH_VERSION(0, 21),		\
1698 	  RECOVERY_PASS_ALL_FSCK)					\
1699 	x(backpointers,			BCH_VERSION(0, 22),		\
1700 	  RECOVERY_PASS_ALL_FSCK)					\
1701 	x(inode_v3,			BCH_VERSION(0, 23),		\
1702 	  RECOVERY_PASS_ALL_FSCK)					\
1703 	x(unwritten_extents,		BCH_VERSION(0, 24),		\
1704 	  RECOVERY_PASS_ALL_FSCK)					\
1705 	x(bucket_gens,			BCH_VERSION(0, 25),		\
1706 	  BIT_ULL(BCH_RECOVERY_PASS_bucket_gens_init)|			\
1707 	  RECOVERY_PASS_ALL_FSCK)					\
1708 	x(lru_v2,			BCH_VERSION(0, 26),		\
1709 	  RECOVERY_PASS_ALL_FSCK)					\
1710 	x(fragmentation_lru,		BCH_VERSION(0, 27),		\
1711 	  RECOVERY_PASS_ALL_FSCK)					\
1712 	x(no_bps_in_alloc_keys,		BCH_VERSION(0, 28),		\
1713 	  RECOVERY_PASS_ALL_FSCK)					\
1714 	x(snapshot_trees,		BCH_VERSION(0, 29),		\
1715 	  RECOVERY_PASS_ALL_FSCK)					\
1716 	x(major_minor,			BCH_VERSION(1,  0),		\
1717 	  0)								\
1718 	x(snapshot_skiplists,		BCH_VERSION(1,  1),		\
1719 	  BIT_ULL(BCH_RECOVERY_PASS_check_snapshots))			\
1720 	x(deleted_inodes,		BCH_VERSION(1,  2),		\
1721 	  BIT_ULL(BCH_RECOVERY_PASS_check_inodes))			\
1722 	x(rebalance_work,		BCH_VERSION(1,  3),		\
1723 	  BIT_ULL(BCH_RECOVERY_PASS_set_fs_needs_rebalance))
1724 
1725 enum bcachefs_metadata_version {
1726 	bcachefs_metadata_version_min = 9,
1727 #define x(t, n, upgrade_passes)	bcachefs_metadata_version_##t = n,
1728 	BCH_METADATA_VERSIONS()
1729 #undef x
1730 	bcachefs_metadata_version_max
1731 };
1732 
1733 static const __maybe_unused
1734 unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;
1735 
1736 #define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)
1737 
1738 #define BCH_SB_SECTOR			8
1739 #define BCH_SB_MEMBERS_MAX		64 /* XXX kill */
1740 
1741 struct bch_sb_layout {
1742 	__uuid_t		magic;	/* bcachefs superblock UUID */
1743 	__u8			layout_type;
1744 	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
1745 	__u8			nr_superblocks;
1746 	__u8			pad[5];
1747 	__le64			sb_offset[61];
1748 } __packed __aligned(8);
1749 
1750 #define BCH_SB_LAYOUT_SECTOR	7
1751 
1752 /*
1753  * @offset	- sector where this sb was written
1754  * @version	- on disk format version
1755  * @version_min	- Oldest metadata version this filesystem contains; so we can
1756  *		  safely drop compatibility code and refuse to mount filesystems
1757  *		  we'd need it for
1758  * @magic	- identifies as a bcachefs superblock (BCHFS_MAGIC)
1759  * @seq		- incremented each time superblock is written
1760  * @uuid	- used for generating various magic numbers and identifying
1761  *                member devices, never changes
1762  * @user_uuid	- user visible UUID, may be changed
1763  * @label	- filesystem label
1764  * @seq		- identifies most recent superblock, incremented each time
1765  *		  superblock is written
1766  * @features	- enabled incompatible features
1767  */
1768 struct bch_sb {
1769 	struct bch_csum		csum;
1770 	__le16			version;
1771 	__le16			version_min;
1772 	__le16			pad[2];
1773 	__uuid_t		magic;
1774 	__uuid_t		uuid;
1775 	__uuid_t		user_uuid;
1776 	__u8			label[BCH_SB_LABEL_SIZE];
1777 	__le64			offset;
1778 	__le64			seq;
1779 
1780 	__le16			block_size;
1781 	__u8			dev_idx;
1782 	__u8			nr_devices;
1783 	__le32			u64s;
1784 
1785 	__le64			time_base_lo;
1786 	__le32			time_base_hi;
1787 	__le32			time_precision;
1788 
1789 	__le64			flags[8];
1790 	__le64			features[2];
1791 	__le64			compat[2];
1792 
1793 	struct bch_sb_layout	layout;
1794 
1795 	struct bch_sb_field	start[0];
1796 	__le64			_data[];
1797 } __packed __aligned(8);
1798 
1799 /*
1800  * Flags:
1801  * BCH_SB_INITALIZED	- set on first mount
1802  * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
1803  *			  behaviour of mount/recovery path:
1804  * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
1805  * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
1806  * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
1807  *			   DATA/META_CSUM_TYPE. Also indicates encryption
1808  *			   algorithm in use, if/when we get more than one
1809  */
1810 
1811 LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);
1812 
1813 LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
1814 LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
1815 LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
1816 LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);
1817 
1818 LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);
1819 
1820 LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
1821 LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);
1822 
1823 LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
1824 LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);
1825 
1826 LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
1827 LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);
1828 
1829 LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
1830 LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
1831 LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
1832 LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);
1833 
1834 LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
1835 LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
1836 
1837 LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);
1838 
1839 LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
1840 LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
1841 LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);
1842 
1843 LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
1844 LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);
1845 
1846 /*
1847  * Max size of an extent that may require bouncing to read or write
1848  * (checksummed, compressed): 64k
1849  */
1850 LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
1851 					struct bch_sb, flags[1], 14, 20);
1852 
1853 LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
1854 LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);
1855 
1856 LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
1857 LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
1858 LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);
1859 
1860 LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
1861 					struct bch_sb, flags[2],  0,  4);
1862 LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);
1863 
1864 LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
1865 LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
1866 LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
1867 LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
1868 LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
1869 LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
1870 LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
1871 LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
1872 LE64_BITMASK(BCH_SB_NOCOW,		struct bch_sb, flags[4], 33, 34);
1873 LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE,	struct bch_sb, flags[4], 34, 54);
1874 LE64_BITMASK(BCH_SB_VERSION_UPGRADE,	struct bch_sb, flags[4], 54, 56);
1875 
1876 LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
1877 LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
1878 					struct bch_sb, flags[4], 60, 64);
1879 
1880 LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
1881 					struct bch_sb, flags[5],  0, 16);
1882 
1883 static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
1884 {
1885 	return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
1886 }
1887 
1888 static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
1889 {
1890 	SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
1891 	SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
1892 }
1893 
1894 static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
1895 {
1896 	return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
1897 		(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
1898 }
1899 
1900 static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
1901 {
1902 	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
1903 	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
1904 }
1905 
1906 /*
1907  * Features:
1908  *
1909  * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
1910  * reflink:			gates KEY_TYPE_reflink
1911  * inline_data:			gates KEY_TYPE_inline_data
1912  * new_siphash:			gates BCH_STR_HASH_siphash
1913  * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
1914  */
1915 #define BCH_SB_FEATURES()			\
1916 	x(lz4,				0)	\
1917 	x(gzip,				1)	\
1918 	x(zstd,				2)	\
1919 	x(atomic_nlink,			3)	\
1920 	x(ec,				4)	\
1921 	x(journal_seq_blacklist_v3,	5)	\
1922 	x(reflink,			6)	\
1923 	x(new_siphash,			7)	\
1924 	x(inline_data,			8)	\
1925 	x(new_extent_overwrite,		9)	\
1926 	x(incompressible,		10)	\
1927 	x(btree_ptr_v2,			11)	\
1928 	x(extents_above_btree_updates,	12)	\
1929 	x(btree_updates_journalled,	13)	\
1930 	x(reflink_inline_data,		14)	\
1931 	x(new_varint,			15)	\
1932 	x(journal_no_flush,		16)	\
1933 	x(alloc_v2,			17)	\
1934 	x(extents_across_btree_nodes,	18)
1935 
1936 #define BCH_SB_FEATURES_ALWAYS				\
1937 	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
1938 	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
1939 	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
1940 	 (1ULL << BCH_FEATURE_alloc_v2)|\
1941 	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))
1942 
1943 #define BCH_SB_FEATURES_ALL				\
1944 	(BCH_SB_FEATURES_ALWAYS|			\
1945 	 (1ULL << BCH_FEATURE_new_siphash)|		\
1946 	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
1947 	 (1ULL << BCH_FEATURE_new_varint)|		\
1948 	 (1ULL << BCH_FEATURE_journal_no_flush))
1949 
1950 enum bch_sb_feature {
1951 #define x(f, n) BCH_FEATURE_##f,
1952 	BCH_SB_FEATURES()
1953 #undef x
1954 	BCH_FEATURE_NR,
1955 };
1956 
1957 #define BCH_SB_COMPAT()					\
1958 	x(alloc_info,				0)	\
1959 	x(alloc_metadata,			1)	\
1960 	x(extents_above_btree_updates_done,	2)	\
1961 	x(bformat_overflow_done,		3)
1962 
1963 enum bch_sb_compat {
1964 #define x(f, n) BCH_COMPAT_##f,
1965 	BCH_SB_COMPAT()
1966 #undef x
1967 	BCH_COMPAT_NR,
1968 };
1969 
1970 /* options: */
1971 
1972 #define BCH_VERSION_UPGRADE_OPTS()	\
1973 	x(compatible,		0)	\
1974 	x(incompatible,		1)	\
1975 	x(none,			2)
1976 
1977 enum bch_version_upgrade_opts {
1978 #define x(t, n) BCH_VERSION_UPGRADE_##t = n,
1979 	BCH_VERSION_UPGRADE_OPTS()
1980 #undef x
1981 };
1982 
1983 #define BCH_REPLICAS_MAX		4U
1984 
1985 #define BCH_BKEY_PTRS_MAX		16U
1986 
1987 #define BCH_ERROR_ACTIONS()		\
1988 	x(continue,		0)	\
1989 	x(ro,			1)	\
1990 	x(panic,		2)
1991 
1992 enum bch_error_actions {
1993 #define x(t, n) BCH_ON_ERROR_##t = n,
1994 	BCH_ERROR_ACTIONS()
1995 #undef x
1996 	BCH_ON_ERROR_NR
1997 };
1998 
1999 #define BCH_STR_HASH_TYPES()		\
2000 	x(crc32c,		0)	\
2001 	x(crc64,		1)	\
2002 	x(siphash_old,		2)	\
2003 	x(siphash,		3)
2004 
2005 enum bch_str_hash_type {
2006 #define x(t, n) BCH_STR_HASH_##t = n,
2007 	BCH_STR_HASH_TYPES()
2008 #undef x
2009 	BCH_STR_HASH_NR
2010 };
2011 
2012 #define BCH_STR_HASH_OPTS()		\
2013 	x(crc32c,		0)	\
2014 	x(crc64,		1)	\
2015 	x(siphash,		2)
2016 
2017 enum bch_str_hash_opts {
2018 #define x(t, n) BCH_STR_HASH_OPT_##t = n,
2019 	BCH_STR_HASH_OPTS()
2020 #undef x
2021 	BCH_STR_HASH_OPT_NR
2022 };
2023 
2024 #define BCH_CSUM_TYPES()			\
2025 	x(none,				0)	\
2026 	x(crc32c_nonzero,		1)	\
2027 	x(crc64_nonzero,		2)	\
2028 	x(chacha20_poly1305_80,		3)	\
2029 	x(chacha20_poly1305_128,	4)	\
2030 	x(crc32c,			5)	\
2031 	x(crc64,			6)	\
2032 	x(xxhash,			7)
2033 
2034 enum bch_csum_type {
2035 #define x(t, n) BCH_CSUM_##t = n,
2036 	BCH_CSUM_TYPES()
2037 #undef x
2038 	BCH_CSUM_NR
2039 };
2040 
2041 static const __maybe_unused unsigned bch_crc_bytes[] = {
2042 	[BCH_CSUM_none]				= 0,
2043 	[BCH_CSUM_crc32c_nonzero]		= 4,
2044 	[BCH_CSUM_crc32c]			= 4,
2045 	[BCH_CSUM_crc64_nonzero]		= 8,
2046 	[BCH_CSUM_crc64]			= 8,
2047 	[BCH_CSUM_xxhash]			= 8,
2048 	[BCH_CSUM_chacha20_poly1305_80]		= 10,
2049 	[BCH_CSUM_chacha20_poly1305_128]	= 16,
2050 };
2051 
2052 static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
2053 {
2054 	switch (type) {
2055 	case BCH_CSUM_chacha20_poly1305_80:
2056 	case BCH_CSUM_chacha20_poly1305_128:
2057 		return true;
2058 	default:
2059 		return false;
2060 	}
2061 }
2062 
2063 #define BCH_CSUM_OPTS()			\
2064 	x(none,			0)	\
2065 	x(crc32c,		1)	\
2066 	x(crc64,		2)	\
2067 	x(xxhash,		3)
2068 
2069 enum bch_csum_opts {
2070 #define x(t, n) BCH_CSUM_OPT_##t = n,
2071 	BCH_CSUM_OPTS()
2072 #undef x
2073 	BCH_CSUM_OPT_NR
2074 };
2075 
2076 #define BCH_COMPRESSION_TYPES()		\
2077 	x(none,			0)	\
2078 	x(lz4_old,		1)	\
2079 	x(gzip,			2)	\
2080 	x(lz4,			3)	\
2081 	x(zstd,			4)	\
2082 	x(incompressible,	5)
2083 
2084 enum bch_compression_type {
2085 #define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
2086 	BCH_COMPRESSION_TYPES()
2087 #undef x
2088 	BCH_COMPRESSION_TYPE_NR
2089 };
2090 
2091 #define BCH_COMPRESSION_OPTS()		\
2092 	x(none,		0)		\
2093 	x(lz4,		1)		\
2094 	x(gzip,		2)		\
2095 	x(zstd,		3)
2096 
2097 enum bch_compression_opts {
2098 #define x(t, n) BCH_COMPRESSION_OPT_##t = n,
2099 	BCH_COMPRESSION_OPTS()
2100 #undef x
2101 	BCH_COMPRESSION_OPT_NR
2102 };
2103 
2104 /*
2105  * Magic numbers
2106  *
2107  * The various other data structures have their own magic numbers, which are
2108  * xored with the first part of the cache set's UUID
2109  */
2110 
2111 #define BCACHE_MAGIC							\
2112 	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
2113 		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
2114 #define BCHFS_MAGIC							\
2115 	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
2116 		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
2117 
2118 #define BCACHEFS_STATFS_MAGIC		0xca451a4e
2119 
2120 #define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
2121 #define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)
2122 
2123 static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
2124 {
2125 	__le64 ret;
2126 
2127 	memcpy(&ret, &sb->uuid, sizeof(ret));
2128 	return ret;
2129 }
2130 
2131 static inline __u64 __jset_magic(struct bch_sb *sb)
2132 {
2133 	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
2134 }
2135 
2136 static inline __u64 __bset_magic(struct bch_sb *sb)
2137 {
2138 	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
2139 }
2140 
2141 /* Journal */
2142 
2143 #define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))
2144 
2145 #define BCH_JSET_ENTRY_TYPES()			\
2146 	x(btree_keys,		0)		\
2147 	x(btree_root,		1)		\
2148 	x(prio_ptrs,		2)		\
2149 	x(blacklist,		3)		\
2150 	x(blacklist_v2,		4)		\
2151 	x(usage,		5)		\
2152 	x(data_usage,		6)		\
2153 	x(clock,		7)		\
2154 	x(dev_usage,		8)		\
2155 	x(log,			9)		\
2156 	x(overwrite,		10)
2157 
2158 enum {
2159 #define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
2160 	BCH_JSET_ENTRY_TYPES()
2161 #undef x
2162 	BCH_JSET_ENTRY_NR
2163 };
2164 
2165 /*
2166  * Journal sequence numbers can be blacklisted: bsets record the max sequence
2167  * number of all the journal entries they contain updates for, so that on
2168  * recovery we can ignore those bsets that contain index updates newer that what
2169  * made it into the journal.
2170  *
2171  * This means that we can't reuse that journal_seq - we have to skip it, and
2172  * then record that we skipped it so that the next time we crash and recover we
2173  * don't think there was a missing journal entry.
2174  */
2175 struct jset_entry_blacklist {
2176 	struct jset_entry	entry;
2177 	__le64			seq;
2178 };
2179 
2180 struct jset_entry_blacklist_v2 {
2181 	struct jset_entry	entry;
2182 	__le64			start;
2183 	__le64			end;
2184 };
2185 
2186 #define BCH_FS_USAGE_TYPES()			\
2187 	x(reserved,		0)		\
2188 	x(inodes,		1)		\
2189 	x(key_version,		2)
2190 
2191 enum {
2192 #define x(f, nr)	BCH_FS_USAGE_##f	= nr,
2193 	BCH_FS_USAGE_TYPES()
2194 #undef x
2195 	BCH_FS_USAGE_NR
2196 };
2197 
2198 struct jset_entry_usage {
2199 	struct jset_entry	entry;
2200 	__le64			v;
2201 } __packed;
2202 
2203 struct jset_entry_data_usage {
2204 	struct jset_entry	entry;
2205 	__le64			v;
2206 	struct bch_replicas_entry r;
2207 } __packed;
2208 
2209 struct jset_entry_clock {
2210 	struct jset_entry	entry;
2211 	__u8			rw;
2212 	__u8			pad[7];
2213 	__le64			time;
2214 } __packed;
2215 
2216 struct jset_entry_dev_usage_type {
2217 	__le64			buckets;
2218 	__le64			sectors;
2219 	__le64			fragmented;
2220 } __packed;
2221 
2222 struct jset_entry_dev_usage {
2223 	struct jset_entry	entry;
2224 	__le32			dev;
2225 	__u32			pad;
2226 
2227 	__le64			buckets_ec;
2228 	__le64			_buckets_unavailable; /* No longer used */
2229 
2230 	struct jset_entry_dev_usage_type d[];
2231 };
2232 
2233 static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
2234 {
2235 	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
2236 		sizeof(struct jset_entry_dev_usage_type);
2237 }
2238 
2239 struct jset_entry_log {
2240 	struct jset_entry	entry;
2241 	u8			d[];
2242 } __packed;
2243 
2244 /*
2245  * On disk format for a journal entry:
2246  * seq is monotonically increasing; every journal entry has its own unique
2247  * sequence number.
2248  *
2249  * last_seq is the oldest journal entry that still has keys the btree hasn't
2250  * flushed to disk yet.
2251  *
2252  * version is for on disk format changes.
2253  */
2254 struct jset {
2255 	struct bch_csum		csum;
2256 
2257 	__le64			magic;
2258 	__le64			seq;
2259 	__le32			version;
2260 	__le32			flags;
2261 
2262 	__le32			u64s; /* size of d[] in u64s */
2263 
2264 	__u8			encrypted_start[0];
2265 
2266 	__le16			_read_clock; /* no longer used */
2267 	__le16			_write_clock;
2268 
2269 	/* Sequence number of oldest dirty journal entry */
2270 	__le64			last_seq;
2271 
2272 
2273 	struct jset_entry	start[0];
2274 	__u64			_data[];
2275 } __packed __aligned(8);
2276 
2277 LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
2278 LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
2279 LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);
2280 
2281 #define BCH_JOURNAL_BUCKETS_MIN		8
2282 
2283 /* Btree: */
2284 
2285 enum btree_id_flags {
2286 	BTREE_ID_EXTENTS	= BIT(0),
2287 	BTREE_ID_SNAPSHOTS	= BIT(1),
2288 	BTREE_ID_SNAPSHOT_FIELD	= BIT(2),
2289 	BTREE_ID_DATA		= BIT(3),
2290 };
2291 
2292 #define BCH_BTREE_IDS()								\
2293 	x(extents,		0,	BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
2294 	  BIT_ULL(KEY_TYPE_whiteout)|						\
2295 	  BIT_ULL(KEY_TYPE_error)|						\
2296 	  BIT_ULL(KEY_TYPE_cookie)|						\
2297 	  BIT_ULL(KEY_TYPE_extent)|						\
2298 	  BIT_ULL(KEY_TYPE_reservation)|					\
2299 	  BIT_ULL(KEY_TYPE_reflink_p)|						\
2300 	  BIT_ULL(KEY_TYPE_inline_data))					\
2301 	x(inodes,		1,	BTREE_ID_SNAPSHOTS,			\
2302 	  BIT_ULL(KEY_TYPE_whiteout)|						\
2303 	  BIT_ULL(KEY_TYPE_inode)|						\
2304 	  BIT_ULL(KEY_TYPE_inode_v2)|						\
2305 	  BIT_ULL(KEY_TYPE_inode_v3)|						\
2306 	  BIT_ULL(KEY_TYPE_inode_generation))					\
2307 	x(dirents,		2,	BTREE_ID_SNAPSHOTS,			\
2308 	  BIT_ULL(KEY_TYPE_whiteout)|						\
2309 	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
2310 	  BIT_ULL(KEY_TYPE_dirent))						\
2311 	x(xattrs,		3,	BTREE_ID_SNAPSHOTS,			\
2312 	  BIT_ULL(KEY_TYPE_whiteout)|						\
2313 	  BIT_ULL(KEY_TYPE_cookie)|						\
2314 	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
2315 	  BIT_ULL(KEY_TYPE_xattr))						\
2316 	x(alloc,		4,	0,					\
2317 	  BIT_ULL(KEY_TYPE_alloc)|						\
2318 	  BIT_ULL(KEY_TYPE_alloc_v2)|						\
2319 	  BIT_ULL(KEY_TYPE_alloc_v3)|						\
2320 	  BIT_ULL(KEY_TYPE_alloc_v4))						\
2321 	x(quotas,		5,	0,					\
2322 	  BIT_ULL(KEY_TYPE_quota))						\
2323 	x(stripes,		6,	0,					\
2324 	  BIT_ULL(KEY_TYPE_stripe))						\
2325 	x(reflink,		7,	BTREE_ID_EXTENTS|BTREE_ID_DATA,		\
2326 	  BIT_ULL(KEY_TYPE_reflink_v)|						\
2327 	  BIT_ULL(KEY_TYPE_indirect_inline_data))				\
2328 	x(subvolumes,		8,	0,					\
2329 	  BIT_ULL(KEY_TYPE_subvolume))						\
2330 	x(snapshots,		9,	0,					\
2331 	  BIT_ULL(KEY_TYPE_snapshot))						\
2332 	x(lru,			10,	0,					\
2333 	  BIT_ULL(KEY_TYPE_set))						\
2334 	x(freespace,		11,	BTREE_ID_EXTENTS,			\
2335 	  BIT_ULL(KEY_TYPE_set))						\
2336 	x(need_discard,		12,	0,					\
2337 	  BIT_ULL(KEY_TYPE_set))						\
2338 	x(backpointers,		13,	0,					\
2339 	  BIT_ULL(KEY_TYPE_backpointer))					\
2340 	x(bucket_gens,		14,	0,					\
2341 	  BIT_ULL(KEY_TYPE_bucket_gens))					\
2342 	x(snapshot_trees,	15,	0,					\
2343 	  BIT_ULL(KEY_TYPE_snapshot_tree))					\
2344 	x(deleted_inodes,	16,	BTREE_ID_SNAPSHOT_FIELD,		\
2345 	  BIT_ULL(KEY_TYPE_set))						\
2346 	x(logged_ops,		17,	0,					\
2347 	  BIT_ULL(KEY_TYPE_logged_op_truncate)|					\
2348 	  BIT_ULL(KEY_TYPE_logged_op_finsert))					\
2349 	x(rebalance_work,	18,	BTREE_ID_SNAPSHOT_FIELD,		\
2350 	  BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))
2351 
2352 enum btree_id {
2353 #define x(name, nr, ...) BTREE_ID_##name = nr,
2354 	BCH_BTREE_IDS()
2355 #undef x
2356 	BTREE_ID_NR
2357 };
2358 
2359 #define BTREE_MAX_DEPTH		4U
2360 
2361 /* Btree nodes */
2362 
2363 /*
2364  * Btree nodes
2365  *
2366  * On disk a btree node is a list/log of these; within each set the keys are
2367  * sorted
2368  */
2369 struct bset {
2370 	__le64			seq;
2371 
2372 	/*
2373 	 * Highest journal entry this bset contains keys for.
2374 	 * If on recovery we don't see that journal entry, this bset is ignored:
2375 	 * this allows us to preserve the order of all index updates after a
2376 	 * crash, since the journal records a total order of all index updates
2377 	 * and anything that didn't make it to the journal doesn't get used.
2378 	 */
2379 	__le64			journal_seq;
2380 
2381 	__le32			flags;
2382 	__le16			version;
2383 	__le16			u64s; /* count of d[] in u64s */
2384 
2385 	struct bkey_packed	start[0];
2386 	__u64			_data[];
2387 } __packed __aligned(8);
2388 
2389 LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);
2390 
2391 LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
2392 LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
2393 				struct bset, flags, 5, 6);
2394 
2395 /* Sector offset within the btree node: */
2396 LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);
2397 
2398 struct btree_node {
2399 	struct bch_csum		csum;
2400 	__le64			magic;
2401 
2402 	/* this flags field is encrypted, unlike bset->flags: */
2403 	__le64			flags;
2404 
2405 	/* Closed interval: */
2406 	struct bpos		min_key;
2407 	struct bpos		max_key;
2408 	struct bch_extent_ptr	_ptr; /* not used anymore */
2409 	struct bkey_format	format;
2410 
2411 	union {
2412 	struct bset		keys;
2413 	struct {
2414 		__u8		pad[22];
2415 		__le16		u64s;
2416 		__u64		_data[0];
2417 
2418 	};
2419 	};
2420 } __packed __aligned(8);
2421 
2422 LE64_BITMASK(BTREE_NODE_ID_LO,	struct btree_node, flags,  0,  4);
2423 LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
2424 LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
2425 				struct btree_node, flags,  8,  9);
2426 LE64_BITMASK(BTREE_NODE_ID_HI,	struct btree_node, flags,  9, 25);
2427 /* 25-32 unused */
2428 LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);
2429 
2430 static inline __u64 BTREE_NODE_ID(struct btree_node *n)
2431 {
2432 	return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
2433 }
2434 
2435 static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
2436 {
2437 	SET_BTREE_NODE_ID_LO(n, v);
2438 	SET_BTREE_NODE_ID_HI(n, v >> 4);
2439 }
2440 
2441 struct btree_node_entry {
2442 	struct bch_csum		csum;
2443 
2444 	union {
2445 	struct bset		keys;
2446 	struct {
2447 		__u8		pad[22];
2448 		__le16		u64s;
2449 		__u64		_data[0];
2450 	};
2451 	};
2452 } __packed __aligned(8);
2453 
2454 #endif /* _BCACHEFS_FORMAT_H */
2455