/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H

/*
 * bcachefs on disk data structures
 *
 * OVERVIEW:
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 *  - superblock
 *  - journal
 *  - btree
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem, or that we need prior to reading the
 * journal/btree roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the first
 * entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length (and
 * keys effectively are variable length too, due to packing) we can't do random
 * access without building up additional in memory tables in the btree node read
 * path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case, the
 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 * be generous with field sizes in the common part of the key format (64 bit
 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 */

#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#include "vstructs.h"

#ifdef __KERNEL__
typedef uuid_t __uuid_t;
#endif

#define BITMASK(name, type, field, offset, end)				\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
									\
static inline __u64 name(const type *k)					\
{									\
	return (k->field >> offset) & ~(~0ULL << (end - offset));	\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;		\
}

#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const __maybe_unused unsigned	name##_OFFSET = offset;		\
static const __maybe_unused unsigned	name##_BITS = (end - offset);	\
static const __maybe_unused __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;\
									\
static inline __u64 name(const type *k)					\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
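
/*
 * Usage sketch (illustrative): LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb,
 * flags[0], 1, 2), defined below, generates BCH_SB_CLEAN() and
 * SET_BCH_SB_CLEAN(), which read and update bit 1 of the little-endian
 * flags[0] word regardless of host endianness:
 *
 *	if (BCH_SB_CLEAN(sb))
 *		...;
 *	SET_BCH_SB_CLEAN(sb, false);
 */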

struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
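
/*
 * Illustrative example: a btree node whose keys all have inode 4096 could
 * use a local format with bits_per_field[BKEY_FIELD_INODE] = 0 and
 * field_offset[BKEY_FIELD_INODE] = 4096 - the inode field then costs no
 * bits at all, and unpacking is just field_offset plus the packed value.
 */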

/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__aligned(4)
#endif
;

#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
#define KEY_SIZE_MAX			((__u32)~0U)

static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
	return (struct bpos) {
		.inode		= inode,
		.offset		= offset,
		.snapshot	= snapshot,
	};
}

#define POS_MIN				SPOS(0, 0, 0)
#define POS_MAX				SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)		SPOS(_inode, _offset, 0)
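
/*
 * Usage sketch (illustrative): extent keys are indexed by the *end* position
 * of the extent, in sectors - an extent for inode 42 covering sectors
 * [8, 16) is keyed as
 *
 *	struct bpos p = POS(42, 16);
 *
 * In btrees that are snapshots-aware, SPOS() fills in the snapshot ID too.
 */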

/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __packed __aligned(4);

struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __packed __aligned(8);

struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __packed __aligned(8);

typedef struct {
	__le64			lo;
	__le64			hi;
} bch_le128;

#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX			U8_MAX
#define BKEY_VAL_U64s_MAX		(BKEY_U64s_MAX - BKEY_U64s)
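
/*
 * Worked example (illustrative): with the layout above, sizeof(struct bkey)
 * is 40 bytes, so BKEY_U64s = 5; since u64s is a u8, a key/value pair is at
 * most 255 u64s, leaving BKEY_VAL_U64s_MAX = 250 u64s (2000 bytes) for the
 * value - the "just under 2k" mentioned at the top of this file.
 */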

#define KEY_PACKED_BITS_START		24

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1

enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};

#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})
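
/*
 * Illustrative expansion: BKEY_FORMAT_CURRENT works out to bits_per_field =
 * { 64, 64, 32, 32, 32, 64 } (inode, offset, snapshot, size, version.hi,
 * version.lo) - every field at full width, so keys in this format are
 * effectively unpacked.
 */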

/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	struct bkey	k;
	struct bch_val	v;
};

#define POS_KEY(_pos)							\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= _pos,						\
})

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct bkey_i key; __u64 key ## _pad[pad]
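
/*
 * Usage sketch (illustrative): __BKEY_PADDED() declares a bkey_i followed by
 * room for a value, e.g. a stack allocated key with space for 8 u64s of
 * value:
 *
 *	struct { __BKEY_PADDED(k, 8); } tmp;
 *
 *	bkey_init(&tmp.k.k);
 *	tmp.k.k.p = POS(inode, offset);
 */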

/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order.  Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()				\
	x(deleted,		0)			\
	x(whiteout,		1)			\
	x(error,		2)			\
	x(cookie,		3)			\
	x(hash_whiteout,	4)			\
	x(btree_ptr,		5)			\
	x(extent,		6)			\
	x(reservation,		7)			\
	x(inode,		8)			\
	x(inode_generation,	9)			\
	x(dirent,		10)			\
	x(xattr,		11)			\
	x(alloc,		12)			\
	x(quota,		13)			\
	x(stripe,		14)			\
	x(reflink_p,		15)			\
	x(reflink_v,		16)			\
	x(inline_data,		17)			\
	x(btree_ptr_v2,		18)			\
	x(indirect_inline_data,	19)			\
	x(alloc_v2,		20)			\
	x(subvolume,		21)			\
	x(snapshot,		22)			\
	x(inode_v2,		23)			\
	x(alloc_v3,		24)			\
	x(set,			25)			\
	x(lru,			26)			\
	x(alloc_v4,		27)			\
	x(backpointer,		28)			\
	x(inode_v3,		29)			\
	x(bucket_gens,		30)			\
	x(snapshot_tree,	31)			\
	x(logged_op_truncate,	32)			\
	x(logged_op_finsert,	33)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name	= nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};

struct bch_deleted {
	struct bch_val		v;
};

struct bch_whiteout {
	struct bch_val		v;
};

struct bch_error {
	struct bch_val		v;
};

struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};

struct bch_hash_whiteout {
	struct bch_val		v;
};

struct bch_set {
	struct bch_val		v;
};

/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __packed __aligned(8);

struct bch_backpointer {
	struct bch_val		v;
	__u8			btree_id;
	__u8			level;
	__u8			data_type;
	__u64			bucket_offset:40;
	__u32			bucket_len;
	struct bpos		pos;
} __packed __aligned(8);

/* LRU btree: */

struct bch_lru {
	struct bch_val		v;
	__le64			idx;
} __packed __aligned(8);

#define LRU_ID_STRIPES		(1U << 16)

/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};
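
/*
 * Illustrative note: because _data[] sits at offset 0, the u64s count covers
 * the whole field, 8 byte header included - a field's total size in bytes is
 *
 *	le32_to_cpu(f->u64s) * sizeof(__u64)
 *
 * and the next field starts immediately after it (see vstructs.h).
 */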

#define BCH_SB_FIELDS()				\
	x(journal,			0)	\
	x(members_v1,			1)	\
	x(crypt,			2)	\
	x(replicas_v0,			3)	\
	x(quota,			4)	\
	x(disk_groups,			5)	\
	x(clean,			6)	\
	x(replicas,			7)	\
	x(journal_seq_blacklist,	8)	\
	x(journal_v2,			9)	\
	x(counters,			10)	\
	x(members_v2,			11)	\
	x(errors,			12)	\
	x(ext,				13)	\
	x(downgrade,			14)

#include "alloc_background_format.h"
#include "extents_format.h"
#include "reflink_format.h"
#include "ec_format.h"
#include "inode_format.h"
#include "dirent_format.h"
#include "xattr_format.h"
#include "quota_format.h"
#include "logged_ops_format.h"
#include "snapshot_format.h"
#include "subvolume_format.h"
#include "sb-counters_format.h"

enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};

/*
 * Most superblock fields are replicated in all devices' superblocks - a few
 * are not:
 */
#define BCH_SINGLE_DEVICE_SB_FIELDS		\
	((1U << BCH_SB_FIELD_journal)|		\
	 (1U << BCH_SB_FIELD_journal_v2))

/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[];
};

struct bch_sb_field_journal_v2 {
	struct bch_sb_field	field;

	struct bch_sb_field_journal_v2_entry {
		__le64		start;
		__le64		nr;
	}			d[];
};
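
/*
 * Illustrative example: journal_v2 stores ranges of buckets rather than
 * individual buckets - v1 buckets { 10, 11, 12, 20 } would be expressed as
 *
 *	d[0] = { .start = cpu_to_le64(10), .nr = cpu_to_le64(3) },
 *	d[1] = { .start = cpu_to_le64(20), .nr = cpu_to_le64(1) }
 */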

/* BCH_SB_FIELD_members_v1: */

#define BCH_MIN_NR_NBUCKETS	(1 << 6)

#define BCH_IOPS_MEASUREMENTS()			\
	x(seqread,	0)			\
	x(seqwrite,	1)			\
	x(randread,	2)			\
	x(randwrite,	3)

enum bch_iops_measurement {
#define x(t, n) BCH_IOPS_##t = n,
	BCH_IOPS_MEASUREMENTS()
#undef x
	BCH_IOPS_NR
};

#define BCH_MEMBER_ERROR_TYPES()		\
	x(read,		0)			\
	x(write,	1)			\
	x(checksum,	2)

enum bch_member_error_type {
#define x(t, n) BCH_MEMBER_ERROR_##t = n,
	BCH_MEMBER_ERROR_TYPES()
#undef x
	BCH_MEMBER_ERROR_NR
};

struct bch_member {
	__uuid_t		uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags;
	__le32			iops[4];
	__le64			errors[BCH_MEMBER_ERROR_NR];
	__le64			errors_at_reset[BCH_MEMBER_ERROR_NR];
	__le64			errors_reset_time;
	__le64			seq;
};

#define BCH_MEMBER_V1_BYTES	56

LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags,  0,  4)
/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags, 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags, 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags, 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags, 28, 30)
LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
					struct bch_member, flags, 30, 31)
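
/*
 * Usage sketch (illustrative): member flags are accessed via the generated
 * accessors, e.g.
 *
 *	if (BCH_MEMBER_STATE(m) == BCH_MEMBER_STATE_rw &&
 *	    !BCH_MEMBER_DISCARD(m))
 *		SET_BCH_MEMBER_DISCARD(m, true);
 */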

#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1], 0,  20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif

#define BCH_MEMBER_STATES()			\
	x(rw,		0)			\
	x(ro,		1)			\
	x(failed,	2)			\
	x(spare,	3)

enum bch_member_state {
#define x(t, n) BCH_MEMBER_STATE_##t = n,
	BCH_MEMBER_STATES()
#undef x
	BCH_MEMBER_STATE_NR
};

struct bch_sb_field_members_v1 {
	struct bch_sb_field	field;
	struct bch_member	_members[]; /* members are now variable size */
};

struct bch_sb_field_members_v2 {
	struct bch_sb_field	field;
	__le16			member_bytes; /* size of a single member entry */
	__u8			pad[6];
	struct bch_member	_members[];
};

/* BCH_SB_FIELD_crypt: */

struct nonce {
	__le32			d[4];
};

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|		\
	 ((__u64) 'h' << 16)|((__u64) '*' << 24)|		\
	 ((__u64) '*' << 32)|((__u64) 'k' << 40)|		\
	 ((__u64) 'e' << 48)|((__u64) 'y' << 56))

struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};

/*
 * If this field is present in the superblock, it stores an encryption key
 * which is used to encrypt all other data/metadata. The key will normally be
 * encrypted with the key userspace provides, but if encryption has been
 * turned off we'll just store the master key unencrypted in the superblock so
 * we can access the previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};

LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
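
/*
 * Worked example (illustrative): since the scrypt parameters are stored as
 * base 2 logs, BCH_KDF_SCRYPT_N(crypt) == 14 means N = 1 << 14 = 16384.
 */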

/* BCH_SB_FIELD_replicas: */

#define BCH_DATA_TYPES()		\
	x(free,		0)		\
	x(sb,		1)		\
	x(journal,	2)		\
	x(btree,	3)		\
	x(user,		4)		\
	x(cached,	5)		\
	x(parity,	6)		\
	x(stripe,	7)		\
	x(need_gc_gens,	8)		\
	x(need_discard,	9)

enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
#undef x
	BCH_DATA_NR
};

static inline bool data_type_is_empty(enum bch_data_type type)
{
	switch (type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		return true;
	default:
		return false;
	}
}

static inline bool data_type_is_hidden(enum bch_data_type type)
{
	switch (type) {
	case BCH_DATA_sb:
	case BCH_DATA_journal:
		return true;
	default:
		return false;
	}
}

struct bch_replicas_entry_v0 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			devs[];
} __packed;

struct bch_sb_field_replicas_v0 {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v0 entries[];
} __packed __aligned(8);

struct bch_replicas_entry_v1 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			nr_required;
	__u8			devs[];
} __packed;

#define replicas_entry_bytes(_i)					\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
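
/*
 * Worked example (illustrative): a bch_replicas_entry_v1 with nr_devs == 2
 * is offsetof(devs) + 2 == 3 + 2 == 5 bytes (the struct is __packed);
 * entries are laid out end to end within bch_sb_field_replicas.
 */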

struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v1 entries[];
} __packed __aligned(8);

/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE		32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __packed __aligned(8);

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[];
} __packed __aligned(8);

/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */
struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset entry holds */
	__u8			pad[3];

	struct bkey_i		start[0];
	__u64			_data[];
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;
	__le64			journal_seq;

	struct jset_entry	start[0];
	__u64			_data[];
};

struct journal_seq_blacklist_entry {
	__le64			start;
	__le64			end;
};

struct bch_sb_field_journal_seq_blacklist {
	struct bch_sb_field	field;
	struct journal_seq_blacklist_entry start[];
};

struct bch_sb_field_errors {
	struct bch_sb_field	field;
	struct bch_sb_field_error_entry {
		__le64		v;
		__le64		last_error_time;
	}			entries[];
};

LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,	struct bch_sb_field_error_entry, v,  0, 16);
LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,	struct bch_sb_field_error_entry, v, 16, 64);

struct bch_sb_field_ext {
	struct bch_sb_field	field;
	__le64			recovery_passes_required[2];
	__le64			errors_silent[8];
};

struct bch_sb_field_downgrade_entry {
	__le16			version;
	__le64			recovery_passes[2];
	__le16			nr_errors;
	__le16			errors[] __counted_by(nr_errors);
} __packed __aligned(2);

struct bch_sb_field_downgrade {
	struct bch_sb_field	field;
	struct bch_sb_field_downgrade_entry entries[];
};

/* Superblock: */

/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock, btree
 * nodes, journal entries
 */
#define BCH_VERSION_MAJOR(_v)		((__u16) ((_v) >> 10))
#define BCH_VERSION_MINOR(_v)		((__u16) ((_v) & ~(~0U << 10)))
#define BCH_VERSION(_major, _minor)	(((_major) << 10)|(_minor) << 0)
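
/*
 * Worked example (illustrative): BCH_VERSION(1, 4) == (1 << 10) | 4 == 1028;
 * conversely BCH_VERSION_MAJOR(1028) == 1 and BCH_VERSION_MINOR(1028) == 4.
 */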

/*
 * field 1:		version name
 * field 2:		BCH_VERSION(major, minor)
 */
#define BCH_METADATA_VERSIONS()						\
	x(bkey_renumber,		BCH_VERSION(0, 10))		\
	x(inode_btree_change,		BCH_VERSION(0, 11))		\
	x(snapshot,			BCH_VERSION(0, 12))		\
	x(inode_backpointers,		BCH_VERSION(0, 13))		\
	x(btree_ptr_sectors_written,	BCH_VERSION(0, 14))		\
	x(snapshot_2,			BCH_VERSION(0, 15))		\
	x(reflink_p_fix,		BCH_VERSION(0, 16))		\
	x(subvol_dirent,		BCH_VERSION(0, 17))		\
	x(inode_v2,			BCH_VERSION(0, 18))		\
	x(freespace,			BCH_VERSION(0, 19))		\
	x(alloc_v4,			BCH_VERSION(0, 20))		\
	x(new_data_types,		BCH_VERSION(0, 21))		\
	x(backpointers,			BCH_VERSION(0, 22))		\
	x(inode_v3,			BCH_VERSION(0, 23))		\
	x(unwritten_extents,		BCH_VERSION(0, 24))		\
	x(bucket_gens,			BCH_VERSION(0, 25))		\
	x(lru_v2,			BCH_VERSION(0, 26))		\
	x(fragmentation_lru,		BCH_VERSION(0, 27))		\
	x(no_bps_in_alloc_keys,		BCH_VERSION(0, 28))		\
	x(snapshot_trees,		BCH_VERSION(0, 29))		\
	x(major_minor,			BCH_VERSION(1,  0))		\
	x(snapshot_skiplists,		BCH_VERSION(1,  1))		\
	x(deleted_inodes,		BCH_VERSION(1,  2))		\
	x(rebalance_work,		BCH_VERSION(1,  3))		\
	x(member_seq,			BCH_VERSION(1,  4))

enum bcachefs_metadata_version {
	bcachefs_metadata_version_min = 9,
#define x(t, n)	bcachefs_metadata_version_##t = n,
	BCH_METADATA_VERSIONS()
#undef x
	bcachefs_metadata_version_max
};

static const __maybe_unused
unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;

#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)

#define BCH_SB_SECTOR			8
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */

struct bch_sb_layout {
	__uuid_t		magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 log of sb max size, in 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __packed __aligned(8);

#define BCH_SB_LAYOUT_SECTOR	7

/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd need it for
 * @magic	- identifies as a bcachefs superblock (BCHFS_MAGIC)
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	__uuid_t		magic;
	__uuid_t		uuid;
	__uuid_t		user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[7];
	__le64			write_time;
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	struct bch_sb_field	start[0];
	__le64			_data[];
} __packed __aligned(8);

/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			   DATA/META_CSUM_TYPE. Also indicates encryption
 *			   algorithm in use, if/when we get more than one
 */

LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW,		struct bch_sb, flags[4], 33, 34);
LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE,	struct bch_sb, flags[4], 34, 54);
LE64_BITMASK(BCH_SB_VERSION_UPGRADE,	struct bch_sb, flags[4], 54, 56);

LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
					struct bch_sb, flags[4], 60, 64);

LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
					struct bch_sb, flags[5],  0, 16);

static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
{
	return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
}

static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
	SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
	SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
}

static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
{
	return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
		(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
}

static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
	SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
}
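
/*
 * Illustrative note: the compression type is an 8 bit value split across two
 * bitmask fields for backwards compatibility - the _LO field holds bits 0-3
 * and the _HI field bits 4-7; the helpers above split/reassemble it.
 */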

/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_siphash
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)

#define BCH_SB_FEATURES_ALWAYS				\
	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
	 (1ULL << BCH_FEATURE_alloc_v2)|\
	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))

#define BCH_SB_FEATURES_ALL				\
	(BCH_SB_FEATURES_ALWAYS|			\
	 (1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_new_varint)|		\
	 (1ULL << BCH_FEATURE_journal_no_flush))

enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};

#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};

/* options: */

#define BCH_VERSION_UPGRADE_OPTS()	\
	x(compatible,		0)	\
	x(incompatible,		1)	\
	x(none,			2)

enum bch_version_upgrade_opts {
#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
	BCH_VERSION_UPGRADE_OPTS()
#undef x
};

#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U

#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(ro,			1)	\
	x(panic,		2)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};

#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};

#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

static const __maybe_unused unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,
	[BCH_CSUM_chacha20_poly1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128:
		return true;
	default:
		return false;
	}
}

#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};

/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the filesystem's internal UUID (sb->uuid)
 */

#define BCACHE_MAGIC							\
	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCHFS_MAGIC							\
	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)

#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
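
/*
 * Usage sketch (illustrative): this gives every filesystem its own journal
 * and btree node magic, letting us reject blocks that belong to a different
 * filesystem; e.g. a valid journal entry satisfies
 *
 *	jset->magic == cpu_to_le64(__jset_magic(sb))
 */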

/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)		\
	x(log,			9)		\
	x(overwrite,		10)		\
	x(write_buffer_keys,	11)

enum {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};

static inline bool jset_entry_is_key(struct jset_entry *e)
{
	switch (e->type) {
	case BCH_JSET_ENTRY_btree_keys:
	case BCH_JSET_ENTRY_btree_root:
	case BCH_JSET_ENTRY_overwrite:
	case BCH_JSET_ENTRY_write_buffer_keys:
		return true;
	}

	return false;
}

/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than
 * what made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};

#define BCH_FS_USAGE_TYPES()			\
	x(reserved,		0)		\
	x(inodes,		1)		\
	x(key_version,		2)

enum {
#define x(f, nr)	BCH_FS_USAGE_##f	= nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __packed;

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry_v1 r;
} __packed;

struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __packed;

struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __packed;

struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;
	__u32			pad;

	__le64			_buckets_ec;		/* No longer used */
	__le64			_buckets_unavailable;	/* No longer used */

	struct jset_entry_dev_usage_type d[];
};

static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}

struct jset_entry_log {
	struct jset_entry	entry;
	__u8			d[];
} __packed __aligned(8);

/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	struct jset_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8

/* Btree: */

enum btree_id_flags {
	BTREE_ID_EXTENTS	= BIT(0),
	BTREE_ID_SNAPSHOTS	= BIT(1),
	BTREE_ID_SNAPSHOT_FIELD	= BIT(2),
	BTREE_ID_DATA		= BIT(3),
};

#define BCH_BTREE_IDS()								\
	x(extents,		0,	BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_error)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_extent)|						\
	  BIT_ULL(KEY_TYPE_reservation)|					\
	  BIT_ULL(KEY_TYPE_reflink_p)|						\
	  BIT_ULL(KEY_TYPE_inline_data))					\
	x(inodes,		1,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_inode)|						\
	  BIT_ULL(KEY_TYPE_inode_v2)|						\
	  BIT_ULL(KEY_TYPE_inode_v3)|						\
	  BIT_ULL(KEY_TYPE_inode_generation))					\
	x(dirents,		2,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_dirent))						\
	x(xattrs,		3,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_xattr))						\
	x(alloc,		4,	0,					\
	  BIT_ULL(KEY_TYPE_alloc)|						\
	  BIT_ULL(KEY_TYPE_alloc_v2)|						\
	  BIT_ULL(KEY_TYPE_alloc_v3)|						\
	  BIT_ULL(KEY_TYPE_alloc_v4))						\
	x(quotas,		5,	0,					\
	  BIT_ULL(KEY_TYPE_quota))						\
	x(stripes,		6,	0,					\
	  BIT_ULL(KEY_TYPE_stripe))						\
	x(reflink,		7,	BTREE_ID_EXTENTS|BTREE_ID_DATA,		\
	  BIT_ULL(KEY_TYPE_reflink_v)|						\
	  BIT_ULL(KEY_TYPE_indirect_inline_data))				\
	x(subvolumes,		8,	0,					\
	  BIT_ULL(KEY_TYPE_subvolume))						\
	x(snapshots,		9,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot))						\
	x(lru,			10,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(freespace,		11,	BTREE_ID_EXTENTS,			\
	  BIT_ULL(KEY_TYPE_set))						\
	x(need_discard,		12,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(backpointers,		13,	0,					\
	  BIT_ULL(KEY_TYPE_backpointer))					\
	x(bucket_gens,		14,	0,					\
	  BIT_ULL(KEY_TYPE_bucket_gens))					\
	x(snapshot_trees,	15,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot_tree))					\
	x(deleted_inodes,	16,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_set))						\
	x(logged_ops,		17,	0,					\
	  BIT_ULL(KEY_TYPE_logged_op_truncate)|					\
	  BIT_ULL(KEY_TYPE_logged_op_finsert))					\
	x(rebalance_work,	18,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))

enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U

/* Btree nodes */

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	struct bkey_packed	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);

struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);

LE64_BITMASK(BTREE_NODE_ID_LO,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_ID_HI,	struct btree_node, flags,  9, 25);
/* 25-32 unused */
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);

static inline __u64 BTREE_NODE_ID(struct btree_node *n)
{
	return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
}

static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
{
	SET_BTREE_NODE_ID_LO(n, v);
	SET_BTREE_NODE_ID_HI(n, v >> 4);
}
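
/*
 * Illustrative note: the btree ID lives in a 4 bit LO field plus a 16 bit HI
 * field (the HI bits were added later, when the ID outgrew 4 bits), 20 bits
 * total - the helpers above split/reassemble it, e.g.
 *
 *	SET_BTREE_NODE_ID(n, BTREE_ID_extents);
 */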

struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);

#endif /* _BCACHEFS_FORMAT_H */