/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_CHECKSUM_H
#define _BCACHEFS_CHECKSUM_H

#include "bcachefs.h"
#include "extents_types.h"
#include "super-io.h"

#include <linux/crc64.h>
#include <crypto/chacha.h>

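/*
 * True if checksums of this type over adjacent regions can be combined with
 * bch2_checksum_merge() without rereading the data - i.e. plain CRCs (and
 * "none"); keyed MACs like chacha20/poly1305 cannot be merged this way.
 */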
static inline bool bch2_checksum_mergeable(unsigned type)
{
	switch (type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		return true;
	default:
		return false;
	}
}

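/*
 * Combine the checksums of two adjacent regions into the checksum of their
 * concatenation; only meaningful for types accepted by
 * bch2_checksum_mergeable(). Sketch of the intended use, assuming the size_t
 * argument is the length covered by the second checksum:
 *
 *	merged = bch2_checksum_merge(type, csum_a, csum_b, b_len);
 */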
struct bch_csum bch2_checksum_merge(unsigned, struct bch_csum,
				    struct bch_csum, size_t);

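/*
 * Per-object-type nonce tags, XORed into the high bits of the nonce so that
 * nonces used for different on-disk structures (extents, btree nodes, the
 * journal, ...) never collide:
 */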
#define BCH_NONCE_EXTENT	cpu_to_le32(1 << 28)
#define BCH_NONCE_BTREE		cpu_to_le32(2 << 28)
#define BCH_NONCE_JOURNAL	cpu_to_le32(3 << 28)
#define BCH_NONCE_PRIO		cpu_to_le32(4 << 28)
#define BCH_NONCE_POLY		cpu_to_le32(1 << 31)

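/*
 * Compute a checksum - or, for the encryption types, a MAC - of the given
 * type over a linear buffer; the nonce is only meaningful for the keyed
 * (encryption) types.
 */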
struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce,
			     const void *, size_t);

/*
 * This is used for various on-disk data structures - bch_sb, prio_set, bset,
 * jset: the checksum is _always_ the first field of these structs
 */
#define csum_vstruct(_c, _type, _nonce, _i)				\
({									\
	const void *_start = ((const void *) (_i)) + sizeof((_i)->csum);\
									\
	bch2_checksum(_c, _type, _nonce, _start, vstruct_end(_i) - _start);\
})
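
/*
 * Sketch of verifying one of those structs - assumes @i is a vstruct (e.g. a
 * struct jset) whose checksum type and nonce the caller has already
 * determined:
 *
 *	struct bch_csum got = csum_vstruct(c, type, nonce, i);
 *	if (bch2_crc_cmp(got, i->csum))
 *		-> checksum mismatch
 */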

static inline void bch2_csum_to_text(struct printbuf *out,
				     enum bch_csum_type type,
				     struct bch_csum csum)
{
	const u8 *p = (u8 *) &csum;
	unsigned bytes = type < BCH_CSUM_NR ? bch_crc_bytes[type] : 16;

	for (unsigned i = 0; i < bytes; i++)
		prt_hex_byte(out, p[i]);
}

static inline void bch2_csum_err_msg(struct printbuf *out,
				     enum bch_csum_type type,
				     struct bch_csum expected,
				     struct bch_csum got)
{
	prt_str(out, "checksum error, type ");
	bch2_prt_csum_type(out, type);
	prt_str(out, ": got ");
	bch2_csum_to_text(out, type, got);
	prt_str(out, " should be ");
	bch2_csum_to_text(out, type, expected);
}

int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
int bch2_request_key(struct bch_sb *, struct bch_key *);
#ifndef __KERNEL__
int bch2_revoke_key(struct bch_sb *);
#endif

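/*
 * Encrypt/decrypt @data in place; because ChaCha20 is a stream cipher,
 * encryption and decryption are the same operation, so there is no separate
 * decrypt helper.
 */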
int bch2_encrypt(struct bch_fs *, unsigned, struct nonce,
		 void *data, size_t);

struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned,
				  struct nonce, struct bio *);

int bch2_rechecksum_bio(struct bch_fs *, struct bio *, struct bversion,
			struct bch_extent_crc_unpacked,
			struct bch_extent_crc_unpacked *,
			struct bch_extent_crc_unpacked *,
			unsigned, unsigned, unsigned);

int __bch2_encrypt_bio(struct bch_fs *, unsigned,
		       struct nonce, struct bio *);

static inline int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
				   struct nonce nonce, struct bio *bio)
{
	return bch2_csum_type_is_encryption(type)
		? __bch2_encrypt_bio(c, type, nonce, bio)
		: 0;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_crypt;

int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *,
			struct bch_key *);

int bch2_disable_encryption(struct bch_fs *);
int bch2_enable_encryption(struct bch_fs *, bool);

void bch2_fs_encryption_exit(struct bch_fs *);
int bch2_fs_encryption_init(struct bch_fs *);

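/*
 * Map the user-visible checksum option to an on-disk checksum type. Metadata
 * uses the _nonzero variants - which never produce an all-zero checksum -
 * presumably so that a zeroed-out checksum field can never pass verification.
 */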
static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type,
						       bool data)
{
	switch (type) {
	case BCH_CSUM_OPT_none:
		return BCH_CSUM_none;
	case BCH_CSUM_OPT_crc32c:
		return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
	case BCH_CSUM_OPT_crc64:
		return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
	case BCH_CSUM_OPT_xxhash:
		return BCH_CSUM_xxhash;
	default:
		BUG();
	}
}

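/*
 * Checksum type for data writes: nocow extents get no checksum, since they
 * may be overwritten in place and a stored checksum would go stale; on
 * encrypted filesystems data always gets an authenticating chacha20/poly1305
 * MAC, with opts.wide_macs selecting the full 128 bit tag over the truncated
 * 80 bit one.
 */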
static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c,
							 struct bch_io_opts opts)
{
	if (opts.nocow)
		return 0;

	if (c->sb.encryption_type)
		return c->opts.wide_macs
			? BCH_CSUM_chacha20_poly1305_128
			: BCH_CSUM_chacha20_poly1305_80;

	return bch2_csum_opt_to_type(opts.data_checksum, true);
}

static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c)
{
	if (c->sb.encryption_type)
		return BCH_CSUM_chacha20_poly1305_128;

	return bch2_csum_opt_to_type(c->opts.metadata_checksum, false);
}

static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
					   unsigned type)
{
	if (type >= BCH_CSUM_NR)
		return false;

	if (bch2_csum_type_is_encryption(type) && !c->chacha20)
		return false;

	return true;
}

/* returns true if not equal */
static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
{
	/*
	 * XXX: need some way of preventing the compiler from optimizing this
	 * into a form that isn't constant time..
	 */
	return ((l.lo ^ r.lo) | (l.hi ^ r.hi)) != 0;
}

/* for skipping ahead and encrypting/decrypting at an offset: */
static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
{
	EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1));

	le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE);
	return nonce;
}

static inline struct nonce null_nonce(void)
{
	struct nonce ret;

	memset(&ret, 0, sizeof(ret));
	return ret;
}

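/*
 * Nonce for extent data: word 0 carries the uncompressed size (compressed
 * extents only), words 1-2 the version, word 3 the high version bits plus the
 * compression type, tagged with BCH_NONCE_EXTENT; the result is then advanced
 * by crc.nonce, which is in 512 byte sector units.
 *
 * Sketch of decrypting partway into an extent (@offset in bytes, which must
 * be CHACHA_BLOCK_SIZE aligned):
 *
 *	struct nonce nonce = extent_nonce(version, crc);
 *	bch2_encrypt_bio(c, type, nonce_add(nonce, offset), bio);
 */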
static inline struct nonce extent_nonce(struct bversion version,
					struct bch_extent_crc_unpacked crc)
{
	unsigned compression_type = crc_is_compressed(crc)
		? crc.compression_type
		: 0;
	unsigned size = compression_type ? crc.uncompressed_size : 0;
	struct nonce nonce = (struct nonce) {{
		[0] = cpu_to_le32(size << 22),
		[1] = cpu_to_le32(version.lo),
		[2] = cpu_to_le32(version.lo >> 32),
		[3] = cpu_to_le32(version.hi |
				  (compression_type << 24)) ^ BCH_NONCE_EXTENT,
	}};

	return nonce_add(nonce, crc.nonce << 9);
}

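/*
 * The unencrypted form of the key stores BCH_KEY_MAGIC in the clear; if the
 * magic doesn't match, the key is still in its encrypted (wrapped) form and
 * must be decrypted first - see bch2_decrypt_sb_key().
 */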
static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key)
{
	return le64_to_cpu(key->magic) != BCH_KEY_MAGIC;
}

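/*
 * Nonce used when encrypting/decrypting the key stored in the superblock,
 * derived from the filesystem's superblock magic:
 */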
static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb)
{
	__le64 magic = __bch2_sb_magic(sb);

	return (struct nonce) {{
		[0] = 0,
		[1] = 0,
		[2] = ((__le32 *) &magic)[0],
		[3] = ((__le32 *) &magic)[1],
	}};
}

static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c)
{
	__le64 magic = bch2_sb_magic(c);

	return (struct nonce) {{
		[0] = 0,
		[1] = 0,
		[2] = ((__le32 *) &magic)[0],
		[3] = ((__le32 *) &magic)[1],
	}};
}

#endif /* _BCACHEFS_CHECKSUM_H */