xref: /linux/fs/bcachefs/checksum.c (revision a4ff64edf9edc8f05e2183610dc8306d3279c6ac)
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "errcode.h"
#include "super.h"
#include "super-io.h"

#include <linux/crc32c.h>
#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>

/*
 * bch2_checksum_state is an abstraction of the checksum state calculated over different pages.
 * It features page merging without having the checksum algorithm lose its state.
 * For native checksum algorithms (like crc), a default seed value will do.
 * For hash-like algorithms, a state needs to be stored.
 */

struct bch2_checksum_state {
	union {
		u64 seed;
		struct xxh64_state h64state;
	};
	unsigned int type;
};

static void bch2_checksum_init(struct bch2_checksum_state *state)
{
	switch (state->type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		state->seed = 0;
		break;
	case BCH_CSUM_crc32c_nonzero:
		state->seed = U32_MAX;
		break;
	case BCH_CSUM_crc64_nonzero:
		state->seed = U64_MAX;
		break;
	case BCH_CSUM_xxhash:
		xxh64_reset(&state->h64state, 0);
		break;
	default:
		BUG();
	}
}

static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
{
	switch (state->type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		return state->seed;
	case BCH_CSUM_crc32c_nonzero:
		return state->seed ^ U32_MAX;
	case BCH_CSUM_crc64_nonzero:
		return state->seed ^ U64_MAX;
	case BCH_CSUM_xxhash:
		return xxh64_digest(&state->h64state);
	default:
		BUG();
	}
}

static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
{
	switch (state->type) {
	case BCH_CSUM_none:
		return;
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc32c:
		state->seed = crc32c(state->seed, data, len);
		break;
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc64:
		state->seed = crc64_be(state->seed, data, len);
		break;
	case BCH_CSUM_xxhash:
		xxh64_update(&state->h64state, data, len);
		break;
	default:
		BUG();
	}
}

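/*
 * Encrypt/decrypt @sg in place with ChaCha20 (a stream cipher, so the same
 * operation both encrypts and decrypts), using @nonce as the IV:
 */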
static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
				struct nonce nonce,
				struct scatterlist *sg, size_t len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int ret;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

	ret = crypto_skcipher_encrypt(req);
	if (ret)
		pr_err("got error %i from crypto_skcipher_encrypt()", ret);

	return ret;
}

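/*
 * Encrypt @buf in place; a vmalloc'd buffer may span discontiguous pages and
 * needs one scatterlist entry per page:
 */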
static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
			      struct nonce nonce,
			      void *buf, size_t len)
{
	if (!is_vmalloc_addr(buf)) {
		struct scatterlist sg;

		sg_init_table(&sg, 1);
		sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
		return do_encrypt_sg(tfm, nonce, &sg, len);
	} else {
		unsigned pages = buf_pages(buf, len);
		struct scatterlist *sg;
		size_t orig_len = len;
		int ret, i;

		sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
		if (!sg)
			return -BCH_ERR_ENOMEM_do_encrypt;

		sg_init_table(sg, pages);

		for (i = 0; i < pages; i++) {
			unsigned offset = offset_in_page(buf);
			unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);

			sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
			buf += pg_len;
			len -= pg_len;
		}

		ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
		kfree(sg);
		return ret;
	}
}

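/*
 * One-shot ChaCha20 transform of @buf with @key - used in this file for
 * encrypting/decrypting the superblock key with the user's key:
 */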
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
			    void *buf, size_t len)
{
	struct crypto_sync_skcipher *chacha20 =
		crypto_alloc_sync_skcipher("chacha20", 0, 0);
	int ret;

	ret = PTR_ERR_OR_ZERO(chacha20);
	if (ret) {
		pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
		return ret;
	}

	ret = crypto_skcipher_setkey(&chacha20->base,
				     (void *) key, sizeof(*key));
	if (ret) {
		pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
		goto err;
	}

	ret = do_encrypt(chacha20, nonce, buf, len);
err:
	crypto_free_sync_skcipher(chacha20);
	return ret;
}

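/*
 * Set up a Poly1305 shash: the one-time key is the first 32 bytes of the
 * ChaCha20 keystream for @nonce (with BCH_NONCE_POLY flipped in), fed to the
 * shash as the initial update:
 */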
static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
			struct nonce nonce)
{
	u8 key[POLY1305_KEY_SIZE];
	int ret;

	nonce.d[3] ^= BCH_NONCE_POLY;

	memset(key, 0, sizeof(key));
	ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
	if (ret)
		return ret;

	desc->tfm = c->poly1305;
	crypto_shash_init(desc);
	crypto_shash_update(desc, key, sizeof(key));
	return 0;
}

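/*
 * Compute the checksum or MAC of a linear buffer; @nonce is only used by the
 * chacha20/poly1305 MAC types:
 */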
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
			      struct nonce nonce, const void *data, size_t len)
{
	switch (type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;

		bch2_checksum_init(&state);
		bch2_checksum_update(&state, data, len);

		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

		crypto_shash_update(desc, data, len);
		crypto_shash_final(desc, digest);

		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		BUG();
	}
}

int bch2_encrypt(struct bch_fs *c, unsigned type,
		  struct nonce nonce, void *data, size_t len)
{
	if (!bch2_csum_type_is_encryption(type))
		return 0;

	return do_encrypt(c->chacha20, nonce, data, len);
}

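/*
 * Checksum the data described by @iter within @bio, kmapping highmem pages as
 * needed; @iter is advanced past the checksummed range:
 */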
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
					   struct nonce nonce, struct bio *bio,
					   struct bvec_iter *iter)
{
	struct bio_vec bv;

	switch (type) {
	case BCH_CSUM_none:
		return (struct bch_csum) { 0 };
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;
		bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			bch2_checksum_update(&state, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			crypto_shash_update(desc, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			crypto_shash_update(desc,
				page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		crypto_shash_final(desc, digest);

		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		BUG();
	}
}

struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
				  struct nonce nonce, struct bio *bio)
{
	struct bvec_iter iter = bio->bi_iter;

	return __bch2_checksum_bio(c, type, nonce, bio, &iter);
}

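/*
 * Encrypt a bio in place: batch the bvecs into scatterlists of at most 16
 * entries, advancing the nonce by the bytes encrypted after each batch:
 */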
int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
		     struct nonce nonce, struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	struct scatterlist sgl[16], *sg = sgl;
	size_t bytes = 0;
	int ret = 0;

	if (!bch2_csum_type_is_encryption(type))
		return 0;

	sg_init_table(sgl, ARRAY_SIZE(sgl));

	bio_for_each_segment(bv, bio, iter) {
		if (sg == sgl + ARRAY_SIZE(sgl)) {
			sg_mark_end(sg - 1);

			ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
			if (ret)
				return ret;

			nonce = nonce_add(nonce, bytes);
			bytes = 0;

			sg_init_table(sgl, ARRAY_SIZE(sgl));
			sg = sgl;
		}

		sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
		bytes += bv.bv_len;
	}

	sg_mark_end(sg - 1);
	return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
}

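/*
 * Merge the checksums of two adjacent regions, a followed by b: for a crc
 * with zero seed, crc(concat(A, B)) == crc(concat(A, zeroes(len(B)))) ^ crc(B),
 * so extend @a's state with @b_len zero bytes (via ZERO_PAGE) and xor in @b.
 * Only valid for checksum types where this identity holds, hence the
 * bch2_checksum_mergeable() assertion:
 */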
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
				    struct bch_csum b, size_t b_len)
{
	struct bch2_checksum_state state;

	state.type = type;
	bch2_checksum_init(&state);
	state.seed = le64_to_cpu(a.lo);

	BUG_ON(!bch2_checksum_mergeable(type));

	while (b_len) {
		unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);

		bch2_checksum_update(&state,
				page_address(ZERO_PAGE(0)), page_len);
		b_len -= page_len;
	}
	a.lo = cpu_to_le64(bch2_checksum_final(&state));
	a.lo ^= b.lo;
	a.hi ^= b.hi;
	return a;
}

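/*
 * Recompute checksums for an extent that's being split (and possibly switched
 * to a new checksum type): checksum @bio in up to three pieces (@len_a,
 * @len_b, and the remainder), verify the pieces against the old checksum, and
 * fill in new unpacked crcs for the pieces the caller asked for:
 */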
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
			struct bversion version,
			struct bch_extent_crc_unpacked crc_old,
			struct bch_extent_crc_unpacked *crc_a,
			struct bch_extent_crc_unpacked *crc_b,
			unsigned len_a, unsigned len_b,
			unsigned new_csum_type)
{
	struct bvec_iter iter = bio->bi_iter;
	struct nonce nonce = extent_nonce(version, crc_old);
	struct bch_csum merged = { 0 };
	struct crc_split {
		struct bch_extent_crc_unpacked	*crc;
		unsigned			len;
		unsigned			csum_type;
		struct bch_csum			csum;
	} splits[3] = {
		{ crc_a, len_a, new_csum_type, { 0 } },
		{ crc_b, len_b, new_csum_type, { 0 } },
		{ NULL,	 bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
	}, *i;
	bool mergeable = crc_old.csum_type == new_csum_type &&
		bch2_checksum_mergeable(new_csum_type);
	unsigned crc_nonce = crc_old.nonce;

	BUG_ON(len_a + len_b > bio_sectors(bio));
	BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
	BUG_ON(crc_is_compressed(crc_old));
	BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
	       bch2_csum_type_is_encryption(new_csum_type));

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		iter.bi_size = i->len << 9;
		if (mergeable || i->crc)
			i->csum = __bch2_checksum_bio(c, i->csum_type,
						      nonce, bio, &iter);
		else
			bio_advance_iter(bio, &iter, i->len << 9);
		nonce = nonce_add(nonce, i->len << 9);
	}

	if (mergeable)
		for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
			merged = bch2_checksum_merge(new_csum_type, merged,
						     i->csum, i->len << 9);
	else
		merged = bch2_checksum_bio(c, crc_old.csum_type,
				extent_nonce(version, crc_old), bio);

	if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
		bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
			"expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
			__func__,
			crc_old.csum.hi,
			crc_old.csum.lo,
			merged.hi,
			merged.lo,
			bch2_csum_types[crc_old.csum_type],
			bch2_csum_types[new_csum_type]);
		return -EIO;
	}

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		if (i->crc)
			*i->crc = (struct bch_extent_crc_unpacked) {
				.csum_type		= i->csum_type,
				.compression_type	= crc_old.compression_type,
				.compressed_size	= i->len,
				.uncompressed_size	= i->len,
				.offset			= 0,
				.live_size		= i->len,
				.nonce			= crc_nonce,
				.csum			= i->csum,
			};

		if (bch2_csum_type_is_encryption(new_csum_type))
			crc_nonce += i->len;
	}

	return 0;
}

/* BCH_SB_FIELD_crypt: */

static int bch2_sb_crypt_validate(struct bch_sb *sb,
				  struct bch_sb_field *f,
				  struct printbuf *err)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
		       vstruct_bytes(&crypt->field), sizeof(*crypt));
		return -BCH_ERR_invalid_sb_crypt;
	}

	if (BCH_CRYPT_KDF_TYPE(crypt)) {
		prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
		return -BCH_ERR_invalid_sb_crypt;
	}

	return 0;
}

static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	prt_printf(out, "KDF:               %llu", BCH_CRYPT_KDF_TYPE(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt n:          %llu", BCH_KDF_SCRYPT_N(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt r:          %llu", BCH_KDF_SCRYPT_R(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt p:          %llu", BCH_KDF_SCRYPT_P(crypt));
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
	.validate	= bch2_sb_crypt_validate,
	.to_text	= bch2_sb_crypt_to_text,
};

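/*
 * Fetch the user's encryption key from the keyring; in userspace, try the
 * session, user, and user-session keyrings in turn:
 */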
#ifdef __KERNEL__
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	struct key *keyring_key;
	const struct user_key_payload *ukp;
	int ret;

	keyring_key = request_key(&key_type_user, key_description, NULL);
	if (IS_ERR(keyring_key))
		return PTR_ERR(keyring_key);

	down_read(&keyring_key->sem);
	ukp = dereference_key_locked(keyring_key);
	if (ukp->datalen == sizeof(*key)) {
		memcpy(key, ukp->data, ukp->datalen);
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	up_read(&keyring_key->sem);
	key_put(keyring_key);

	return ret;
}
#else
#include <keyutils.h>

static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	key_serial_t key_id;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_SESSION_KEYRING);
	if (key_id >= 0)
		goto got_key;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_USER_KEYRING);
	if (key_id >= 0)
		goto got_key;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_USER_SESSION_KEYRING);
	if (key_id >= 0)
		goto got_key;

	return -errno;
got_key:

	if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
		return -1;

	return 0;
}

#include "../crypto.h"
#endif

int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
	struct printbuf key_description = PRINTBUF;
	int ret;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	ret = __bch2_request_key(key_description.buf, key);
	printbuf_exit(&key_description);

#ifndef __KERNEL__
	if (ret) {
		char *passphrase = read_passphrase("Enter passphrase: ");
		struct bch_encrypted_key sb_key;

		bch2_passphrase_check(sb, passphrase,
				      key, &sb_key);
		ret = 0;
	}
#endif

	/* stash with memfd, pass memfd fd to mount */

	return ret;
}

#ifndef __KERNEL__
int bch2_revoke_key(struct bch_sb *sb)
{
	key_serial_t key_id;
	struct printbuf key_description = PRINTBUF;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING);
	printbuf_exit(&key_description);
	if (key_id < 0)
		return errno;

	keyctl_revoke(key_id);

	return 0;
}
#endif

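/*
 * Decrypt the superblock's encryption key with the user's key, verifying the
 * result by checking for the expected key magic:
 */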
int bch2_decrypt_sb_key(struct bch_fs *c,
			struct bch_sb_field_crypt *crypt,
			struct bch_key *key)
{
	struct bch_encrypted_key sb_key = crypt->key;
	struct bch_key user_key;
	int ret = 0;

	/* is key encrypted? */
	if (!bch2_key_is_encrypted(&sb_key))
		goto out;

	ret = bch2_request_key(c->disk_sb.sb, &user_key);
	if (ret) {
		bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
		goto err;
	}

	/* decrypt real key: */
	ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
				      &sb_key, sizeof(sb_key));
	if (ret)
		goto err;

	if (bch2_key_is_encrypted(&sb_key)) {
		bch_err(c, "incorrect encryption key");
		ret = -EINVAL;
		goto err;
	}
out:
	*key = sb_key.key;
err:
	memzero_explicit(&sb_key, sizeof(sb_key));
	memzero_explicit(&user_key, sizeof(user_key));
	return ret;
}

static int bch2_alloc_ciphers(struct bch_fs *c)
{
	int ret;

	if (!c->chacha20)
		c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->chacha20);

	if (ret) {
		bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
		return ret;
	}

	if (!c->poly1305)
		c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->poly1305);

	if (ret) {
		bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
		return ret;
	}

	return 0;
}

int bch2_disable_encryption(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
	if (!crypt)
		goto out;

	/* is key encrypted? */
	ret = 0;
	if (bch2_key_is_encrypted(&crypt->key))
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	crypt->key.magic	= cpu_to_le64(BCH_KEY_MAGIC);
	crypt->key.key		= key;

	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret;
}

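/*
 * Generate a new random encryption key and store it in the superblock; if
 * @keyed, first encrypt it with the user's key so the on-disk copy is itself
 * protected by the passphrase:
 */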
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
	struct bch_encrypted_key key;
	struct bch_key user_key;
	struct bch_sb_field_crypt *crypt;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	/* Do we already have an encryption key? */
	if (bch2_sb_field_get(c->disk_sb.sb, crypt))
		goto err;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto err;

	key.magic = cpu_to_le64(BCH_KEY_MAGIC);
	get_random_bytes(&key.key, sizeof(key.key));

	if (keyed) {
		ret = bch2_request_key(c->disk_sb.sb, &user_key);
		if (ret) {
			bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
			goto err;
		}

		ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
					      &key, sizeof(key));
		if (ret)
			goto err;
	}

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto err;

	crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
				     sizeof(*crypt) / sizeof(u64));
	if (!crypt) {
		ret = -BCH_ERR_ENOSPC_sb_crypt;
		goto err;
	}

	crypt->key = key;

	/* write superblock */
	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	memzero_explicit(&user_key, sizeof(user_key));
	memzero_explicit(&key, sizeof(key));
	return ret;
}

void bch2_fs_encryption_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->poly1305))
		crypto_free_shash(c->poly1305);
	if (!IS_ERR_OR_NULL(c->chacha20))
		crypto_free_sync_skcipher(c->chacha20);
	if (!IS_ERR_OR_NULL(c->sha256))
		crypto_free_shash(c->sha256);
}

int bch2_fs_encryption_init(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = 0;

	c->sha256 = crypto_alloc_shash("sha256", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->sha256);
	if (ret) {
		bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
		goto out;
	}

	crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
	if (!crypt)
		goto out;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto out;
out:
	memzero_explicit(&key, sizeof(key));
	return ret;
}
805