xref: /linux/fs/bcachefs/checksum.c (revision e04e2b760ddbe3d7b283a05898c3a029085cd8cd)
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "errcode.h"
#include "super.h"
#include "super-io.h"

#include <linux/crc32c.h>
#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>

/*
 * struct bch2_checksum_state abstracts the running state of a checksum
 * computed over multiple pages: pages can be fed in incrementally without
 * the checksum algorithm losing its state. For native checksum algorithms
 * (like crc), a default seed value suffices; for hash-like algorithms,
 * explicit state must be stored.
 */

struct bch2_checksum_state {
	union {
		u64 seed;
		struct xxh64_state h64state;
	};
	unsigned int type;
};

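/*
 * Typical usage (a minimal sketch; buf and len stand in for caller data):
 *
 *	struct bch2_checksum_state state = { .type = BCH_CSUM_crc32c };
 *
 *	bch2_checksum_init(&state);
 *	bch2_checksum_update(&state, buf, len);	(repeatable, page by page)
 *	u64 csum = bch2_checksum_final(&state);
 */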
static void bch2_checksum_init(struct bch2_checksum_state *state)
{
	switch (state->type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		state->seed = 0;
		break;
	case BCH_CSUM_crc32c_nonzero:
		state->seed = U32_MAX;
		break;
	case BCH_CSUM_crc64_nonzero:
		state->seed = U64_MAX;
		break;
	case BCH_CSUM_xxhash:
		xxh64_reset(&state->h64state, 0);
		break;
	default:
		BUG();
	}
}

static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
{
	switch (state->type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		return state->seed;
	case BCH_CSUM_crc32c_nonzero:
		return state->seed ^ U32_MAX;
	case BCH_CSUM_crc64_nonzero:
		return state->seed ^ U64_MAX;
	case BCH_CSUM_xxhash:
		return xxh64_digest(&state->h64state);
	default:
		BUG();
	}
}

static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
{
	switch (state->type) {
	case BCH_CSUM_none:
		return;
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc32c:
		state->seed = crc32c(state->seed, data, len);
		break;
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc64:
		state->seed = crc64_be(state->seed, data, len);
		break;
	case BCH_CSUM_xxhash:
		xxh64_update(&state->h64state, data, len);
		break;
	default:
		BUG();
	}
}

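/*
 * Encrypt @len bytes described by @sg in place. ChaCha20 is a stream
 * cipher, so the same operation also decrypts; "encrypt" is used for both
 * directions throughout this file.
 */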
static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
				struct nonce nonce,
				struct scatterlist *sg, size_t len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int ret;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

	ret = crypto_skcipher_encrypt(req);
	if (ret)
		pr_err("got error %i from crypto_skcipher_encrypt()", ret);

	return ret;
}

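/*
 * Linearly-mapped buffers fit in a single scatterlist entry; vmalloc
 * buffers may span physically discontiguous pages, so each page gets its
 * own entry in a temporary scatterlist.
 */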
static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
			      struct nonce nonce,
			      void *buf, size_t len)
{
	if (!is_vmalloc_addr(buf)) {
		struct scatterlist sg;

		sg_init_table(&sg, 1);
		sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
		return do_encrypt_sg(tfm, nonce, &sg, len);
	} else {
		unsigned pages = buf_pages(buf, len);
		struct scatterlist *sg;
		size_t orig_len = len;
		int ret, i;

		sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
		if (!sg)
			return -BCH_ERR_ENOMEM_do_encrypt;

		sg_init_table(sg, pages);

		for (i = 0; i < pages; i++) {
			unsigned offset = offset_in_page(buf);
			unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);

			sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
			buf += pg_len;
			len -= pg_len;
		}

		ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
		kfree(sg);
		return ret;
	}
}

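/*
 * Encrypt/decrypt a buffer with a caller-supplied key, using a transient
 * ChaCha20 transform; used below for wrapping and unwrapping the
 * superblock key with the user's key.
 */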
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
			    void *buf, size_t len)
{
	struct crypto_sync_skcipher *chacha20 =
		crypto_alloc_sync_skcipher("chacha20", 0, 0);
	int ret;

	ret = PTR_ERR_OR_ZERO(chacha20);
	if (ret) {
		pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
		return ret;
	}

	ret = crypto_skcipher_setkey(&chacha20->base,
				     (void *) key, sizeof(*key));
	if (ret) {
		pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
		goto err;
	}

	ret = do_encrypt(chacha20, nonce, buf, len);
err:
	crypto_free_sync_skcipher(chacha20);
	return ret;
}

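/*
 * Derive the one-time Poly1305 key by encrypting a block of zeroes with
 * ChaCha20, as in the standard ChaCha20-Poly1305 construction; xoring
 * BCH_NONCE_POLY into the nonce keeps this keystream distinct from the
 * one used for data.
 */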
static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
			struct nonce nonce)
{
	u8 key[POLY1305_KEY_SIZE];
	int ret;

	nonce.d[3] ^= BCH_NONCE_POLY;

	memset(key, 0, sizeof(key));
	ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
	if (ret)
		return ret;

	desc->tfm = c->poly1305;
	crypto_shash_init(desc);
	crypto_shash_update(desc, key, sizeof(key));
	return 0;
}

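/*
 * Checksum or MAC a flat buffer; for the authenticated
 * (chacha20_poly1305_*) types the Poly1305 digest is truncated to
 * bch_crc_bytes[type].
 */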
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
			      struct nonce nonce, const void *data, size_t len)
{
	switch (type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;

		bch2_checksum_init(&state);
		bch2_checksum_update(&state, data, len);

		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

		crypto_shash_update(desc, data, len);
		crypto_shash_final(desc, digest);

		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		return (struct bch_csum) {};
	}
}

int bch2_encrypt(struct bch_fs *c, unsigned type,
		  struct nonce nonce, void *data, size_t len)
{
	if (!bch2_csum_type_is_encryption(type))
		return 0;

	return do_encrypt(c->chacha20, nonce, data, len);
}

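/*
 * Checksum a bio one segment at a time; with CONFIG_HIGHMEM each page must
 * be temporarily mapped with kmap_local_page() before its contents can be
 * read.
 */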
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
					   struct nonce nonce, struct bio *bio,
					   struct bvec_iter *iter)
{
	struct bio_vec bv;

	switch (type) {
	case BCH_CSUM_none:
		return (struct bch_csum) { 0 };
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;
		bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			bch2_checksum_update(&state, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			crypto_shash_update(desc, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			crypto_shash_update(desc,
				page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		crypto_shash_final(desc, digest);

		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		return (struct bch_csum) {};
	}
}

struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
				  struct nonce nonce, struct bio *bio)
{
	struct bvec_iter iter = bio->bi_iter;

	return __bch2_checksum_bio(c, type, nonce, bio, &iter);
}

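/*
 * Encrypt a bio in place, batching bvecs into a small on-stack
 * scatterlist; each time it fills up, encrypt what has been gathered and
 * advance the nonce past those bytes so the keystream continues where it
 * left off.
 */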
int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
		     struct nonce nonce, struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	struct scatterlist sgl[16], *sg = sgl;
	size_t bytes = 0;
	int ret = 0;

	if (!bch2_csum_type_is_encryption(type))
		return 0;

	sg_init_table(sgl, ARRAY_SIZE(sgl));

	bio_for_each_segment(bv, bio, iter) {
		if (sg == sgl + ARRAY_SIZE(sgl)) {
			sg_mark_end(sg - 1);

			ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
			if (ret)
				return ret;

			nonce = nonce_add(nonce, bytes);
			bytes = 0;

			sg_init_table(sgl, ARRAY_SIZE(sgl));
			sg = sgl;
		}

		sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
		bytes += bv.bv_len;
	}

	if (sg != sgl) {
		sg_mark_end(sg - 1);
		return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
	}

	return ret;
}

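/*
 * Combine the checksums of two adjacent ranges into the checksum of their
 * concatenation without re-reading the data: extend @a over @b_len zero
 * bytes, then xor in @b. This relies on the linearity of zero-seeded CRCs,
 *
 *	crc(A || B) = crc(A || 0^len(B)) ^ crc(B)
 *
 * which is what bch2_checksum_mergeable() gates on.
 */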
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
				    struct bch_csum b, size_t b_len)
{
	struct bch2_checksum_state state;

	state.type = type;
	bch2_checksum_init(&state);
	state.seed = le64_to_cpu(a.lo);

	BUG_ON(!bch2_checksum_mergeable(type));

	while (b_len) {
		unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);

		bch2_checksum_update(&state,
				page_address(ZERO_PAGE(0)), page_len);
		b_len -= page_len;
	}
	a.lo = cpu_to_le64(bch2_checksum_final(&state));
	a.lo ^= b.lo;
	a.hi ^= b.hi;
	return a;
}

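/*
 * Recompute checksums for an extent that is being split and/or having its
 * checksum type changed: the bio is checksummed in up to three ranges
 * (crc_a, crc_b, and whatever remains), and the whole-extent checksum is
 * rederived (by merging the pieces when the type is unchanged and
 * mergeable, otherwise by a second pass over the data), then verified
 * against crc_old.csum before the new crcs are handed back.
 */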
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
			struct bversion version,
			struct bch_extent_crc_unpacked crc_old,
			struct bch_extent_crc_unpacked *crc_a,
			struct bch_extent_crc_unpacked *crc_b,
			unsigned len_a, unsigned len_b,
			unsigned new_csum_type)
{
	struct bvec_iter iter = bio->bi_iter;
	struct nonce nonce = extent_nonce(version, crc_old);
	struct bch_csum merged = { 0 };
	struct crc_split {
		struct bch_extent_crc_unpacked	*crc;
		unsigned			len;
		unsigned			csum_type;
		struct bch_csum			csum;
	} splits[3] = {
		{ crc_a, len_a, new_csum_type, { 0 } },
		{ crc_b, len_b, new_csum_type, { 0 } },
		{ NULL,	 bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
	}, *i;
	bool mergeable = crc_old.csum_type == new_csum_type &&
		bch2_checksum_mergeable(new_csum_type);
	unsigned crc_nonce = crc_old.nonce;

	BUG_ON(len_a + len_b > bio_sectors(bio));
	BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
	BUG_ON(crc_is_compressed(crc_old));
	BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
	       bch2_csum_type_is_encryption(new_csum_type));

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		iter.bi_size = i->len << 9;
		if (mergeable || i->crc)
			i->csum = __bch2_checksum_bio(c, i->csum_type,
						      nonce, bio, &iter);
		else
			bio_advance_iter(bio, &iter, i->len << 9);
		nonce = nonce_add(nonce, i->len << 9);
	}

	if (mergeable)
		for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
			merged = bch2_checksum_merge(new_csum_type, merged,
						     i->csum, i->len << 9);
	else
		merged = bch2_checksum_bio(c, crc_old.csum_type,
				extent_nonce(version, crc_old), bio);

	if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
		struct printbuf buf = PRINTBUF;
		prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n"
			   "  expected %0llx:%0llx got %0llx:%0llx (old type ",
			   __func__,
			   crc_old.csum.hi,
			   crc_old.csum.lo,
			   merged.hi,
			   merged.lo);
		bch2_prt_csum_type(&buf, crc_old.csum_type);
		prt_str(&buf, " new type ");
		bch2_prt_csum_type(&buf, new_csum_type);
		prt_str(&buf, ")");
		WARN_RATELIMIT(1, "%s", buf.buf);
		printbuf_exit(&buf);
		return -EIO;
	}

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		if (i->crc)
			*i->crc = (struct bch_extent_crc_unpacked) {
				.csum_type		= i->csum_type,
				.compression_type	= crc_old.compression_type,
				.compressed_size	= i->len,
				.uncompressed_size	= i->len,
				.offset			= 0,
				.live_size		= i->len,
				.nonce			= crc_nonce,
				.csum			= i->csum,
			};

		if (bch2_csum_type_is_encryption(new_csum_type))
			crc_nonce += i->len;
	}

	return 0;
}

/* BCH_SB_FIELD_crypt: */

static int bch2_sb_crypt_validate(struct bch_sb *sb, struct bch_sb_field *f,
				  enum bch_validate_flags flags, struct printbuf *err)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
		       vstruct_bytes(&crypt->field), sizeof(*crypt));
		return -BCH_ERR_invalid_sb_crypt;
	}

	if (BCH_CRYPT_KDF_TYPE(crypt)) {
		prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
		return -BCH_ERR_invalid_sb_crypt;
	}

	return 0;
}

static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	prt_printf(out, "KDF:               %llu\n", BCH_CRYPT_KDF_TYPE(crypt));
	prt_printf(out, "scrypt n:          %llu\n", BCH_KDF_SCRYPT_N(crypt));
	prt_printf(out, "scrypt r:          %llu\n", BCH_KDF_SCRYPT_R(crypt));
	prt_printf(out, "scrypt p:          %llu\n", BCH_KDF_SCRYPT_P(crypt));
}

const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
	.validate	= bch2_sb_crypt_validate,
	.to_text	= bch2_sb_crypt_to_text,
};

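/*
 * The encryption key lives in the keyring as a user-type key whose payload
 * is a raw struct bch_key, under the description "bcachefs:<uuid>"; in
 * userspace builds the lookup goes through keyutils instead of the
 * in-kernel keyring API.
 */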
#ifdef __KERNEL__
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	struct key *keyring_key;
	const struct user_key_payload *ukp;
	int ret;

	keyring_key = request_key(&key_type_user, key_description, NULL);
	if (IS_ERR(keyring_key))
		return PTR_ERR(keyring_key);

	down_read(&keyring_key->sem);
	ukp = dereference_key_locked(keyring_key);
	if (ukp->datalen == sizeof(*key)) {
		memcpy(key, ukp->data, ukp->datalen);
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	up_read(&keyring_key->sem);
	key_put(keyring_key);

	return ret;
}
#else
#include <keyutils.h>

static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	key_serial_t key_id;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_SESSION_KEYRING);
	if (key_id >= 0)
		goto got_key;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_USER_KEYRING);
	if (key_id >= 0)
		goto got_key;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_USER_SESSION_KEYRING);
	if (key_id >= 0)
		goto got_key;

	return -errno;
got_key:

	if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
		return -1;

	return 0;
}

#include "crypto.h"
#endif

int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
	struct printbuf key_description = PRINTBUF;
	int ret;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	ret = __bch2_request_key(key_description.buf, key);
	printbuf_exit(&key_description);

#ifndef __KERNEL__
	if (ret) {
		char *passphrase = read_passphrase("Enter passphrase: ");
		struct bch_encrypted_key sb_key;

		bch2_passphrase_check(sb, passphrase,
				      key, &sb_key);
		ret = 0;
	}
#endif

	/* stash with memfd, pass memfd fd to mount */

	return ret;
}

#ifndef __KERNEL__
int bch2_revoke_key(struct bch_sb *sb)
{
	key_serial_t key_id;
	struct printbuf key_description = PRINTBUF;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING);
	printbuf_exit(&key_description);
	if (key_id < 0)
		return -errno;

	keyctl_revoke(key_id);

	return 0;
}
#endif

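/*
 * Unwrap the superblock key: if it is encrypted, fetch the user's key from
 * the keyring and decrypt with it, then check the magic to detect a wrong
 * key. (Since ChaCha20 is a stream cipher, decryption here is the same
 * encrypt operation.)
 */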
int bch2_decrypt_sb_key(struct bch_fs *c,
			struct bch_sb_field_crypt *crypt,
			struct bch_key *key)
{
	struct bch_encrypted_key sb_key = crypt->key;
	struct bch_key user_key;
	int ret = 0;

	/* is key encrypted? */
	if (!bch2_key_is_encrypted(&sb_key))
		goto out;

	ret = bch2_request_key(c->disk_sb.sb, &user_key);
	if (ret) {
		bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
		goto err;
	}

	/* decrypt real key: */
	ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
				      &sb_key, sizeof(sb_key));
	if (ret)
		goto err;

	if (bch2_key_is_encrypted(&sb_key)) {
		bch_err(c, "incorrect encryption key");
		ret = -EINVAL;
		goto err;
	}
out:
	*key = sb_key.key;
err:
	memzero_explicit(&sb_key, sizeof(sb_key));
	memzero_explicit(&user_key, sizeof(user_key));
	return ret;
}

static int bch2_alloc_ciphers(struct bch_fs *c)
{
	if (c->chacha20)
		return 0;

	struct crypto_sync_skcipher *chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
	int ret = PTR_ERR_OR_ZERO(chacha20);
	if (ret) {
		bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
		return ret;
	}

	struct crypto_shash *poly1305 = crypto_alloc_shash("poly1305", 0, 0);
	ret = PTR_ERR_OR_ZERO(poly1305);
	if (ret) {
		bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
		crypto_free_sync_skcipher(chacha20);
		return ret;
	}

	c->chacha20	= chacha20;
	c->poly1305	= poly1305;
	return 0;
}

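/*
 * Clear the encryption type in the superblock and store the key in
 * plaintext; bails out (successfully) if the key is still wrapped with a
 * user key.
 */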
int bch2_disable_encryption(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
	if (!crypt)
		goto out;

	/* is key encrypted? */
	ret = 0;
	if (bch2_key_is_encrypted(&crypt->key))
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	crypt->key.magic	= cpu_to_le64(BCH_KEY_MAGIC);
	crypt->key.key		= key;

	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret;
}

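/*
 * Generate a fresh random encryption key, optionally (@keyed) wrap it with
 * the user's key from the keyring, and store it in a new crypt section of
 * the superblock.
 */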
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
	struct bch_encrypted_key key;
	struct bch_key user_key;
	struct bch_sb_field_crypt *crypt;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	/* Do we already have an encryption key? */
	if (bch2_sb_field_get(c->disk_sb.sb, crypt))
		goto err;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto err;

	key.magic = cpu_to_le64(BCH_KEY_MAGIC);
	get_random_bytes(&key.key, sizeof(key.key));

	if (keyed) {
		ret = bch2_request_key(c->disk_sb.sb, &user_key);
		if (ret) {
			bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
			goto err;
		}

		ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
					      &key, sizeof(key));
		if (ret)
			goto err;
	}

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto err;

	crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
				     sizeof(*crypt) / sizeof(u64));
	if (!crypt) {
		ret = -BCH_ERR_ENOSPC_sb_crypt;
		goto err;
	}

	crypt->key = key;

	/* write superblock */
	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	memzero_explicit(&user_key, sizeof(user_key));
	memzero_explicit(&key, sizeof(key));
	return ret;
}

void bch2_fs_encryption_exit(struct bch_fs *c)
{
	if (c->poly1305)
		crypto_free_shash(c->poly1305);
	if (c->chacha20)
		crypto_free_sync_skcipher(c->chacha20);
	if (c->sha256)
		crypto_free_shash(c->sha256);
}

int bch2_fs_encryption_init(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = 0;

	c->sha256 = crypto_alloc_shash("sha256", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->sha256);
	if (ret) {
		c->sha256 = NULL;
		bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
		goto out;
	}

	crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
	if (!crypt)
		goto out;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto out;
out:
	memzero_explicit(&key, sizeof(key));
	return ret;
}