1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "checksum.h"
4 #include "errcode.h"
5 #include "error.h"
6 #include "super.h"
7 #include "super-io.h"
8
9 #include <linux/crc32c.h>
10 #include <linux/crypto.h>
11 #include <linux/xxhash.h>
12 #include <linux/key.h>
13 #include <linux/random.h>
14 #include <linux/ratelimit.h>
15 #include <linux/scatterlist.h>
16 #include <crypto/algapi.h>
17 #include <crypto/chacha.h>
18 #include <crypto/hash.h>
19 #include <crypto/poly1305.h>
20 #include <crypto/skcipher.h>
21 #include <keys/user-type.h>
22
23 /*
24 * bch2_checksum state is an abstraction of the checksum state calculated over different pages.
25 * it features page merging without having the checksum algorithm lose its state.
26 * for native checksum aglorithms (like crc), a default seed value will do.
27 * for hash-like algorithms, a state needs to be stored
28 */
29
/*
 * Running checksum state; 'type' (a BCH_CSUM_* value) selects which
 * union member is live: 'seed' for the CRC variants, 'h64state' for
 * xxhash, which keeps full internal hashing state.
 */
struct bch2_checksum_state {
	union {
		u64 seed;
		struct xxh64_state h64state;
	};
	unsigned int type;
};
37
bch2_checksum_init(struct bch2_checksum_state * state)38 static void bch2_checksum_init(struct bch2_checksum_state *state)
39 {
40 switch (state->type) {
41 case BCH_CSUM_none:
42 case BCH_CSUM_crc32c:
43 case BCH_CSUM_crc64:
44 state->seed = 0;
45 break;
46 case BCH_CSUM_crc32c_nonzero:
47 state->seed = U32_MAX;
48 break;
49 case BCH_CSUM_crc64_nonzero:
50 state->seed = U64_MAX;
51 break;
52 case BCH_CSUM_xxhash:
53 xxh64_reset(&state->h64state, 0);
54 break;
55 default:
56 BUG();
57 }
58 }
59
bch2_checksum_final(const struct bch2_checksum_state * state)60 static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
61 {
62 switch (state->type) {
63 case BCH_CSUM_none:
64 case BCH_CSUM_crc32c:
65 case BCH_CSUM_crc64:
66 return state->seed;
67 case BCH_CSUM_crc32c_nonzero:
68 return state->seed ^ U32_MAX;
69 case BCH_CSUM_crc64_nonzero:
70 return state->seed ^ U64_MAX;
71 case BCH_CSUM_xxhash:
72 return xxh64_digest(&state->h64state);
73 default:
74 BUG();
75 }
76 }
77
/*
 * Fold @len bytes at @data into the running checksum @state.
 * May be called repeatedly to checksum discontiguous buffers.
 */
static void bch2_checksum_update(struct bch2_checksum_state *state,
				 const void *data, size_t len)
{
	switch (state->type) {
	case BCH_CSUM_xxhash:
		xxh64_update(&state->h64state, data, len);
		break;
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc32c_nonzero:
		state->seed = crc32c(state->seed, data, len);
		break;
	case BCH_CSUM_crc64:
	case BCH_CSUM_crc64_nonzero:
		state->seed = crc64_be(state->seed, data, len);
		break;
	case BCH_CSUM_none:
		break;
	default:
		BUG();
	}
}
98
/*
 * Encrypt @len bytes described by @sg in place with the chacha20 tfm,
 * using a synchronous on-stack skcipher request.  Returns 0 or an
 * error from crypto_skcipher_encrypt().
 */
static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
				struct nonce nonce,
				struct scatterlist *sg, size_t len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	skcipher_request_set_sync_tfm(req, tfm);
	/* no flags/callback: the sync tfm completes before returning */
	skcipher_request_set_callback(req, 0, NULL, NULL);
	/* src == dst: in-place encryption; the nonce serves as the IV */
	skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

	int ret = crypto_skcipher_encrypt(req);
	if (ret)
		pr_err("got error %i from crypto_skcipher_encrypt()", ret);

	return ret;
}
115
/*
 * Encrypt @len bytes at @buf in place.
 *
 * Directly-mapped buffers are physically contiguous and become a
 * single scatterlist entry.  vmalloc buffers are only virtually
 * contiguous, so each page gets its own entry; if growing the darray
 * fails, the entries batched so far are encrypted and the nonce
 * advanced past them before continuing with the preallocated space.
 */
static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
			     struct nonce nonce,
			     void *buf, size_t len)
{
	if (!is_vmalloc_addr(buf)) {
		struct scatterlist sg = {};

		sg_mark_end(&sg);
		sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
		return do_encrypt_sg(tfm, nonce, &sg, len);
	} else {
		DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
		size_t sgl_len = 0;
		int ret;

		darray_init(&sgl);

		while (len) {
			unsigned offset = offset_in_page(buf);
			struct scatterlist sg = {
				.page_link	= (unsigned long) vmalloc_to_page(buf),
				.offset		= offset,
				/* clamp each entry to the end of its page */
				.length		= min(len, PAGE_SIZE - offset),
			};

			if (darray_push(&sgl, sg)) {
				/*
				 * Couldn't grow the darray (ENOMEM): flush
				 * what's batched, advance the nonce past the
				 * flushed bytes, then reuse the preallocated
				 * slots — the retry push cannot fail.
				 */
				sg_mark_end(&darray_last(sgl));
				ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
				if (ret)
					goto err;

				nonce = nonce_add(nonce, sgl_len);
				sgl_len = 0;
				sgl.nr = 0;
				BUG_ON(darray_push(&sgl, sg));
			}

			buf += sg.length;
			len -= sg.length;
			sgl_len += sg.length;
		}

		sg_mark_end(&darray_last(sgl));
		ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
err:
		darray_exit(&sgl);
		return ret;
	}
}
165
/*
 * Encrypt/decrypt @len bytes at @buf with chacha20 keyed by @key,
 * allocating a throwaway tfm for the operation (used for wrapping and
 * unwrapping the superblock key with the user's key).  Returns 0 or a
 * negative error.
 */
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
			    void *buf, size_t len)
{
	struct crypto_sync_skcipher *tfm;
	int ret;

	tfm = crypto_alloc_sync_skcipher("chacha20", 0, 0);
	ret = PTR_ERR_OR_ZERO(tfm);
	if (ret) {
		pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
		return ret;
	}

	ret = crypto_skcipher_setkey(&tfm->base, (void *) key, sizeof(*key));
	if (ret) {
		pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
		goto out;
	}

	ret = do_encrypt(tfm, nonce, buf, len);
out:
	crypto_free_sync_skcipher(tfm);
	return ret;
}
191
gen_poly_key(struct bch_fs * c,struct shash_desc * desc,struct nonce nonce)192 static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
193 struct nonce nonce)
194 {
195 u8 key[POLY1305_KEY_SIZE];
196 int ret;
197
198 nonce.d[3] ^= BCH_NONCE_POLY;
199
200 memset(key, 0, sizeof(key));
201 ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
202 if (ret)
203 return ret;
204
205 desc->tfm = c->poly1305;
206 crypto_shash_init(desc);
207 crypto_shash_update(desc, key, sizeof(key));
208 return 0;
209 }
210
/*
 * Checksum @len bytes at @data with checksum type @type.
 *
 * CRC/xxhash types produce a value in .lo only; the poly1305 types
 * produce up to 128 bits, truncated to bch_crc_bytes[@type] (80 or
 * 128 bits).  @nonce is only used by the poly1305 types.  Unknown
 * types return a zero csum.
 */
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
			      struct nonce nonce, const void *data, size_t len)
{
	switch (type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;

		bch2_checksum_init(&state);
		bch2_checksum_update(&state, data, len);

		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		/* NOTE(review): gen_poly_key() can fail but the return is
		 * ignored here — a failure would produce a bogus MAC */
		gen_poly_key(c, desc, nonce);

		crypto_shash_update(desc, data, len);
		crypto_shash_final(desc, digest);

		/* truncate digest to the width this csum type stores */
		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		return (struct bch_csum) {};
	}
}
249
/*
 * Encrypt @len bytes at @data in place, iff @type is an encrypting
 * checksum type; a no-op (returning 0) otherwise.  Errors if the
 * filesystem has no encryption key loaded.
 */
int bch2_encrypt(struct bch_fs *c, unsigned type,
		 struct nonce nonce, void *data, size_t len)
{
	if (!bch2_csum_type_is_encryption(type))
		return 0;

	if (bch2_fs_inconsistent_on(!c->chacha20,
			c, "attempting to encrypt without encryption key"))
		return -BCH_ERR_no_encryption_key;

	return do_encrypt(c->chacha20, nonce, data, len);
}
262
/*
 * Checksum the data covered by @iter within @bio.
 *
 * @iter is advanced as the bio is walked (the __bio_for_each_* forms
 * update it in place), which bch2_rechecksum_bio() relies on to
 * checksum consecutive sub-ranges with successive calls.
 *
 * With CONFIG_HIGHMEM, pages may not be kernel-mapped, so each segment
 * is kmapped around the update; otherwise page_address() suffices and
 * we can walk whole bvecs.
 */
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
					   struct nonce nonce, struct bio *bio,
					   struct bvec_iter *iter)
{
	struct bio_vec bv;

	switch (type) {
	case BCH_CSUM_none:
		return (struct bch_csum) { 0 };
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;
		bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			bch2_checksum_update(&state, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		/* NOTE(review): gen_poly_key() return value ignored, as in
		 * bch2_checksum() */
		gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			crypto_shash_update(desc, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			crypto_shash_update(desc,
				page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		crypto_shash_final(desc, digest);

		/* truncate digest to the width this csum type stores */
		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		return (struct bch_csum) {};
	}
}
327
bch2_checksum_bio(struct bch_fs * c,unsigned type,struct nonce nonce,struct bio * bio)328 struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
329 struct nonce nonce, struct bio *bio)
330 {
331 struct bvec_iter iter = bio->bi_iter;
332
333 return __bch2_checksum_bio(c, type, nonce, bio, &iter);
334 }
335
/*
 * Encrypt @bio's data in place, batching segments into a scatterlist.
 *
 * Mirrors the vmalloc path of do_encrypt(): if the darray can't grow,
 * the batched entries are encrypted, the nonce advanced past them, and
 * batching restarts in the preallocated slots.  Errors if no
 * encryption key is loaded.
 */
int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
		       struct nonce nonce, struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
	size_t sgl_len = 0;
	int ret = 0;

	if (bch2_fs_inconsistent_on(!c->chacha20,
			c, "attempting to encrypt without encryption key"))
		return -BCH_ERR_no_encryption_key;

	darray_init(&sgl);

	bio_for_each_segment(bv, bio, iter) {
		struct scatterlist sg = {
			.page_link	= (unsigned long) bv.bv_page,
			.offset		= bv.bv_offset,
			.length		= bv.bv_len,
		};

		if (darray_push(&sgl, sg)) {
			/* out of memory growing the darray: flush and retry
			 * the push into the preallocated space */
			sg_mark_end(&darray_last(sgl));
			ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
			if (ret)
				goto err;

			nonce = nonce_add(nonce, sgl_len);
			sgl_len = 0;
			sgl.nr = 0;

			BUG_ON(darray_push(&sgl, sg));
		}

		sgl_len += sg.length;
	}

	sg_mark_end(&darray_last(sgl));
	ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
err:
	darray_exit(&sgl);
	return ret;
}
380
/*
 * Compute the checksum of the concatenation A ++ B given csum(A) in @a
 * (over a bytes of data) and csum(B) in @b (over @b_len bytes).
 *
 * Only valid for mergeable (CRC) types: csum(A) is extended over
 * @b_len zero bytes (a page of zeroes at a time), and by CRC linearity
 * xoring in csum(B) then yields csum(A ++ B).
 */
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
				    struct bch_csum b, size_t b_len)
{
	struct bch2_checksum_state state;

	/* check the precondition before any state is set up or seeded */
	BUG_ON(!bch2_checksum_mergeable(type));

	state.type = type;
	bch2_checksum_init(&state);
	state.seed = le64_to_cpu(a.lo);

	while (b_len) {
		unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);

		bch2_checksum_update(&state,
				page_address(ZERO_PAGE(0)), page_len);
		b_len -= page_len;
	}
	a.lo = cpu_to_le64(bch2_checksum_final(&state));
	a.lo ^= b.lo;
	a.hi ^= b.hi;
	return a;
}
404
/*
 * Split an extent's checksum: recompute checksums of type
 * @new_csum_type for up to three consecutive sub-ranges of @bio
 * (@len_a sectors, then @len_b, then the remainder), verifying the old
 * whole-extent checksum @crc_old in the process.
 *
 * If the old and new types match and are mergeable, the per-split
 * checksums are merged to verify against @crc_old without a second
 * pass; otherwise the bio is walked again with the old type.
 *
 * On success, *@crc_a / *@crc_b (when non-NULL) are filled in with the
 * new unpacked crcs.  Returns -EIO on checksum mismatch (unless
 * opts.no_data_io), 0 on success.
 */
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
			struct bversion version,
			struct bch_extent_crc_unpacked crc_old,
			struct bch_extent_crc_unpacked *crc_a,
			struct bch_extent_crc_unpacked *crc_b,
			unsigned len_a, unsigned len_b,
			unsigned new_csum_type)
{
	struct bvec_iter iter = bio->bi_iter;
	struct nonce nonce = extent_nonce(version, crc_old);
	struct bch_csum merged = { 0 };
	struct crc_split {
		struct bch_extent_crc_unpacked	*crc;
		unsigned			len;
		unsigned			csum_type;
		struct bch_csum			csum;
	} splits[3] = {
		{ crc_a, len_a, new_csum_type, { 0 }},
		{ crc_b, len_b, new_csum_type, { 0 } },
		{ NULL,	 bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
	}, *i;
	bool mergeable = crc_old.csum_type == new_csum_type &&
		bch2_checksum_mergeable(new_csum_type);
	unsigned crc_nonce = crc_old.nonce;

	BUG_ON(len_a + len_b > bio_sectors(bio));
	BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
	BUG_ON(crc_is_compressed(crc_old));
	BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
	       bch2_csum_type_is_encryption(new_csum_type));

	/*
	 * Checksum each split; __bch2_checksum_bio() advances @iter, so
	 * successive calls cover consecutive ranges.  Splits whose csum
	 * isn't needed (and can't feed the merge) are just skipped over.
	 */
	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		iter.bi_size = i->len << 9;
		if (mergeable || i->crc)
			i->csum = __bch2_checksum_bio(c, i->csum_type,
						      nonce, bio, &iter);
		else
			bio_advance_iter(bio, &iter, i->len << 9);
		nonce = nonce_add(nonce, i->len << 9);
	}

	/*
	 * Reconstruct the whole-extent checksum: by merging the splits
	 * when possible, otherwise with a second full pass in the old
	 * checksum type.
	 */
	if (mergeable)
		for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
			merged = bch2_checksum_merge(new_csum_type, merged,
						     i->csum, i->len << 9);
	else
		merged = bch2_checksum_bio(c, crc_old.csum_type,
				extent_nonce(version, crc_old), bio);

	if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
		struct printbuf buf = PRINTBUF;
		prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n"
			   "  expected %0llx:%0llx got %0llx:%0llx (old type ",
			   __func__,
			   crc_old.csum.hi,
			   crc_old.csum.lo,
			   merged.hi,
			   merged.lo);
		bch2_prt_csum_type(&buf, crc_old.csum_type);
		prt_str(&buf, " new type ");
		bch2_prt_csum_type(&buf, new_csum_type);
		prt_str(&buf, ")");
		WARN_RATELIMIT(1, "%s", buf.buf);
		printbuf_exit(&buf);
		return -EIO;
	}

	/* fill in the callers' unpacked crcs for the requested splits */
	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		if (i->crc)
			*i->crc	= (struct bch_extent_crc_unpacked) {
				.csum_type		= i->csum_type,
				.compression_type	= crc_old.compression_type,
				.compressed_size	= i->len,
				.uncompressed_size	= i->len,
				.offset			= 0,
				.live_size		= i->len,
				.nonce			= crc_nonce,
				.csum			= i->csum,
			};

		/* encrypted extents consume nonce space per sector */
		if (bch2_csum_type_is_encryption(new_csum_type))
			crc_nonce += i->len;
	}

	return 0;
}
491
492 /* BCH_SB_FIELD_crypt: */
493
bch2_sb_crypt_validate(struct bch_sb * sb,struct bch_sb_field * f,enum bch_validate_flags flags,struct printbuf * err)494 static int bch2_sb_crypt_validate(struct bch_sb *sb, struct bch_sb_field *f,
495 enum bch_validate_flags flags, struct printbuf *err)
496 {
497 struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
498
499 if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
500 prt_printf(err, "wrong size (got %zu should be %zu)",
501 vstruct_bytes(&crypt->field), sizeof(*crypt));
502 return -BCH_ERR_invalid_sb_crypt;
503 }
504
505 if (BCH_CRYPT_KDF_TYPE(crypt)) {
506 prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
507 return -BCH_ERR_invalid_sb_crypt;
508 }
509
510 return 0;
511 }
512
/* Print the crypt section's KDF type and scrypt parameters. */
static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	/* fix typo: this is the KDF (key derivation function) type */
	prt_printf(out, "KDF: %llu\n", BCH_CRYPT_KDF_TYPE(crypt));
	prt_printf(out, "scrypt n: %llu\n", BCH_KDF_SCRYPT_N(crypt));
	prt_printf(out, "scrypt r: %llu\n", BCH_KDF_SCRYPT_R(crypt));
	prt_printf(out, "scrypt p: %llu\n", BCH_KDF_SCRYPT_P(crypt));
}
523
/* vtable hooking the crypt section into generic superblock field handling */
const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
	.validate	= bch2_sb_crypt_validate,
	.to_text	= bch2_sb_crypt_to_text,
};
528
529 #ifdef __KERNEL__
/*
 * Kernel-side key lookup: find the user key named @key_description and
 * copy its payload into @key.  Returns 0, -EINVAL if the payload isn't
 * exactly sizeof(struct bch_key), or an error from request_key().
 */
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	struct key *keyring_key;
	const struct user_key_payload *ukp;
	int ret;

	keyring_key = request_key(&key_type_user, key_description, NULL);
	if (IS_ERR(keyring_key))
		return PTR_ERR(keyring_key);

	/* payload must be dereferenced under the key's semaphore */
	down_read(&keyring_key->sem);
	ukp = dereference_key_locked(keyring_key);
	if (ukp->datalen == sizeof(*key)) {
		memcpy(key, ukp->data, ukp->datalen);
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	up_read(&keyring_key->sem);
	key_put(keyring_key);

	return ret;
}
553 #else
554 #include <keyutils.h>
555
__bch2_request_key(char * key_description,struct bch_key * key)556 static int __bch2_request_key(char *key_description, struct bch_key *key)
557 {
558 key_serial_t key_id;
559
560 key_id = request_key("user", key_description, NULL,
561 KEY_SPEC_SESSION_KEYRING);
562 if (key_id >= 0)
563 goto got_key;
564
565 key_id = request_key("user", key_description, NULL,
566 KEY_SPEC_USER_KEYRING);
567 if (key_id >= 0)
568 goto got_key;
569
570 key_id = request_key("user", key_description, NULL,
571 KEY_SPEC_USER_SESSION_KEYRING);
572 if (key_id >= 0)
573 goto got_key;
574
575 return -errno;
576 got_key:
577
578 if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
579 return -1;
580
581 return 0;
582 }
583
584 #include "crypto.h"
585 #endif
586
/*
 * Look up this filesystem's encryption key ("bcachefs:<uuid>") via the
 * keyring; in userspace builds, fall back to prompting for a
 * passphrase on failure.
 */
int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
	struct printbuf key_description = PRINTBUF;
	int ret;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	ret = __bch2_request_key(key_description.buf, key);
	printbuf_exit(&key_description);

#ifndef __KERNEL__
	if (ret) {
		char *passphrase = read_passphrase("Enter passphrase: ");
		struct bch_encrypted_key sb_key;

		/* NOTE(review): bch2_passphrase_check()'s result is ignored
		 * and ret is unconditionally cleared — presumably it aborts
		 * on failure; confirm.  passphrase also appears to leak. */
		bch2_passphrase_check(sb, passphrase,
				      key, &sb_key);
		ret = 0;
	}
#endif

	/* stash with memfd, pass memfd fd to mount */

	return ret;
}
613
614 #ifndef __KERNEL__
/*
 * Userspace only: revoke this filesystem's key from the user keyring.
 *
 * NOTE(review): returns positive errno on lookup failure, unlike the
 * -errno convention used elsewhere in this file; keyctl_revoke()'s
 * return value is also ignored.  Confirm callers expect this.
 */
int bch2_revoke_key(struct bch_sb *sb)
{
	key_serial_t key_id;
	struct printbuf key_description = PRINTBUF;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING);
	printbuf_exit(&key_description);
	if (key_id < 0)
		return errno;

	keyctl_revoke(key_id);

	return 0;
}
632 #endif
633
/*
 * Recover the filesystem's master key from the superblock crypt
 * section.  If the stored key is wrapped, the user's key is requested
 * and used to unwrap it in place; a correctly unwrapped key no longer
 * looks encrypted (magic matches), which is how a wrong user key is
 * detected.  Key material is wiped from the stack before returning.
 */
int bch2_decrypt_sb_key(struct bch_fs *c,
			struct bch_sb_field_crypt *crypt,
			struct bch_key *key)
{
	struct bch_encrypted_key sb_key = crypt->key;
	struct bch_key user_key;
	int ret = 0;

	/* is key encrypted? */
	if (!bch2_key_is_encrypted(&sb_key))
		goto out;

	ret = bch2_request_key(c->disk_sb.sb, &user_key);
	if (ret) {
		bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
		goto err;
	}

	/* decrypt real key: */
	ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
				      &sb_key, sizeof(sb_key));
	if (ret)
		goto err;

	if (bch2_key_is_encrypted(&sb_key)) {
		bch_err(c, "incorrect encryption key");
		ret = -EINVAL;
		goto err;
	}
out:
	*key = sb_key.key;
err:
	/* don't leave key material on the stack */
	memzero_explicit(&sb_key, sizeof(sb_key));
	memzero_explicit(&user_key, sizeof(user_key));
	return ret;
}
670
bch2_alloc_ciphers(struct bch_fs * c)671 static int bch2_alloc_ciphers(struct bch_fs *c)
672 {
673 if (c->chacha20)
674 return 0;
675
676 struct crypto_sync_skcipher *chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
677 int ret = PTR_ERR_OR_ZERO(chacha20);
678 if (ret) {
679 bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
680 return ret;
681 }
682
683 struct crypto_shash *poly1305 = crypto_alloc_shash("poly1305", 0, 0);
684 ret = PTR_ERR_OR_ZERO(poly1305);
685 if (ret) {
686 bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
687 crypto_free_sync_skcipher(chacha20);
688 return ret;
689 }
690
691 c->chacha20 = chacha20;
692 c->poly1305 = poly1305;
693 return 0;
694 }
695
/*
 * Turn off encryption: decrypt the superblock's master key and write
 * it back in the clear, then clear the encryption type in the sb.
 * Returns -EINVAL if there is no crypt section, 0 if the key was
 * already stored unencrypted.
 */
int bch2_disable_encryption(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
	if (!crypt)
		goto out;

	/* is key encrypted? */
	ret = 0;
	if (bch2_key_is_encrypted(&crypt->key))
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	/* store the key in the clear */
	crypt->key.magic	= cpu_to_le64(BCH_KEY_MAGIC);
	crypt->key.key		= key;

	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret;
}
727
/*
 * Turn on encryption: generate a random master key, optionally wrap it
 * with the user's key (@keyed), install it in the chacha20 tfm, and
 * persist it in a new superblock crypt section.
 *
 * Returns -EINVAL if encryption is already enabled.  All key material
 * is wiped from the stack before returning.
 */
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
	struct bch_encrypted_key key;
	struct bch_key user_key;
	struct bch_sb_field_crypt *crypt;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	/* Do we already have an encryption key? */
	if (bch2_sb_field_get(c->disk_sb.sb, crypt))
		goto err;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto err;

	key.magic = cpu_to_le64(BCH_KEY_MAGIC);
	get_random_bytes(&key.key, sizeof(key.key));

	if (keyed) {
		ret = bch2_request_key(c->disk_sb.sb, &user_key);
		if (ret) {
			bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
			goto err;
		}

		/* wrap the master key with the user's key */
		ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
					      &key, sizeof(key));
		if (ret)
			goto err;
	}

	/* key the live cipher with the (unwrapped) master key */
	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto err;

	crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
				     sizeof(*crypt) / sizeof(u64));
	if (!crypt) {
		ret = -BCH_ERR_ENOSPC_sb_crypt;
		goto err;
	}

	crypt->key = key;

	/* write superblock */
	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	/* wiping uninitialized stack memory on early error paths is harmless */
	memzero_explicit(&user_key, sizeof(user_key));
	memzero_explicit(&key, sizeof(key));
	return ret;
}
784
/* Free the crypto transforms allocated by bch2_fs_encryption_init(). */
void bch2_fs_encryption_exit(struct bch_fs *c)
{
	if (c->poly1305)
		crypto_free_shash(c->poly1305);
	if (c->chacha20)
		crypto_free_sync_skcipher(c->chacha20);
	if (c->sha256)
		crypto_free_shash(c->sha256);
}
794
/*
 * Filesystem-init hook: always allocate sha256 (used beyond
 * encryption); if the superblock has a crypt section, also allocate
 * the ciphers, decrypt the master key, and key the chacha20 tfm.
 * Key material is wiped from the stack before returning.
 */
int bch2_fs_encryption_init(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = 0;

	c->sha256 = crypto_alloc_shash("sha256", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->sha256);
	if (ret) {
		/* don't leave an ERR_PTR for bch2_fs_encryption_exit() */
		c->sha256 = NULL;
		bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
		goto out;
	}

	/* no crypt section: filesystem isn't encrypted, nothing more to do */
	crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
	if (!crypt)
		goto out;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto out;
out:
	memzero_explicit(&key, sizeof(key));
	return ret;
}
829