// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-cgroup.h"
#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = BIO_MAX_VECS;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
                 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
                 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
                 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
        struct bio_crypt_ctx crypt_ctx;
        /*
         * Copy of the bvec_iter when this bio was submitted.
         * We only want to en/decrypt the part of the bio as described by the
         * bvec_iter upon submission because the bio might be split before
         * being resubmitted.
         */
        struct bvec_iter crypt_iter;
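        /*
         * The two union members cover different phases of a READ bio's life:
         * bi_private_orig/bi_end_io_orig save the submitter's fields while
         * the bio is in flight, and work/bio reuse the same storage once the
         * bio has completed and been queued for decryption.
         */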
        union {
                struct {
                        struct work_struct work;
                        struct bio *bio;
                };
                struct {
                        void *bi_private_orig;
                        bio_end_io_t *bi_end_io_orig;
                };
        };
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
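/* tfms_inited[mode] is true iff the tfms for that mode have been allocated. */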
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_fallback_keyslot {
        enum blk_crypto_mode_num crypto_mode;
        struct crypto_sync_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_crypto_profile *blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set enc_bio_set;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_RAW_KEY_SIZE];

static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
        struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
        enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
        int err;

        WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

        /* Clear the key in the skcipher */
        err = crypto_sync_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
                                          blk_crypto_modes[crypto_mode].keysize);
        WARN_ON(err);
        slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int
blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
                                    const struct blk_crypto_key *key,
                                    unsigned int slot)
{
        struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
        const enum blk_crypto_mode_num crypto_mode =
                key->crypto_cfg.crypto_mode;
        int err;

        if (crypto_mode != slotp->crypto_mode &&
            slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
                blk_crypto_fallback_evict_keyslot(slot);

        slotp->crypto_mode = crypto_mode;
        err = crypto_sync_skcipher_setkey(slotp->tfms[crypto_mode], key->bytes,
                                          key->size);
        if (err) {
                blk_crypto_fallback_evict_keyslot(slot);
                return err;
        }
        return 0;
}

static int blk_crypto_fallback_keyslot_evict(struct blk_crypto_profile *profile,
                                             const struct blk_crypto_key *key,
                                             unsigned int slot)
{
        blk_crypto_fallback_evict_keyslot(slot);
        return 0;
}

static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
        .keyslot_program        = blk_crypto_fallback_keyslot_program,
        .keyslot_evict          = blk_crypto_fallback_keyslot_evict,
};

static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
        struct bio *src_bio = enc_bio->bi_private;
        struct page **pages = (struct page **)enc_bio->bi_io_vec;
        struct bio_vec *bv;
        unsigned int i;

        /*
         * Use the same trick as the alloc side to avoid the need for an extra
         * pages array.
         */
        bio_for_each_bvec_all(bv, enc_bio, i)
                pages[i] = bv->bv_page;

        i = mempool_free_bulk(blk_crypto_bounce_page_pool, (void **)pages,
                              enc_bio->bi_vcnt);
        if (i < enc_bio->bi_vcnt)
                release_pages(pages + i, enc_bio->bi_vcnt - i);

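        /*
         * Multiple encrypted bios may complete concurrently for one source
         * bio; cmpxchg() records only the first error so that a later
         * successful completion cannot overwrite it.
         */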
        if (enc_bio->bi_status)
                cmpxchg(&src_bio->bi_status, 0, enc_bio->bi_status);

        bio_put(enc_bio);
        bio_endio(src_bio);
}

#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))

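/*
 * Allocate the bounce bio and bounce pages for an encrypted write.  This runs
 * in the bio submission path, so a NOIO scope is entered to keep page
 * allocation from recursing into reclaim that could issue I/O and deadlock;
 * the mempool backstops the allocation if the page allocator comes up short.
 */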
static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
                unsigned int nr_segs, struct page ***pages_ret)
{
        unsigned int memflags = memalloc_noio_save();
        unsigned int nr_allocated;
        struct page **pages;
        struct bio *bio;

        bio = bio_alloc_bioset(bio_src->bi_bdev, nr_segs, bio_src->bi_opf,
                               GFP_NOIO, &enc_bio_set);
        if (bio_flagged(bio_src, BIO_REMAPPED))
                bio_set_flag(bio, BIO_REMAPPED);
        bio->bi_private = bio_src;
        bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
        bio->bi_ioprio = bio_src->bi_ioprio;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_write_stream = bio_src->bi_write_stream;
        bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
        bio_clone_blkg_association(bio, bio_src);

        /*
         * Move the page array up in the memory allocated for the bio vecs as
         * far as possible so that we can start filling biovecs from the
         * beginning without overwriting the temporary page array.
         */
        static_assert(PAGE_PTRS_PER_BVEC > 1);
        pages = (struct page **)bio->bi_io_vec;
        pages += nr_segs * (PAGE_PTRS_PER_BVEC - 1);

        /*
         * Try a bulk allocation first. This could leave random pages in the
         * array unallocated, but we'll fix that up later in mempool_alloc_bulk.
         *
         * Note: alloc_pages_bulk needs the array to be zeroed, as it assumes
         * any non-zero slot already contains a valid allocation.
         */
        memset(pages, 0, sizeof(struct page *) * nr_segs);
        nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr_segs, pages);
        if (nr_allocated < nr_segs)
                mempool_alloc_bulk(blk_crypto_bounce_page_pool, (void **)pages,
                                   nr_segs, nr_allocated);
        memalloc_noio_restore(memflags);
        *pages_ret = pages;
        return bio;
}

static struct crypto_sync_skcipher *
blk_crypto_fallback_tfm(struct blk_crypto_keyslot *slot)
{
        const struct blk_crypto_fallback_keyslot *slotp =
                &blk_crypto_keyslots[blk_crypto_keyslot_index(slot)];

        return slotp->tfms[slotp->crypto_mode];
}

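/*
 * The fallback's IV for each data unit is simply the 64-bit DUN words in
 * little-endian order, e.g. a DUN of 5 becomes the IV 05 00 00 ... 00.
 */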
union blk_crypto_iv {
        __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                                 union blk_crypto_iv *iv)
{
        int i;

        for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
                iv->dun[i] = cpu_to_le64(dun[i]);
}

static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
                struct crypto_sync_skcipher *tfm)
{
        struct bio_crypt_ctx *bc = src_bio->bi_crypt_context;
        int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
        SYNC_SKCIPHER_REQUEST_ON_STACK(ciph_req, tfm);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        struct scatterlist src, dst;
        union blk_crypto_iv iv;
        unsigned int nr_enc_pages, enc_idx;
        struct page **enc_pages;
        struct bio *enc_bio;
        unsigned int i;

        skcipher_request_set_callback(ciph_req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        NULL, NULL);

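        /*
         * The starting DUN comes from the bio's crypt context; it is
         * incremented once per data unit as the bio is walked below.
         */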
        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&src, 1);
        sg_init_table(&dst, 1);

        skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
                                   iv.bytes);

        /*
         * Encrypt each page in the source bio. Because the source bio could
         * have bio_vecs that span more than a single page, but the encrypted
         * bios are limited to a single page per bio_vec, this can generate
         * more than a single encrypted bio per source bio.
         */
new_bio:
        nr_enc_pages = min(bio_segments(src_bio), BIO_MAX_VECS);
        enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_enc_pages, &enc_pages);
        enc_idx = 0;
        for (;;) {
                struct bio_vec src_bv =
                        bio_iter_iovec(src_bio, src_bio->bi_iter);
                struct page *enc_page = enc_pages[enc_idx];

                if (!IS_ALIGNED(src_bv.bv_len | src_bv.bv_offset,
                                data_unit_size)) {
                        enc_bio->bi_status = BLK_STS_INVAL;
                        goto out_free_enc_bio;
                }

                __bio_add_page(enc_bio, enc_page, src_bv.bv_len,
                               src_bv.bv_offset);

                sg_set_page(&src, src_bv.bv_page, data_unit_size,
                            src_bv.bv_offset);
                sg_set_page(&dst, enc_page, data_unit_size, src_bv.bv_offset);

                /*
                 * Increment the index now that the encrypted page is added to
                 * the bio. This is important for the error unwind path.
                 */
                enc_idx++;

                /*
                 * Encrypt each data unit in this page.
                 */
                for (i = 0; i < src_bv.bv_len; i += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_skcipher_encrypt(ciph_req)) {
                                enc_bio->bi_status = BLK_STS_IOERR;
                                goto out_free_enc_bio;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        src.offset += data_unit_size;
                        dst.offset += data_unit_size;
                }

                bio_advance_iter_single(src_bio, &src_bio->bi_iter,
                                        src_bv.bv_len);
                if (!src_bio->bi_iter.bi_size)
                        break;

                if (enc_idx == nr_enc_pages) {
                        /*
                         * For each additional encrypted bio submitted,
                         * increment the source bio's remaining count. Each
                         * encrypted bio's completion handler calls bio_endio
                         * on the source bio, so this keeps the source bio
                         * from completing until the last encrypted bio does.
                         */
                        bio_inc_remaining(src_bio);
                        submit_bio(enc_bio);
                        goto new_bio;
                }
        }

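        /*
         * Submit the last (or only) encrypted bio; its completion handler
         * ends the source bio.
         */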
        submit_bio(enc_bio);
        return;

out_free_enc_bio:
        /*
         * Add the remaining pages to the bio so that the normal completion
         * path in blk_crypto_fallback_encrypt_endio frees them. The exact
         * data layout does not matter for that, so don't bother iterating the
         * source bio.
         */
        for (; enc_idx < nr_enc_pages; enc_idx++)
                __bio_add_page(enc_bio, enc_pages[enc_idx], PAGE_SIZE, 0);
        bio_endio(enc_bio);
}

/*
 * The crypto API fallback's encryption routine.
 *
 * Allocate one or more bios for encryption, encrypt the input bio using the
 * crypto API, and submit the encrypted bios. Sets bio->bi_status and
 * completes the source bio on error.
 */
static void blk_crypto_fallback_encrypt_bio(struct bio *src_bio)
{
        struct bio_crypt_ctx *bc = src_bio->bi_crypt_context;
        struct blk_crypto_keyslot *slot;
        blk_status_t status;

        status = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
                                        bc->bc_key, &slot);
        if (status != BLK_STS_OK) {
                src_bio->bi_status = status;
                bio_endio(src_bio);
                return;
        }
        __blk_crypto_fallback_encrypt_bio(src_bio,
                                          blk_crypto_fallback_tfm(slot));
        blk_crypto_put_keyslot(slot);
}

static blk_status_t __blk_crypto_fallback_decrypt_bio(struct bio *bio,
                struct bio_crypt_ctx *bc, struct bvec_iter iter,
                struct crypto_sync_skcipher *tfm)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(ciph_req, tfm);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        union blk_crypto_iv iv;
        struct scatterlist sg;
        struct bio_vec bv;
        const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
        unsigned int i;

        skcipher_request_set_callback(ciph_req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        NULL, NULL);

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&sg, 1);
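        /*
         * A single scatterlist serves as both source and destination, so the
         * data is decrypted in place in the read bio's pages.
         */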
        skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
                                   iv.bytes);

        /* Decrypt each segment in the bio */
        __bio_for_each_segment(bv, bio, iter, iter) {
                struct page *page = bv.bv_page;

                if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
                        return BLK_STS_INVAL;

                sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

                /* Decrypt each data unit in the segment */
                for (i = 0; i < bv.bv_len; i += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_skcipher_decrypt(ciph_req))
                                return BLK_STS_IOERR;
                        bio_crypt_dun_increment(curr_dun, 1);
                        sg.offset += data_unit_size;
                }
        }

        return BLK_STS_OK;
}

/*
 * The crypto API fallback's main decryption routine.
 *
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
        struct bio_fallback_crypt_ctx *f_ctx =
                container_of(work, struct bio_fallback_crypt_ctx, work);
        struct bio *bio = f_ctx->bio;
        struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
        struct blk_crypto_keyslot *slot;
        blk_status_t status;

        status = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
                                        bc->bc_key, &slot);
        if (status == BLK_STS_OK) {
                status = __blk_crypto_fallback_decrypt_bio(bio, bc,
                                        f_ctx->crypt_iter,
                                        blk_crypto_fallback_tfm(slot));
                blk_crypto_put_keyslot(slot);
        }
        mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);

        bio->bi_status = status;
        bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
        struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

        bio->bi_private = f_ctx->bi_private_orig;
        bio->bi_end_io = f_ctx->bi_end_io_orig;

        /* If there was an IO error, don't queue for decrypt. */
        if (bio->bi_status) {
                mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
                bio_endio(bio);
                return;
        }

        INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
        f_ctx->bio = bio;
        queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 * @bio: bio to prepare
 *
 * If bio is doing a WRITE operation, allocate one or more bios to contain the
 * encrypted payload and submit them.
 *
 * For a READ operation, mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the submitted bio(s) look like
 * regular bios (i.e. as if no encryption context was ever specified) for the
 * purposes of the rest of the stack except for blk-integrity (blk-integrity
 * and blk-crypto are not currently supported together).
 *
 * Return: true if @bio should be submitted to the driver by the caller, else
 *         false. Sets bio->bi_status, calls bio_endio and returns false on
 *         error.
 */
bool blk_crypto_fallback_bio_prep(struct bio *bio)
{
        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        struct bio_fallback_crypt_ctx *f_ctx;

        if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
                /* User didn't call blk_crypto_start_using_key() first */
                bio_io_error(bio);
                return false;
        }

        if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
                                        &bc->bc_key->crypto_cfg)) {
                bio->bi_status = BLK_STS_NOTSUPP;
                bio_endio(bio);
                return false;
        }

        if (bio_data_dir(bio) == WRITE) {
                blk_crypto_fallback_encrypt_bio(bio);
                return false;
        }

        /*
         * bio READ case: Set up a f_ctx in the bio's bi_private and set the
         * bi_end_io appropriately to trigger decryption when the bio is ended.
         */
        f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
        f_ctx->crypt_ctx = *bc;
        f_ctx->crypt_iter = bio->bi_iter;
        f_ctx->bi_private_orig = bio->bi_private;
        f_ctx->bi_end_io_orig = bio->bi_end_io;
        bio->bi_private = (void *)f_ctx;
        bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
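        /*
         * The encryption context was saved in f_ctx above, so drop it from
         * the bio: to the rest of the stack this now looks like a regular
         * unencrypted bio.
         */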
        bio_crypt_free_ctx(bio);

        return true;
}

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
}

static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
        int i;
        int err;

        if (blk_crypto_fallback_inited)
                return 0;

        get_random_bytes(blank_key, sizeof(blank_key));

        err = bioset_init(&enc_bio_set, 64, 0, BIOSET_NEED_BVECS);
        if (err)
                goto out;

        /* Dynamic allocation is needed because of lockdep_register_key(). */
        blk_crypto_fallback_profile = kzalloc_obj(*blk_crypto_fallback_profile);
        if (!blk_crypto_fallback_profile) {
                err = -ENOMEM;
                goto fail_free_bioset;
        }

        err = blk_crypto_profile_init(blk_crypto_fallback_profile,
                                      blk_crypto_num_keyslots);
        if (err)
                goto fail_free_profile;
        err = -ENOMEM;

        blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
        blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
        blk_crypto_fallback_profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;

        /* All blk-crypto modes have a crypto API fallback. */
        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
                blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
        blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
                                        WQ_UNBOUND | WQ_HIGHPRI |
                                        WQ_MEM_RECLAIM, num_online_cpus());
        if (!blk_crypto_wq)
                goto fail_destroy_profile;

        blk_crypto_keyslots = kzalloc_objs(blk_crypto_keyslots[0],
                                           blk_crypto_num_keyslots);
        if (!blk_crypto_keyslots)
                goto fail_free_wq;

        blk_crypto_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_bounce_pg, 0);
        if (!blk_crypto_bounce_page_pool)
                goto fail_free_keyslots;

        bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
        if (!bio_fallback_crypt_ctx_cache)
                goto fail_free_bounce_page_pool;

        bio_fallback_crypt_ctx_pool =
                mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
                                         bio_fallback_crypt_ctx_cache);
        if (!bio_fallback_crypt_ctx_pool)
                goto fail_free_crypt_ctx_cache;

        blk_crypto_fallback_inited = true;

        return 0;
fail_free_crypt_ctx_cache:
        kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
        mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
        kfree(blk_crypto_keyslots);
fail_free_wq:
        destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
        blk_crypto_profile_destroy(blk_crypto_fallback_profile);
fail_free_profile:
        kfree(blk_crypto_fallback_profile);
fail_free_bioset:
        bioset_exit(&enc_bio_set);
out:
        return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
        struct blk_crypto_fallback_keyslot *slotp;
        unsigned int i;
        int err = 0;

        /*
         * Fast path
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we try to access them.
         */
        if (likely(smp_load_acquire(&tfms_inited[mode_num])))
                return 0;

        mutex_lock(&tfms_init_lock);
        if (tfms_inited[mode_num])
                goto out;

        err = blk_crypto_fallback_init();
        if (err)
                goto out;

        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                slotp->tfms[mode_num] = crypto_alloc_sync_skcipher(cipher_str,
                                                                   0, 0);
                if (IS_ERR(slotp->tfms[mode_num])) {
                        err = PTR_ERR(slotp->tfms[mode_num]);
                        if (err == -ENOENT) {
                                pr_warn_once("Missing crypto API support for \"%s\"\n",
                                             cipher_str);
                                err = -ENOPKG;
                        }
                        slotp->tfms[mode_num] = NULL;
                        goto out_free_tfms;
                }

                crypto_sync_skcipher_set_flags(slotp->tfms[mode_num],
                                               CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
        }

        /*
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we set tfms_inited[mode_num].
         */
        smp_store_release(&tfms_inited[mode_num], true);
        goto out;

out_free_tfms:
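        /*
         * crypto_free_sync_skcipher() is a no-op on NULL, so slots whose tfms
         * were never allocated are handled here too.
         */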
        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                crypto_free_sync_skcipher(slotp->tfms[mode_num]);
                slotp->tfms[mode_num] = NULL;
        }
out:
        mutex_unlock(&tfms_init_lock);
        return err;
}