--- blk-crypto.c (a892c8d52c02284076fbbacae6692aa5c5807d11)
+++ blk-crypto.c (488f6682c832e9549d28b30075f00c76328eb1be)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright 2019 Google LLC
  */

 /*
  * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
  */

--- 5 unchanged lines hidden ---

 #include <linux/keyslot-manager.h>
 #include <linux/module.h>
 #include <linux/slab.h>

 #include "blk-crypto-internal.h"

 const struct blk_crypto_mode blk_crypto_modes[] = {
 	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
+		.cipher_str = "xts(aes)",
 		.keysize = 64,
 		.ivsize = 16,
 	},
 	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
+		.cipher_str = "essiv(cbc(aes),sha256)",
 		.keysize = 16,
 		.ivsize = 16,
 	},
 	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
+		.cipher_str = "adiantum(xchacha12,aes)",
 		.keysize = 32,
 		.ivsize = 32,
 	},
 };
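
The new .cipher_str fields tie each blk-crypto mode to a crypto API algorithm
name: when the device has no inline encryption hardware, blk-crypto-fallback
allocates an skcipher by that name. A minimal sketch of how such an entry
could be consumed; example_alloc_fallback_tfm() is illustrative, not part of
this patch, and error handling is abbreviated:

	#include <crypto/skcipher.h>
	#include <linux/err.h>

	static struct crypto_skcipher *
	example_alloc_fallback_tfm(enum blk_crypto_mode_num mode_num,
				   const u8 *raw_key)
	{
		const struct blk_crypto_mode *mode = &blk_crypto_modes[mode_num];
		struct crypto_skcipher *tfm;

		/* e.g. "xts(aes)" for BLK_ENCRYPTION_MODE_AES_256_XTS */
		tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
		if (IS_ERR(tfm))
			return tfm;

		/* Program the raw key: mode->keysize bytes, e.g. 64 for XTS. */
		if (crypto_skcipher_setkey(tfm, raw_key, mode->keysize)) {
			crypto_free_skcipher(tfm);
			return ERR_PTR(-EINVAL);
		}
		return tfm;
	}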

 /*
  * This number needs to be at least (the number of threads doing IO
  * concurrently) * (maximum recursive depth of a bio), so that we don't

--- 186 unchanged lines hidden ---

 	blk_crypto_rq_set_defaults(rq);
 }

 /**
  * __blk_crypto_bio_prep - Prepare bio for inline encryption
  *
  * @bio_ptr: pointer to original bio pointer
  *
- * Succeeds if the bio doesn't have inline encryption enabled or if the bio
- * crypt context provided for the bio is supported by the underlying device's
- * inline encryption hardware. Ends the bio with error otherwise.
+ * If the bio crypt context provided for the bio is supported by the underlying
+ * device's inline encryption hardware, do nothing.
  *
+ * Otherwise, try to perform en/decryption for this bio by falling back to the
+ * kernel crypto API. When the crypto API fallback is used for encryption,
+ * blk-crypto may choose to split the bio into 2 - the first one that will
+ * continue to be processed and the second one that will be resubmitted via
+ * generic_make_request. A bounce bio will be allocated to encrypt the contents
+ * of the aforementioned "first one", and *bio_ptr will be updated to this
+ * bounce bio.
+ *
  * Caller must ensure bio has bio_crypt_ctx.
  *
  * Return: true on success; false on error (and bio->bi_status will be set
  *	   appropriately, and bio_endio() will have been called so bio
  *	   submission should abort).
  */
 bool __blk_crypto_bio_prep(struct bio **bio_ptr)
 {
 	struct bio *bio = *bio_ptr;
 	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
-	blk_status_t blk_st = BLK_STS_IOERR;

 	/* Error if bio has no data. */
-	if (WARN_ON_ONCE(!bio_has_data(bio)))
+	if (WARN_ON_ONCE(!bio_has_data(bio))) {
+		bio->bi_status = BLK_STS_IOERR;
 		goto fail;
+	}

-	if (!bio_crypt_check_alignment(bio))
+	if (!bio_crypt_check_alignment(bio)) {
+		bio->bi_status = BLK_STS_IOERR;
 		goto fail;
+	}

 	/*
-	 * Success if device supports the encryption context.
+	 * Success if device supports the encryption context, or if we succeeded
+	 * in falling back to the crypto API.
 	 */
-	if (!blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
-					  &bc_key->crypto_cfg)) {
-		blk_st = BLK_STS_NOTSUPP;
-		goto fail;
-	}
-
-	return true;
+	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
+					 &bc_key->crypto_cfg))
+		return true;
+
+	if (blk_crypto_fallback_bio_prep(bio_ptr))
+		return true;
 fail:
-	(*bio_ptr)->bi_status = blk_st;
 	bio_endio(*bio_ptr);
 	return false;
 }
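
Note the double-pointer contract described in the comment above: on success,
*bio_ptr may have been replaced with the encrypted bounce bio, and on failure
the bio has already been ended with bio_endio(). A hedged sketch of what a
caller on the submission path must therefore look like, via the inline
blk_crypto_bio_prep() wrapper (which skips bios without a crypt context);
do_submit() is a stand-in for the rest of bio processing:

	static blk_qc_t example_submit(struct bio *bio)
	{
		/* May swap 'bio' for a bounce bio via the &bio argument. */
		if (!blk_crypto_bio_prep(&bio))
			return BLK_QC_T_NONE;	/* already ended; don't touch it */

		return do_submit(bio);		/* possibly the bounce bio now */
	}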

 /**
  * __blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
  *			      is inserted
  *

--- 49 unchanged lines hidden ---

 	blk_key->crypto_cfg.data_unit_size = data_unit_size;
 	blk_key->data_unit_size_bits = ilog2(data_unit_size);
 	blk_key->size = mode->keysize;
 	memcpy(blk_key->raw, raw_key, mode->keysize);

 	return 0;
 }

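For context on the mostly-hidden blk_crypto_init_key() whose tail is shown
above: an upper layer fills in a struct blk_crypto_key from raw key bytes
before doing anything else with blk-crypto. A usage sketch, assuming the
v5.8-era signature (raw key, mode, DUN width in bytes, data unit size);
example_setup_key() is illustrative:

	#include <linux/blk-crypto.h>

	static int example_setup_key(struct blk_crypto_key *key,
				     const u8 raw[64], unsigned int du_size)
	{
		/* AES-256-XTS takes a 64-byte raw key; 8-byte DUNs here. */
		return blk_crypto_init_key(key, raw,
					   BLK_ENCRYPTION_MODE_AES_256_XTS,
					   8, du_size);
	}
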
+/*
+ * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
+ * request queue it's submitted to supports inline crypto, or the
+ * blk-crypto-fallback is enabled and supports the cfg).
+ */
 bool blk_crypto_config_supported(struct request_queue *q,
 				 const struct blk_crypto_config *cfg)
 {
-	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
+	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
+	       blk_ksm_crypto_cfg_supported(q->ksm, cfg);
 }
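
With the fallback in the picture, "supported" no longer means "supported by
the hardware". A hedged sketch of a caller, e.g. a filesystem probing whether
an encryption policy is usable on this queue before creating any keys;
example_can_use_inline_crypt() is illustrative:

	static bool example_can_use_inline_crypt(struct request_queue *q)
	{
		struct blk_crypto_config cfg = {
			.crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS,
			.data_unit_size = 4096,
			.dun_bytes = 8,
		};

		return blk_crypto_config_supported(q, &cfg);
	}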

 /**
  * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
  * @key: A key to use on the device
  * @q: the request queue for the device
  *
- * Upper layers must call this function to ensure that the hardware supports
- * the key's crypto settings.
+ * Upper layers must call this function to ensure that either the hardware
+ * supports the key's crypto settings, or the crypto API fallback has transforms
+ * for the needed mode allocated and ready to go. This function may allocate
+ * an skcipher, and *should not* be called from the data path, since that might
+ * cause a deadlock.
  *
- * Return: 0 on success; -ENOPKG if the hardware doesn't support the key
+ * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
+ *	   blk-crypto-fallback is either disabled or the needed algorithm
+ *	   is disabled in the crypto API; or another -errno code.
  */
 int blk_crypto_start_using_key(const struct blk_crypto_key *key,
 			       struct request_queue *q)
 {
 	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
 		return 0;
-	return -ENOPKG;
+	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
 }
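
The "should not be called from the data path" warning matters precisely
because the fallback may allocate transforms here. A hedged sketch of the
intended call site, i.e. key-setup time rather than I/O time;
example_prepare_key_for_disk() is illustrative:

	static int example_prepare_key_for_disk(struct blk_crypto_key *key,
						struct request_queue *q)
	{
		int err = blk_crypto_start_using_key(key, q);

		if (err == -ENOPKG)
			pr_warn("key unsupported by both HW and fallback\n");
		return err;
	}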

 /**
  * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
  *			    it may have been programmed into
  * @q: The request queue whose associated inline encryption hardware this key
  *     might have been programmed into
  * @key: The key to evict

--- 5 unchanged lines hidden ---

  * Return: 0 on success or if key is not present in the q's ksm, -err on error.
  */
 int blk_crypto_evict_key(struct request_queue *q,
 			 const struct blk_crypto_key *key)
 {
 	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
 		return blk_ksm_evict_key(q->ksm, key);

-	return 0;
+	/*
+	 * If the request queue's associated inline encryption hardware didn't
+	 * have support for the key, then the key might have been programmed
+	 * into the fallback keyslot manager, so try to evict from there.
+	 */
+	return blk_crypto_fallback_evict_key(key);
 }
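
A hedged teardown sketch to round out the key lifecycle (init, start using,
evict): when the upper layer is done with a key, it evicts it from whichever
keyslot manager it landed in - the queue's or, after this patch, the
fallback's - and then wipes the raw bytes; example_destroy_key() is
illustrative:

	#include <linux/string.h>

	static void example_destroy_key(struct request_queue *q,
					struct blk_crypto_key *key)
	{
		int err = blk_crypto_evict_key(q, key);

		if (err)
			pr_warn("blk-crypto key eviction failed (%d)\n", err);
		/* Zeroize the raw key material once it's no longer needed. */
		memzero_explicit(key, sizeof(*key));
	}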