// SPDX-License-Identifier: GPL-2.0
/*
 * Adiantum length-preserving encryption mode
 *
 * Copyright 2018 Google LLC
 */

/*
 * Adiantum is a tweakable, length-preserving encryption mode designed for fast
 * and secure disk encryption, especially on CPUs without dedicated crypto
 * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
 * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
 * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
 * 16-byte block.  See the paper for details:
 *
 *	Adiantum: length-preserving encryption for entry-level processors
 *	(https://eprint.iacr.org/2018/720.pdf)
 *
 * For flexibility, this implementation also allows other ciphers:
 *
 *	- Stream cipher: XChaCha12 or XChaCha20
 *	- Block cipher: any with a 128-bit block size and 256-bit key
 *
 * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
 * HPolyC is not supported.  This is because Adiantum is ~20% faster than
 * HPolyC but still provably as secure, and also the ε-∆U hash function of
 * HBSH is formally defined to take two inputs (tweak, message) which makes it
 * difficult to wrap with the crypto_shash API.  Rather, some details need to
 * be handled here.  Nevertheless, if needed in the future, support for other
 * ε-∆U hash functions could be added here.
 */

#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>

/*
 * Size of the right-hand part of input data, in bytes; also the block
 * cipher's block size and the hash function's output size.
 */
#define BLOCKCIPHER_BLOCK_SIZE		16

/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE		32

/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE		(POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)

/*
 * The specification allows variable-length tweaks, but Linux's crypto API
 * currently only allows algorithms to support a single length.  The "natural"
 * tweak length for Adiantum is 16, since that fits into one Poly1305 block for
 * the best performance.  But longer tweaks are useful for fscrypt, to avoid
 * needing to derive per-file keys.  So instead we use two blocks, or 32 bytes.
 */
#define TWEAK_SIZE		32

struct adiantum_instance_ctx {
	struct crypto_skcipher_spawn streamcipher_spawn;
	struct crypto_cipher_spawn blockcipher_spawn;
	struct crypto_shash_spawn hash_spawn;
};

struct adiantum_tfm_ctx {
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	struct poly1305_core_key header_hash_key;
};

struct adiantum_request_ctx {

	/*
	 * Buffer for the right-hand part of the data, i.e.
	 *
	 *    P_R => P_M => C_M => C_R when encrypting, or
	 *    C_R => C_M => P_M => P_R when decrypting.
	 *
	 * Also used to build the IV for the stream cipher.
	 */
	union {
		u8 bytes[XCHACHA_IV_SIZE];
		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
	} rbuf;

	bool enc; /* true if encrypting, false if decrypting */

	/*
	 * The result of the Poly1305 ε-∆U hash function applied to
	 * (bulk length, tweak)
	 */
	le128 header_hash;

	/* Sub-requests, must be last */
	union {
		struct shash_desc hash_desc;
		struct skcipher_request streamcipher_req;
	} u;
};
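
/*
 * For orientation, an informal sketch of the encryption flow implemented
 * below, in the paper's notation, with the message split as P = P_L || P_R
 * and P_R being the final 16 bytes:
 *
 *	P_M = P_R + H_{K_H}(T, P_L)		(first hash step)
 *	C_M = E_{K_E}(P_M)			(one block cipher invocation)
 *	C_L = P_L XOR XChaCha(K_S, nonce derived from C_M)
 *	C_R = C_M - H_{K_H}(T, C_L)		(second hash step)
 *
 * where + and - denote addition and subtraction in Z/(2^{128}Z).  Decryption
 * inverts these steps in reverse order.  rctx->rbuf holds the right-hand
 * 16-byte value as it moves through these intermediate forms.
 */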

/*
 * Given the XChaCha stream key K_S, derive the block cipher key K_E and the
 * hash key K_H as follows:
 *
 *     K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
 *
 * Note that this denotes using bits from the XChaCha keystream, which here we
 * get indirectly by encrypting a buffer containing all 0's.
 */
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 iv[XCHACHA_IV_SIZE];
		u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
		struct scatterlist sg;
		struct crypto_wait wait;
		struct skcipher_request req; /* must be last */
	} *data;
	u8 *keyp;
	int err;

	/* Set the stream cipher key (K_S) */
	crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->streamcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
	if (err)
		return err;

	/* Derive the subkeys */
	data = kzalloc(sizeof(*data) +
		       crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->iv[0] = 1;
	sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
	crypto_init_wait(&data->wait);
	skcipher_request_set_tfm(&data->req, tctx->streamcipher);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						  CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
				   sizeof(data->derived_keys), data->iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
	if (err)
		goto out;
	keyp = data->derived_keys;

	/* Set the block cipher key (K_E) */
	crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tctx->blockcipher,
				crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tctx->blockcipher, keyp,
				   BLOCKCIPHER_KEY_SIZE);
	if (err)
		goto out;
	keyp += BLOCKCIPHER_KEY_SIZE;

	/* Set the hash key (K_H) */
	poly1305_core_setkey(&tctx->header_hash_key, keyp);
	keyp += POLY1305_BLOCK_SIZE;

	crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
	keyp += NHPOLY1305_KEY_SIZE;
	WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
	kfree_sensitive(data);
	return err;
}

/* Addition in Z/(2^{128}Z) */
static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x + y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
			   (x + y < x));
}

/* Subtraction in Z/(2^{128}Z) */
static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x - y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
			   (x - y > x));
}
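
/*
 * Example of the carry handling above: if the low halves are x = 2^64 - 1 and
 * y = 1, then x + y wraps to 0, so (x + y < x) evaluates to 1 and a carry is
 * added into the high 64 bits.  le128_sub() borrows symmetrically using
 * (x - y > x).
 */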

/*
 * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
 * result to rctx->header_hash.  This is the calculation
 *
 *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
 *
 * from the procedure in section 6.4 of the Adiantum paper.  The resulting
 * value is reused in both the first and second hash steps.  Specifically,
 * it's added to the result of an independently keyed ε-∆U hash function (for
 * equal length inputs only) taken over the left-hand part (the "bulk") of the
 * message, to give the overall Adiantum hash of the (tweak, left-hand part)
 * pair.
 */
static void adiantum_hash_header(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct {
		__le64 message_bits;
		__le64 padding;
	} header = {
		.message_bits = cpu_to_le64((u64)bulk_len * 8)
	};
	struct poly1305_state state;

	poly1305_core_init(&state);

	BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key,
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);

	poly1305_core_emit(&state, NULL, &rctx->header_hash);
}

/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
				 struct scatterlist *sgl, unsigned int nents,
				 le128 *digest)
{
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct shash_desc *hash_desc = &rctx->u.hash_desc;
	struct sg_mapping_iter miter;
	unsigned int i, n;
	int err;

	err = crypto_shash_init(hash_desc);
	if (err)
		return err;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	for (i = 0; i < bulk_len; i += n) {
		sg_miter_next(&miter);
		n = min_t(unsigned int, miter.length, bulk_len - i);
		err = crypto_shash_update(hash_desc, miter.addr, n);
		if (err)
			break;
	}
	sg_miter_stop(&miter);
	if (err)
		return err;

	return crypto_shash_final(hash_desc, (u8 *)digest);
}
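
/*
 * Note that neither helper above computes the full Adiantum hash by itself:
 * the overall hash of a (tweak, bulk) pair is header_hash plus the NHPoly1305
 * digest of the bulk, and the callers combine the two values with le128_add()
 * or le128_sub() as appropriate.
 */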

/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct scatterlist *dst = req->dst;
	const unsigned int dst_nents = sg_nents(dst);
	le128 digest;
	int err;

	/* If decrypting, decrypt C_M with the block cipher to get P_M */
	if (!rctx->enc)
		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/*
	 * Second hash step
	 *	enc: C_R = C_M - H_{K_H}(T, C_L)
	 *	dec: P_R = P_M - H_{K_H}(T, P_L)
	 */
	rctx->u.hash_desc.tfm = tctx->hash;
	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
	if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
		/* Fast path for single-page destination */
		void *virt = kmap_local_page(sg_page(dst)) + dst->offset;

		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
					  (u8 *)&digest);
		if (err) {
			kunmap_local(virt);
			return err;
		}
		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
		memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
		kunmap_local(virt);
	} else {
		/* Slow path that works for any destination scatterlist */
		err = adiantum_hash_message(req, dst, dst_nents, &digest);
		if (err)
			return err;
		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
		scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
					 bulk_len, sizeof(le128), 1);
	}
	return 0;
}
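
/*
 * Completion callback for the stream cipher step.  adiantum_crypt() calls
 * adiantum_finish() directly only when crypto_skcipher_encrypt() completes
 * synchronously (returns 0); when an asynchronous implementation returns
 * -EINPROGRESS instead, the second hash step runs from this callback.
 */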
static void adiantum_streamcipher_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err)
		err = adiantum_finish(req);

	skcipher_request_complete(req, err);
}

static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct scatterlist *src = req->src;
	const unsigned int src_nents = sg_nents(src);
	unsigned int stream_len;
	le128 digest;
	int err;

	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
		return -EINVAL;

	rctx->enc = enc;

	/*
	 * First hash step
	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
	 */
	adiantum_hash_header(req);
	rctx->u.hash_desc.tfm = tctx->hash;
	if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
		/* Fast path for single-page source */
		void *virt = kmap_local_page(sg_page(src)) + src->offset;

		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
					  (u8 *)&digest);
		memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
		kunmap_local(virt);
	} else {
		/* Slow path that works for any source scatterlist */
		err = adiantum_hash_message(req, src, src_nents, &digest);
		scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
					 bulk_len, sizeof(le128), 0);
	}
	if (err)
		return err;
	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);

	/* If encrypting, encrypt P_M with the block cipher to get C_M */
	if (enc)
		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/* Initialize the rest of the XChaCha IV (first part is C_M) */
	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
	BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
	rctx->rbuf.words[4] = cpu_to_le32(1);
	rctx->rbuf.words[5] = 0;
	rctx->rbuf.words[6] = 0;
	rctx->rbuf.words[7] = 0;

	/*
	 * XChaCha needs to be done on all the data except the last 16 bytes;
	 * for disk encryption that usually means 4080 or 496 bytes.  But ChaCha
	 * implementations tend to be most efficient when passed a whole number
	 * of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
	 * And here it doesn't matter whether the last 16 bytes are written to,
	 * as the second hash step will overwrite them.  Thus, round the XChaCha
	 * length up to the next 64-byte boundary if possible.
	 */
	stream_len = bulk_len;
	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
		stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);

	skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
				   req->dst, stream_len, &rctx->rbuf);
	skcipher_request_set_callback(&rctx->u.streamcipher_req,
				      req->base.flags,
				      adiantum_streamcipher_done, req);
	return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
	       adiantum_finish(req);
}
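
/*
 * A worked example of the rounding above: for a 4096-byte sector, bulk_len is
 * 4080 and round_up(4080, 64) = 4096 <= req->cryptlen, so the stream cipher
 * pass covers all 4096 bytes; likewise 496 rounds up to 512 for 512-byte
 * sectors.  The extra 16 bytes of output are scratch that adiantum_finish()
 * overwrites with C_R (or P_R).
 */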

static int adiantum_encrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, true);
}

static int adiantum_decrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, false);
}

static int adiantum_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	unsigned int subreq_size;
	int err;

	streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
	if (IS_ERR(streamcipher))
		return PTR_ERR(streamcipher);

	blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
	if (IS_ERR(blockcipher)) {
		err = PTR_ERR(blockcipher);
		goto err_free_streamcipher;
	}

	hash = crypto_spawn_shash(&ictx->hash_spawn);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		goto err_free_blockcipher;
	}

	tctx->streamcipher = streamcipher;
	tctx->blockcipher = blockcipher;
	tctx->hash = hash;

	BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
		     sizeof(struct adiantum_request_ctx));
	subreq_size = max(sizeof_field(struct adiantum_request_ctx,
				       u.hash_desc) +
			  crypto_shash_descsize(hash),
			  sizeof_field(struct adiantum_request_ctx,
				       u.streamcipher_req) +
			  crypto_skcipher_reqsize(streamcipher));

	crypto_skcipher_set_reqsize(tfm,
				    offsetof(struct adiantum_request_ctx, u) +
				    subreq_size);
	return 0;

err_free_blockcipher:
	crypto_free_cipher(blockcipher);
err_free_streamcipher:
	crypto_free_skcipher(streamcipher);
	return err;
}

static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->streamcipher);
	crypto_free_cipher(tctx->blockcipher);
	crypto_free_shash(tctx->hash);
}

static void adiantum_free_instance(struct skcipher_instance *inst)
{
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->streamcipher_spawn);
	crypto_drop_cipher(&ictx->blockcipher_spawn);
	crypto_drop_shash(&ictx->hash_spawn);
	kfree(inst);
}

/*
 * Check for a supported set of inner algorithms.
 * See the comment at the beginning of this file.
 */
static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
					  struct crypto_alg *blockcipher_alg,
					  struct shash_alg *hash_alg)
{
	if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
	    strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
		return false;

	if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
	    blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
		return false;
	if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
		return false;

	if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
		return false;

	return true;
}
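
/*
 * Example instantiations accepted by adiantum_create() below, given the
 * checks above (the third argument is optional and defaults to "nhpoly1305"):
 *
 *	adiantum(xchacha12,aes)
 *	adiantum(xchacha20,aes)
 *	adiantum(xchacha12,aes,nhpoly1305)
 */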
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	const char *nhpoly1305_name;
	struct skcipher_instance *inst;
	struct adiantum_instance_ctx *ictx;
	struct skcipher_alg_common *streamcipher_alg;
	struct crypto_alg *blockcipher_alg;
	struct shash_alg *hash_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	ictx = skcipher_instance_ctx(inst);

	/* Stream cipher, e.g. "xchacha12" */
	err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
				   skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);

	/* Block cipher, e.g. "aes" */
	err = crypto_grab_cipher(&ictx->blockcipher_spawn,
				 skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[2]), 0, mask);
	if (err)
		goto err_free_inst;
	blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);

	/* NHPoly1305 ε-∆U hash function */
	nhpoly1305_name = crypto_attr_alg_name(tb[3]);
	if (nhpoly1305_name == ERR_PTR(-ENOENT))
		nhpoly1305_name = "nhpoly1305";
	err = crypto_grab_shash(&ictx->hash_spawn,
				skcipher_crypto_instance(inst),
				nhpoly1305_name, 0, mask);
	if (err)
		goto err_free_inst;
	hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);

	/* Check the set of algorithms */
	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
					   hash_alg)) {
		pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
			streamcipher_alg->base.cra_name,
			blockcipher_alg->cra_name, hash_alg->base.cra_name);
		err = -EINVAL;
		goto err_free_inst;
	}

	/* Instance fields */

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
		     blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s,%s)",
		     streamcipher_alg->base.cra_driver_name,
		     blockcipher_alg->cra_driver_name,
		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask;
	/*
	 * The block cipher is only invoked once per message, so for long
	 * messages (e.g. sectors for disk encryption) its performance doesn't
	 * matter as much as that of the stream cipher and hash function.  Thus,
	 * weigh the block cipher's ->cra_priority less.
	 */
	inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
				       2 * hash_alg->base.cra_priority +
				       blockcipher_alg->cra_priority) / 7;

	inst->alg.setkey = adiantum_setkey;
	inst->alg.encrypt = adiantum_encrypt;
	inst->alg.decrypt = adiantum_decrypt;
	inst->alg.init = adiantum_init_tfm;
	inst->alg.exit = adiantum_exit_tfm;
	inst->alg.min_keysize = streamcipher_alg->min_keysize;
	inst->alg.max_keysize = streamcipher_alg->max_keysize;
	inst->alg.ivsize = TWEAK_SIZE;

	inst->free = adiantum_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		adiantum_free_instance(inst);
	}
	return err;
}

/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
static struct crypto_template adiantum_tmpl = {
	.name			= "adiantum",
	.create			= adiantum_create,
	.module			= THIS_MODULE,
};

static int __init adiantum_module_init(void)
{
	return crypto_register_template(&adiantum_tmpl);
}

static void __exit adiantum_module_exit(void)
{
	crypto_unregister_template(&adiantum_tmpl);
}

subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);

MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);