// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
#include <crypto/xts.h>
#include <asm/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

struct caam_skcipher_req_ctx {
	struct skcipher_request fallback_req;
};
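/*
 * Shared descriptor (re)construction: the aead/gcm/rfc4106/rfc4543
 * *_set_sh_desc() helpers below are invoked from both the setkey() and
 * setauthsize() paths and return 0 early until both the cipher key and the
 * authsize are known, so the descriptors are only built once a session is
 * fully configured.
 */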
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
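/*
 * For the authenc algorithms the key blob passed to aead_setkey() is a
 * crypto_authenc_keys pair; ctx->key ends up laid out as
 *	[ auth (split) key, padded to adata.keylen_pad | encryption key ]
 * and is synced to ctx->key_dma so the engine can fetch it whenever the
 * keys are referenced rather than inlined in the shared descriptors.
 */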
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->adata.keylen_pad + keys.enckeylen,
				   ctx->dir);

	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}
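/*
 * Key inlining rule used by the GCM/RFC4106/RFC4543 helpers: rem_bytes is
 * what remains of the 64-word descriptor buffer once the job descriptor
 * I/O overhead and the key bytes are accounted for; the key is inlined in
 * the shared descriptor only if that remainder still covers the descriptor
 * template length, otherwise it is referenced through ctx->key_dma.
 */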
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
				   ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}
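/*
 * XTS fallback policy: keys other than 2 x 128 and 2 x 256 bits set
 * xts_key_fallback, and skcipher_crypt() hands such requests (as well as,
 * on Era <= 8 hardware, requests whose upper 64 tweak bits are non-zero)
 * to the software fallback tfm programmed below.
 */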
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	int ret = 0;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(jrdev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};
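/*
 * Per-operation driver contexts (struct caam_drv_ctx) are created lazily
 * on the first request that needs them; get_drv_ctx() uses a double-checked
 * read under ctx->lock so the common (already initialized) case stays
 * lock-free.
 */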
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}
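/*
 * Extended descriptors are carved out of the qi_cache and carry the h/w
 * S/G table together with a DMA-able copy of the IV. On any failure inside
 * the *_edesc_alloc() helpers everything mapped so far is undone and an
 * ERR_PTR is returned, so callers never see a partially mapped request.
 */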
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR(drv_ctx))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
	else
		qm_sg_ents = pad_sg_nents(qm_sg_ents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents <= 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}
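/*
 * The compound frame built above uses two entries: fd_sgt[1] (input) points
 * at the qm_sg table covering {assoclen, IV, src}, while fd_sgt[0] (output)
 * points either directly at a contiguous destination buffer or into the
 * same table at the destination (or in-place source) entries.
 */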
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 false);
}
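/*
 * skcipher_done() runs from the QI completion path: it translates the CAAM
 * status word via caam_jr_strstatus(), unmaps the request, copies the
 * updated IV (written by the descriptor right after the S/G table) back to
 * req->iv for chaining, and completes the request.
 */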
static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		ecode = caam_jr_strstatus(qidev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
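/*
 * For skciphers the IV is copied into the same qi_cache block as the
 * extended descriptor and mapped bidirectionally: it is fed to the engine
 * as the first input S/G entry and also placed after the output entries so
 * the descriptor can write back the IV needed by a chained request.
 */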
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR(drv_ctx))
		return (struct skcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(ALIGN(ivsize, __alignof__(*edesc)) +
		     offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	iv = qi_cache_alloc(flags);
	if (unlikely(!iv)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);
	else
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);

	return edesc;
}

static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}
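/*
 * xts_skcipher_ivsize() reports whether the upper 64 bits of the XTS tweak
 * are non-zero; together with xts_key_fallback this decides in
 * skcipher_crypt() whether a request must go to the software fallback
 * instead of the engine.
 */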
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);

		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				 crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi",
				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
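/*
 * AEAD templates: class1_alg_type selects the cipher, class2_alg_type the
 * authentication algorithm. .geniv marks the echainiv variants, for which
 * the encrypt shared descriptor also generates the IV, and .nodkp marks the
 * GCM family, which has no split authentication key and thus no DKP.
 */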
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
OP_ALG_ALGSEL_MD5 | 2197 OP_ALG_AAI_HMAC_PRECOMP, 2198 }, 2199 }, 2200 { 2201 .aead = { 2202 .base = { 2203 .cra_name = "echainiv(authenc(hmac(md5)," 2204 "cbc(des)))", 2205 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2206 "cbc-des-caam-qi", 2207 .cra_blocksize = DES_BLOCK_SIZE, 2208 }, 2209 .setkey = aead_setkey, 2210 .setauthsize = aead_setauthsize, 2211 .encrypt = aead_encrypt, 2212 .decrypt = aead_decrypt, 2213 .ivsize = DES_BLOCK_SIZE, 2214 .maxauthsize = MD5_DIGEST_SIZE, 2215 }, 2216 .caam = { 2217 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2218 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2219 OP_ALG_AAI_HMAC_PRECOMP, 2220 .geniv = true, 2221 } 2222 }, 2223 { 2224 .aead = { 2225 .base = { 2226 .cra_name = "authenc(hmac(sha1),cbc(des))", 2227 .cra_driver_name = "authenc-hmac-sha1-" 2228 "cbc-des-caam-qi", 2229 .cra_blocksize = DES_BLOCK_SIZE, 2230 }, 2231 .setkey = aead_setkey, 2232 .setauthsize = aead_setauthsize, 2233 .encrypt = aead_encrypt, 2234 .decrypt = aead_decrypt, 2235 .ivsize = DES_BLOCK_SIZE, 2236 .maxauthsize = SHA1_DIGEST_SIZE, 2237 }, 2238 .caam = { 2239 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2240 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2241 OP_ALG_AAI_HMAC_PRECOMP, 2242 }, 2243 }, 2244 { 2245 .aead = { 2246 .base = { 2247 .cra_name = "echainiv(authenc(hmac(sha1)," 2248 "cbc(des)))", 2249 .cra_driver_name = "echainiv-authenc-" 2250 "hmac-sha1-cbc-des-caam-qi", 2251 .cra_blocksize = DES_BLOCK_SIZE, 2252 }, 2253 .setkey = aead_setkey, 2254 .setauthsize = aead_setauthsize, 2255 .encrypt = aead_encrypt, 2256 .decrypt = aead_decrypt, 2257 .ivsize = DES_BLOCK_SIZE, 2258 .maxauthsize = SHA1_DIGEST_SIZE, 2259 }, 2260 .caam = { 2261 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2262 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2263 OP_ALG_AAI_HMAC_PRECOMP, 2264 .geniv = true, 2265 } 2266 }, 2267 { 2268 .aead = { 2269 .base = { 2270 .cra_name = "authenc(hmac(sha224),cbc(des))", 2271 .cra_driver_name = "authenc-hmac-sha224-" 2272 "cbc-des-caam-qi", 2273 .cra_blocksize = DES_BLOCK_SIZE, 2274 }, 2275 .setkey = aead_setkey, 2276 .setauthsize = aead_setauthsize, 2277 .encrypt = aead_encrypt, 2278 .decrypt = aead_decrypt, 2279 .ivsize = DES_BLOCK_SIZE, 2280 .maxauthsize = SHA224_DIGEST_SIZE, 2281 }, 2282 .caam = { 2283 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2284 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2285 OP_ALG_AAI_HMAC_PRECOMP, 2286 }, 2287 }, 2288 { 2289 .aead = { 2290 .base = { 2291 .cra_name = "echainiv(authenc(hmac(sha224)," 2292 "cbc(des)))", 2293 .cra_driver_name = "echainiv-authenc-" 2294 "hmac-sha224-cbc-des-" 2295 "caam-qi", 2296 .cra_blocksize = DES_BLOCK_SIZE, 2297 }, 2298 .setkey = aead_setkey, 2299 .setauthsize = aead_setauthsize, 2300 .encrypt = aead_encrypt, 2301 .decrypt = aead_decrypt, 2302 .ivsize = DES_BLOCK_SIZE, 2303 .maxauthsize = SHA224_DIGEST_SIZE, 2304 }, 2305 .caam = { 2306 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2307 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2308 OP_ALG_AAI_HMAC_PRECOMP, 2309 .geniv = true, 2310 } 2311 }, 2312 { 2313 .aead = { 2314 .base = { 2315 .cra_name = "authenc(hmac(sha256),cbc(des))", 2316 .cra_driver_name = "authenc-hmac-sha256-" 2317 "cbc-des-caam-qi", 2318 .cra_blocksize = DES_BLOCK_SIZE, 2319 }, 2320 .setkey = aead_setkey, 2321 .setauthsize = aead_setauthsize, 2322 .encrypt = aead_encrypt, 2323 .decrypt = aead_decrypt, 2324 .ivsize = DES_BLOCK_SIZE, 2325 .maxauthsize = SHA256_DIGEST_SIZE, 2326 }, 2327 .caam = { 2328 .class1_alg_type = OP_ALG_ALGSEL_DES | 
OP_ALG_AAI_CBC, 2329 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2330 OP_ALG_AAI_HMAC_PRECOMP, 2331 }, 2332 }, 2333 { 2334 .aead = { 2335 .base = { 2336 .cra_name = "echainiv(authenc(hmac(sha256)," 2337 "cbc(des)))", 2338 .cra_driver_name = "echainiv-authenc-" 2339 "hmac-sha256-cbc-des-" 2340 "caam-qi", 2341 .cra_blocksize = DES_BLOCK_SIZE, 2342 }, 2343 .setkey = aead_setkey, 2344 .setauthsize = aead_setauthsize, 2345 .encrypt = aead_encrypt, 2346 .decrypt = aead_decrypt, 2347 .ivsize = DES_BLOCK_SIZE, 2348 .maxauthsize = SHA256_DIGEST_SIZE, 2349 }, 2350 .caam = { 2351 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2352 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2353 OP_ALG_AAI_HMAC_PRECOMP, 2354 .geniv = true, 2355 }, 2356 }, 2357 { 2358 .aead = { 2359 .base = { 2360 .cra_name = "authenc(hmac(sha384),cbc(des))", 2361 .cra_driver_name = "authenc-hmac-sha384-" 2362 "cbc-des-caam-qi", 2363 .cra_blocksize = DES_BLOCK_SIZE, 2364 }, 2365 .setkey = aead_setkey, 2366 .setauthsize = aead_setauthsize, 2367 .encrypt = aead_encrypt, 2368 .decrypt = aead_decrypt, 2369 .ivsize = DES_BLOCK_SIZE, 2370 .maxauthsize = SHA384_DIGEST_SIZE, 2371 }, 2372 .caam = { 2373 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2374 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2375 OP_ALG_AAI_HMAC_PRECOMP, 2376 }, 2377 }, 2378 { 2379 .aead = { 2380 .base = { 2381 .cra_name = "echainiv(authenc(hmac(sha384)," 2382 "cbc(des)))", 2383 .cra_driver_name = "echainiv-authenc-" 2384 "hmac-sha384-cbc-des-" 2385 "caam-qi", 2386 .cra_blocksize = DES_BLOCK_SIZE, 2387 }, 2388 .setkey = aead_setkey, 2389 .setauthsize = aead_setauthsize, 2390 .encrypt = aead_encrypt, 2391 .decrypt = aead_decrypt, 2392 .ivsize = DES_BLOCK_SIZE, 2393 .maxauthsize = SHA384_DIGEST_SIZE, 2394 }, 2395 .caam = { 2396 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2397 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2398 OP_ALG_AAI_HMAC_PRECOMP, 2399 .geniv = true, 2400 } 2401 }, 2402 { 2403 .aead = { 2404 .base = { 2405 .cra_name = "authenc(hmac(sha512),cbc(des))", 2406 .cra_driver_name = "authenc-hmac-sha512-" 2407 "cbc-des-caam-qi", 2408 .cra_blocksize = DES_BLOCK_SIZE, 2409 }, 2410 .setkey = aead_setkey, 2411 .setauthsize = aead_setauthsize, 2412 .encrypt = aead_encrypt, 2413 .decrypt = aead_decrypt, 2414 .ivsize = DES_BLOCK_SIZE, 2415 .maxauthsize = SHA512_DIGEST_SIZE, 2416 }, 2417 .caam = { 2418 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2419 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2420 OP_ALG_AAI_HMAC_PRECOMP, 2421 } 2422 }, 2423 { 2424 .aead = { 2425 .base = { 2426 .cra_name = "echainiv(authenc(hmac(sha512)," 2427 "cbc(des)))", 2428 .cra_driver_name = "echainiv-authenc-" 2429 "hmac-sha512-cbc-des-" 2430 "caam-qi", 2431 .cra_blocksize = DES_BLOCK_SIZE, 2432 }, 2433 .setkey = aead_setkey, 2434 .setauthsize = aead_setauthsize, 2435 .encrypt = aead_encrypt, 2436 .decrypt = aead_decrypt, 2437 .ivsize = DES_BLOCK_SIZE, 2438 .maxauthsize = SHA512_DIGEST_SIZE, 2439 }, 2440 .caam = { 2441 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2442 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2443 OP_ALG_AAI_HMAC_PRECOMP, 2444 .geniv = true, 2445 } 2446 }, 2447 }; 2448 2449 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 2450 bool uses_dkp) 2451 { 2452 struct caam_drv_private *priv; 2453 struct device *dev; 2454 2455 /* 2456 * distribute tfms across job rings to ensure in-order 2457 * crypto request processing per tfm 2458 */ 2459 ctx->jrdev = caam_jr_alloc(); 2460 if (IS_ERR(ctx->jrdev)) { 2461 pr_err("Job Ring 
Device allocation for transform failed\n"); 2462 return PTR_ERR(ctx->jrdev); 2463 } 2464 2465 dev = ctx->jrdev->parent; 2466 priv = dev_get_drvdata(dev); 2467 if (priv->era >= 6 && uses_dkp) 2468 ctx->dir = DMA_BIDIRECTIONAL; 2469 else 2470 ctx->dir = DMA_TO_DEVICE; 2471 2472 ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), 2473 ctx->dir); 2474 if (dma_mapping_error(dev, ctx->key_dma)) { 2475 dev_err(dev, "unable to map key\n"); 2476 caam_jr_free(ctx->jrdev); 2477 return -ENOMEM; 2478 } 2479 2480 /* copy descriptor header template value */ 2481 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 2482 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 2483 2484 ctx->qidev = dev; 2485 2486 spin_lock_init(&ctx->lock); 2487 ctx->drv_ctx[ENCRYPT] = NULL; 2488 ctx->drv_ctx[DECRYPT] = NULL; 2489 2490 return 0; 2491 } 2492 2493 static int caam_cra_init(struct crypto_skcipher *tfm) 2494 { 2495 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 2496 struct caam_skcipher_alg *caam_alg = 2497 container_of(alg, typeof(*caam_alg), skcipher); 2498 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); 2499 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; 2500 int ret = 0; 2501 2502 if (alg_aai == OP_ALG_AAI_XTS) { 2503 const char *tfm_name = crypto_tfm_alg_name(&tfm->base); 2504 struct crypto_skcipher *fallback; 2505 2506 fallback = crypto_alloc_skcipher(tfm_name, 0, 2507 CRYPTO_ALG_NEED_FALLBACK); 2508 if (IS_ERR(fallback)) { 2509 pr_err("Failed to allocate %s fallback: %ld\n", 2510 tfm_name, PTR_ERR(fallback)); 2511 return PTR_ERR(fallback); 2512 } 2513 2514 ctx->fallback = fallback; 2515 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + 2516 crypto_skcipher_reqsize(fallback)); 2517 } 2518 2519 ret = caam_init_common(ctx, &caam_alg->caam, false); 2520 if (ret && ctx->fallback) 2521 crypto_free_skcipher(ctx->fallback); 2522 2523 return ret; 2524 } 2525 2526 static int caam_aead_init(struct crypto_aead *tfm) 2527 { 2528 struct aead_alg *alg = crypto_aead_alg(tfm); 2529 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), 2530 aead); 2531 struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm); 2532 2533 return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); 2534 } 2535 2536 static void caam_exit_common(struct caam_ctx *ctx) 2537 { 2538 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); 2539 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2540 2541 dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), 2542 ctx->dir); 2543 2544 caam_jr_free(ctx->jrdev); 2545 } 2546 2547 static void caam_cra_exit(struct crypto_skcipher *tfm) 2548 { 2549 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); 2550 2551 if (ctx->fallback) 2552 crypto_free_skcipher(ctx->fallback); 2553 caam_exit_common(ctx); 2554 } 2555 2556 static void caam_aead_exit(struct crypto_aead *tfm) 2557 { 2558 caam_exit_common(crypto_aead_ctx_dma(tfm)); 2559 } 2560 2561 void caam_qi_algapi_exit(void) 2562 { 2563 int i; 2564 2565 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2566 struct caam_aead_alg *t_alg = driver_aeads + i; 2567 2568 if (t_alg->registered) 2569 crypto_unregister_aead(&t_alg->aead); 2570 } 2571 2572 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2573 struct caam_skcipher_alg *t_alg = driver_algs + i; 2574 2575 if (t_alg->registered) 2576 crypto_unregister_skcipher(&t_alg->skcipher); 2577 } 2578 } 2579 2580 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) 2581 { 2582 struct skcipher_alg *alg = &t_alg->skcipher; 
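	/*
	 * Finish a driver_algs[] skcipher template before registration: the
	 * assignments below fill in the module owner, the CAAM priority, the
	 * DMA-padded per-tfm context size and the ASYNC / ALLOCATES_MEMORY /
	 * KERN_DRIVER_ONLY flags, and hook up caam_cra_init()/caam_cra_exit()
	 * as the tfm constructor/destructor.
	 */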
2583 2584 alg->base.cra_module = THIS_MODULE; 2585 alg->base.cra_priority = CAAM_CRA_PRIORITY; 2586 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); 2587 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 2588 CRYPTO_ALG_KERN_DRIVER_ONLY); 2589 2590 alg->init = caam_cra_init; 2591 alg->exit = caam_cra_exit; 2592 } 2593 2594 static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 2595 { 2596 struct aead_alg *alg = &t_alg->aead; 2597 2598 alg->base.cra_module = THIS_MODULE; 2599 alg->base.cra_priority = CAAM_CRA_PRIORITY; 2600 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); 2601 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 2602 CRYPTO_ALG_KERN_DRIVER_ONLY; 2603 2604 alg->init = caam_aead_init; 2605 alg->exit = caam_aead_exit; 2606 } 2607 2608 int caam_qi_algapi_init(struct device *ctrldev) 2609 { 2610 struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 2611 int i = 0, err = 0; 2612 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; 2613 unsigned int md_limit = SHA512_DIGEST_SIZE; 2614 bool registered = false; 2615 2616 /* Make sure this runs only on (DPAA 1.x) QI */ 2617 if (!priv->qi_present || caam_dpaa2) 2618 return 0; 2619 2620 /* 2621 * Register crypto algorithms the device supports. 2622 * First, detect presence and attributes of DES, AES, and MD blocks. 2623 */ 2624 if (priv->era < 10) { 2625 u32 cha_vid, cha_inst; 2626 2627 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 2628 aes_vid = cha_vid & CHA_ID_LS_AES_MASK; 2629 md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2630 2631 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 2632 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> 2633 CHA_ID_LS_DES_SHIFT; 2634 aes_inst = cha_inst & CHA_ID_LS_AES_MASK; 2635 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2636 } else { 2637 u32 aesa, mdha; 2638 2639 aesa = rd_reg32(&priv->ctrl->vreg.aesa); 2640 mdha = rd_reg32(&priv->ctrl->vreg.mdha); 2641 2642 aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2643 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2644 2645 des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; 2646 aes_inst = aesa & CHA_VER_NUM_MASK; 2647 md_inst = mdha & CHA_VER_NUM_MASK; 2648 } 2649 2650 /* If MD is present, limit digest size based on LP256 */ 2651 if (md_inst && md_vid == CHA_VER_VID_MD_LP256) 2652 md_limit = SHA256_DIGEST_SIZE; 2653 2654 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2655 struct caam_skcipher_alg *t_alg = driver_algs + i; 2656 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; 2657 2658 /* Skip DES algorithms if not supported by device */ 2659 if (!des_inst && 2660 ((alg_sel == OP_ALG_ALGSEL_3DES) || 2661 (alg_sel == OP_ALG_ALGSEL_DES))) 2662 continue; 2663 2664 /* Skip AES algorithms if not supported by device */ 2665 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 2666 continue; 2667 2668 caam_skcipher_alg_init(t_alg); 2669 2670 err = crypto_register_skcipher(&t_alg->skcipher); 2671 if (err) { 2672 dev_warn(ctrldev, "%s alg registration failed\n", 2673 t_alg->skcipher.base.cra_driver_name); 2674 continue; 2675 } 2676 2677 t_alg->registered = true; 2678 registered = true; 2679 } 2680 2681 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2682 struct caam_aead_alg *t_alg = driver_aeads + i; 2683 u32 c1_alg_sel = t_alg->caam.class1_alg_type & 2684 OP_ALG_ALGSEL_MASK; 2685 u32 c2_alg_sel = t_alg->caam.class2_alg_type & 2686 OP_ALG_ALGSEL_MASK; 2687 u32 alg_aai = 
t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; 2688 2689 /* Skip DES algorithms if not supported by device */ 2690 if (!des_inst && 2691 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || 2692 (c1_alg_sel == OP_ALG_ALGSEL_DES))) 2693 continue; 2694 2695 /* Skip AES algorithms if not supported by device */ 2696 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) 2697 continue; 2698 2699 /* 2700 * Check support for AES algorithms not available 2701 * on LP devices. 2702 */ 2703 if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) 2704 continue; 2705 2706 /* 2707 * Skip algorithms requiring message digests 2708 * if MD or MD size is not supported by device. 2709 */ 2710 if (c2_alg_sel && 2711 (!md_inst || (t_alg->aead.maxauthsize > md_limit))) 2712 continue; 2713 2714 caam_aead_alg_init(t_alg); 2715 2716 err = crypto_register_aead(&t_alg->aead); 2717 if (err) { 2718 pr_warn("%s alg registration failed\n", 2719 t_alg->aead.base.cra_driver_name); 2720 continue; 2721 } 2722 2723 t_alg->registered = true; 2724 registered = true; 2725 } 2726 2727 if (registered) 2728 dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); 2729 2730 return err; 2731 } 2732
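/*
 * Usage sketch (illustrative only, not part of this driver): once
 * caam_qi_algapi_init() has run, the templates above are reachable through
 * the generic crypto API by algorithm name, and the "-caam-qi" driver names
 * are selected whenever CAAM_CRA_PRIORITY outranks competing implementations.
 * Assuming a caller already holds a key blob in the authenc layout parsed by
 * crypto_authenc_extractkeys(), a minimal request setup would look like:
 *
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	...	(allocate an aead_request, set src/dst scatterlists and IV,
 *		 then submit via crypto_aead_encrypt()/crypto_aead_decrypt())
 *	crypto_free_aead(tfm);
 */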