1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Freescale FSL CAAM support for crypto API over QI backend. 4 * Based on caamalg.c 5 * 6 * Copyright 2013-2016 Freescale Semiconductor, Inc. 7 * Copyright 2016-2019 NXP 8 */ 9 10 #include "compat.h" 11 #include "ctrl.h" 12 #include "regs.h" 13 #include "intern.h" 14 #include "desc_constr.h" 15 #include "error.h" 16 #include "sg_sw_qm.h" 17 #include "key_gen.h" 18 #include "qi.h" 19 #include "jr.h" 20 #include "caamalg_desc.h" 21 22 /* 23 * crypto alg 24 */ 25 #define CAAM_CRA_PRIORITY 2000 26 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ 27 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ 28 SHA512_DIGEST_SIZE * 2) 29 30 #define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \ 31 CAAM_MAX_KEY_SIZE) 32 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 33 34 struct caam_alg_entry { 35 int class1_alg_type; 36 int class2_alg_type; 37 bool rfc3686; 38 bool geniv; 39 bool nodkp; 40 }; 41 42 struct caam_aead_alg { 43 struct aead_alg aead; 44 struct caam_alg_entry caam; 45 bool registered; 46 }; 47 48 struct caam_skcipher_alg { 49 struct skcipher_alg skcipher; 50 struct caam_alg_entry caam; 51 bool registered; 52 }; 53 54 /* 55 * per-session context 56 */ 57 struct caam_ctx { 58 struct device *jrdev; 59 u32 sh_desc_enc[DESC_MAX_USED_LEN]; 60 u32 sh_desc_dec[DESC_MAX_USED_LEN]; 61 u8 key[CAAM_MAX_KEY_SIZE]; 62 dma_addr_t key_dma; 63 enum dma_data_direction dir; 64 struct alginfo adata; 65 struct alginfo cdata; 66 unsigned int authsize; 67 struct device *qidev; 68 spinlock_t lock; /* Protects multiple init of driver context */ 69 struct caam_drv_ctx *drv_ctx[NUM_OP]; 70 }; 71 72 static int aead_set_sh_desc(struct crypto_aead *aead) 73 { 74 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 75 typeof(*alg), aead); 76 struct caam_ctx *ctx = crypto_aead_ctx(aead); 77 unsigned int ivsize = crypto_aead_ivsize(aead); 78 u32 ctx1_iv_off = 0; 79 u32 *nonce = NULL; 80 unsigned int data_len[2]; 81 u32 inl_mask; 82 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 83 OP_ALG_AAI_CTR_MOD128); 84 const bool is_rfc3686 = alg->caam.rfc3686; 85 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 86 87 if (!ctx->cdata.keylen || !ctx->authsize) 88 return 0; 89 90 /* 91 * AES-CTR needs to load IV in CONTEXT1 reg 92 * at an offset of 128bits (16bytes) 93 * CONTEXT1[255:128] = IV 94 */ 95 if (ctr_mode) 96 ctx1_iv_off = 16; 97 98 /* 99 * RFC3686 specific: 100 * CONTEXT1[255:128] = {NONCE, IV, COUNTER} 101 */ 102 if (is_rfc3686) { 103 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 104 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + 105 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); 106 } 107 108 /* 109 * In case |user key| > |derived key|, using DKP<imm,imm> would result 110 * in invalid opcodes (last bytes of user key) in the resulting 111 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key 112 * addresses are needed. 113 */ 114 ctx->adata.key_virt = ctx->key; 115 ctx->adata.key_dma = ctx->key_dma; 116 117 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 118 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 119 120 data_len[0] = ctx->adata.keylen_pad; 121 data_len[1] = ctx->cdata.keylen; 122 123 if (alg->caam.geniv) 124 goto skip_enc; 125 126 /* aead_encrypt shared descriptor */ 127 if (desc_inline_query(DESC_QI_AEAD_ENC_LEN + 128 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 129 DESC_JOB_IO_LEN, data_len, &inl_mask, 130 ARRAY_SIZE(data_len)) < 0) 131 return -EINVAL; 132 133 ctx->adata.key_inline = !!(inl_mask & 1); 134 ctx->cdata.key_inline = !!(inl_mask & 2); 135 136 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 137 ivsize, ctx->authsize, is_rfc3686, nonce, 138 ctx1_iv_off, true, ctrlpriv->era); 139 140 skip_enc: 141 /* aead_decrypt shared descriptor */ 142 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + 143 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 144 DESC_JOB_IO_LEN, data_len, &inl_mask, 145 ARRAY_SIZE(data_len)) < 0) 146 return -EINVAL; 147 148 ctx->adata.key_inline = !!(inl_mask & 1); 149 ctx->cdata.key_inline = !!(inl_mask & 2); 150 151 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, 152 ivsize, ctx->authsize, alg->caam.geniv, 153 is_rfc3686, nonce, ctx1_iv_off, true, 154 ctrlpriv->era); 155 156 if (!alg->caam.geniv) 157 goto skip_givenc; 158 159 /* aead_givencrypt shared descriptor */ 160 if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN + 161 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 162 DESC_JOB_IO_LEN, data_len, &inl_mask, 163 ARRAY_SIZE(data_len)) < 0) 164 return -EINVAL; 165 166 ctx->adata.key_inline = !!(inl_mask & 1); 167 ctx->cdata.key_inline = !!(inl_mask & 2); 168 169 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 170 ivsize, ctx->authsize, is_rfc3686, nonce, 171 ctx1_iv_off, true, ctrlpriv->era); 172 173 skip_givenc: 174 return 0; 175 } 176 177 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 178 { 179 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 180 181 ctx->authsize = authsize; 182 aead_set_sh_desc(authenc); 183 184 return 0; 185 } 186 187 static int aead_setkey(struct crypto_aead *aead, const u8 *key, 188 unsigned int keylen) 189 { 190 struct caam_ctx *ctx = crypto_aead_ctx(aead); 191 struct device *jrdev = ctx->jrdev; 192 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 193 struct crypto_authenc_keys keys; 194 int ret = 0; 195 196 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 197 goto badkey; 198 199 dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", 200 keys.authkeylen + keys.enckeylen, keys.enckeylen, 201 keys.authkeylen); 202 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 203 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 204 205 /* 206 * If DKP is supported, use it in the shared descriptor to generate 207 * the split key. 208 */ 209 if (ctrlpriv->era >= 6) { 210 ctx->adata.keylen = keys.authkeylen; 211 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 212 OP_ALG_ALGSEL_MASK); 213 214 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) 215 goto badkey; 216 217 memcpy(ctx->key, keys.authkey, keys.authkeylen); 218 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 219 keys.enckeylen); 220 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 221 ctx->adata.keylen_pad + 222 keys.enckeylen, ctx->dir); 223 goto skip_split_key; 224 } 225 226 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, 227 keys.authkeylen, CAAM_MAX_KEY_SIZE - 228 keys.enckeylen); 229 if (ret) 230 goto badkey; 231 232 /* postpend encryption key to auth split key */ 233 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 234 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 235 ctx->adata.keylen_pad + keys.enckeylen, 236 ctx->dir); 237 238 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", 239 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 240 ctx->adata.keylen_pad + keys.enckeylen, 1); 241 242 skip_split_key: 243 ctx->cdata.keylen = keys.enckeylen; 244 245 ret = aead_set_sh_desc(aead); 246 if (ret) 247 goto badkey; 248 249 /* Now update the driver contexts with the new shared descriptor */ 250 if (ctx->drv_ctx[ENCRYPT]) { 251 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 252 ctx->sh_desc_enc); 253 if (ret) { 254 dev_err(jrdev, "driver enc context update failed\n"); 255 goto badkey; 256 } 257 } 258 259 if (ctx->drv_ctx[DECRYPT]) { 260 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 261 ctx->sh_desc_dec); 262 if (ret) { 263 dev_err(jrdev, "driver dec context update failed\n"); 264 goto badkey; 265 } 266 } 267 268 memzero_explicit(&keys, sizeof(keys)); 269 return ret; 270 badkey: 271 memzero_explicit(&keys, sizeof(keys)); 272 return -EINVAL; 273 } 274 275 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, 276 unsigned int keylen) 277 { 278 struct crypto_authenc_keys keys; 279 int err; 280 281 err = crypto_authenc_extractkeys(&keys, key, keylen); 282 if (unlikely(err)) 283 return err; 284 285 err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: 286 aead_setkey(aead, key, keylen); 287 288 memzero_explicit(&keys, sizeof(keys)); 289 return err; 290 } 291 292 static int gcm_set_sh_desc(struct crypto_aead *aead) 293 { 294 struct caam_ctx *ctx = crypto_aead_ctx(aead); 295 unsigned int ivsize = crypto_aead_ivsize(aead); 296 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 297 ctx->cdata.keylen; 298 299 if (!ctx->cdata.keylen || !ctx->authsize) 300 return 0; 301 302 /* 303 * Job Descriptor and Shared Descriptor 304 * must fit into the 64-word Descriptor h/w Buffer 305 */ 306 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { 307 ctx->cdata.key_inline = true; 308 ctx->cdata.key_virt = ctx->key; 309 } else { 310 ctx->cdata.key_inline = false; 311 ctx->cdata.key_dma = ctx->key_dma; 312 } 313 314 cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 315 ctx->authsize, true); 316 317 /* 318 * Job Descriptor and Shared Descriptor 319 * must fit into the 64-word Descriptor h/w Buffer 320 */ 321 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { 322 ctx->cdata.key_inline = true; 323 ctx->cdata.key_virt = ctx->key; 324 } else { 325 ctx->cdata.key_inline = false; 326 ctx->cdata.key_dma = ctx->key_dma; 327 } 328 329 cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 330 ctx->authsize, true); 331 332 return 0; 333 } 334 335 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 336 { 337 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 338 int err; 339 340 err = crypto_gcm_check_authsize(authsize); 341 if (err) 342 return err; 343 344 ctx->authsize = authsize; 345 gcm_set_sh_desc(authenc); 346 347 return 0; 348 } 349 350 static int gcm_setkey(struct crypto_aead *aead, 351 const u8 *key, unsigned int keylen) 352 { 353 struct caam_ctx *ctx = crypto_aead_ctx(aead); 354 struct device *jrdev = ctx->jrdev; 355 int ret; 356 357 ret = aes_check_keylen(keylen); 358 if (ret) 359 return ret; 360 361 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 362 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 363 364 memcpy(ctx->key, key, keylen); 365 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, 366 ctx->dir); 367 ctx->cdata.keylen = keylen; 368 369 ret = gcm_set_sh_desc(aead); 370 if (ret) 371 return ret; 372 373 /* Now update the driver contexts with the new shared descriptor */ 374 if (ctx->drv_ctx[ENCRYPT]) { 375 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 376 ctx->sh_desc_enc); 377 if (ret) { 378 dev_err(jrdev, "driver enc context update failed\n"); 379 return ret; 380 } 381 } 382 383 if (ctx->drv_ctx[DECRYPT]) { 384 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 385 ctx->sh_desc_dec); 386 if (ret) { 387 dev_err(jrdev, "driver dec context update failed\n"); 388 return ret; 389 } 390 } 391 392 return 0; 393 } 394 395 static int rfc4106_set_sh_desc(struct crypto_aead *aead) 396 { 397 struct caam_ctx *ctx = crypto_aead_ctx(aead); 398 unsigned int ivsize = crypto_aead_ivsize(aead); 399 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 400 ctx->cdata.keylen; 401 402 if (!ctx->cdata.keylen || !ctx->authsize) 403 return 0; 404 405 ctx->cdata.key_virt = ctx->key; 406 407 /* 408 * Job Descriptor and Shared Descriptor 409 * must fit into the 64-word Descriptor h/w Buffer 410 */ 411 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { 412 ctx->cdata.key_inline = true; 413 } else { 414 ctx->cdata.key_inline = false; 415 ctx->cdata.key_dma = ctx->key_dma; 416 } 417 418 cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 419 ctx->authsize, true); 420 421 /* 422 * Job Descriptor and Shared Descriptor 423 * must fit into the 64-word Descriptor h/w Buffer 424 */ 425 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { 426 ctx->cdata.key_inline = true; 427 } else { 428 ctx->cdata.key_inline = false; 429 ctx->cdata.key_dma = ctx->key_dma; 430 } 431 432 cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 433 ctx->authsize, true); 434 435 return 0; 436 } 437 438 static int rfc4106_setauthsize(struct crypto_aead *authenc, 439 unsigned int authsize) 440 { 441 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 442 int err; 443 444 err = crypto_rfc4106_check_authsize(authsize); 445 if (err) 446 return err; 447 448 ctx->authsize = authsize; 449 rfc4106_set_sh_desc(authenc); 450 451 return 0; 452 } 453 454 static int rfc4106_setkey(struct crypto_aead *aead, 455 const u8 *key, unsigned int keylen) 456 { 457 struct caam_ctx *ctx = crypto_aead_ctx(aead); 458 struct device *jrdev = ctx->jrdev; 459 int ret; 460 461 ret = aes_check_keylen(keylen - 4); 462 if (ret) 463 return ret; 464 465 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 466 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 467 468 memcpy(ctx->key, key, keylen); 469 /* 470 * The last four bytes of the key material are used as the salt value 471 * in the nonce. Update the AES key length. 472 */ 473 ctx->cdata.keylen = keylen - 4; 474 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 475 ctx->cdata.keylen, ctx->dir); 476 477 ret = rfc4106_set_sh_desc(aead); 478 if (ret) 479 return ret; 480 481 /* Now update the driver contexts with the new shared descriptor */ 482 if (ctx->drv_ctx[ENCRYPT]) { 483 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 484 ctx->sh_desc_enc); 485 if (ret) { 486 dev_err(jrdev, "driver enc context update failed\n"); 487 return ret; 488 } 489 } 490 491 if (ctx->drv_ctx[DECRYPT]) { 492 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 493 ctx->sh_desc_dec); 494 if (ret) { 495 dev_err(jrdev, "driver dec context update failed\n"); 496 return ret; 497 } 498 } 499 500 return 0; 501 } 502 503 static int rfc4543_set_sh_desc(struct crypto_aead *aead) 504 { 505 struct caam_ctx *ctx = crypto_aead_ctx(aead); 506 unsigned int ivsize = crypto_aead_ivsize(aead); 507 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 508 ctx->cdata.keylen; 509 510 if (!ctx->cdata.keylen || !ctx->authsize) 511 return 0; 512 513 ctx->cdata.key_virt = ctx->key; 514 515 /* 516 * Job Descriptor and Shared Descriptor 517 * must fit into the 64-word Descriptor h/w Buffer 518 */ 519 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { 520 ctx->cdata.key_inline = true; 521 } else { 522 ctx->cdata.key_inline = false; 523 ctx->cdata.key_dma = ctx->key_dma; 524 } 525 526 cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 527 ctx->authsize, true); 528 529 /* 530 * Job Descriptor and Shared Descriptor 531 * must fit into the 64-word Descriptor h/w Buffer 532 */ 533 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { 534 ctx->cdata.key_inline = true; 535 } else { 536 ctx->cdata.key_inline = false; 537 ctx->cdata.key_dma = ctx->key_dma; 538 } 539 540 cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 541 ctx->authsize, true); 542 543 return 0; 544 } 545 546 static int rfc4543_setauthsize(struct crypto_aead *authenc, 547 unsigned int authsize) 548 { 549 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 550 551 if (authsize != 16) 552 return -EINVAL; 553 554 ctx->authsize = authsize; 555 rfc4543_set_sh_desc(authenc); 556 557 return 0; 558 } 559 560 static int rfc4543_setkey(struct crypto_aead *aead, 561 const u8 *key, unsigned int keylen) 562 { 563 struct caam_ctx *ctx = crypto_aead_ctx(aead); 564 struct device *jrdev = ctx->jrdev; 565 int ret; 566 567 ret = aes_check_keylen(keylen - 4); 568 if (ret) 569 return ret; 570 571 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 572 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 573 574 memcpy(ctx->key, key, keylen); 575 /* 576 * The last four bytes of the key material are used as the salt value 577 * in the nonce. Update the AES key length. 578 */ 579 ctx->cdata.keylen = keylen - 4; 580 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 581 ctx->cdata.keylen, ctx->dir); 582 583 ret = rfc4543_set_sh_desc(aead); 584 if (ret) 585 return ret; 586 587 /* Now update the driver contexts with the new shared descriptor */ 588 if (ctx->drv_ctx[ENCRYPT]) { 589 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 590 ctx->sh_desc_enc); 591 if (ret) { 592 dev_err(jrdev, "driver enc context update failed\n"); 593 return ret; 594 } 595 } 596 597 if (ctx->drv_ctx[DECRYPT]) { 598 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 599 ctx->sh_desc_dec); 600 if (ret) { 601 dev_err(jrdev, "driver dec context update failed\n"); 602 return ret; 603 } 604 } 605 606 return 0; 607 } 608 609 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 610 unsigned int keylen, const u32 ctx1_iv_off) 611 { 612 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 613 struct caam_skcipher_alg *alg = 614 container_of(crypto_skcipher_alg(skcipher), typeof(*alg), 615 skcipher); 616 struct device *jrdev = ctx->jrdev; 617 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 618 const bool is_rfc3686 = alg->caam.rfc3686; 619 int ret = 0; 620 621 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 622 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 623 624 ctx->cdata.keylen = keylen; 625 ctx->cdata.key_virt = key; 626 ctx->cdata.key_inline = true; 627 628 /* skcipher encrypt, decrypt shared descriptors */ 629 cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 630 is_rfc3686, ctx1_iv_off); 631 cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 632 is_rfc3686, ctx1_iv_off); 633 634 /* Now update the driver contexts with the new shared descriptor */ 635 if (ctx->drv_ctx[ENCRYPT]) { 636 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 637 ctx->sh_desc_enc); 638 if (ret) { 639 dev_err(jrdev, "driver enc context update failed\n"); 640 return -EINVAL; 641 } 642 } 643 644 if (ctx->drv_ctx[DECRYPT]) { 645 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 646 ctx->sh_desc_dec); 647 if (ret) { 648 dev_err(jrdev, "driver dec context update failed\n"); 649 return -EINVAL; 650 } 651 } 652 653 return ret; 654 } 655 656 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, 657 const u8 *key, unsigned int keylen) 658 { 659 int err; 660 661 err = aes_check_keylen(keylen); 662 if (err) 663 return err; 664 665 return skcipher_setkey(skcipher, key, keylen, 0); 666 } 667 668 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, 669 const u8 *key, unsigned int keylen) 670 { 671 u32 ctx1_iv_off; 672 int err; 673 674 /* 675 * RFC3686 specific: 676 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} 677 * | *key = {KEY, NONCE} 678 */ 679 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 680 keylen -= CTR_RFC3686_NONCE_SIZE; 681 682 err = aes_check_keylen(keylen); 683 if (err) 684 return err; 685 686 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); 687 } 688 689 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, 690 const u8 *key, unsigned int keylen) 691 { 692 u32 ctx1_iv_off; 693 int err; 694 695 /* 696 * AES-CTR needs to load IV in CONTEXT1 reg 697 * at an offset of 128bits (16bytes) 698 * CONTEXT1[255:128] = IV 699 */ 700 ctx1_iv_off = 16; 701 702 err = aes_check_keylen(keylen); 703 if (err) 704 return err; 705 706 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); 707 } 708 709 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, 710 const u8 *key, unsigned int keylen) 711 { 712 return verify_skcipher_des3_key(skcipher, key) ?: 713 skcipher_setkey(skcipher, key, keylen, 0); 714 } 715 716 static int des_skcipher_setkey(struct crypto_skcipher *skcipher, 717 const u8 *key, unsigned int keylen) 718 { 719 return verify_skcipher_des_key(skcipher, key) ?: 720 skcipher_setkey(skcipher, key, keylen, 0); 721 } 722 723 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 724 unsigned int keylen) 725 { 726 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 727 struct device *jrdev = ctx->jrdev; 728 int ret = 0; 729 730 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 731 dev_dbg(jrdev, "key size mismatch\n"); 732 return -EINVAL; 733 } 734 735 ctx->cdata.keylen = keylen; 736 ctx->cdata.key_virt = key; 737 ctx->cdata.key_inline = true; 738 739 /* xts skcipher encrypt, decrypt shared descriptors */ 740 cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata); 741 cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata); 742 743 /* Now update the driver contexts with the new shared descriptor */ 744 if (ctx->drv_ctx[ENCRYPT]) { 745 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 746 ctx->sh_desc_enc); 747 if (ret) { 748 dev_err(jrdev, "driver enc context update failed\n"); 749 return -EINVAL; 750 } 751 } 752 753 if (ctx->drv_ctx[DECRYPT]) { 754 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 755 ctx->sh_desc_dec); 756 if (ret) { 757 dev_err(jrdev, "driver dec context update failed\n"); 758 return -EINVAL; 759 } 760 } 761 762 return ret; 763 } 764 765 /* 766 * aead_edesc - s/w-extended aead descriptor 767 * @src_nents: number of segments in input scatterlist 768 * @dst_nents: number of segments in output scatterlist 769 * @iv_dma: dma address of iv for checking continuity and link table 770 * @qm_sg_bytes: length of dma mapped h/w link table 771 * @qm_sg_dma: bus physical mapped address of h/w link table 772 * @assoclen: associated data length, in CAAM endianness 773 * @assoclen_dma: bus physical mapped address of req->assoclen 774 * @drv_req: driver-specific request structure 775 * @sgt: the h/w link table, followed by IV 776 */ 777 struct aead_edesc { 778 int src_nents; 779 int dst_nents; 780 dma_addr_t iv_dma; 781 int qm_sg_bytes; 782 dma_addr_t qm_sg_dma; 783 unsigned int assoclen; 784 dma_addr_t assoclen_dma; 785 struct caam_drv_req drv_req; 786 struct qm_sg_entry sgt[]; 787 }; 788 789 /* 790 * skcipher_edesc - s/w-extended skcipher descriptor 791 * @src_nents: number of segments in input scatterlist 792 * @dst_nents: number of segments in output scatterlist 793 * @iv_dma: dma address of iv for checking continuity and link table 794 * @qm_sg_bytes: length of dma mapped h/w link table 795 * @qm_sg_dma: bus physical mapped address of h/w link table 796 * @drv_req: driver-specific request structure 797 * @sgt: the h/w link table, followed by IV 798 */ 799 struct skcipher_edesc { 800 int src_nents; 801 int dst_nents; 802 dma_addr_t iv_dma; 803 int qm_sg_bytes; 804 dma_addr_t qm_sg_dma; 805 struct caam_drv_req drv_req; 806 struct qm_sg_entry sgt[]; 807 }; 808 809 static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, 810 enum optype type) 811 { 812 /* 813 * This function is called on the fast path with values of 'type' 814 * known at compile time. Invalid arguments are not expected and 815 * thus no checks are made. 816 */ 817 struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type]; 818 u32 *desc; 819 820 if (unlikely(!drv_ctx)) { 821 spin_lock(&ctx->lock); 822 823 /* Read again to check if some other core init drv_ctx */ 824 drv_ctx = ctx->drv_ctx[type]; 825 if (!drv_ctx) { 826 int cpu; 827 828 if (type == ENCRYPT) 829 desc = ctx->sh_desc_enc; 830 else /* (type == DECRYPT) */ 831 desc = ctx->sh_desc_dec; 832 833 cpu = smp_processor_id(); 834 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); 835 if (!IS_ERR_OR_NULL(drv_ctx)) 836 drv_ctx->op_type = type; 837 838 ctx->drv_ctx[type] = drv_ctx; 839 } 840 841 spin_unlock(&ctx->lock); 842 } 843 844 return drv_ctx; 845 } 846 847 static void caam_unmap(struct device *dev, struct scatterlist *src, 848 struct scatterlist *dst, int src_nents, 849 int dst_nents, dma_addr_t iv_dma, int ivsize, 850 enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, 851 int qm_sg_bytes) 852 { 853 if (dst != src) { 854 if (src_nents) 855 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 856 if (dst_nents) 857 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 858 } else { 859 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 860 } 861 862 if (iv_dma) 863 dma_unmap_single(dev, iv_dma, ivsize, iv_dir); 864 if (qm_sg_bytes) 865 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); 866 } 867 868 static void aead_unmap(struct device *dev, 869 struct aead_edesc *edesc, 870 struct aead_request *req) 871 { 872 struct crypto_aead *aead = crypto_aead_reqtfm(req); 873 int ivsize = crypto_aead_ivsize(aead); 874 875 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 876 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, 877 edesc->qm_sg_bytes); 878 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 879 } 880 881 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, 882 struct skcipher_request *req) 883 { 884 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 885 int ivsize = crypto_skcipher_ivsize(skcipher); 886 887 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 888 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, 889 edesc->qm_sg_bytes); 890 } 891 892 static void aead_done(struct caam_drv_req *drv_req, u32 status) 893 { 894 struct device *qidev; 895 struct aead_edesc *edesc; 896 struct aead_request *aead_req = drv_req->app_ctx; 897 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); 898 struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); 899 int ecode = 0; 900 901 qidev = caam_ctx->qidev; 902 903 if (unlikely(status)) 904 ecode = caam_jr_strstatus(qidev, status); 905 906 edesc = container_of(drv_req, typeof(*edesc), drv_req); 907 aead_unmap(qidev, edesc, aead_req); 908 909 aead_request_complete(aead_req, ecode); 910 qi_cache_free(edesc); 911 } 912 913 /* 914 * allocate and map the aead extended descriptor 915 */ 916 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 917 bool encrypt) 918 { 919 struct crypto_aead *aead = crypto_aead_reqtfm(req); 920 struct caam_ctx *ctx = crypto_aead_ctx(aead); 921 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 922 typeof(*alg), aead); 923 struct device *qidev = ctx->qidev; 924 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 925 GFP_KERNEL : GFP_ATOMIC; 926 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 927 int src_len, dst_len = 0; 928 struct aead_edesc *edesc; 929 dma_addr_t qm_sg_dma, iv_dma = 0; 930 int ivsize = 0; 931 unsigned int authsize = ctx->authsize; 932 int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes; 933 int in_len, out_len; 934 struct qm_sg_entry *sg_table, *fd_sgt; 935 struct caam_drv_ctx *drv_ctx; 936 937 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); 938 if (IS_ERR_OR_NULL(drv_ctx)) 939 return (struct aead_edesc *)drv_ctx; 940 941 /* allocate space for base edesc and hw desc commands, link tables */ 942 edesc = qi_cache_alloc(GFP_DMA | flags); 943 if (unlikely(!edesc)) { 944 dev_err(qidev, "could not allocate extended descriptor\n"); 945 return ERR_PTR(-ENOMEM); 946 } 947 948 if (likely(req->src == req->dst)) { 949 src_len = req->assoclen + req->cryptlen + 950 (encrypt ? authsize : 0); 951 952 src_nents = sg_nents_for_len(req->src, src_len); 953 if (unlikely(src_nents < 0)) { 954 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 955 src_len); 956 qi_cache_free(edesc); 957 return ERR_PTR(src_nents); 958 } 959 960 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 961 DMA_BIDIRECTIONAL); 962 if (unlikely(!mapped_src_nents)) { 963 dev_err(qidev, "unable to map source\n"); 964 qi_cache_free(edesc); 965 return ERR_PTR(-ENOMEM); 966 } 967 } else { 968 src_len = req->assoclen + req->cryptlen; 969 dst_len = src_len + (encrypt ? authsize : (-authsize)); 970 971 src_nents = sg_nents_for_len(req->src, src_len); 972 if (unlikely(src_nents < 0)) { 973 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 974 src_len); 975 qi_cache_free(edesc); 976 return ERR_PTR(src_nents); 977 } 978 979 dst_nents = sg_nents_for_len(req->dst, dst_len); 980 if (unlikely(dst_nents < 0)) { 981 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 982 dst_len); 983 qi_cache_free(edesc); 984 return ERR_PTR(dst_nents); 985 } 986 987 if (src_nents) { 988 mapped_src_nents = dma_map_sg(qidev, req->src, 989 src_nents, DMA_TO_DEVICE); 990 if (unlikely(!mapped_src_nents)) { 991 dev_err(qidev, "unable to map source\n"); 992 qi_cache_free(edesc); 993 return ERR_PTR(-ENOMEM); 994 } 995 } else { 996 mapped_src_nents = 0; 997 } 998 999 if (dst_nents) { 1000 mapped_dst_nents = dma_map_sg(qidev, req->dst, 1001 dst_nents, 1002 DMA_FROM_DEVICE); 1003 if (unlikely(!mapped_dst_nents)) { 1004 dev_err(qidev, "unable to map destination\n"); 1005 dma_unmap_sg(qidev, req->src, src_nents, 1006 DMA_TO_DEVICE); 1007 qi_cache_free(edesc); 1008 return ERR_PTR(-ENOMEM); 1009 } 1010 } else { 1011 mapped_dst_nents = 0; 1012 } 1013 } 1014 1015 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) 1016 ivsize = crypto_aead_ivsize(aead); 1017 1018 /* 1019 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. 1020 * Input is not contiguous. 1021 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1022 * the end of the table by allocating more S/G entries. Logic: 1023 * if (src != dst && output S/G) 1024 * pad output S/G, if needed 1025 * else if (src == dst && S/G) 1026 * overlapping S/Gs; pad one of them 1027 * else if (input S/G) ... 1028 * pad input S/G, if needed 1029 */ 1030 qm_sg_ents = 1 + !!ivsize + mapped_src_nents; 1031 if (mapped_dst_nents > 1) 1032 qm_sg_ents += pad_sg_nents(mapped_dst_nents); 1033 else if ((req->src == req->dst) && (mapped_src_nents > 1)) 1034 qm_sg_ents = max(pad_sg_nents(qm_sg_ents), 1035 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); 1036 else 1037 qm_sg_ents = pad_sg_nents(qm_sg_ents); 1038 1039 sg_table = &edesc->sgt[0]; 1040 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); 1041 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > 1042 CAAM_QI_MEMCACHE_SIZE)) { 1043 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1044 qm_sg_ents, ivsize); 1045 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1046 0, DMA_NONE, 0, 0); 1047 qi_cache_free(edesc); 1048 return ERR_PTR(-ENOMEM); 1049 } 1050 1051 if (ivsize) { 1052 u8 *iv = (u8 *)(sg_table + qm_sg_ents); 1053 1054 /* Make sure IV is located in a DMAable area */ 1055 memcpy(iv, req->iv, ivsize); 1056 1057 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); 1058 if (dma_mapping_error(qidev, iv_dma)) { 1059 dev_err(qidev, "unable to map IV\n"); 1060 caam_unmap(qidev, req->src, req->dst, src_nents, 1061 dst_nents, 0, 0, DMA_NONE, 0, 0); 1062 qi_cache_free(edesc); 1063 return ERR_PTR(-ENOMEM); 1064 } 1065 } 1066 1067 edesc->src_nents = src_nents; 1068 edesc->dst_nents = dst_nents; 1069 edesc->iv_dma = iv_dma; 1070 edesc->drv_req.app_ctx = req; 1071 edesc->drv_req.cbk = aead_done; 1072 edesc->drv_req.drv_ctx = drv_ctx; 1073 1074 edesc->assoclen = cpu_to_caam32(req->assoclen); 1075 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, 1076 DMA_TO_DEVICE); 1077 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { 1078 dev_err(qidev, "unable to map assoclen\n"); 1079 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1080 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 1081 qi_cache_free(edesc); 1082 return ERR_PTR(-ENOMEM); 1083 } 1084 1085 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); 1086 qm_sg_index++; 1087 if (ivsize) { 1088 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); 1089 qm_sg_index++; 1090 } 1091 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); 1092 qm_sg_index += mapped_src_nents; 1093 1094 if (mapped_dst_nents > 1) 1095 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); 1096 1097 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); 1098 if (dma_mapping_error(qidev, qm_sg_dma)) { 1099 dev_err(qidev, "unable to map S/G table\n"); 1100 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 1101 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1102 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 1103 qi_cache_free(edesc); 1104 return ERR_PTR(-ENOMEM); 1105 } 1106 1107 edesc->qm_sg_dma = qm_sg_dma; 1108 edesc->qm_sg_bytes = qm_sg_bytes; 1109 1110 out_len = req->assoclen + req->cryptlen + 1111 (encrypt ? ctx->authsize : (-ctx->authsize)); 1112 in_len = 4 + ivsize + req->assoclen + req->cryptlen; 1113 1114 fd_sgt = &edesc->drv_req.fd_sgt[0]; 1115 dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); 1116 1117 if (req->dst == req->src) { 1118 if (mapped_src_nents == 1) 1119 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), 1120 out_len, 0); 1121 else 1122 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + 1123 (1 + !!ivsize) * sizeof(*sg_table), 1124 out_len, 0); 1125 } else if (mapped_dst_nents <= 1) { 1126 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, 1127 0); 1128 } else { 1129 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * 1130 qm_sg_index, out_len, 0); 1131 } 1132 1133 return edesc; 1134 } 1135 1136 static inline int aead_crypt(struct aead_request *req, bool encrypt) 1137 { 1138 struct aead_edesc *edesc; 1139 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1140 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1141 int ret; 1142 1143 if (unlikely(caam_congested)) 1144 return -EAGAIN; 1145 1146 /* allocate extended descriptor */ 1147 edesc = aead_edesc_alloc(req, encrypt); 1148 if (IS_ERR_OR_NULL(edesc)) 1149 return PTR_ERR(edesc); 1150 1151 /* Create and submit job descriptor */ 1152 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1153 if (!ret) { 1154 ret = -EINPROGRESS; 1155 } else { 1156 aead_unmap(ctx->qidev, edesc, req); 1157 qi_cache_free(edesc); 1158 } 1159 1160 return ret; 1161 } 1162 1163 static int aead_encrypt(struct aead_request *req) 1164 { 1165 return aead_crypt(req, true); 1166 } 1167 1168 static int aead_decrypt(struct aead_request *req) 1169 { 1170 return aead_crypt(req, false); 1171 } 1172 1173 static int ipsec_gcm_encrypt(struct aead_request *req) 1174 { 1175 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, 1176 true); 1177 } 1178 1179 static int ipsec_gcm_decrypt(struct aead_request *req) 1180 { 1181 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, 1182 false); 1183 } 1184 1185 static void skcipher_done(struct caam_drv_req *drv_req, u32 status) 1186 { 1187 struct skcipher_edesc *edesc; 1188 struct skcipher_request *req = drv_req->app_ctx; 1189 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1190 struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); 1191 struct device *qidev = caam_ctx->qidev; 1192 int ivsize = crypto_skcipher_ivsize(skcipher); 1193 int ecode = 0; 1194 1195 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); 1196 1197 edesc = container_of(drv_req, typeof(*edesc), drv_req); 1198 1199 if (status) 1200 ecode = caam_jr_strstatus(qidev, status); 1201 1202 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1203 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1204 edesc->src_nents > 1 ? 100 : ivsize, 1); 1205 caam_dump_sg("dst @" __stringify(__LINE__)": ", 1206 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1207 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1208 1209 skcipher_unmap(qidev, edesc, req); 1210 1211 /* 1212 * The crypto API expects us to set the IV (req->iv) to the last 1213 * ciphertext block (CBC mode) or last counter (CTR mode). 1214 * This is used e.g. by the CTS mode. 1215 */ 1216 if (!ecode) 1217 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, 1218 ivsize); 1219 1220 qi_cache_free(edesc); 1221 skcipher_request_complete(req, ecode); 1222 } 1223 1224 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, 1225 bool encrypt) 1226 { 1227 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1228 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1229 struct device *qidev = ctx->qidev; 1230 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1231 GFP_KERNEL : GFP_ATOMIC; 1232 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1233 struct skcipher_edesc *edesc; 1234 dma_addr_t iv_dma; 1235 u8 *iv; 1236 int ivsize = crypto_skcipher_ivsize(skcipher); 1237 int dst_sg_idx, qm_sg_ents, qm_sg_bytes; 1238 struct qm_sg_entry *sg_table, *fd_sgt; 1239 struct caam_drv_ctx *drv_ctx; 1240 1241 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); 1242 if (IS_ERR_OR_NULL(drv_ctx)) 1243 return (struct skcipher_edesc *)drv_ctx; 1244 1245 src_nents = sg_nents_for_len(req->src, req->cryptlen); 1246 if (unlikely(src_nents < 0)) { 1247 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 1248 req->cryptlen); 1249 return ERR_PTR(src_nents); 1250 } 1251 1252 if (unlikely(req->src != req->dst)) { 1253 dst_nents = sg_nents_for_len(req->dst, req->cryptlen); 1254 if (unlikely(dst_nents < 0)) { 1255 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 1256 req->cryptlen); 1257 return ERR_PTR(dst_nents); 1258 } 1259 1260 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 1261 DMA_TO_DEVICE); 1262 if (unlikely(!mapped_src_nents)) { 1263 dev_err(qidev, "unable to map source\n"); 1264 return ERR_PTR(-ENOMEM); 1265 } 1266 1267 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, 1268 DMA_FROM_DEVICE); 1269 if (unlikely(!mapped_dst_nents)) { 1270 dev_err(qidev, "unable to map destination\n"); 1271 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); 1272 return ERR_PTR(-ENOMEM); 1273 } 1274 } else { 1275 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 1276 DMA_BIDIRECTIONAL); 1277 if (unlikely(!mapped_src_nents)) { 1278 dev_err(qidev, "unable to map source\n"); 1279 return ERR_PTR(-ENOMEM); 1280 } 1281 } 1282 1283 qm_sg_ents = 1 + mapped_src_nents; 1284 dst_sg_idx = qm_sg_ents; 1285 1286 /* 1287 * Input, output HW S/G tables: [IV, src][dst, IV] 1288 * IV entries point to the same buffer 1289 * If src == dst, S/G entries are reused (S/G tables overlap) 1290 * 1291 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1292 * the end of the table by allocating more S/G entries. 1293 */ 1294 if (req->src != req->dst) 1295 qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); 1296 else 1297 qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); 1298 1299 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); 1300 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + 1301 ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1302 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1303 qm_sg_ents, ivsize); 1304 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1305 0, DMA_NONE, 0, 0); 1306 return ERR_PTR(-ENOMEM); 1307 } 1308 1309 /* allocate space for base edesc, link tables and IV */ 1310 edesc = qi_cache_alloc(GFP_DMA | flags); 1311 if (unlikely(!edesc)) { 1312 dev_err(qidev, "could not allocate extended descriptor\n"); 1313 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1314 0, DMA_NONE, 0, 0); 1315 return ERR_PTR(-ENOMEM); 1316 } 1317 1318 /* Make sure IV is located in a DMAable area */ 1319 sg_table = &edesc->sgt[0]; 1320 iv = (u8 *)(sg_table + qm_sg_ents); 1321 memcpy(iv, req->iv, ivsize); 1322 1323 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); 1324 if (dma_mapping_error(qidev, iv_dma)) { 1325 dev_err(qidev, "unable to map IV\n"); 1326 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1327 0, DMA_NONE, 0, 0); 1328 qi_cache_free(edesc); 1329 return ERR_PTR(-ENOMEM); 1330 } 1331 1332 edesc->src_nents = src_nents; 1333 edesc->dst_nents = dst_nents; 1334 edesc->iv_dma = iv_dma; 1335 edesc->qm_sg_bytes = qm_sg_bytes; 1336 edesc->drv_req.app_ctx = req; 1337 edesc->drv_req.cbk = skcipher_done; 1338 edesc->drv_req.drv_ctx = drv_ctx; 1339 1340 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); 1341 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); 1342 1343 if (req->src != req->dst) 1344 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); 1345 1346 dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, 1347 ivsize, 0); 1348 1349 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, 1350 DMA_TO_DEVICE); 1351 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { 1352 dev_err(qidev, "unable to map S/G table\n"); 1353 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1354 iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); 1355 qi_cache_free(edesc); 1356 return ERR_PTR(-ENOMEM); 1357 } 1358 1359 fd_sgt = &edesc->drv_req.fd_sgt[0]; 1360 1361 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, 1362 ivsize + req->cryptlen, 0); 1363 1364 if (req->src == req->dst) 1365 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + 1366 sizeof(*sg_table), req->cryptlen + ivsize, 1367 0); 1368 else 1369 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * 1370 sizeof(*sg_table), req->cryptlen + ivsize, 1371 0); 1372 1373 return edesc; 1374 } 1375 1376 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) 1377 { 1378 struct skcipher_edesc *edesc; 1379 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1380 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1381 int ret; 1382 1383 if (!req->cryptlen) 1384 return 0; 1385 1386 if (unlikely(caam_congested)) 1387 return -EAGAIN; 1388 1389 /* allocate extended descriptor */ 1390 edesc = skcipher_edesc_alloc(req, encrypt); 1391 if (IS_ERR(edesc)) 1392 return PTR_ERR(edesc); 1393 1394 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1395 if (!ret) { 1396 ret = -EINPROGRESS; 1397 } else { 1398 skcipher_unmap(ctx->qidev, edesc, req); 1399 qi_cache_free(edesc); 1400 } 1401 1402 return ret; 1403 } 1404 1405 static int skcipher_encrypt(struct skcipher_request *req) 1406 { 1407 return skcipher_crypt(req, true); 1408 } 1409 1410 static int skcipher_decrypt(struct skcipher_request *req) 1411 { 1412 return skcipher_crypt(req, false); 1413 } 1414 1415 static struct caam_skcipher_alg driver_algs[] = { 1416 { 1417 .skcipher = { 1418 .base = { 1419 .cra_name = "cbc(aes)", 1420 .cra_driver_name = "cbc-aes-caam-qi", 1421 .cra_blocksize = AES_BLOCK_SIZE, 1422 }, 1423 .setkey = aes_skcipher_setkey, 1424 .encrypt = skcipher_encrypt, 1425 .decrypt = skcipher_decrypt, 1426 .min_keysize = AES_MIN_KEY_SIZE, 1427 .max_keysize = AES_MAX_KEY_SIZE, 1428 .ivsize = AES_BLOCK_SIZE, 1429 }, 1430 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1431 }, 1432 { 1433 .skcipher = { 1434 .base = { 1435 .cra_name = "cbc(des3_ede)", 1436 .cra_driver_name = "cbc-3des-caam-qi", 1437 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1438 }, 1439 .setkey = des3_skcipher_setkey, 1440 .encrypt = skcipher_encrypt, 1441 .decrypt = skcipher_decrypt, 1442 .min_keysize = DES3_EDE_KEY_SIZE, 1443 .max_keysize = DES3_EDE_KEY_SIZE, 1444 .ivsize = DES3_EDE_BLOCK_SIZE, 1445 }, 1446 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1447 }, 1448 { 1449 .skcipher = { 1450 .base = { 1451 .cra_name = "cbc(des)", 1452 .cra_driver_name = "cbc-des-caam-qi", 1453 .cra_blocksize = DES_BLOCK_SIZE, 1454 }, 1455 .setkey = des_skcipher_setkey, 1456 .encrypt = skcipher_encrypt, 1457 .decrypt = skcipher_decrypt, 1458 .min_keysize = DES_KEY_SIZE, 1459 .max_keysize = DES_KEY_SIZE, 1460 .ivsize = DES_BLOCK_SIZE, 1461 }, 1462 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 1463 }, 1464 { 1465 .skcipher = { 1466 .base = { 1467 .cra_name = "ctr(aes)", 1468 .cra_driver_name = "ctr-aes-caam-qi", 1469 .cra_blocksize = 1, 1470 }, 1471 .setkey = ctr_skcipher_setkey, 1472 .encrypt = skcipher_encrypt, 1473 .decrypt = skcipher_decrypt, 1474 .min_keysize = AES_MIN_KEY_SIZE, 1475 .max_keysize = AES_MAX_KEY_SIZE, 1476 .ivsize = AES_BLOCK_SIZE, 1477 .chunksize = AES_BLOCK_SIZE, 1478 }, 1479 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | 1480 OP_ALG_AAI_CTR_MOD128, 1481 }, 1482 { 1483 .skcipher = { 1484 .base = { 1485 .cra_name = "rfc3686(ctr(aes))", 1486 .cra_driver_name = "rfc3686-ctr-aes-caam-qi", 1487 .cra_blocksize = 1, 1488 }, 1489 .setkey = rfc3686_skcipher_setkey, 1490 .encrypt = skcipher_encrypt, 1491 .decrypt = skcipher_decrypt, 1492 .min_keysize = AES_MIN_KEY_SIZE + 1493 CTR_RFC3686_NONCE_SIZE, 1494 .max_keysize = AES_MAX_KEY_SIZE + 1495 CTR_RFC3686_NONCE_SIZE, 1496 .ivsize = CTR_RFC3686_IV_SIZE, 1497 .chunksize = AES_BLOCK_SIZE, 1498 }, 1499 .caam = { 1500 .class1_alg_type = OP_ALG_ALGSEL_AES | 1501 OP_ALG_AAI_CTR_MOD128, 1502 .rfc3686 = true, 1503 }, 1504 }, 1505 { 1506 .skcipher = { 1507 .base = { 1508 .cra_name = "xts(aes)", 1509 .cra_driver_name = "xts-aes-caam-qi", 1510 .cra_blocksize = AES_BLOCK_SIZE, 1511 }, 1512 .setkey = xts_skcipher_setkey, 1513 .encrypt = skcipher_encrypt, 1514 .decrypt = skcipher_decrypt, 1515 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1516 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1517 .ivsize = AES_BLOCK_SIZE, 1518 }, 1519 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, 1520 }, 1521 }; 1522 1523 static struct caam_aead_alg driver_aeads[] = { 1524 { 1525 .aead = { 1526 .base = { 1527 .cra_name = "rfc4106(gcm(aes))", 1528 .cra_driver_name = "rfc4106-gcm-aes-caam-qi", 1529 .cra_blocksize = 1, 1530 }, 1531 .setkey = rfc4106_setkey, 1532 .setauthsize = rfc4106_setauthsize, 1533 .encrypt = ipsec_gcm_encrypt, 1534 .decrypt = ipsec_gcm_decrypt, 1535 .ivsize = 8, 1536 .maxauthsize = AES_BLOCK_SIZE, 1537 }, 1538 .caam = { 1539 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1540 .nodkp = true, 1541 }, 1542 }, 1543 { 1544 .aead = { 1545 .base = { 1546 .cra_name = "rfc4543(gcm(aes))", 1547 .cra_driver_name = "rfc4543-gcm-aes-caam-qi", 1548 .cra_blocksize = 1, 1549 }, 1550 .setkey = rfc4543_setkey, 1551 .setauthsize = rfc4543_setauthsize, 1552 .encrypt = ipsec_gcm_encrypt, 1553 .decrypt = ipsec_gcm_decrypt, 1554 .ivsize = 8, 1555 .maxauthsize = AES_BLOCK_SIZE, 1556 }, 1557 .caam = { 1558 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1559 .nodkp = true, 1560 }, 1561 }, 1562 /* Galois Counter Mode */ 1563 { 1564 .aead = { 1565 .base = { 1566 .cra_name = "gcm(aes)", 1567 .cra_driver_name = "gcm-aes-caam-qi", 1568 .cra_blocksize = 1, 1569 }, 1570 .setkey = gcm_setkey, 1571 .setauthsize = gcm_setauthsize, 1572 .encrypt = aead_encrypt, 1573 .decrypt = aead_decrypt, 1574 .ivsize = 12, 1575 .maxauthsize = AES_BLOCK_SIZE, 1576 }, 1577 .caam = { 1578 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1579 .nodkp = true, 1580 } 1581 }, 1582 /* single-pass ipsec_esp descriptor */ 1583 { 1584 .aead = { 1585 .base = { 1586 .cra_name = "authenc(hmac(md5),cbc(aes))", 1587 .cra_driver_name = "authenc-hmac-md5-" 1588 "cbc-aes-caam-qi", 1589 .cra_blocksize = AES_BLOCK_SIZE, 1590 }, 1591 .setkey = aead_setkey, 1592 .setauthsize = aead_setauthsize, 1593 .encrypt = aead_encrypt, 1594 .decrypt = aead_decrypt, 1595 .ivsize = AES_BLOCK_SIZE, 1596 .maxauthsize = MD5_DIGEST_SIZE, 1597 }, 1598 .caam = { 1599 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1600 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1601 OP_ALG_AAI_HMAC_PRECOMP, 1602 } 1603 }, 1604 { 1605 .aead = { 1606 .base = { 1607 .cra_name = "echainiv(authenc(hmac(md5)," 1608 "cbc(aes)))", 1609 .cra_driver_name = "echainiv-authenc-hmac-md5-" 1610 "cbc-aes-caam-qi", 1611 .cra_blocksize = AES_BLOCK_SIZE, 1612 }, 1613 .setkey = aead_setkey, 1614 .setauthsize = aead_setauthsize, 1615 .encrypt = aead_encrypt, 1616 .decrypt = aead_decrypt, 1617 .ivsize = AES_BLOCK_SIZE, 1618 .maxauthsize = MD5_DIGEST_SIZE, 1619 }, 1620 .caam = { 1621 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1622 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1623 OP_ALG_AAI_HMAC_PRECOMP, 1624 .geniv = true, 1625 } 1626 }, 1627 { 1628 .aead = { 1629 .base = { 1630 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1631 .cra_driver_name = "authenc-hmac-sha1-" 1632 "cbc-aes-caam-qi", 1633 .cra_blocksize = AES_BLOCK_SIZE, 1634 }, 1635 .setkey = aead_setkey, 1636 .setauthsize = aead_setauthsize, 1637 .encrypt = aead_encrypt, 1638 .decrypt = aead_decrypt, 1639 .ivsize = AES_BLOCK_SIZE, 1640 .maxauthsize = SHA1_DIGEST_SIZE, 1641 }, 1642 .caam = { 1643 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1644 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1645 OP_ALG_AAI_HMAC_PRECOMP, 1646 } 1647 }, 1648 { 1649 .aead = { 1650 .base = { 1651 .cra_name = "echainiv(authenc(hmac(sha1)," 1652 "cbc(aes)))", 1653 .cra_driver_name = "echainiv-authenc-" 1654 "hmac-sha1-cbc-aes-caam-qi", 1655 .cra_blocksize = AES_BLOCK_SIZE, 1656 }, 1657 .setkey = aead_setkey, 1658 .setauthsize = aead_setauthsize, 1659 .encrypt = aead_encrypt, 1660 .decrypt = aead_decrypt, 1661 .ivsize = AES_BLOCK_SIZE, 1662 .maxauthsize = SHA1_DIGEST_SIZE, 1663 }, 1664 .caam = { 1665 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1666 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1667 OP_ALG_AAI_HMAC_PRECOMP, 1668 .geniv = true, 1669 }, 1670 }, 1671 { 1672 .aead = { 1673 .base = { 1674 .cra_name = "authenc(hmac(sha224),cbc(aes))", 1675 .cra_driver_name = "authenc-hmac-sha224-" 1676 "cbc-aes-caam-qi", 1677 .cra_blocksize = AES_BLOCK_SIZE, 1678 }, 1679 .setkey = aead_setkey, 1680 .setauthsize = aead_setauthsize, 1681 .encrypt = aead_encrypt, 1682 .decrypt = aead_decrypt, 1683 .ivsize = AES_BLOCK_SIZE, 1684 .maxauthsize = SHA224_DIGEST_SIZE, 1685 }, 1686 .caam = { 1687 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1688 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1689 OP_ALG_AAI_HMAC_PRECOMP, 1690 } 1691 }, 1692 { 1693 .aead = { 1694 .base = { 1695 .cra_name = "echainiv(authenc(hmac(sha224)," 1696 "cbc(aes)))", 1697 .cra_driver_name = "echainiv-authenc-" 1698 "hmac-sha224-cbc-aes-caam-qi", 1699 .cra_blocksize = AES_BLOCK_SIZE, 1700 }, 1701 .setkey = aead_setkey, 1702 .setauthsize = aead_setauthsize, 1703 .encrypt = aead_encrypt, 1704 .decrypt = aead_decrypt, 1705 .ivsize = AES_BLOCK_SIZE, 1706 .maxauthsize = SHA224_DIGEST_SIZE, 1707 }, 1708 .caam = { 1709 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1710 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1711 OP_ALG_AAI_HMAC_PRECOMP, 1712 .geniv = true, 1713 } 1714 }, 1715 { 1716 .aead = { 1717 .base = { 1718 .cra_name = "authenc(hmac(sha256),cbc(aes))", 1719 .cra_driver_name = "authenc-hmac-sha256-" 1720 "cbc-aes-caam-qi", 1721 .cra_blocksize = AES_BLOCK_SIZE, 1722 }, 1723 .setkey = aead_setkey, 1724 .setauthsize = aead_setauthsize, 1725 .encrypt = aead_encrypt, 1726 .decrypt = aead_decrypt, 1727 .ivsize = AES_BLOCK_SIZE, 1728 .maxauthsize = SHA256_DIGEST_SIZE, 1729 }, 1730 .caam = { 1731 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1732 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 1733 OP_ALG_AAI_HMAC_PRECOMP, 1734 } 1735 }, 1736 { 1737 .aead = { 1738 .base = { 1739 .cra_name = "echainiv(authenc(hmac(sha256)," 1740 "cbc(aes)))", 1741 .cra_driver_name = "echainiv-authenc-" 1742 "hmac-sha256-cbc-aes-" 1743 "caam-qi", 1744 .cra_blocksize = AES_BLOCK_SIZE, 1745 }, 1746 .setkey = aead_setkey, 1747 .setauthsize = aead_setauthsize, 1748 .encrypt = aead_encrypt, 1749 .decrypt = aead_decrypt, 1750 .ivsize = AES_BLOCK_SIZE, 1751 .maxauthsize = SHA256_DIGEST_SIZE, 1752 }, 1753 .caam = { 1754 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1755 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 1756 OP_ALG_AAI_HMAC_PRECOMP, 1757 .geniv = true, 1758 } 1759 }, 1760 { 1761 .aead = { 1762 .base = { 1763 .cra_name = "authenc(hmac(sha384),cbc(aes))", 1764 .cra_driver_name = "authenc-hmac-sha384-" 1765 "cbc-aes-caam-qi", 1766 .cra_blocksize = AES_BLOCK_SIZE, 1767 }, 1768 .setkey = aead_setkey, 1769 .setauthsize = aead_setauthsize, 1770 .encrypt = aead_encrypt, 1771 .decrypt = aead_decrypt, 1772 .ivsize = AES_BLOCK_SIZE, 1773 .maxauthsize = SHA384_DIGEST_SIZE, 1774 }, 1775 .caam = { 1776 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1777 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1778 OP_ALG_AAI_HMAC_PRECOMP, 1779 } 1780 }, 1781 { 1782 .aead = { 1783 .base = { 1784 .cra_name = "echainiv(authenc(hmac(sha384)," 1785 "cbc(aes)))", 1786 .cra_driver_name = "echainiv-authenc-" 1787 "hmac-sha384-cbc-aes-" 1788 "caam-qi", 1789 .cra_blocksize = AES_BLOCK_SIZE, 1790 }, 1791 .setkey = aead_setkey, 1792 .setauthsize = aead_setauthsize, 1793 .encrypt = aead_encrypt, 1794 .decrypt = aead_decrypt, 1795 .ivsize = AES_BLOCK_SIZE, 1796 .maxauthsize = SHA384_DIGEST_SIZE, 1797 }, 1798 .caam = { 1799 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1800 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1801 OP_ALG_AAI_HMAC_PRECOMP, 1802 .geniv = true, 1803 } 1804 }, 1805 { 1806 .aead = { 1807 .base = { 1808 .cra_name = "authenc(hmac(sha512),cbc(aes))", 1809 .cra_driver_name = "authenc-hmac-sha512-" 1810 "cbc-aes-caam-qi", 1811 .cra_blocksize = AES_BLOCK_SIZE, 1812 }, 1813 .setkey = aead_setkey, 1814 .setauthsize = aead_setauthsize, 1815 .encrypt = aead_encrypt, 1816 .decrypt = aead_decrypt, 1817 .ivsize = AES_BLOCK_SIZE, 1818 .maxauthsize = SHA512_DIGEST_SIZE, 1819 }, 1820 .caam = { 1821 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1822 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 1823 OP_ALG_AAI_HMAC_PRECOMP, 1824 } 1825 }, 1826 { 1827 .aead = { 1828 .base = { 1829 .cra_name = "echainiv(authenc(hmac(sha512)," 1830 "cbc(aes)))", 1831 .cra_driver_name = "echainiv-authenc-" 1832 "hmac-sha512-cbc-aes-" 1833 "caam-qi", 1834 .cra_blocksize = AES_BLOCK_SIZE, 1835 }, 1836 .setkey = aead_setkey, 1837 .setauthsize = aead_setauthsize, 1838 .encrypt = aead_encrypt, 1839 .decrypt = aead_decrypt, 1840 .ivsize = AES_BLOCK_SIZE, 1841 .maxauthsize = SHA512_DIGEST_SIZE, 1842 }, 1843 .caam = { 1844 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1845 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 1846 OP_ALG_AAI_HMAC_PRECOMP, 1847 .geniv = true, 1848 } 1849 }, 1850 { 1851 .aead = { 1852 .base = { 1853 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 1854 .cra_driver_name = "authenc-hmac-md5-" 1855 "cbc-des3_ede-caam-qi", 1856 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1857 }, 1858 .setkey = des3_aead_setkey, 1859 .setauthsize = aead_setauthsize, 1860 .encrypt = aead_encrypt, 1861 .decrypt = aead_decrypt, 1862 .ivsize = DES3_EDE_BLOCK_SIZE, 1863 .maxauthsize = MD5_DIGEST_SIZE, 1864 }, 1865 .caam = { 1866 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1867 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1868 OP_ALG_AAI_HMAC_PRECOMP, 1869 } 1870 }, 1871 { 1872 .aead = { 1873 .base = { 1874 .cra_name = "echainiv(authenc(hmac(md5)," 1875 "cbc(des3_ede)))", 1876 .cra_driver_name = "echainiv-authenc-hmac-md5-" 1877 "cbc-des3_ede-caam-qi", 1878 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1879 }, 1880 .setkey = des3_aead_setkey, 1881 .setauthsize = aead_setauthsize, 1882 .encrypt = aead_encrypt, 1883 .decrypt = aead_decrypt, 1884 .ivsize = DES3_EDE_BLOCK_SIZE, 1885 .maxauthsize = MD5_DIGEST_SIZE, 1886 }, 1887 .caam = { 1888 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1889 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1890 OP_ALG_AAI_HMAC_PRECOMP, 1891 .geniv = true, 1892 } 1893 }, 1894 { 1895 .aead = { 1896 .base = { 1897 .cra_name = "authenc(hmac(sha1)," 1898 "cbc(des3_ede))", 1899 .cra_driver_name = "authenc-hmac-sha1-" 1900 "cbc-des3_ede-caam-qi", 1901 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1902 }, 1903 .setkey = des3_aead_setkey, 1904 .setauthsize = aead_setauthsize, 1905 .encrypt = aead_encrypt, 1906 .decrypt = aead_decrypt, 1907 .ivsize = DES3_EDE_BLOCK_SIZE, 1908 .maxauthsize = SHA1_DIGEST_SIZE, 1909 }, 1910 .caam = { 1911 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1912 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1913 OP_ALG_AAI_HMAC_PRECOMP, 1914 }, 1915 }, 1916 { 1917 .aead = { 1918 .base = { 1919 .cra_name = "echainiv(authenc(hmac(sha1)," 1920 "cbc(des3_ede)))", 1921 .cra_driver_name = "echainiv-authenc-" 1922 "hmac-sha1-" 1923 "cbc-des3_ede-caam-qi", 1924 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1925 }, 1926 .setkey = des3_aead_setkey, 1927 .setauthsize = aead_setauthsize, 1928 .encrypt = aead_encrypt, 1929 .decrypt = aead_decrypt, 1930 .ivsize = DES3_EDE_BLOCK_SIZE, 1931 .maxauthsize = SHA1_DIGEST_SIZE, 1932 }, 1933 .caam = { 1934 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1935 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1936 OP_ALG_AAI_HMAC_PRECOMP, 1937 .geniv = true, 1938 } 1939 }, 1940 { 1941 .aead = { 1942 .base = { 1943 .cra_name = "authenc(hmac(sha224)," 1944 "cbc(des3_ede))", 1945 .cra_driver_name = "authenc-hmac-sha224-" 1946 "cbc-des3_ede-caam-qi", 1947 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1948 }, 1949 .setkey = des3_aead_setkey, 1950 .setauthsize = aead_setauthsize, 1951 .encrypt = aead_encrypt, 1952 .decrypt = aead_decrypt, 1953 .ivsize = DES3_EDE_BLOCK_SIZE, 1954 .maxauthsize = SHA224_DIGEST_SIZE, 1955 }, 1956 .caam = { 1957 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1958 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1959 OP_ALG_AAI_HMAC_PRECOMP, 1960 }, 1961 }, 1962 { 1963 .aead = { 1964 .base = { 1965 .cra_name = "echainiv(authenc(hmac(sha224)," 1966 "cbc(des3_ede)))", 1967 .cra_driver_name = "echainiv-authenc-" 1968 "hmac-sha224-" 1969 "cbc-des3_ede-caam-qi", 1970 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1971 }, 1972 .setkey = des3_aead_setkey, 1973 .setauthsize = aead_setauthsize, 1974 .encrypt = aead_encrypt, 1975 .decrypt = aead_decrypt, 1976 .ivsize = DES3_EDE_BLOCK_SIZE, 1977 .maxauthsize = SHA224_DIGEST_SIZE, 1978 }, 1979 .caam = { 1980 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1981 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1982 OP_ALG_AAI_HMAC_PRECOMP, 1983 .geniv = true, 1984 } 1985 }, 1986 { 1987 .aead = { 1988 .base = { 1989 .cra_name = "authenc(hmac(sha256)," 1990 "cbc(des3_ede))", 1991 .cra_driver_name = "authenc-hmac-sha256-" 1992 "cbc-des3_ede-caam-qi", 1993 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1994 }, 1995 .setkey = des3_aead_setkey, 1996 .setauthsize = aead_setauthsize, 1997 .encrypt = aead_encrypt, 1998 .decrypt = aead_decrypt, 1999 .ivsize = DES3_EDE_BLOCK_SIZE, 2000 .maxauthsize = SHA256_DIGEST_SIZE, 2001 }, 2002 .caam = { 2003 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2004 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2005 OP_ALG_AAI_HMAC_PRECOMP, 2006 }, 2007 }, 2008 { 2009 .aead = { 2010 .base = { 2011 .cra_name = "echainiv(authenc(hmac(sha256)," 2012 "cbc(des3_ede)))", 2013 .cra_driver_name = "echainiv-authenc-" 2014 "hmac-sha256-" 2015 "cbc-des3_ede-caam-qi", 2016 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2017 }, 2018 .setkey = des3_aead_setkey, 2019 .setauthsize = aead_setauthsize, 2020 .encrypt = aead_encrypt, 2021 .decrypt = aead_decrypt, 2022 .ivsize = DES3_EDE_BLOCK_SIZE, 2023 .maxauthsize = SHA256_DIGEST_SIZE, 2024 }, 2025 .caam = { 2026 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2027 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2028 OP_ALG_AAI_HMAC_PRECOMP, 2029 .geniv = true, 2030 } 2031 }, 2032 { 2033 .aead = { 2034 .base = { 2035 .cra_name = "authenc(hmac(sha384)," 2036 "cbc(des3_ede))", 2037 .cra_driver_name = "authenc-hmac-sha384-" 2038 "cbc-des3_ede-caam-qi", 2039 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2040 }, 2041 .setkey = des3_aead_setkey, 2042 .setauthsize = aead_setauthsize, 2043 .encrypt = aead_encrypt, 2044 .decrypt = aead_decrypt, 2045 .ivsize = DES3_EDE_BLOCK_SIZE, 2046 .maxauthsize = SHA384_DIGEST_SIZE, 2047 }, 2048 .caam = { 2049 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2050 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2051 OP_ALG_AAI_HMAC_PRECOMP, 2052 }, 2053 }, 2054 { 2055 .aead = { 2056 .base = { 2057 .cra_name = "echainiv(authenc(hmac(sha384)," 2058 "cbc(des3_ede)))", 2059 .cra_driver_name = "echainiv-authenc-" 2060 "hmac-sha384-" 2061 "cbc-des3_ede-caam-qi", 2062 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2063 }, 2064 .setkey = des3_aead_setkey, 2065 .setauthsize = aead_setauthsize, 2066 .encrypt = aead_encrypt, 2067 .decrypt = aead_decrypt, 2068 .ivsize = DES3_EDE_BLOCK_SIZE, 2069 .maxauthsize = SHA384_DIGEST_SIZE, 2070 }, 2071 .caam = { 2072 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2073 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2074 OP_ALG_AAI_HMAC_PRECOMP, 2075 .geniv = true, 2076 } 2077 }, 2078 { 2079 .aead = { 2080 .base = { 2081 .cra_name = "authenc(hmac(sha512)," 2082 "cbc(des3_ede))", 2083 .cra_driver_name = "authenc-hmac-sha512-" 2084 "cbc-des3_ede-caam-qi", 2085 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2086 }, 2087 .setkey = des3_aead_setkey, 2088 .setauthsize = aead_setauthsize, 2089 .encrypt = aead_encrypt, 2090 .decrypt = aead_decrypt, 2091 .ivsize = DES3_EDE_BLOCK_SIZE, 2092 .maxauthsize = SHA512_DIGEST_SIZE, 2093 }, 2094 .caam = { 2095 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2096 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2097 OP_ALG_AAI_HMAC_PRECOMP, 2098 }, 2099 }, 2100 { 2101 .aead = { 2102 .base = { 2103 .cra_name = "echainiv(authenc(hmac(sha512)," 2104 "cbc(des3_ede)))", 2105 .cra_driver_name = "echainiv-authenc-" 2106 "hmac-sha512-" 2107 "cbc-des3_ede-caam-qi", 2108 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2109 }, 2110 .setkey = des3_aead_setkey, 2111 .setauthsize = aead_setauthsize, 2112 .encrypt = aead_encrypt, 2113 .decrypt = aead_decrypt, 2114 .ivsize = DES3_EDE_BLOCK_SIZE, 2115 .maxauthsize = SHA512_DIGEST_SIZE, 2116 }, 2117 .caam = { 2118 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2119 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2120 OP_ALG_AAI_HMAC_PRECOMP, 2121 .geniv = true, 2122 } 2123 }, 2124 { 2125 .aead = { 2126 .base = { 2127 .cra_name = "authenc(hmac(md5),cbc(des))", 2128 .cra_driver_name = "authenc-hmac-md5-" 2129 "cbc-des-caam-qi", 2130 .cra_blocksize = DES_BLOCK_SIZE, 2131 }, 2132 .setkey = aead_setkey, 2133 .setauthsize = aead_setauthsize, 2134 .encrypt = aead_encrypt, 2135 .decrypt = aead_decrypt, 2136 .ivsize = DES_BLOCK_SIZE, 2137 .maxauthsize = MD5_DIGEST_SIZE, 2138 }, 2139 .caam = { 2140 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2141 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2142 OP_ALG_AAI_HMAC_PRECOMP, 2143 }, 2144 }, 2145 { 2146 .aead = { 2147 .base = { 2148 .cra_name = "echainiv(authenc(hmac(md5)," 2149 "cbc(des)))", 2150 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2151 "cbc-des-caam-qi", 2152 .cra_blocksize = DES_BLOCK_SIZE, 2153 }, 2154 .setkey = aead_setkey, 2155 .setauthsize = aead_setauthsize, 2156 .encrypt = aead_encrypt, 2157 .decrypt = aead_decrypt, 2158 .ivsize = DES_BLOCK_SIZE, 2159 .maxauthsize = MD5_DIGEST_SIZE, 2160 }, 2161 .caam = { 2162 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2163 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2164 OP_ALG_AAI_HMAC_PRECOMP, 2165 .geniv = true, 2166 } 2167 }, 2168 { 2169 .aead = { 2170 .base = { 2171 .cra_name = "authenc(hmac(sha1),cbc(des))", 2172 .cra_driver_name = "authenc-hmac-sha1-" 2173 "cbc-des-caam-qi", 2174 .cra_blocksize = DES_BLOCK_SIZE, 2175 }, 2176 .setkey = aead_setkey, 2177 .setauthsize = aead_setauthsize, 2178 .encrypt = aead_encrypt, 2179 .decrypt = aead_decrypt, 2180 .ivsize = DES_BLOCK_SIZE, 2181 .maxauthsize = SHA1_DIGEST_SIZE, 2182 }, 2183 .caam = { 2184 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2185 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2186 OP_ALG_AAI_HMAC_PRECOMP, 2187 }, 2188 }, 2189 { 2190 .aead = { 2191 .base = { 2192 .cra_name = "echainiv(authenc(hmac(sha1)," 2193 "cbc(des)))", 2194 .cra_driver_name = "echainiv-authenc-" 2195 "hmac-sha1-cbc-des-caam-qi", 2196 .cra_blocksize = DES_BLOCK_SIZE, 2197 }, 2198 .setkey = aead_setkey, 2199 .setauthsize = aead_setauthsize, 2200 .encrypt = aead_encrypt, 2201 .decrypt = aead_decrypt, 2202 .ivsize = DES_BLOCK_SIZE, 2203 .maxauthsize = SHA1_DIGEST_SIZE, 2204 }, 2205 .caam = { 2206 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2207 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2208 OP_ALG_AAI_HMAC_PRECOMP, 2209 .geniv = true, 2210 } 2211 }, 2212 { 2213 .aead = { 2214 .base = { 2215 .cra_name = "authenc(hmac(sha224),cbc(des))", 2216 .cra_driver_name = "authenc-hmac-sha224-" 2217 "cbc-des-caam-qi", 2218 .cra_blocksize = DES_BLOCK_SIZE, 2219 }, 2220 .setkey = aead_setkey, 2221 .setauthsize = aead_setauthsize, 2222 .encrypt = aead_encrypt, 2223 .decrypt = aead_decrypt, 2224 .ivsize = DES_BLOCK_SIZE, 2225 .maxauthsize = SHA224_DIGEST_SIZE, 2226 }, 2227 .caam = { 2228 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2229 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2230 OP_ALG_AAI_HMAC_PRECOMP, 2231 }, 2232 }, 2233 { 2234 .aead = { 2235 .base = { 2236 .cra_name = "echainiv(authenc(hmac(sha224)," 2237 "cbc(des)))", 2238 .cra_driver_name = "echainiv-authenc-" 2239 "hmac-sha224-cbc-des-" 2240 "caam-qi", 2241 .cra_blocksize = DES_BLOCK_SIZE, 2242 }, 2243 .setkey = aead_setkey, 2244 .setauthsize = aead_setauthsize, 2245 .encrypt = aead_encrypt, 2246 .decrypt = aead_decrypt, 2247 .ivsize = DES_BLOCK_SIZE, 2248 .maxauthsize = SHA224_DIGEST_SIZE, 2249 }, 2250 .caam = { 2251 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2252 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2253 OP_ALG_AAI_HMAC_PRECOMP, 2254 .geniv = true, 2255 } 2256 }, 2257 { 2258 .aead = { 2259 .base = { 2260 .cra_name = "authenc(hmac(sha256),cbc(des))", 2261 .cra_driver_name = "authenc-hmac-sha256-" 2262 "cbc-des-caam-qi", 2263 .cra_blocksize = DES_BLOCK_SIZE, 2264 }, 2265 .setkey = aead_setkey, 2266 .setauthsize = aead_setauthsize, 2267 .encrypt = aead_encrypt, 2268 .decrypt = aead_decrypt, 2269 .ivsize = DES_BLOCK_SIZE, 2270 .maxauthsize = SHA256_DIGEST_SIZE, 2271 }, 2272 .caam = { 2273 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2274 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2275 OP_ALG_AAI_HMAC_PRECOMP, 2276 }, 2277 }, 2278 { 2279 .aead = { 2280 .base = { 2281 .cra_name = "echainiv(authenc(hmac(sha256)," 2282 "cbc(des)))", 2283 .cra_driver_name = "echainiv-authenc-" 2284 "hmac-sha256-cbc-des-" 2285 "caam-qi", 2286 .cra_blocksize = DES_BLOCK_SIZE, 2287 }, 2288 .setkey = aead_setkey, 2289 .setauthsize = aead_setauthsize, 2290 .encrypt = aead_encrypt, 2291 .decrypt = aead_decrypt, 2292 .ivsize = DES_BLOCK_SIZE, 2293 .maxauthsize = SHA256_DIGEST_SIZE, 2294 }, 2295 .caam = { 2296 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2297 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2298 OP_ALG_AAI_HMAC_PRECOMP, 2299 .geniv = true, 2300 }, 2301 }, 2302 { 2303 .aead = { 2304 .base = { 2305 .cra_name = "authenc(hmac(sha384),cbc(des))", 2306 .cra_driver_name = "authenc-hmac-sha384-" 2307 "cbc-des-caam-qi", 2308 .cra_blocksize = DES_BLOCK_SIZE, 2309 }, 2310 .setkey = aead_setkey, 2311 .setauthsize = aead_setauthsize, 2312 .encrypt = aead_encrypt, 2313 .decrypt = aead_decrypt, 2314 .ivsize = DES_BLOCK_SIZE, 2315 .maxauthsize = SHA384_DIGEST_SIZE, 2316 }, 2317 .caam = { 2318 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2319 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2320 OP_ALG_AAI_HMAC_PRECOMP, 2321 }, 2322 }, 2323 { 2324 .aead = { 2325 .base = { 2326 .cra_name = "echainiv(authenc(hmac(sha384)," 2327 "cbc(des)))", 2328 .cra_driver_name = "echainiv-authenc-" 2329 "hmac-sha384-cbc-des-" 2330 "caam-qi", 2331 .cra_blocksize = DES_BLOCK_SIZE, 2332 }, 2333 .setkey = aead_setkey, 2334 .setauthsize = aead_setauthsize, 2335 .encrypt = aead_encrypt, 2336 .decrypt = aead_decrypt, 2337 .ivsize = DES_BLOCK_SIZE, 2338 .maxauthsize = SHA384_DIGEST_SIZE, 2339 }, 2340 .caam = { 2341 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2342 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2343 OP_ALG_AAI_HMAC_PRECOMP, 2344 .geniv = true, 2345 } 2346 }, 2347 { 2348 .aead = { 2349 .base = { 2350 .cra_name = "authenc(hmac(sha512),cbc(des))", 2351 .cra_driver_name = "authenc-hmac-sha512-" 2352 "cbc-des-caam-qi", 2353 .cra_blocksize = DES_BLOCK_SIZE, 2354 }, 2355 .setkey = aead_setkey, 2356 .setauthsize = aead_setauthsize, 2357 .encrypt = aead_encrypt, 2358 .decrypt = aead_decrypt, 2359 .ivsize = DES_BLOCK_SIZE, 2360 .maxauthsize = SHA512_DIGEST_SIZE, 2361 }, 2362 .caam = { 2363 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2364 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2365 OP_ALG_AAI_HMAC_PRECOMP, 2366 } 2367 }, 2368 { 2369 .aead = { 2370 .base = { 2371 .cra_name = "echainiv(authenc(hmac(sha512)," 2372 "cbc(des)))", 2373 .cra_driver_name = "echainiv-authenc-" 2374 "hmac-sha512-cbc-des-" 2375 "caam-qi", 2376 .cra_blocksize = DES_BLOCK_SIZE, 2377 }, 2378 .setkey = aead_setkey, 2379 .setauthsize = aead_setauthsize, 2380 .encrypt = aead_encrypt, 2381 .decrypt = aead_decrypt, 2382 .ivsize = DES_BLOCK_SIZE, 2383 .maxauthsize = SHA512_DIGEST_SIZE, 2384 }, 2385 .caam = { 2386 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2387 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2388 OP_ALG_AAI_HMAC_PRECOMP, 2389 .geniv = true, 2390 } 2391 }, 2392 }; 2393 2394 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 2395 bool uses_dkp) 2396 { 2397 struct caam_drv_private *priv; 2398 struct device *dev; 2399 2400 /* 2401 * distribute tfms across job rings to ensure in-order 2402 * crypto request processing per tfm 2403 */ 2404 ctx->jrdev = caam_jr_alloc(); 2405 if (IS_ERR(ctx->jrdev)) { 2406 pr_err("Job Ring Device allocation for transform failed\n"); 2407 return PTR_ERR(ctx->jrdev); 2408 } 2409 2410 dev = ctx->jrdev->parent; 2411 priv = dev_get_drvdata(dev); 2412 if (priv->era >= 6 && uses_dkp) 2413 ctx->dir = DMA_BIDIRECTIONAL; 2414 else 2415 ctx->dir = DMA_TO_DEVICE; 2416 2417 ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), 2418 ctx->dir); 2419 if (dma_mapping_error(dev, ctx->key_dma)) { 2420 dev_err(dev, "unable to map key\n"); 2421 caam_jr_free(ctx->jrdev); 2422 return -ENOMEM; 2423 } 2424 2425 /* copy descriptor header template value */ 2426 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 2427 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 2428 2429 ctx->qidev = dev; 2430 2431 spin_lock_init(&ctx->lock); 2432 ctx->drv_ctx[ENCRYPT] = NULL; 2433 ctx->drv_ctx[DECRYPT] = NULL; 2434 2435 return 0; 2436 } 2437 2438 static int caam_cra_init(struct crypto_skcipher *tfm) 2439 { 2440 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 2441 struct caam_skcipher_alg *caam_alg = 2442 container_of(alg, typeof(*caam_alg), skcipher); 2443 2444 return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam, 2445 false); 2446 } 2447 2448 static int caam_aead_init(struct crypto_aead *tfm) 2449 { 2450 struct aead_alg *alg = crypto_aead_alg(tfm); 2451 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), 2452 aead); 2453 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 2454 2455 return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); 2456 } 2457 2458 static void caam_exit_common(struct caam_ctx *ctx) 2459 { 2460 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); 2461 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2462 2463 dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), 2464 ctx->dir); 2465 2466 caam_jr_free(ctx->jrdev); 2467 } 2468 2469 static void caam_cra_exit(struct crypto_skcipher *tfm) 2470 { 2471 caam_exit_common(crypto_skcipher_ctx(tfm)); 2472 } 2473 2474 static void caam_aead_exit(struct crypto_aead *tfm) 2475 { 2476 caam_exit_common(crypto_aead_ctx(tfm)); 2477 } 2478 2479 void caam_qi_algapi_exit(void) 2480 { 2481 int i; 2482 2483 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2484 struct caam_aead_alg *t_alg = driver_aeads + i; 2485 2486 if (t_alg->registered) 2487 crypto_unregister_aead(&t_alg->aead); 2488 } 2489 2490 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2491 struct caam_skcipher_alg *t_alg = driver_algs + i; 2492 2493 if (t_alg->registered) 2494 crypto_unregister_skcipher(&t_alg->skcipher); 2495 } 2496 } 2497 2498 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) 2499 { 2500 struct skcipher_alg *alg = &t_alg->skcipher; 2501 2502 alg->base.cra_module = THIS_MODULE; 2503 alg->base.cra_priority = CAAM_CRA_PRIORITY; 2504 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 2505 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 2506 CRYPTO_ALG_KERN_DRIVER_ONLY; 2507 2508 alg->init = caam_cra_init; 2509 alg->exit = caam_cra_exit; 2510 } 2511 2512 static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 2513 { 2514 struct aead_alg *alg = &t_alg->aead; 2515 2516 alg->base.cra_module = THIS_MODULE; 2517 alg->base.cra_priority = CAAM_CRA_PRIORITY; 2518 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 2519 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 2520 CRYPTO_ALG_KERN_DRIVER_ONLY; 2521 2522 alg->init = caam_aead_init; 2523 alg->exit = caam_aead_exit; 2524 } 2525 2526 int caam_qi_algapi_init(struct device *ctrldev) 2527 { 2528 struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 2529 int i = 0, err = 0; 2530 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; 2531 unsigned int md_limit = SHA512_DIGEST_SIZE; 2532 bool registered = false; 2533 2534 /* Make sure this runs only on (DPAA 1.x) QI */ 2535 if (!priv->qi_present || caam_dpaa2) 2536 return 0; 2537 2538 /* 2539 * Register crypto algorithms the device supports. 2540 * First, detect presence and attributes of DES, AES, and MD blocks. 2541 */ 2542 if (priv->era < 10) { 2543 u32 cha_vid, cha_inst; 2544 2545 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 2546 aes_vid = cha_vid & CHA_ID_LS_AES_MASK; 2547 md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2548 2549 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 2550 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> 2551 CHA_ID_LS_DES_SHIFT; 2552 aes_inst = cha_inst & CHA_ID_LS_AES_MASK; 2553 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2554 } else { 2555 u32 aesa, mdha; 2556 2557 aesa = rd_reg32(&priv->ctrl->vreg.aesa); 2558 mdha = rd_reg32(&priv->ctrl->vreg.mdha); 2559 2560 aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2561 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2562 2563 des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; 2564 aes_inst = aesa & CHA_VER_NUM_MASK; 2565 md_inst = mdha & CHA_VER_NUM_MASK; 2566 } 2567 2568 /* If MD is present, limit digest size based on LP256 */ 2569 if (md_inst && md_vid == CHA_VER_VID_MD_LP256) 2570 md_limit = SHA256_DIGEST_SIZE; 2571 2572 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2573 struct caam_skcipher_alg *t_alg = driver_algs + i; 2574 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; 2575 2576 /* Skip DES algorithms if not supported by device */ 2577 if (!des_inst && 2578 ((alg_sel == OP_ALG_ALGSEL_3DES) || 2579 (alg_sel == OP_ALG_ALGSEL_DES))) 2580 continue; 2581 2582 /* Skip AES algorithms if not supported by device */ 2583 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 2584 continue; 2585 2586 caam_skcipher_alg_init(t_alg); 2587 2588 err = crypto_register_skcipher(&t_alg->skcipher); 2589 if (err) { 2590 dev_warn(ctrldev, "%s alg registration failed\n", 2591 t_alg->skcipher.base.cra_driver_name); 2592 continue; 2593 } 2594 2595 t_alg->registered = true; 2596 registered = true; 2597 } 2598 2599 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2600 struct caam_aead_alg *t_alg = driver_aeads + i; 2601 u32 c1_alg_sel = t_alg->caam.class1_alg_type & 2602 OP_ALG_ALGSEL_MASK; 2603 u32 c2_alg_sel = t_alg->caam.class2_alg_type & 2604 OP_ALG_ALGSEL_MASK; 2605 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; 2606 2607 /* Skip DES algorithms if not supported by device */ 2608 if (!des_inst && 2609 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || 2610 (c1_alg_sel == OP_ALG_ALGSEL_DES))) 2611 continue; 2612 2613 /* Skip AES algorithms if not supported by device */ 2614 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) 2615 continue; 2616 2617 /* 2618 * Check support for AES algorithms not available 2619 * on LP devices. 2620 */ 2621 if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) 2622 continue; 2623 2624 /* 2625 * Skip algorithms requiring message digests 2626 * if MD or MD size is not supported by device. 2627 */ 2628 if (c2_alg_sel && 2629 (!md_inst || (t_alg->aead.maxauthsize > md_limit))) 2630 continue; 2631 2632 caam_aead_alg_init(t_alg); 2633 2634 err = crypto_register_aead(&t_alg->aead); 2635 if (err) { 2636 pr_warn("%s alg registration failed\n", 2637 t_alg->aead.base.cra_driver_name); 2638 continue; 2639 } 2640 2641 t_alg->registered = true; 2642 registered = true; 2643 } 2644 2645 if (registered) 2646 dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); 2647 2648 return err; 2649 } 2650