// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
 * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "tegra-se.h"

struct tegra_aes_ctx {
	struct tegra_se *se;
	u32 alg;
	u32 ivsize;
	u32 key1_id;
	u32 key2_id;
};

struct tegra_aes_reqctx {
	struct tegra_se_datbuf datbuf;
	bool encrypt;
	u32 config;
	u32 crypto_config;
	u32 len;
	u32 *iv;
};

struct tegra_aead_ctx {
	struct tegra_se *se;
	unsigned int authsize;
	u32 alg;
	u32 keylen;
	u32 key_id;
};

struct tegra_aead_reqctx {
	struct tegra_se_datbuf inbuf;
	struct tegra_se_datbuf outbuf;
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	unsigned int assoclen;
	unsigned int cryptlen;
	unsigned int authsize;
	bool encrypt;
	u32 config;
	u32 crypto_config;
	u32 key_id;
	u32 iv[4];
	u8 authdata[16];
};

struct tegra_cmac_ctx {
	struct tegra_se *se;
	unsigned int alg;
	u32 key_id;
	struct crypto_shash *fallback_tfm;
};

struct tegra_cmac_reqctx {
	struct scatterlist *src_sg;
	struct tegra_se_datbuf datbuf;
	struct tegra_se_datbuf residue;
	unsigned int total_len;
	unsigned int blk_size;
	unsigned int task;
	u32 crypto_config;
	u32 config;
	u32 key_id;
	u32 *iv;
	u32 result[CMAC_RESULT_REG_COUNT];
};

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & 0xff;
		nums >>= 8;
	} while (bits && nums);
}

static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
	unsigned int offset;

	offset = req->cryptlen - ctx->ivsize;

	if (rctx->encrypt)
		memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
	else
		scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
}

static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
	int num;

	if (ctx->alg == SE_ALG_CBC) {
		tegra_cbc_iv_copyback(req, ctx);
	} else if (ctx->alg == SE_ALG_CTR) {
		num = req->cryptlen / ctx->ivsize;
		if (req->cryptlen % ctx->ivsize)
			num++;

		ctr_iv_inc(req->iv, ctx->ivsize, num);
	}
}

static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
{
	switch (alg) {
	case SE_ALG_CMAC:
	case SE_ALG_GMAC:
	case SE_ALG_GCM:
	case SE_ALG_GCM_FINAL:
		return 0;
	case SE_ALG_CBC:
		if (encrypt)
			return SE_CRYPTO_CFG_CBC_ENCRYPT;
		else
			return SE_CRYPTO_CFG_CBC_DECRYPT;
	case SE_ALG_ECB:
		if (encrypt)
			return SE_CRYPTO_CFG_ECB_ENCRYPT;
		else
			return SE_CRYPTO_CFG_ECB_DECRYPT;
	case SE_ALG_XTS:
		if (encrypt)
			return SE_CRYPTO_CFG_XTS_ENCRYPT;
		else
			return SE_CRYPTO_CFG_XTS_DECRYPT;

	case SE_ALG_CTR:
		return SE_CRYPTO_CFG_CTR;
	case SE_ALG_CBC_MAC:
		return SE_CRYPTO_CFG_CBC_MAC;

	default:
		break;
	}

	return -EINVAL;
}

static int tegra234_aes_cfg(u32 alg, bool encrypt)
{
	switch (alg) {
	case SE_ALG_CBC:
	case SE_ALG_ECB:
	case SE_ALG_XTS:
	case SE_ALG_CTR:
		if (encrypt)
			return SE_CFG_AES_ENCRYPT;
		else
			return SE_CFG_AES_DECRYPT;

	case SE_ALG_GMAC:
		if (encrypt)
			return SE_CFG_GMAC_ENCRYPT;
		else
			return SE_CFG_GMAC_DECRYPT;

	case SE_ALG_GCM:
		if (encrypt)
			return SE_CFG_GCM_ENCRYPT;
		else
			return SE_CFG_GCM_DECRYPT;

	case SE_ALG_GCM_FINAL:
		if (encrypt)
			return SE_CFG_GCM_FINAL_ENCRYPT;
		else
			return SE_CFG_GCM_FINAL_DECRYPT;

	case SE_ALG_CMAC:
		return SE_CFG_CMAC;

	case SE_ALG_CBC_MAC:
		return SE_AES_ENC_ALG_AES_ENC |
		       SE_AES_DST_HASH_REG;
	}
	return -EINVAL;
}

static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
				       struct tegra_aes_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;
	dma_addr_t addr = rctx->datbuf.addr;

	data_count = rctx->len / AES_BLOCK_SIZE;
	res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;

	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
	if (!res_bits)
		data_count--;

	if (rctx->iv) {
		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
			cpuvaddr[i++] = rctx->iv[j];
	}

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address setting */
	cpuvaddr[i++] = lower_32_bits(addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);

	/* Destination address setting */
	cpuvaddr[i++] = lower_32_bits(addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
			SE_ADDR_HI_SZ(rctx->len);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

	return i;
}

static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen;
	int ret;

	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		return -ENOMEM;

	rctx->datbuf.size = SE_AES_BUFLEN;
	rctx->iv = (u32 *)req->iv;
	rctx->len = req->cryptlen;

	/* Pad input to AES Block size */
	if (ctx->alg != SE_ALG_XTS) {
		if (rctx->len % AES_BLOCK_SIZE)
			rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
	}

	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);

	/* Prepare the command and submit for execution */
	cmdlen = tegra_aes_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		goto out;

	/* Copy the result */
	tegra_aes_update_iv(req, ctx);
	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);

out:
	/* Free the buffer */
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->datbuf.buf, rctx->datbuf.addr);

	crypto_finalize_skcipher_request(se->engine, req, ret);

	return 0;
}

static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));

	ctx->ivsize = crypto_skcipher_ivsize(tfm);
	ctx->se = se_alg->se_dev;
	ctx->key1_id = 0;
	ctx->key2_id = 0;

	algname = crypto_tfm_alg_name(&tfm->base);
	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	return 0;
}

static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
{
	struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	if (ctx->key1_id)
		tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);

	if (ctx->key2_id)
		tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
}

static int tegra_aes_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
}

static int tegra_xts_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 len = keylen / 2;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret || aes_check_keylen(len)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	ret = tegra_key_submit(ctx->se, key, len,
			       ctx->alg, &ctx->key1_id);
	if (ret)
		return ret;

	return tegra_key_submit(ctx->se, key + len, len,
				ctx->alg, &ctx->key2_id);
}

static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
{
	int manifest;

	manifest = SE_KAC_USER_NS;

	switch (alg) {
	case SE_ALG_CBC:
	case SE_ALG_ECB:
	case SE_ALG_CTR:
		manifest |= SE_KAC_ENC;
		break;
	case SE_ALG_XTS:
		manifest |= SE_KAC_XTS;
		break;
	case SE_ALG_GCM:
		manifest |= SE_KAC_GCM;
		break;
	case SE_ALG_CMAC:
		manifest |= SE_KAC_CMAC;
		break;
	case SE_ALG_CBC_MAC:
		manifest |= SE_KAC_ENC;
		break;
	default:
		return -EINVAL;
	}

	switch (keylen) {
	case AES_KEYSIZE_128:
		manifest |= SE_KAC_SIZE_128;
		break;
	case AES_KEYSIZE_192:
		manifest |= SE_KAC_SIZE_192;
		break;
	case AES_KEYSIZE_256:
		manifest |= SE_KAC_SIZE_256;
		break;
	default:
		return -EINVAL;
	}

	return manifest;
}

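/*
 * Validate the request length for the selected cipher mode, cache the
 * SE configuration words in the request context and queue the request
 * to the crypto engine. The actual processing happens asynchronously
 * in tegra_aes_do_one_req().
 */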
static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm;
	struct tegra_aes_ctx *ctx;
	struct tegra_aes_reqctx *rctx;

	tfm = crypto_skcipher_reqtfm(req);
	ctx = crypto_skcipher_ctx(tfm);
	rctx = skcipher_request_ctx(req);

	if (ctx->alg != SE_ALG_XTS) {
		if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
			dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
			return -EINVAL;
		}
	} else if (req->cryptlen < XTS_BLOCK_SIZE) {
		dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
		return -EINVAL;
	}

	if (!req->cryptlen)
		return 0;

	rctx->encrypt = encrypt;
	rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
	rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);

	if (ctx->key2_id)
		rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);

	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}

static int tegra_aes_encrypt(struct skcipher_request *req)
{
	return tegra_aes_crypt(req, true);
}

static int tegra_aes_decrypt(struct skcipher_request *req)
{
	return tegra_aes_crypt(req, false);
}

static struct tegra_se_alg tegra_aes_algs[] = {
	{
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.base = {
				.cra_name = "ecb(aes)",
				.cra_driver_name = "ecb-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_xts_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-tegra",
				.cra_priority = 500,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = (__alignof__(u64) - 1),
				.cra_module = THIS_MODULE,
			},
		}
	},
};

static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
					struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	data_count = (rctx->assoclen / AES_BLOCK_SIZE);
	res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;

	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
	if (!res_bits)
		data_count--;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->assoclen);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
			SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
					     struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr, op;

	data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
	res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
	     SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/*
	 * If there is no assoc data,
	 * this will be the init command
	 */
	if (!rctx->assoclen)
		op |= SE_AES_OP_INIT;

	/*
	 * Hardware processes data_count + 1 blocks.
	 * Reduce 1 block if there is no residue
	 */
	if (!res_bits)
		data_count--;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source Address */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->cryptlen);

	/* Destination Address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->cryptlen);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
	return i;
}

static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
				    struct tegra_aead_reqctx *rctx)
{
	unsigned int i = 0, j;
	u32 op;

	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
	     SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/*
	 * Set init for zero sized vector
	 */
	if (!rctx->assoclen && !rctx->cryptlen)
		op |= SE_AES_OP_INIT;

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
	cpuvaddr[i++] = rctx->assoclen * 8;
	cpuvaddr[i++] = 0;

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
	cpuvaddr[i++] = rctx->cryptlen * 8;
	cpuvaddr[i++] = 0;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;
	cpuvaddr[i++] = 0;
	cpuvaddr[i++] = 0;

	/* Destination Address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

	return i;
}

static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen;

	scatterwalk_map_and_copy(rctx->inbuf.buf,
				 rctx->src_sg, 0, rctx->assoclen, 0);

	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	cmdlen = tegra_gmac_prep_cmd(ctx, rctx);

	return tegra_se_host1x_submit(se, cmdlen);
}

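/*
 * Copy the payload into the bounce buffer, run the GCM encrypt/decrypt
 * pass on the engine and copy the result back to the destination
 * scatterlist at the same offset.
 */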
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	int cmdlen, ret;

	scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
				 rctx->assoclen, rctx->cryptlen, 0);

	rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		return ret;

	/* Copy the result */
	scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
				 rctx->assoclen, rctx->cryptlen, 1);

	return 0;
}

static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;
	int cmdlen, ret, offset;

	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		return ret;

	if (rctx->encrypt) {
		/* Copy the result */
		offset = rctx->assoclen + rctx->cryptlen;
		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
					 offset, rctx->authsize, 1);
	}

	return 0;
}

static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	unsigned int offset;
	u8 mac[16];

	offset = rctx->assoclen + rctx->cryptlen;
	scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);

	if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
		return -EBADMSG;

	return 0;
}

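/*
 * CCM (RFC 3610 / NIST SP 800-38C) is handled as two passes on the
 * engine: a CBC-MAC pass over the formatted B_0 block, the encoded
 * associated data and the padded payload, and an AES-CTR pass over the
 * payload and the MAC. The helpers below format those blocks and
 * collect the results of each pass.
 */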
static inline int tegra_ccm_check_iv(const u8 *iv)
{
	/* iv[0] gives value of q-1
	 * 2 <= q <= 8 as per NIST 800-38C notation
	 * 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation
	 */
	if (iv[0] < 1 || iv[0] > 7) {
		pr_debug("ccm_check_iv failed %d\n", iv[0]);
		return -EINVAL;
	}

	return 0;
}

static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
					  struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, i = 0;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL |
			SE_AES_OP_LASTBUF | SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
				       struct tegra_aead_reqctx *rctx)
{
	unsigned int i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address setting */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	/* Destination address setting */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
		rctx->config, rctx->crypto_config);

	return i;
}

static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	int cmdlen;

	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
						      rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);

	return tegra_se_host1x_submit(se, cmdlen);
}

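/*
 * Encode the message length into the trailing @csize octets of the
 * first block (B_0), most significant byte first, as CCM requires.
 * Lengths that do not fit into @csize octets are rejected with
 * -EOVERFLOW.
 */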
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
{
	unsigned int q, t;
	u8 *q_ptr, *iv = (u8 *)rctx->iv;

	memcpy(nonce, rctx->iv, 16);

	/*** 1. Prepare Flags Octet ***/

	/* Encode t (mac length) */
	t = rctx->authsize;
	nonce[0] |= (((t - 2) / 2) << 3);

	/* Adata */
	if (rctx->assoclen)
		nonce[0] |= (1 << 6);

	/*** Encode Q - message length ***/
	q = iv[0] + 1;
	q_ptr = nonce + 16 - q;

	return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
}

static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
{
	int len = 0;

	/* add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (a < 65280) {
		*(__be16 *)adata = cpu_to_be16(a);
		len = 2;
	} else {
		*(__be16 *)adata = cpu_to_be16(0xfffe);
		*(__be32 *)&adata[2] = cpu_to_be32(a);
		len = 6;
	}

	return len;
}

static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
{
	unsigned int padlen = 16 - (len % 16);
	u8 padding[16] = {0};

	if (padlen == 16)
		return 0;

	memcpy(buf, padding, padlen);

	return padlen;
}

static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
{
	unsigned int alen = 0, offset = 0;
	u8 nonce[16], adata[16];
	int ret;

	ret = tegra_ccm_format_nonce(rctx, nonce);
	if (ret)
		return ret;

	memcpy(rctx->inbuf.buf, nonce, 16);
	offset = 16;

	if (rctx->assoclen) {
		alen = tegra_ccm_format_adata(adata, rctx->assoclen);
		memcpy(rctx->inbuf.buf + offset, adata, alen);
		offset += alen;

		scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
					 rctx->src_sg, 0, rctx->assoclen, 0);

		offset += rctx->assoclen;
		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
						rctx->assoclen + alen);
	}

	return offset;
}

static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	u32 result[16];
	int i, ret;

	/* Read and clear Result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		result[i] = readl(se->base + se->hw->regs->result + (i * 4));

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	if (rctx->encrypt) {
		memcpy(rctx->authdata, result, rctx->authsize);
	} else {
		ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
		if (ret)
			return -EBADMSG;
	}

	return 0;
}

static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	/* Copy result */
	scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
				 rctx->assoclen, rctx->cryptlen, 1);

	if (rctx->encrypt)
		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
					 rctx->assoclen + rctx->cryptlen,
					 rctx->authsize, 1);
	else
		memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);

	return 0;
}

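/*
 * Build the CBC-MAC input (B_0, the encoded associated data and the
 * padded payload) in the bounce buffer, run the CBC-MAC pass and then
 * record the tag (encrypt) or compare it against the received one
 * (decrypt).
 */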
static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	struct scatterlist *sg;
	int offset, ret;

	offset = tegra_ccm_format_blocks(rctx);
	if (offset < 0)
		return -EINVAL;

	/* Copy plain text to the buffer */
	sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;

	scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
				 sg, rctx->assoclen,
				 rctx->cryptlen, 0);
	offset += rctx->cryptlen;
	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);

	rctx->inbuf.size = offset;

	ret = tegra_ccm_do_cbcmac(ctx, rctx);
	if (ret)
		return ret;

	return tegra_ccm_mac_result(se, rctx);
}

static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen, offset = 0;
	struct scatterlist *sg = rctx->src_sg;
	int ret;

	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	/* Copy authdata in the top of buffer for encryption/decryption */
	if (rctx->encrypt)
		memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
	else
		scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
					 rctx->assoclen + rctx->cryptlen,
					 rctx->authsize, 0);

	offset += rctx->authsize;
	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);

	/* If there is no cryptlen, proceed to submit the task */
	if (rctx->cryptlen) {
		scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
					 rctx->assoclen, rctx->cryptlen, 0);
		offset += rctx->cryptlen;
		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
	}

	rctx->inbuf.size = offset;

	/* Prepare command and submit */
	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		return ret;

	return tegra_ccm_ctr_result(se, rctx);
}

static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
				struct tegra_aead_reqctx *rctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u8 *iv = (u8 *)rctx->iv;
	int ret, i;

	rctx->src_sg = req->src;
	rctx->dst_sg = req->dst;
	rctx->assoclen = req->assoclen;
	rctx->authsize = crypto_aead_authsize(tfm);

	memcpy(iv, req->iv, 16);

	ret = tegra_ccm_check_iv(iv);
	if (ret)
		return ret;

	/* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
	 * zero to encrypt auth tag.
	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* Clear any previous result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	return 0;
}

static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int ret;

	/* Allocate buffers required */
	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
					     &rctx->inbuf.addr, GFP_KERNEL);
	if (!rctx->inbuf.buf)
		return -ENOMEM;

	rctx->inbuf.size = SE_AES_BUFLEN;

	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
					      &rctx->outbuf.addr, GFP_KERNEL);
	if (!rctx->outbuf.buf) {
		ret = -ENOMEM;
		goto outbuf_err;
	}

	rctx->outbuf.size = SE_AES_BUFLEN;

	ret = tegra_ccm_crypt_init(req, se, rctx);
	if (ret)
		goto out;

	if (rctx->encrypt) {
		rctx->cryptlen = req->cryptlen;

		/* CBC MAC Operation */
		ret = tegra_ccm_compute_auth(ctx, rctx);
		if (ret)
			goto out;

		/* CTR operation */
		ret = tegra_ccm_do_ctr(ctx, rctx);
		if (ret)
			goto out;
	} else {
		rctx->cryptlen = req->cryptlen - ctx->authsize;

		/* CTR operation */
		ret = tegra_ccm_do_ctr(ctx, rctx);
		if (ret)
			goto out;

		/* CBC MAC Operation */
		ret = tegra_ccm_compute_auth(ctx, rctx);
		if (ret)
			goto out;
	}

out:
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->outbuf.buf, rctx->outbuf.addr);

outbuf_err:
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->inbuf.buf, rctx->inbuf.addr);

	crypto_finalize_aead_request(ctx->se->engine, req, ret);

	return 0;
}

static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
	int ret;

	/* Allocate buffers required */
	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
					     &rctx->inbuf.addr, GFP_KERNEL);
	if (!rctx->inbuf.buf)
		return -ENOMEM;

	rctx->inbuf.size = SE_AES_BUFLEN;

	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
					      &rctx->outbuf.addr, GFP_KERNEL);
	if (!rctx->outbuf.buf) {
		ret = -ENOMEM;
		goto outbuf_err;
	}

	rctx->outbuf.size = SE_AES_BUFLEN;

	rctx->src_sg = req->src;
	rctx->dst_sg = req->dst;
	rctx->assoclen = req->assoclen;
	rctx->authsize = crypto_aead_authsize(tfm);

	if (rctx->encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - ctx->authsize;

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	rctx->iv[3] = (1 << 24);

	/* If there is associated data perform GMAC operation */
	if (rctx->assoclen) {
		ret = tegra_gcm_do_gmac(ctx, rctx);
		if (ret)
			goto out;
	}

	/* GCM Encryption/Decryption operation */
	if (rctx->cryptlen) {
		ret = tegra_gcm_do_crypt(ctx, rctx);
		if (ret)
			goto out;
	}

	/* GCM_FINAL operation */
	ret = tegra_gcm_do_final(ctx, rctx);
	if (ret)
		goto out;

	if (!rctx->encrypt)
		ret = tegra_gcm_do_verify(ctx->se, rctx);

out:
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->outbuf.buf, rctx->outbuf.addr);

outbuf_err:
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->inbuf.buf, rctx->inbuf.addr);

	/* Finalize the request with the operation status */
	crypto_finalize_aead_request(ctx->se->engine, req, ret);

	return 0;
}

static int tegra_aead_cra_init(struct crypto_aead *tfm)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	algname = crypto_tfm_alg_name(&tfm->base);

	se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);

	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));

	ctx->se = se_alg->se_dev;
	ctx->key_id = 0;

	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	return 0;
}

static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);

	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	ctx->authsize = authsize;

	return 0;
}

static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	ret = crypto_gcm_check_authsize(authsize);
	if (ret)
		return ret;

	ctx->authsize = authsize;

	return 0;
}

static void tegra_aead_cra_exit(struct crypto_aead *tfm)
{
	struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	if (ctx->key_id)
		tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);

	rctx->encrypt = encrypt;

	return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
}

static int tegra_aead_encrypt(struct aead_request *req)
{
	return tegra_aead_crypt(req, true);
}

static int tegra_aead_decrypt(struct aead_request *req)
{
	return tegra_aead_crypt(req, false);
}

static int tegra_aead_setkey(struct crypto_aead *tfm,
			     const u8 *key, u32 keylen)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}

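/*
 * Build the host1x command stream for one CMAC pass: load a zero IV on
 * the first update, program the last block index and residual bit
 * count, point the engine at the data buffer and start the AES
 * operation, setting SE_AES_OP_FINAL only for the closing chunk.
 */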
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
					struct tegra_cmac_reqctx *rctx)
{
	unsigned int data_count, res_bits = 0, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr, op;

	data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);

	op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;

	if (!(rctx->task & SHA_UPDATE)) {
		op |= SE_AES_OP_FINAL;
		res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
	}

	if (!res_bits && data_count)
		data_count--;

	if (rctx->task & SHA_FIRST) {
		rctx->task &= ~SHA_FIRST;

		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
		/* Load 0 IV */
		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
			cpuvaddr[i++] = 0;
	}

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source Address */
	cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->datbuf.size);
	cpuvaddr[i++] = 0;
	cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
	int i;

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
}

static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
	int i;

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(rctx->result[i],
		       se->base + se->hw->regs->result + (i * 4));
}

static int tegra_cmac_do_update(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	unsigned int nblks, nresidue, cmdlen;
	int ret;

	if (!req->nbytes)
		return 0;

	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;

	/*
	 * Reserve the last block as residue during final() to process.
	 */
	if (!nresidue && nblks) {
		nresidue += rctx->blk_size;
		nblks--;
	}

	rctx->src_sg = req->src;
	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
	rctx->total_len += rctx->datbuf.size;
	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
	rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);

	/*
	 * Keep one block and residue bytes in residue and
	 * return. The bytes will be processed in final()
	 */
	if (nblks < 1) {
		scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
					 rctx->src_sg, 0, req->nbytes, 0);

		rctx->residue.size += req->nbytes;
		return 0;
	}

	/* Copy the previous residue first */
	if (rctx->residue.size)
		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);

	scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
				 rctx->src_sg, 0, req->nbytes - nresidue, 0);

	scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
				 req->nbytes - nresidue, nresidue, 0);

	/* Update residue value with the residue after current block */
	rctx->residue.size = nresidue;

	/*
	 * If this is not the first 'update' call, paste the previous copied
	 * intermediate results to the registers so that it gets picked up.
	 * This is to support the import/export functionality.
	 */
	if (!(rctx->task & SHA_FIRST))
		tegra_cmac_paste_result(ctx->se, rctx);

	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);

	ret = tegra_se_host1x_submit(se, cmdlen);
	/*
	 * If this is not the final update, copy the intermediate results
	 * from the registers so that it can be used in the next 'update'
	 * call. This is to support the import/export functionality.
	 */
	if (!(rctx->task & SHA_FINAL))
		tegra_cmac_copy_result(ctx->se, rctx);

	return ret;
}

static int tegra_cmac_do_final(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	u32 *result = (u32 *)req->result;
	int ret = 0, i, cmdlen;

	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
		/* Empty message: use the software fallback, then release the buffers */
		ret = crypto_shash_tfm_digest(ctx->fallback_tfm,
					      rctx->datbuf.buf, 0, req->result);
		goto out;
	}

	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
	rctx->datbuf.size = rctx->residue.size;
	rctx->total_len += rctx->residue.size;
	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);

	/* Prepare command and submit */
	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		goto out;

	/* Read and clear Result register */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		result[i] = readl(se->base + se->hw->regs->result + (i * 4));

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

out:
	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
			  rctx->datbuf.buf, rctx->datbuf.addr);
	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
			  rctx->residue.buf, rctx->residue.addr);
	return ret;
}

static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int ret = 0;

	if (rctx->task & SHA_UPDATE) {
		ret = tegra_cmac_do_update(req);
		rctx->task &= ~SHA_UPDATE;
	}

	if (rctx->task & SHA_FINAL) {
		ret = tegra_cmac_do_final(req);
		rctx->task &= ~SHA_FINAL;
	}

	crypto_finalize_hash_request(se->engine, req, ret);

	return 0;
}

static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
				     const char *algname)
{
	unsigned int statesize;

	ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback_tfm)) {
		dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
		ctx->fallback_tfm = NULL;
		return;
	}

	statesize = crypto_shash_statesize(ctx->fallback_tfm);

	if (statesize > sizeof(struct tegra_cmac_reqctx))
		crypto_ahash_set_statesize(tfm, statesize);
}

static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	algname = crypto_tfm_alg_name(tfm);
	se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);

	crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));

	ctx->se = se_alg->se_dev;
	ctx->key_id = 0;

	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	tegra_cmac_init_fallback(ahash_tfm, ctx, algname);

	return 0;
}

static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback_tfm)
		crypto_free_shash(ctx->fallback_tfm);

	tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

static int tegra_cmac_init(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int i;

	rctx->total_len = 0;
	rctx->datbuf.size = 0;
	rctx->residue.size = 0;
	rctx->task = SHA_FIRST;
	rctx->blk_size = crypto_ahash_blocksize(tfm);

	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
					       &rctx->residue.addr, GFP_KERNEL);
	if (!rctx->residue.buf)
		goto resbuf_fail;

	rctx->residue.size = 0;

	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		goto datbuf_fail;

	rctx->datbuf.size = 0;

	/* Clear any previous result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	return 0;

datbuf_fail:
	dma_free_coherent(se->dev, rctx->blk_size * 2, rctx->residue.buf,
			  rctx->residue.addr);
resbuf_fail:
	return -ENOMEM;
}

static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	if (ctx->fallback_tfm)
		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);

	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}

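/*
 * The hash entry points below only record which phase was requested and
 * queue the request to the crypto engine; the actual processing happens
 * asynchronously in tegra_cmac_do_one_req().
 */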
static int tegra_cmac_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_UPDATE;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_UPDATE | SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	tegra_cmac_init(req);
	rctx->task |= SHA_UPDATE | SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_export(struct ahash_request *req, void *out)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int tegra_cmac_import(struct ahash_request *req, const void *in)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(*rctx));

	return 0;
}

static struct tegra_se_alg tegra_aead_algs[] = {
	{
		.alg.aead.op.do_one_request = tegra_gcm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey = tegra_aead_setkey,
			.setauthsize = tegra_gcm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize = GCM_AES_IV_SIZE,
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-tegra",
				.cra_priority = 500,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.aead.op.do_one_request = tegra_ccm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey = tegra_aead_setkey,
			.setauthsize = tegra_ccm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-tegra",
				.cra_priority = 500,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}
};

static struct tegra_se_alg tegra_cmac_algs[] = {
	{
		.alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
		.alg.ahash.base = {
			.init = tegra_cmac_init,
			.setkey = tegra_cmac_setkey,
			.update = tegra_cmac_update,
			.final = tegra_cmac_final,
			.finup = tegra_cmac_finup,
			.digest = tegra_cmac_digest,
			.export = tegra_cmac_export,
			.import = tegra_cmac_import,
			.halg.digestsize = AES_BLOCK_SIZE,
			.halg.statesize = sizeof(struct tegra_cmac_reqctx),
			.halg.base = {
				.cra_name = "cmac(aes)",
				.cra_driver_name = "tegra-se-cmac",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = tegra_cmac_cra_init,
				.cra_exit = tegra_cmac_cra_exit,
			}
		}
	}
};

int tegra_init_aes(struct tegra_se *se)
{
	struct aead_engine_alg *aead_alg;
	struct ahash_engine_alg *ahash_alg;
	struct skcipher_engine_alg *sk_alg;
	int i, ret;

	se->manifest = tegra_aes_kac_manifest;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
		sk_alg = &tegra_aes_algs[i].alg.skcipher;
		tegra_aes_algs[i].se_dev = se;

		ret = crypto_engine_register_skcipher(sk_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				sk_alg->base.base.cra_name);
			goto err_aes;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
		aead_alg = &tegra_aead_algs[i].alg.aead;
		tegra_aead_algs[i].se_dev = se;

		ret = crypto_engine_register_aead(aead_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				aead_alg->base.base.cra_name);
			goto err_aead;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
		ahash_alg = &tegra_cmac_algs[i].alg.ahash;
		tegra_cmac_algs[i].se_dev = se;

		ret = crypto_engine_register_ahash(ahash_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				ahash_alg->base.halg.base.cra_name);
			goto err_cmac;
		}
	}

	return 0;

err_cmac:
	while (i--)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);

	i = ARRAY_SIZE(tegra_aead_algs);
err_aead:
	while (i--)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	i = ARRAY_SIZE(tegra_aes_algs);
err_aes:
	while (i--)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	return ret;
}

void tegra_deinit_aes(struct tegra_se *se)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
}
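
/*
 * Illustrative sketch, not part of the driver: once tegra_init_aes() has
 * registered the algorithms above, a kernel consumer would reach them
 * through the regular crypto API and wait for the async completion, e.g.:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * Error handling is omitted; key, src_sg, dst_sg, len and iv are assumed
 * to be provided by the caller.
 */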