1 // SPDX-License-Identifier: GPL-2.0-only
2 // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 /*
4 * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
5 */
6
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/module.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12
13 #include <crypto/aead.h>
14 #include <crypto/aes.h>
15 #include <crypto/engine.h>
16 #include <crypto/gcm.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/xts.h>
19 #include <crypto/internal/aead.h>
20 #include <crypto/internal/hash.h>
21 #include <crypto/internal/skcipher.h>
22
23 #include "tegra-se.h"
24
25 struct tegra_aes_ctx {
26 struct tegra_se *se;
27 u32 alg;
28 u32 ivsize;
29 u32 key1_id;
30 u32 key2_id;
31 };
32
33 struct tegra_aes_reqctx {
34 struct tegra_se_datbuf datbuf;
35 bool encrypt;
36 u32 config;
37 u32 crypto_config;
38 u32 len;
39 u32 *iv;
40 };
41
42 struct tegra_aead_ctx {
43 struct tegra_se *se;
44 unsigned int authsize;
45 u32 alg;
46 u32 keylen;
47 u32 key_id;
48 };
49
50 struct tegra_aead_reqctx {
51 struct tegra_se_datbuf inbuf;
52 struct tegra_se_datbuf outbuf;
53 struct scatterlist *src_sg;
54 struct scatterlist *dst_sg;
55 unsigned int assoclen;
56 unsigned int cryptlen;
57 unsigned int authsize;
58 bool encrypt;
59 u32 config;
60 u32 crypto_config;
61 u32 key_id;
62 u32 iv[4];
63 u8 authdata[16];
64 };
65
66 struct tegra_cmac_ctx {
67 struct tegra_se *se;
68 unsigned int alg;
69 u32 key_id;
70 struct crypto_shash *fallback_tfm;
71 };
72
73 struct tegra_cmac_reqctx {
74 struct scatterlist *src_sg;
75 struct tegra_se_datbuf datbuf;
76 struct tegra_se_datbuf residue;
77 unsigned int total_len;
78 unsigned int blk_size;
79 unsigned int task;
80 u32 crypto_config;
81 u32 config;
82 u32 key_id;
83 u32 *iv;
84 u32 result[CMAC_RESULT_REG_COUNT];
85 };
86
87 /* increment counter (128-bit int) */
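/*
 * The counter is treated as a big-endian integer and advanced by @nums
 * (one per AES block consumed); despite its name, @bits is the counter
 * length in bytes (the IV size). This is comparable to what crypto_inc()
 * does for a single-step increment.
 */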
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
89 {
90 do {
91 --bits;
92 nums += counter[bits];
93 counter[bits] = nums & 0xff;
94 nums >>= 8;
95 } while (bits && nums);
96 }
97
static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
99 {
100 struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
101 unsigned int offset;
102
103 offset = req->cryptlen - ctx->ivsize;
104
105 if (rctx->encrypt)
106 memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
107 else
108 scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
109 }
110
static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
112 {
113 int num;
114
115 if (ctx->alg == SE_ALG_CBC) {
116 tegra_cbc_iv_copyback(req, ctx);
117 } else if (ctx->alg == SE_ALG_CTR) {
118 num = req->cryptlen / ctx->ivsize;
119 if (req->cryptlen % ctx->ivsize)
120 num++;
121
122 ctr_iv_inc(req->iv, ctx->ivsize, num);
123 }
124 }
125
static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
127 {
128 switch (alg) {
129 case SE_ALG_CMAC:
130 case SE_ALG_GMAC:
131 case SE_ALG_GCM:
132 case SE_ALG_GCM_FINAL:
133 return 0;
134 case SE_ALG_CBC:
135 if (encrypt)
136 return SE_CRYPTO_CFG_CBC_ENCRYPT;
137 else
138 return SE_CRYPTO_CFG_CBC_DECRYPT;
139 case SE_ALG_ECB:
140 if (encrypt)
141 return SE_CRYPTO_CFG_ECB_ENCRYPT;
142 else
143 return SE_CRYPTO_CFG_ECB_DECRYPT;
144 case SE_ALG_XTS:
145 if (encrypt)
146 return SE_CRYPTO_CFG_XTS_ENCRYPT;
147 else
148 return SE_CRYPTO_CFG_XTS_DECRYPT;
149
150 case SE_ALG_CTR:
151 return SE_CRYPTO_CFG_CTR;
152 case SE_ALG_CBC_MAC:
153 return SE_CRYPTO_CFG_CBC_MAC;
154
155 default:
156 break;
157 }
158
159 return -EINVAL;
160 }
161
static int tegra234_aes_cfg(u32 alg, bool encrypt)
163 {
164 switch (alg) {
165 case SE_ALG_CBC:
166 case SE_ALG_ECB:
167 case SE_ALG_XTS:
168 case SE_ALG_CTR:
169 if (encrypt)
170 return SE_CFG_AES_ENCRYPT;
171 else
172 return SE_CFG_AES_DECRYPT;
173
174 case SE_ALG_GMAC:
175 if (encrypt)
176 return SE_CFG_GMAC_ENCRYPT;
177 else
178 return SE_CFG_GMAC_DECRYPT;
179
180 case SE_ALG_GCM:
181 if (encrypt)
182 return SE_CFG_GCM_ENCRYPT;
183 else
184 return SE_CFG_GCM_DECRYPT;
185
186 case SE_ALG_GCM_FINAL:
187 if (encrypt)
188 return SE_CFG_GCM_FINAL_ENCRYPT;
189 else
190 return SE_CFG_GCM_FINAL_DECRYPT;
191
192 case SE_ALG_CMAC:
193 return SE_CFG_CMAC;
194
195 case SE_ALG_CBC_MAC:
196 return SE_AES_ENC_ALG_AES_ENC |
197 SE_AES_DST_HASH_REG;
198 }
199 return -EINVAL;
200 }
201
static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
				       struct tegra_aes_reqctx *rctx)
204 {
205 unsigned int data_count, res_bits, i = 0, j;
206 struct tegra_se *se = ctx->se;
207 u32 *cpuvaddr = se->cmdbuf->addr;
208 dma_addr_t addr = rctx->datbuf.addr;
209
210 data_count = rctx->len / AES_BLOCK_SIZE;
211 res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;
212
213 /*
214 * Hardware processes data_count + 1 blocks.
215 * Reduce 1 block if there is no residue
216 */
217 if (!res_bits)
218 data_count--;
219
220 if (rctx->iv) {
221 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
222 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
223 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
224 cpuvaddr[i++] = rctx->iv[j];
225 }
226
227 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
228 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
229 SE_LAST_BLOCK_RES_BITS(res_bits);
230
231 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
232 cpuvaddr[i++] = rctx->config;
233 cpuvaddr[i++] = rctx->crypto_config;
234
235 /* Source address setting */
236 cpuvaddr[i++] = lower_32_bits(addr);
237 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);
238
239 /* Destination address setting */
240 cpuvaddr[i++] = lower_32_bits(addr);
241 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
242 SE_ADDR_HI_SZ(rctx->len);
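	/*
	 * Note: source and destination both point at the same bounce buffer
	 * (rctx->datbuf), so the engine transforms the data in place before it
	 * is copied back to the request's scatterlist.
	 */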
243
244 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
245 cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
246 SE_AES_OP_START;
247
248 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
249 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
250 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
251
252 dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
253
254 return i;
255 }
256
static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
258 {
259 struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
260 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
261 struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
262 struct tegra_se *se = ctx->se;
263 unsigned int cmdlen;
264 int ret;
265
266 rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
267 &rctx->datbuf.addr, GFP_KERNEL);
268 if (!rctx->datbuf.buf)
269 return -ENOMEM;
270
271 rctx->datbuf.size = SE_AES_BUFLEN;
272 rctx->iv = (u32 *)req->iv;
273 rctx->len = req->cryptlen;
274
275 /* Pad input to AES Block size */
276 if (ctx->alg != SE_ALG_XTS) {
277 if (rctx->len % AES_BLOCK_SIZE)
278 rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
279 }
280
281 scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
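	/*
	 * The engine is programmed with a single contiguous address/size pair
	 * rather than a scatterlist, so the request data is staged through the
	 * DMA-coherent bounce buffer allocated above and copied back afterwards.
	 */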
282
283 /* Prepare the command and submit for execution */
284 cmdlen = tegra_aes_prep_cmd(ctx, rctx);
285 ret = tegra_se_host1x_submit(se, cmdlen);
286
287 /* Copy the result */
288 tegra_aes_update_iv(req, ctx);
289 scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
290
291 /* Free the buffer */
292 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
293 rctx->datbuf.buf, rctx->datbuf.addr);
294
295 crypto_finalize_skcipher_request(se->engine, req, ret);
296
297 return 0;
298 }
299
static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
301 {
302 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
303 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
304 struct tegra_se_alg *se_alg;
305 const char *algname;
306 int ret;
307
308 se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
309
310 crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
311
312 ctx->ivsize = crypto_skcipher_ivsize(tfm);
313 ctx->se = se_alg->se_dev;
314 ctx->key1_id = 0;
315 ctx->key2_id = 0;
316
317 algname = crypto_tfm_alg_name(&tfm->base);
318 ret = se_algname_to_algid(algname);
319 if (ret < 0) {
320 dev_err(ctx->se->dev, "invalid algorithm\n");
321 return ret;
322 }
323
324 ctx->alg = ret;
325
326 return 0;
327 }
328
static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
330 {
331 struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
332
333 if (ctx->key1_id)
334 tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
335
336 if (ctx->key2_id)
337 tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
338 }
339
static int tegra_aes_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
342 {
343 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
344
345 if (aes_check_keylen(keylen)) {
346 dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
347 return -EINVAL;
348 }
349
350 return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
351 }
352
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
355 {
356 struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
357 u32 len = keylen / 2;
358 int ret;
359
360 ret = xts_verify_key(tfm, key, keylen);
361 if (ret || aes_check_keylen(len)) {
362 dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
363 return -EINVAL;
364 }
365
366 ret = tegra_key_submit(ctx->se, key, len,
367 ctx->alg, &ctx->key1_id);
368 if (ret)
369 return ret;
370
371 return tegra_key_submit(ctx->se, key + len, len,
372 ctx->alg, &ctx->key2_id);
375 }
376
static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
378 {
379 int manifest;
380
381 manifest = SE_KAC_USER_NS;
382
383 switch (alg) {
384 case SE_ALG_CBC:
385 case SE_ALG_ECB:
386 case SE_ALG_CTR:
387 manifest |= SE_KAC_ENC;
388 break;
389 case SE_ALG_XTS:
390 manifest |= SE_KAC_XTS;
391 break;
392 case SE_ALG_GCM:
393 manifest |= SE_KAC_GCM;
394 break;
395 case SE_ALG_CMAC:
396 manifest |= SE_KAC_CMAC;
397 break;
398 case SE_ALG_CBC_MAC:
399 manifest |= SE_KAC_ENC;
400 break;
401 default:
402 return -EINVAL;
403 }
404
405 switch (keylen) {
406 case AES_KEYSIZE_128:
407 manifest |= SE_KAC_SIZE_128;
408 break;
409 case AES_KEYSIZE_192:
410 manifest |= SE_KAC_SIZE_192;
411 break;
412 case AES_KEYSIZE_256:
413 manifest |= SE_KAC_SIZE_256;
414 break;
415 default:
416 return -EINVAL;
417 }
418
419 return manifest;
420 }
421
static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
423
424 {
425 struct crypto_skcipher *tfm;
426 struct tegra_aes_ctx *ctx;
427 struct tegra_aes_reqctx *rctx;
428
429 tfm = crypto_skcipher_reqtfm(req);
430 ctx = crypto_skcipher_ctx(tfm);
431 rctx = skcipher_request_ctx(req);
432
433 if (ctx->alg != SE_ALG_XTS) {
434 if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
435 dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
436 return -EINVAL;
437 }
438 } else if (req->cryptlen < XTS_BLOCK_SIZE) {
439 dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
440 return -EINVAL;
441 }
442
443 if (!req->cryptlen)
444 return 0;
445
446 rctx->encrypt = encrypt;
447 rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
448 rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
449 rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
450
451 if (ctx->key2_id)
452 rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
453
454 return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
455 }
456
static int tegra_aes_encrypt(struct skcipher_request *req)
458 {
459 return tegra_aes_crypt(req, true);
460 }
461
static int tegra_aes_decrypt(struct skcipher_request *req)
463 {
464 return tegra_aes_crypt(req, false);
465 }
466
467 static struct tegra_se_alg tegra_aes_algs[] = {
468 {
469 .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
470 .alg.skcipher.base = {
471 .init = tegra_aes_cra_init,
472 .exit = tegra_aes_cra_exit,
473 .setkey = tegra_aes_setkey,
474 .encrypt = tegra_aes_encrypt,
475 .decrypt = tegra_aes_decrypt,
476 .min_keysize = AES_MIN_KEY_SIZE,
477 .max_keysize = AES_MAX_KEY_SIZE,
478 .ivsize = AES_BLOCK_SIZE,
479 .base = {
480 .cra_name = "cbc(aes)",
481 .cra_driver_name = "cbc-aes-tegra",
482 .cra_priority = 500,
483 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
484 .cra_blocksize = AES_BLOCK_SIZE,
485 .cra_ctxsize = sizeof(struct tegra_aes_ctx),
486 .cra_alignmask = 0xf,
487 .cra_module = THIS_MODULE,
488 },
489 }
490 }, {
491 .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
492 .alg.skcipher.base = {
493 .init = tegra_aes_cra_init,
494 .exit = tegra_aes_cra_exit,
495 .setkey = tegra_aes_setkey,
496 .encrypt = tegra_aes_encrypt,
497 .decrypt = tegra_aes_decrypt,
498 .min_keysize = AES_MIN_KEY_SIZE,
499 .max_keysize = AES_MAX_KEY_SIZE,
500 .base = {
501 .cra_name = "ecb(aes)",
502 .cra_driver_name = "ecb-aes-tegra",
503 .cra_priority = 500,
504 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
505 .cra_blocksize = AES_BLOCK_SIZE,
506 .cra_ctxsize = sizeof(struct tegra_aes_ctx),
507 .cra_alignmask = 0xf,
508 .cra_module = THIS_MODULE,
509 },
510 }
511 }, {
512 .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
513 .alg.skcipher.base = {
514 .init = tegra_aes_cra_init,
515 .exit = tegra_aes_cra_exit,
516 .setkey = tegra_aes_setkey,
517 .encrypt = tegra_aes_encrypt,
518 .decrypt = tegra_aes_decrypt,
519 .min_keysize = AES_MIN_KEY_SIZE,
520 .max_keysize = AES_MAX_KEY_SIZE,
521 .ivsize = AES_BLOCK_SIZE,
522 .base = {
523 .cra_name = "ctr(aes)",
524 .cra_driver_name = "ctr-aes-tegra",
525 .cra_priority = 500,
526 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
527 .cra_blocksize = 1,
528 .cra_ctxsize = sizeof(struct tegra_aes_ctx),
529 .cra_alignmask = 0xf,
530 .cra_module = THIS_MODULE,
531 },
532 }
533 }, {
534 .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
535 .alg.skcipher.base = {
536 .init = tegra_aes_cra_init,
537 .exit = tegra_aes_cra_exit,
538 .setkey = tegra_xts_setkey,
539 .encrypt = tegra_aes_encrypt,
540 .decrypt = tegra_aes_decrypt,
541 .min_keysize = 2 * AES_MIN_KEY_SIZE,
542 .max_keysize = 2 * AES_MAX_KEY_SIZE,
543 .ivsize = AES_BLOCK_SIZE,
544 .base = {
545 .cra_name = "xts(aes)",
546 .cra_driver_name = "xts-aes-tegra",
547 .cra_priority = 500,
548 .cra_blocksize = AES_BLOCK_SIZE,
549 .cra_ctxsize = sizeof(struct tegra_aes_ctx),
550 .cra_alignmask = (__alignof__(u64) - 1),
551 .cra_module = THIS_MODULE,
552 },
553 }
554 },
555 };
556
static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
					struct tegra_aead_reqctx *rctx)
559 {
560 unsigned int data_count, res_bits, i = 0;
561 struct tegra_se *se = ctx->se;
562 u32 *cpuvaddr = se->cmdbuf->addr;
563
564 data_count = (rctx->assoclen / AES_BLOCK_SIZE);
565 res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;
566
567 /*
568 * Hardware processes data_count + 1 blocks.
569 * Reduce 1 block if there is no residue
570 */
571 if (!res_bits)
572 data_count--;
573
574 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
575 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
576 SE_LAST_BLOCK_RES_BITS(res_bits);
577
578 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
579 cpuvaddr[i++] = rctx->config;
580 cpuvaddr[i++] = rctx->crypto_config;
581 cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
582 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
583 SE_ADDR_HI_SZ(rctx->assoclen);
584
585 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
586 cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
587 SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
588 SE_AES_OP_START;
589
590 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
591 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
592 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
593
594 return i;
595 }
596
static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
					     struct tegra_aead_reqctx *rctx)
599 {
600 unsigned int data_count, res_bits, i = 0, j;
601 struct tegra_se *se = ctx->se;
602 u32 *cpuvaddr = se->cmdbuf->addr, op;
603
604 data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
605 res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
606 op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
607 SE_AES_OP_LASTBUF | SE_AES_OP_START;
608
609 /*
610 * If there is no assoc data,
611 * this will be the init command
612 */
613 if (!rctx->assoclen)
614 op |= SE_AES_OP_INIT;
615
616 /*
617 * Hardware processes data_count + 1 blocks.
618 * Reduce 1 block if there is no residue
619 */
620 if (!res_bits)
621 data_count--;
622
623 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
624 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
625 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
626 cpuvaddr[i++] = rctx->iv[j];
627
628 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
629 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
630 SE_LAST_BLOCK_RES_BITS(res_bits);
631
632 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
633 cpuvaddr[i++] = rctx->config;
634 cpuvaddr[i++] = rctx->crypto_config;
635
636 /* Source Address */
637 cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
638 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
639 SE_ADDR_HI_SZ(rctx->cryptlen);
640
641 /* Destination Address */
642 cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
643 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
644 SE_ADDR_HI_SZ(rctx->cryptlen);
645
646 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
647 cpuvaddr[i++] = op;
648
649 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
650 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
651 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
652
653 dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
654 return i;
655 }
656
static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
				    struct tegra_aead_reqctx *rctx)
659 {
660 unsigned int i = 0, j;
661 u32 op;
662
663 op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
664 SE_AES_OP_LASTBUF | SE_AES_OP_START;
665
666 /*
667 * Set init for zero sized vector
668 */
669 if (!rctx->assoclen && !rctx->cryptlen)
670 op |= SE_AES_OP_INIT;
671
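	/*
	 * AAD and payload lengths are programmed in bits as 64-bit values;
	 * only the low word is used here, the high word is written as zero.
	 */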
672 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
673 cpuvaddr[i++] = rctx->assoclen * 8;
674 cpuvaddr[i++] = 0;
675
676 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
677 cpuvaddr[i++] = rctx->cryptlen * 8;
678 cpuvaddr[i++] = 0;
679
680 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
681 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
682 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
683 cpuvaddr[i++] = rctx->iv[j];
684
685 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
686 cpuvaddr[i++] = rctx->config;
687 cpuvaddr[i++] = rctx->crypto_config;
688 cpuvaddr[i++] = 0;
689 cpuvaddr[i++] = 0;
690
691 /* Destination Address */
692 cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
693 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
694 SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */
695
696 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
697 cpuvaddr[i++] = op;
698
699 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
700 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
701 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
702
703 dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
704
705 return i;
706 }
707
static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
709 {
710 struct tegra_se *se = ctx->se;
711 unsigned int cmdlen;
712
713 scatterwalk_map_and_copy(rctx->inbuf.buf,
714 rctx->src_sg, 0, rctx->assoclen, 0);
715
716 rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
717 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
718 SE_AES_KEY_INDEX(ctx->key_id);
719
720 cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
721
722 return tegra_se_host1x_submit(se, cmdlen);
723 }
724
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
726 {
727 struct tegra_se *se = ctx->se;
728 int cmdlen, ret;
729
730 scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
731 rctx->assoclen, rctx->cryptlen, 0);
732
733 rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
734 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
735 SE_AES_KEY_INDEX(ctx->key_id);
736
737 /* Prepare command and submit */
738 cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
739 ret = tegra_se_host1x_submit(se, cmdlen);
740 if (ret)
741 return ret;
742
743 /* Copy the result */
744 scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
745 rctx->assoclen, rctx->cryptlen, 1);
746
747 return 0;
748 }
749
static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
751 {
752 struct tegra_se *se = ctx->se;
753 u32 *cpuvaddr = se->cmdbuf->addr;
754 int cmdlen, ret, offset;
755
756 rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
757 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
758 SE_AES_KEY_INDEX(ctx->key_id);
759
760 /* Prepare command and submit */
761 cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
762 ret = tegra_se_host1x_submit(se, cmdlen);
763 if (ret)
764 return ret;
765
766 if (rctx->encrypt) {
767 /* Copy the result */
768 offset = rctx->assoclen + rctx->cryptlen;
769 scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
770 offset, rctx->authsize, 1);
771 }
772
773 return 0;
774 }
775
static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
777 {
778 unsigned int offset;
779 u8 mac[16];
780
781 offset = rctx->assoclen + rctx->cryptlen;
782 scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
783
784 if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
785 return -EBADMSG;
786
787 return 0;
788 }
789
static inline int tegra_ccm_check_iv(const u8 *iv)
791 {
792 /* iv[0] gives value of q-1
793 * 2 <= q <= 8 as per NIST 800-38C notation
794 * 2 <= L <= 8, so 1 <= L' <= 7. as per rfc 3610 notation
795 */
796 if (iv[0] < 1 || iv[0] > 7) {
797 pr_debug("ccm_check_iv failed %d\n", iv[0]);
798 return -EINVAL;
799 }
800
801 return 0;
802 }
803
static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
					  struct tegra_aead_reqctx *rctx)
806 {
807 unsigned int data_count, i = 0;
808 struct tegra_se *se = ctx->se;
809 u32 *cpuvaddr = se->cmdbuf->addr;
810
811 data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
812
813 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
814 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);
815
816 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
817 cpuvaddr[i++] = rctx->config;
818 cpuvaddr[i++] = rctx->crypto_config;
819
820 cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
821 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
822 SE_ADDR_HI_SZ(rctx->inbuf.size);
823
824 cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
825 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
826 SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */
827
828 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
829 cpuvaddr[i++] = SE_AES_OP_WRSTALL |
830 SE_AES_OP_LASTBUF | SE_AES_OP_START;
831
832 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
833 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
834 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
835
836 return i;
837 }
838
static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
				       struct tegra_aead_reqctx *rctx)
841 {
842 unsigned int i = 0, j;
843 struct tegra_se *se = ctx->se;
844 u32 *cpuvaddr = se->cmdbuf->addr;
845
846 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
847 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
848 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
849 cpuvaddr[i++] = rctx->iv[j];
850
851 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
852 cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
853 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
854 cpuvaddr[i++] = rctx->config;
855 cpuvaddr[i++] = rctx->crypto_config;
856
857 /* Source address setting */
858 cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
859 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
860 SE_ADDR_HI_SZ(rctx->inbuf.size);
861
862 /* Destination address setting */
863 cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
864 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
865 SE_ADDR_HI_SZ(rctx->inbuf.size);
866
867 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
868 cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
869 SE_AES_OP_START;
870
871 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
872 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
873 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
874
875 dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
876 rctx->config, rctx->crypto_config);
877
878 return i;
879 }
880
static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
882 {
883 struct tegra_se *se = ctx->se;
884 int cmdlen;
885
886 rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
887 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
888 rctx->encrypt) |
889 SE_AES_KEY_INDEX(ctx->key_id);
890
891 /* Prepare command and submit */
892 cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
893
894 return tegra_se_host1x_submit(se, cmdlen);
895 }
896
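/*
 * Encode the message length into the trailing @csize bytes of the first
 * CCM block (B0) as a big-endian integer, as specified by NIST SP 800-38C.
 * Illustrative example: msglen = 0x0102 with csize = 3 leaves the block
 * ending in 00 01 02.
 */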
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
898 {
899 __be32 data;
900
901 memset(block, 0, csize);
902 block += csize;
903
904 if (csize >= 4)
905 csize = 4;
906 else if (msglen > (1 << (8 * csize)))
907 return -EOVERFLOW;
908
909 data = cpu_to_be32(msglen);
910 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
911
912 return 0;
913 }
914
static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
916 {
917 unsigned int q, t;
918 u8 *q_ptr, *iv = (u8 *)rctx->iv;
919
920 memcpy(nonce, rctx->iv, 16);
921
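	/*
	 * B0 layout per NIST SP 800-38C: Flags (1 byte) || N (15 - q bytes) ||
	 * Q (q bytes), where q = iv[0] + 1 and Q carries the message length.
	 */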
922 /*** 1. Prepare Flags Octet ***/
923
924 /* Encode t (mac length) */
925 t = rctx->authsize;
926 nonce[0] |= (((t - 2) / 2) << 3);
927
928 /* Adata */
929 if (rctx->assoclen)
930 nonce[0] |= (1 << 6);
931
932 /*** Encode Q - message length ***/
933 q = iv[0] + 1;
934 q_ptr = nonce + 16 - q;
935
936 return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
937 }
938
static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
940 {
941 int len = 0;
942
943 /* add control info for associated data
944 * RFC 3610 and NIST Special Publication 800-38C
945 */
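	/*
	 * Associated-data lengths below 0xff00 use a 2-byte big-endian
	 * encoding; larger values use the 6-byte form 0xfffe || be32(a).
	 */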
946 if (a < 65280) {
947 *(__be16 *)adata = cpu_to_be16(a);
948 len = 2;
949 } else {
950 *(__be16 *)adata = cpu_to_be16(0xfffe);
951 *(__be32 *)&adata[2] = cpu_to_be32(a);
952 len = 6;
953 }
954
955 return len;
956 }
957
static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
959 {
960 unsigned int padlen = 16 - (len % 16);
961 u8 padding[16] = {0};
962
963 if (padlen == 16)
964 return 0;
965
966 memcpy(buf, padding, padlen);
967
968 return padlen;
969 }
970
static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
972 {
973 unsigned int alen = 0, offset = 0;
974 u8 nonce[16], adata[16];
975 int ret;
976
977 ret = tegra_ccm_format_nonce(rctx, nonce);
978 if (ret)
979 return ret;
980
981 memcpy(rctx->inbuf.buf, nonce, 16);
982 offset = 16;
983
984 if (rctx->assoclen) {
985 alen = tegra_ccm_format_adata(adata, rctx->assoclen);
986 memcpy(rctx->inbuf.buf + offset, adata, alen);
987 offset += alen;
988
989 scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
990 rctx->src_sg, 0, rctx->assoclen, 0);
991
992 offset += rctx->assoclen;
993 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
994 rctx->assoclen + alen);
995 }
996
997 return offset;
998 }
999
static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1001 {
1002 u32 result[16];
1003 int i, ret;
1004
1005 /* Read and clear Result */
1006 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1007 result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1008
1009 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1010 writel(0, se->base + se->hw->regs->result + (i * 4));
1011
1012 if (rctx->encrypt) {
1013 memcpy(rctx->authdata, result, rctx->authsize);
1014 } else {
1015 ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
1016 if (ret)
1017 return -EBADMSG;
1018 }
1019
1020 return 0;
1021 }
1022
static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1024 {
1025 /* Copy result */
1026 scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
1027 rctx->assoclen, rctx->cryptlen, 1);
1028
1029 if (rctx->encrypt)
1030 scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
1031 rctx->assoclen + rctx->cryptlen,
1032 rctx->authsize, 1);
1033 else
1034 memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);
1035
1036 return 0;
1037 }
1038
static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1040 {
1041 struct tegra_se *se = ctx->se;
1042 struct scatterlist *sg;
1043 int offset, ret;
1044
1045 offset = tegra_ccm_format_blocks(rctx);
1046 if (offset < 0)
1047 return -EINVAL;
1048
1049 /* Copy plain text to the buffer */
1050 sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;
1051
1052 scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
1053 sg, rctx->assoclen,
1054 rctx->cryptlen, 0);
1055 offset += rctx->cryptlen;
1056 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1057
1058 rctx->inbuf.size = offset;
1059
1060 ret = tegra_ccm_do_cbcmac(ctx, rctx);
1061 if (ret)
1062 return ret;
1063
1064 return tegra_ccm_mac_result(se, rctx);
1065 }
1066
static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1068 {
1069 struct tegra_se *se = ctx->se;
1070 unsigned int cmdlen, offset = 0;
1071 struct scatterlist *sg = rctx->src_sg;
1072 int ret;
1073
1074 rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
1075 rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
1076 SE_AES_KEY_INDEX(ctx->key_id);
1077
1078 /* Copy authdata in the top of buffer for encryption/decryption */
1079 if (rctx->encrypt)
1080 memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
1081 else
1082 scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
1083 rctx->assoclen + rctx->cryptlen,
1084 rctx->authsize, 0);
1085
1086 offset += rctx->authsize;
1087 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);
1088
1089 /* If there is no cryptlen, proceed to submit the task */
1090 if (rctx->cryptlen) {
1091 scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
1092 rctx->assoclen, rctx->cryptlen, 0);
1093 offset += rctx->cryptlen;
1094 offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1095 }
1096
1097 rctx->inbuf.size = offset;
1098
1099 /* Prepare command and submit */
1100 cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
1101 ret = tegra_se_host1x_submit(se, cmdlen);
1102 if (ret)
1103 return ret;
1104
1105 return tegra_ccm_ctr_result(se, rctx);
1106 }
1107
static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
				struct tegra_aead_reqctx *rctx)
1110 {
1111 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1112 u8 *iv = (u8 *)rctx->iv;
1113 int ret, i;
1114
1115 rctx->src_sg = req->src;
1116 rctx->dst_sg = req->dst;
1117 rctx->assoclen = req->assoclen;
1118 rctx->authsize = crypto_aead_authsize(tfm);
1119
1120 memcpy(iv, req->iv, 16);
1121
1122 ret = tegra_ccm_check_iv(iv);
1123 if (ret)
1124 return ret;
1125
1126 /* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
1127 * zero to encrypt auth tag.
1128 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
1129 */
1130 memset(iv + 15 - iv[0], 0, iv[0] + 1);
1131
1132 /* Clear any previous result */
1133 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1134 writel(0, se->base + se->hw->regs->result + (i * 4));
1135
1136 return 0;
1137 }
1138
static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
1140 {
1141 struct aead_request *req = container_of(areq, struct aead_request, base);
1142 struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1143 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1144 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1145 struct tegra_se *se = ctx->se;
1146 int ret;
1147
1148 /* Allocate buffers required */
1149 rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
1150 &rctx->inbuf.addr, GFP_KERNEL);
1151 if (!rctx->inbuf.buf)
1152 return -ENOMEM;
1153
1154 rctx->inbuf.size = SE_AES_BUFLEN;
1155
1156 rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
1157 &rctx->outbuf.addr, GFP_KERNEL);
1158 if (!rctx->outbuf.buf) {
1159 ret = -ENOMEM;
1160 goto outbuf_err;
1161 }
1162
1163 rctx->outbuf.size = SE_AES_BUFLEN;
1164
1165 ret = tegra_ccm_crypt_init(req, se, rctx);
1166 if (ret)
1167 goto out;
1168
1169 if (rctx->encrypt) {
1170 rctx->cryptlen = req->cryptlen;
1171
1172 /* CBC MAC Operation */
1173 ret = tegra_ccm_compute_auth(ctx, rctx);
1174 if (ret)
1175 goto out;
1176
1177 /* CTR operation */
1178 ret = tegra_ccm_do_ctr(ctx, rctx);
1179 if (ret)
1180 goto out;
1181 } else {
1182 rctx->cryptlen = req->cryptlen - ctx->authsize;
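		/*
		 * For decryption, run CTR first to recover the plaintext and
		 * then recompute the CBC-MAC over it so the tag can be
		 * verified in tegra_ccm_mac_result().
		 */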
1185
1186 /* CTR operation */
1187 ret = tegra_ccm_do_ctr(ctx, rctx);
1188 if (ret)
1189 goto out;
1190
1191 /* CBC MAC Operation */
1192 ret = tegra_ccm_compute_auth(ctx, rctx);
1193 if (ret)
1194 goto out;
1195 }
1196
1197 out:
1198 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
1199 rctx->outbuf.buf, rctx->outbuf.addr);
1200
1201 outbuf_err:
1202 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
1203 rctx->inbuf.buf, rctx->inbuf.addr);
1204
1205 crypto_finalize_aead_request(ctx->se->engine, req, ret);
1206
1207 return 0;
1208 }
1209
static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
1211 {
1212 struct aead_request *req = container_of(areq, struct aead_request, base);
1213 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1214 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1215 struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1216 int ret;
1217
1218 /* Allocate buffers required */
1219 rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
1220 &rctx->inbuf.addr, GFP_KERNEL);
1221 if (!rctx->inbuf.buf)
1222 return -ENOMEM;
1223
1224 rctx->inbuf.size = SE_AES_BUFLEN;
1225
1226 rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
1227 &rctx->outbuf.addr, GFP_KERNEL);
1228 if (!rctx->outbuf.buf) {
1229 ret = -ENOMEM;
1230 goto outbuf_err;
1231 }
1232
1233 rctx->outbuf.size = SE_AES_BUFLEN;
1234
1235 rctx->src_sg = req->src;
1236 rctx->dst_sg = req->dst;
1237 rctx->assoclen = req->assoclen;
1238 rctx->authsize = crypto_aead_authsize(tfm);
1239
1240 if (rctx->encrypt)
1241 rctx->cryptlen = req->cryptlen;
1242 else
1243 rctx->cryptlen = req->cryptlen - ctx->authsize;
1244
1245 memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
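	/*
	 * For a 96-bit IV, J0 = IV || 0^31 || 1. Writing (1 << 24) to the last
	 * word stores that trailing counter value of 1 in big-endian byte order.
	 */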
1246 rctx->iv[3] = (1 << 24);
1247
1248 /* If there is associated data perform GMAC operation */
1249 if (rctx->assoclen) {
1250 ret = tegra_gcm_do_gmac(ctx, rctx);
1251 if (ret)
1252 goto out;
1253 }
1254
1255 /* GCM Encryption/Decryption operation */
1256 if (rctx->cryptlen) {
1257 ret = tegra_gcm_do_crypt(ctx, rctx);
1258 if (ret)
1259 goto out;
1260 }
1261
1262 /* GCM_FINAL operation */
1263 ret = tegra_gcm_do_final(ctx, rctx);
1264 if (ret)
1265 goto out;
1266
1267 if (!rctx->encrypt)
1268 ret = tegra_gcm_do_verify(ctx->se, rctx);
1269
1270 out:
1271 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
1272 rctx->outbuf.buf, rctx->outbuf.addr);
1273
1274 outbuf_err:
1275 dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
1276 rctx->inbuf.buf, rctx->inbuf.addr);
1277
	/* Finalize the request and hand back the operation status */
1279 crypto_finalize_aead_request(ctx->se->engine, req, ret);
1280
1281 return 0;
1282 }
1283
static int tegra_aead_cra_init(struct crypto_aead *tfm)
1285 {
1286 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1287 struct aead_alg *alg = crypto_aead_alg(tfm);
1288 struct tegra_se_alg *se_alg;
1289 const char *algname;
1290 int ret;
1291
1292 algname = crypto_tfm_alg_name(&tfm->base);
1293
1294 se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
1295
1296 crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
1297
1298 ctx->se = se_alg->se_dev;
1299 ctx->key_id = 0;
1300
1301 ret = se_algname_to_algid(algname);
1302 if (ret < 0) {
1303 dev_err(ctx->se->dev, "invalid algorithm\n");
1304 return ret;
1305 }
1306
1307 ctx->alg = ret;
1308
1309 return 0;
1310 }
1311
static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1313 {
1314 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1315
1316 switch (authsize) {
1317 case 4:
1318 case 6:
1319 case 8:
1320 case 10:
1321 case 12:
1322 case 14:
1323 case 16:
1324 break;
1325 default:
1326 return -EINVAL;
1327 }
1328
1329 ctx->authsize = authsize;
1330
1331 return 0;
1332 }
1333
static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1335 {
1336 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1337 int ret;
1338
1339 ret = crypto_gcm_check_authsize(authsize);
1340 if (ret)
1341 return ret;
1342
1343 ctx->authsize = authsize;
1344
1345 return 0;
1346 }
1347
static void tegra_aead_cra_exit(struct crypto_aead *tfm)
1349 {
1350 struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
1351
1352 if (ctx->key_id)
1353 tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1354 }
1355
static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
1357 {
1358 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1359 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1360 struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1361
1362 rctx->encrypt = encrypt;
1363
1364 return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
1365 }
1366
static int tegra_aead_encrypt(struct aead_request *req)
1368 {
1369 return tegra_aead_crypt(req, true);
1370 }
1371
static int tegra_aead_decrypt(struct aead_request *req)
1373 {
1374 return tegra_aead_crypt(req, false);
1375 }
1376
static int tegra_aead_setkey(struct crypto_aead *tfm,
			     const u8 *key, u32 keylen)
1379 {
1380 struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1381
1382 if (aes_check_keylen(keylen)) {
1383 dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1384 return -EINVAL;
1385 }
1386
1387 return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1388 }
1389
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
					struct tegra_cmac_reqctx *rctx)
1392 {
1393 unsigned int data_count, res_bits = 0, i = 0, j;
1394 struct tegra_se *se = ctx->se;
1395 u32 *cpuvaddr = se->cmdbuf->addr, op;
1396
1397 data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);
1398
1399 op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;
1400
1401 if (!(rctx->task & SHA_UPDATE)) {
1402 op |= SE_AES_OP_FINAL;
1403 res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
1404 }
1405
1406 if (!res_bits && data_count)
1407 data_count--;
1408
1409 if (rctx->task & SHA_FIRST) {
1410 rctx->task &= ~SHA_FIRST;
1411
1412 cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
1413 cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
1414 /* Load 0 IV */
1415 for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
1416 cpuvaddr[i++] = 0;
1417 }
1418
1419 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
1420 cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
1421 SE_LAST_BLOCK_RES_BITS(res_bits);
1422
1423 cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
1424 cpuvaddr[i++] = rctx->config;
1425 cpuvaddr[i++] = rctx->crypto_config;
1426
1427 /* Source Address */
1428 cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
1429 cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
1430 SE_ADDR_HI_SZ(rctx->datbuf.size);
1431 cpuvaddr[i++] = 0;
1432 cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);
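	/*
	 * The destination address is left at zero; the CMAC result is read
	 * back from the engine's result registers (see tegra_cmac_copy_result())
	 * while the size field still advertises one AES block.
	 */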
1433
1434 cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
1435 cpuvaddr[i++] = op;
1436
1437 cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
1438 cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
1439 host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
1440
1441 return i;
1442 }
1443
static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1445 {
1446 int i;
1447
1448 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1449 rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1450 }
1451
static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1453 {
1454 int i;
1455
1456 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1457 writel(rctx->result[i],
1458 se->base + se->hw->regs->result + (i * 4));
1459 }
1460
static int tegra_cmac_do_update(struct ahash_request *req)
1462 {
1463 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1464 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1465 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1466 struct tegra_se *se = ctx->se;
1467 unsigned int nblks, nresidue, cmdlen;
1468 int ret;
1469
1470 if (!req->nbytes)
1471 return 0;
1472
1473 nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
1474 nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
1475
	/*
	 * Reserve the last block as residue, to be processed in final().
	 */
1479 if (!nresidue && nblks) {
1480 nresidue += rctx->blk_size;
1481 nblks--;
1482 }
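	/*
	 * Example: with blk_size = 16, residue.size = 0 and req->nbytes = 32,
	 * only the first 16 bytes are processed now and the last full block is
	 * kept back so that final() always has data to close the CMAC with.
	 */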
1483
1484 rctx->src_sg = req->src;
1485 rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
1486 rctx->total_len += rctx->datbuf.size;
1487 rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1488 rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
1489
1490 /*
1491 * Keep one block and residue bytes in residue and
1492 * return. The bytes will be processed in final()
1493 */
1494 if (nblks < 1) {
1495 scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
1496 rctx->src_sg, 0, req->nbytes, 0);
1497
1498 rctx->residue.size += req->nbytes;
1499 return 0;
1500 }
1501
1502 /* Copy the previous residue first */
1503 if (rctx->residue.size)
1504 memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1505
1506 scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
1507 rctx->src_sg, 0, req->nbytes - nresidue, 0);
1508
1509 scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
1510 req->nbytes - nresidue, nresidue, 0);
1511
1512 /* Update residue value with the residue after current block */
1513 rctx->residue.size = nresidue;
1514
1515 /*
1516 * If this is not the first 'update' call, paste the previous copied
1517 * intermediate results to the registers so that it gets picked up.
1518 * This is to support the import/export functionality.
1519 */
1520 if (!(rctx->task & SHA_FIRST))
1521 tegra_cmac_paste_result(ctx->se, rctx);
1522
1523 cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1524
1525 ret = tegra_se_host1x_submit(se, cmdlen);
1526 /*
1527 * If this is not the final update, copy the intermediate results
1528 * from the registers so that it can be used in the next 'update'
1529 * call. This is to support the import/export functionality.
1530 */
1531 if (!(rctx->task & SHA_FINAL))
1532 tegra_cmac_copy_result(ctx->se, rctx);
1533
1534 return ret;
1535 }
1536
static int tegra_cmac_do_final(struct ahash_request *req)
1538 {
1539 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1540 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1541 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1542 struct tegra_se *se = ctx->se;
1543 u32 *result = (u32 *)req->result;
1544 int ret = 0, i, cmdlen;
1545
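	/* An empty message (no data ever seen) is digested via the software fallback */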
	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
		ret = crypto_shash_tfm_digest(ctx->fallback_tfm,
					      rctx->datbuf.buf, 0, req->result);
		goto out;
	}
1550
1551 memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1552 rctx->datbuf.size = rctx->residue.size;
1553 rctx->total_len += rctx->residue.size;
1554 rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1555
1556 /* Prepare command and submit */
1557 cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1558 ret = tegra_se_host1x_submit(se, cmdlen);
1559 if (ret)
1560 goto out;
1561
1562 /* Read and clear Result register */
1563 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1564 result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1565
1566 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1567 writel(0, se->base + se->hw->regs->result + (i * 4));
1568
1569 out:
1570 dma_free_coherent(se->dev, SE_SHA_BUFLEN,
1571 rctx->datbuf.buf, rctx->datbuf.addr);
1572 dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
1573 rctx->residue.buf, rctx->residue.addr);
1574 return ret;
1575 }
1576
static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
1578 {
1579 struct ahash_request *req = ahash_request_cast(areq);
1580 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1581 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1582 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1583 struct tegra_se *se = ctx->se;
1584 int ret;
1585
1586 if (rctx->task & SHA_UPDATE) {
1587 ret = tegra_cmac_do_update(req);
1588 rctx->task &= ~SHA_UPDATE;
1589 }
1590
1591 if (rctx->task & SHA_FINAL) {
1592 ret = tegra_cmac_do_final(req);
1593 rctx->task &= ~SHA_FINAL;
1594 }
1595
1596 crypto_finalize_hash_request(se->engine, req, ret);
1597
1598 return 0;
1599 }
1600
static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
				     const char *algname)
1603 {
1604 unsigned int statesize;
1605
1606 ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
1607
1608 if (IS_ERR(ctx->fallback_tfm)) {
1609 dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
1610 ctx->fallback_tfm = NULL;
1611 return;
1612 }
1613
1614 statesize = crypto_shash_statesize(ctx->fallback_tfm);
1615
1616 if (statesize > sizeof(struct tegra_cmac_reqctx))
1617 crypto_ahash_set_statesize(tfm, statesize);
1618 }
1619
static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
1621 {
1622 struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1623 struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
1624 struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
1625 struct tegra_se_alg *se_alg;
1626 const char *algname;
1627 int ret;
1628
1629 algname = crypto_tfm_alg_name(tfm);
1630 se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
1631
1632 crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));
1633
1634 ctx->se = se_alg->se_dev;
1635 ctx->key_id = 0;
1636
1637 ret = se_algname_to_algid(algname);
1638 if (ret < 0) {
1639 dev_err(ctx->se->dev, "invalid algorithm\n");
1640 return ret;
1641 }
1642
1643 ctx->alg = ret;
1644
1645 tegra_cmac_init_fallback(ahash_tfm, ctx, algname);
1646
1647 return 0;
1648 }
1649
static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
1651 {
1652 struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1653
1654 if (ctx->fallback_tfm)
1655 crypto_free_shash(ctx->fallback_tfm);
1656
1657 tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1658 }
1659
static int tegra_cmac_init(struct ahash_request *req)
1661 {
1662 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1663 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1664 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1665 struct tegra_se *se = ctx->se;
1666 int i;
1667
1668 rctx->total_len = 0;
1669 rctx->datbuf.size = 0;
1670 rctx->residue.size = 0;
1671 rctx->task = SHA_FIRST;
1672 rctx->blk_size = crypto_ahash_blocksize(tfm);
1673
1674 rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
1675 &rctx->residue.addr, GFP_KERNEL);
1676 if (!rctx->residue.buf)
1677 goto resbuf_fail;
1678
1679 rctx->residue.size = 0;
1680
1681 rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
1682 &rctx->datbuf.addr, GFP_KERNEL);
1683 if (!rctx->datbuf.buf)
1684 goto datbuf_fail;
1685
1686 rctx->datbuf.size = 0;
1687
1688 /* Clear any previous result */
1689 for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1690 writel(0, se->base + se->hw->regs->result + (i * 4));
1691
1692 return 0;
1693
1694 datbuf_fail:
	dma_free_coherent(se->dev, rctx->blk_size * 2, rctx->residue.buf,
			  rctx->residue.addr);
1697 resbuf_fail:
1698 return -ENOMEM;
1699 }
1700
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
1703 {
1704 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1705
1706 if (aes_check_keylen(keylen)) {
1707 dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1708 return -EINVAL;
1709 }
1710
1711 if (ctx->fallback_tfm)
1712 crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
1713
1714 return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1715 }
1716
static int tegra_cmac_update(struct ahash_request *req)
1718 {
1719 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1720 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1721 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1722
1723 rctx->task |= SHA_UPDATE;
1724
1725 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1726 }
1727
static int tegra_cmac_final(struct ahash_request *req)
1729 {
1730 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1731 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1732 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1733
1734 rctx->task |= SHA_FINAL;
1735
1736 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1737 }
1738
static int tegra_cmac_finup(struct ahash_request *req)
1740 {
1741 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1742 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1743 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1744
1745 rctx->task |= SHA_UPDATE | SHA_FINAL;
1746
1747 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1748 }
1749
static int tegra_cmac_digest(struct ahash_request *req)
1751 {
1752 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1753 struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1754 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1755
1756 tegra_cmac_init(req);
1757 rctx->task |= SHA_UPDATE | SHA_FINAL;
1758
1759 return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1760 }
1761
static int tegra_cmac_export(struct ahash_request *req, void *out)
1763 {
1764 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1765
1766 memcpy(out, rctx, sizeof(*rctx));
1767
1768 return 0;
1769 }
1770
static int tegra_cmac_import(struct ahash_request *req, const void *in)
1772 {
1773 struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1774
1775 memcpy(rctx, in, sizeof(*rctx));
1776
1777 return 0;
1778 }
1779
1780 static struct tegra_se_alg tegra_aead_algs[] = {
1781 {
1782 .alg.aead.op.do_one_request = tegra_gcm_do_one_req,
1783 .alg.aead.base = {
1784 .init = tegra_aead_cra_init,
1785 .exit = tegra_aead_cra_exit,
1786 .setkey = tegra_aead_setkey,
1787 .setauthsize = tegra_gcm_setauthsize,
1788 .encrypt = tegra_aead_encrypt,
1789 .decrypt = tegra_aead_decrypt,
1790 .maxauthsize = AES_BLOCK_SIZE,
1791 .ivsize = GCM_AES_IV_SIZE,
1792 .base = {
1793 .cra_name = "gcm(aes)",
1794 .cra_driver_name = "gcm-aes-tegra",
1795 .cra_priority = 500,
1796 .cra_blocksize = 1,
1797 .cra_ctxsize = sizeof(struct tegra_aead_ctx),
1798 .cra_alignmask = 0xf,
1799 .cra_module = THIS_MODULE,
1800 },
1801 }
1802 }, {
1803 .alg.aead.op.do_one_request = tegra_ccm_do_one_req,
1804 .alg.aead.base = {
1805 .init = tegra_aead_cra_init,
1806 .exit = tegra_aead_cra_exit,
1807 .setkey = tegra_aead_setkey,
1808 .setauthsize = tegra_ccm_setauthsize,
1809 .encrypt = tegra_aead_encrypt,
1810 .decrypt = tegra_aead_decrypt,
1811 .maxauthsize = AES_BLOCK_SIZE,
1812 .ivsize = AES_BLOCK_SIZE,
1813 .chunksize = AES_BLOCK_SIZE,
1814 .base = {
1815 .cra_name = "ccm(aes)",
1816 .cra_driver_name = "ccm-aes-tegra",
1817 .cra_priority = 500,
1818 .cra_blocksize = 1,
1819 .cra_ctxsize = sizeof(struct tegra_aead_ctx),
1820 .cra_alignmask = 0xf,
1821 .cra_module = THIS_MODULE,
1822 },
1823 }
1824 }
1825 };
1826
1827 static struct tegra_se_alg tegra_cmac_algs[] = {
1828 {
1829 .alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
1830 .alg.ahash.base = {
1831 .init = tegra_cmac_init,
1832 .setkey = tegra_cmac_setkey,
1833 .update = tegra_cmac_update,
1834 .final = tegra_cmac_final,
1835 .finup = tegra_cmac_finup,
1836 .digest = tegra_cmac_digest,
1837 .export = tegra_cmac_export,
1838 .import = tegra_cmac_import,
1839 .halg.digestsize = AES_BLOCK_SIZE,
1840 .halg.statesize = sizeof(struct tegra_cmac_reqctx),
1841 .halg.base = {
1842 .cra_name = "cmac(aes)",
1843 .cra_driver_name = "tegra-se-cmac",
1844 .cra_priority = 300,
1845 .cra_flags = CRYPTO_ALG_TYPE_AHASH,
1846 .cra_blocksize = AES_BLOCK_SIZE,
1847 .cra_ctxsize = sizeof(struct tegra_cmac_ctx),
1848 .cra_alignmask = 0,
1849 .cra_module = THIS_MODULE,
1850 .cra_init = tegra_cmac_cra_init,
1851 .cra_exit = tegra_cmac_cra_exit,
1852 }
1853 }
1854 }
1855 };
1856
int tegra_init_aes(struct tegra_se *se)
1858 {
1859 struct aead_engine_alg *aead_alg;
1860 struct ahash_engine_alg *ahash_alg;
1861 struct skcipher_engine_alg *sk_alg;
1862 int i, ret;
1863
1864 se->manifest = tegra_aes_kac_manifest;
1865
1866 for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
1867 sk_alg = &tegra_aes_algs[i].alg.skcipher;
1868 tegra_aes_algs[i].se_dev = se;
1869
1870 ret = crypto_engine_register_skcipher(sk_alg);
1871 if (ret) {
1872 dev_err(se->dev, "failed to register %s\n",
1873 sk_alg->base.base.cra_name);
1874 goto err_aes;
1875 }
1876 }
1877
1878 for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
1879 aead_alg = &tegra_aead_algs[i].alg.aead;
1880 tegra_aead_algs[i].se_dev = se;
1881
1882 ret = crypto_engine_register_aead(aead_alg);
1883 if (ret) {
1884 dev_err(se->dev, "failed to register %s\n",
1885 aead_alg->base.base.cra_name);
1886 goto err_aead;
1887 }
1888 }
1889
1890 for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
1891 ahash_alg = &tegra_cmac_algs[i].alg.ahash;
1892 tegra_cmac_algs[i].se_dev = se;
1893
1894 ret = crypto_engine_register_ahash(ahash_alg);
1895 if (ret) {
1896 dev_err(se->dev, "failed to register %s\n",
1897 ahash_alg->base.halg.base.cra_name);
1898 goto err_cmac;
1899 }
1900 }
1901
1902 return 0;
1903
1904 err_cmac:
1905 while (i--)
1906 crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
1907
1908 i = ARRAY_SIZE(tegra_aead_algs);
1909 err_aead:
1910 while (i--)
1911 crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
1912
1913 i = ARRAY_SIZE(tegra_aes_algs);
1914 err_aes:
1915 while (i--)
1916 crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
1917
1918 return ret;
1919 }
1920
void tegra_deinit_aes(struct tegra_se *se)
1922 {
1923 int i;
1924
1925 for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
1926 crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
1927
1928 for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
1929 crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
1930
1931 for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
1932 crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
1933 }
1934