xref: /linux/drivers/crypto/tegra/tegra-se-aes.c (revision e31fd36da9c41f9f664e51a35860e9f606e81ef4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 /*
4  * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/module.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12 
13 #include <crypto/aead.h>
14 #include <crypto/aes.h>
15 #include <crypto/engine.h>
16 #include <crypto/gcm.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/xts.h>
19 #include <crypto/internal/aead.h>
20 #include <crypto/internal/hash.h>
21 #include <crypto/internal/skcipher.h>
22 
23 #include "tegra-se.h"
24 
25 struct tegra_aes_ctx {
26 	struct tegra_se *se;
27 	u32 alg;
28 	u32 ivsize;
29 	u32 key1_id;
30 	u32 key2_id;
31 	u32 keylen;
32 	u8 key1[AES_MAX_KEY_SIZE];
33 	u8 key2[AES_MAX_KEY_SIZE];
34 };
35 
36 struct tegra_aes_reqctx {
37 	struct tegra_se_datbuf datbuf;
38 	bool encrypt;
39 	u32 config;
40 	u32 crypto_config;
41 	u32 len;
42 	u32 *iv;
43 };
44 
45 struct tegra_aead_ctx {
46 	struct tegra_se *se;
47 	unsigned int authsize;
48 	u32 alg;
49 	u32 key_id;
50 	u32 keylen;
51 	u8 key[AES_MAX_KEY_SIZE];
52 };
53 
54 struct tegra_aead_reqctx {
55 	struct tegra_se_datbuf inbuf;
56 	struct tegra_se_datbuf outbuf;
57 	struct scatterlist *src_sg;
58 	struct scatterlist *dst_sg;
59 	unsigned int assoclen;
60 	unsigned int cryptlen;
61 	unsigned int authsize;
62 	bool encrypt;
63 	u32 crypto_config;
64 	u32 config;
65 	u32 key_id;
66 	u32 iv[4];
67 	u8 authdata[16];
68 };
69 
70 struct tegra_cmac_ctx {
71 	struct tegra_se *se;
72 	unsigned int alg;
73 	u32 key_id;
74 	u32 keylen;
75 	u8 key[AES_MAX_KEY_SIZE];
76 	struct crypto_shash *fallback_tfm;
77 };
78 
79 struct tegra_cmac_reqctx {
80 	struct scatterlist *src_sg;
81 	struct tegra_se_datbuf datbuf;
82 	struct tegra_se_datbuf residue;
83 	unsigned int total_len;
84 	unsigned int blk_size;
85 	unsigned int task;
86 	u32 crypto_config;
87 	u32 config;
88 	u32 key_id;
89 	u32 *iv;
90 	u32 result[CMAC_RESULT_REG_COUNT];
91 };
92 
93 /* increment counter (128-bit int) */
94 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
95 {
96 	do {
97 		--bits;
98 		nums += counter[bits];
99 		counter[bits] = nums & 0xff;
100 		nums >>= 8;
101 	} while (bits && nums);
102 }
103 
104 static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
105 {
106 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
107 	unsigned int offset;
108 
109 	offset = req->cryptlen - ctx->ivsize;
110 
111 	if (rctx->encrypt)
112 		memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
113 	else
114 		scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
115 }
116 
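/*
 * Propagate the IV back into the request once the operation completes:
 * for CBC this is the last ciphertext block (taken from the output
 * buffer on encryption, from the source on decryption), for CTR the
 * counter is advanced by the number of blocks processed.
 */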
117 static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
118 {
119 	int num;
120 
121 	if (ctx->alg == SE_ALG_CBC) {
122 		tegra_cbc_iv_copyback(req, ctx);
123 	} else if (ctx->alg == SE_ALG_CTR) {
124 		num = req->cryptlen / ctx->ivsize;
125 		if (req->cryptlen % ctx->ivsize)
126 			num++;
127 
128 		ctr_iv_inc(req->iv, ctx->ivsize, num);
129 	}
130 }
131 
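/*
 * Translate the driver's algorithm ID into the Tegra234 SE register
 * words: tegra234_aes_crypto_cfg() returns the crypto_config value for
 * the requested direction and tegra234_aes_cfg() the config value.
 */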
132 static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
133 {
134 	switch (alg) {
135 	case SE_ALG_CMAC:
136 	case SE_ALG_GMAC:
137 	case SE_ALG_GCM:
138 	case SE_ALG_GCM_FINAL:
139 		return 0;
140 	case SE_ALG_CBC:
141 		if (encrypt)
142 			return SE_CRYPTO_CFG_CBC_ENCRYPT;
143 		else
144 			return SE_CRYPTO_CFG_CBC_DECRYPT;
145 	case SE_ALG_ECB:
146 		if (encrypt)
147 			return SE_CRYPTO_CFG_ECB_ENCRYPT;
148 		else
149 			return SE_CRYPTO_CFG_ECB_DECRYPT;
150 	case SE_ALG_XTS:
151 		if (encrypt)
152 			return SE_CRYPTO_CFG_XTS_ENCRYPT;
153 		else
154 			return SE_CRYPTO_CFG_XTS_DECRYPT;
155 
156 	case SE_ALG_CTR:
157 		return SE_CRYPTO_CFG_CTR;
158 	case SE_ALG_CBC_MAC:
159 		return SE_CRYPTO_CFG_CBC_MAC;
160 
161 	default:
162 		break;
163 	}
164 
165 	return -EINVAL;
166 }
167 
168 static int tegra234_aes_cfg(u32 alg, bool encrypt)
169 {
170 	switch (alg) {
171 	case SE_ALG_CBC:
172 	case SE_ALG_ECB:
173 	case SE_ALG_XTS:
174 	case SE_ALG_CTR:
175 		if (encrypt)
176 			return SE_CFG_AES_ENCRYPT;
177 		else
178 			return SE_CFG_AES_DECRYPT;
179 
180 	case SE_ALG_GMAC:
181 		if (encrypt)
182 			return SE_CFG_GMAC_ENCRYPT;
183 		else
184 			return SE_CFG_GMAC_DECRYPT;
185 
186 	case SE_ALG_GCM:
187 		if (encrypt)
188 			return SE_CFG_GCM_ENCRYPT;
189 		else
190 			return SE_CFG_GCM_DECRYPT;
191 
192 	case SE_ALG_GCM_FINAL:
193 		if (encrypt)
194 			return SE_CFG_GCM_FINAL_ENCRYPT;
195 		else
196 			return SE_CFG_GCM_FINAL_DECRYPT;
197 
198 	case SE_ALG_CMAC:
199 		return SE_CFG_CMAC;
200 
201 	case SE_ALG_CBC_MAC:
202 		return SE_AES_ENC_ALG_AES_ENC |
203 		       SE_AES_DST_HASH_REG;
204 	}
205 	return -EINVAL;
206 }
207 
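/*
 * Build the host1x command stream for a single AES operation: load the
 * IV (if any) into the linear counter registers, program the last-block
 * count and residual bits, the config and crypto_config words, and the
 * source and destination DMA addresses (in-place on the bounce buffer),
 * then start the operation and request a syncpoint increment.
 * Returns the number of 32-bit words written to the command buffer.
 */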
208 static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
209 				       struct tegra_aes_reqctx *rctx)
210 {
211 	unsigned int data_count, res_bits, i = 0, j;
212 	struct tegra_se *se = ctx->se;
213 	u32 *cpuvaddr = se->cmdbuf->addr;
214 	dma_addr_t addr = rctx->datbuf.addr;
215 
216 	data_count = rctx->len / AES_BLOCK_SIZE;
217 	res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;
218 
219 	/*
220 	 * The hardware processes data_count + 1 blocks, so subtract
221 	 * one block when there is no residue.
222 	 */
223 	if (!res_bits)
224 		data_count--;
225 
226 	if (rctx->iv) {
227 		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
228 		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
229 		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
230 			cpuvaddr[i++] = rctx->iv[j];
231 	}
232 
233 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
234 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
235 			SE_LAST_BLOCK_RES_BITS(res_bits);
236 
237 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
238 	cpuvaddr[i++] = rctx->config;
239 	cpuvaddr[i++] = rctx->crypto_config;
240 
241 	/* Source address setting */
242 	cpuvaddr[i++] = lower_32_bits(addr);
243 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);
244 
245 	/* Destination address setting */
246 	cpuvaddr[i++] = lower_32_bits(addr);
247 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
248 			SE_ADDR_HI_SZ(rctx->len);
249 
250 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
251 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
252 			SE_AES_OP_START;
253 
254 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
255 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
256 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
257 
258 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
259 
260 	return i;
261 }
262 
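/*
 * Engine callback for a queued skcipher request: copy the source data
 * into a DMA-coherent bounce buffer, program the key (falling back to a
 * reserved keyslot if setkey could not claim one), submit the command
 * stream, then update the IV and copy the result back before freeing
 * the buffer and releasing any reserved keyslots.
 */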
263 static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
264 {
265 	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
266 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
267 	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
268 	struct tegra_se *se = ctx->se;
269 	unsigned int cmdlen, key1_id, key2_id;
270 	int ret;
271 
272 	rctx->iv = (u32 *)req->iv;
273 	rctx->len = req->cryptlen;
274 	key1_id = ctx->key1_id;
275 	key2_id = ctx->key2_id;
276 
277 	/* Pad input to AES Block size */
278 	if (ctx->alg != SE_ALG_XTS) {
279 		if (rctx->len % AES_BLOCK_SIZE)
280 			rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
281 	}
282 
283 	rctx->datbuf.size = rctx->len;
284 	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
285 					      &rctx->datbuf.addr, GFP_KERNEL);
286 	if (!rctx->datbuf.buf) {
287 		ret = -ENOMEM;
288 		goto out_finalize;
289 	}
290 
291 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
292 
293 	rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
294 	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
295 
296 	if (!key1_id) {
297 		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
298 						    ctx->keylen, ctx->alg, &key1_id);
299 		if (ret)
300 			goto out;
301 	}
302 
303 	rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
304 
305 	if (ctx->alg == SE_ALG_XTS) {
306 		if (!key2_id) {
307 			ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
308 							    ctx->keylen, ctx->alg, &key2_id);
309 			if (ret)
310 				goto out;
311 		}
312 
313 		rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
314 	}
315 
316 	/* Prepare the command and submit for execution */
317 	cmdlen = tegra_aes_prep_cmd(ctx, rctx);
318 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
	if (ret)
		goto out;
319 
320 	/* Update the IV and copy the result back to the request */
321 	tegra_aes_update_iv(req, ctx);
322 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
323 
324 out:
325 	/* Free the buffer */
326 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
327 			  rctx->datbuf.buf, rctx->datbuf.addr);
328 
329 	if (tegra_key_is_reserved(key1_id))
330 		tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
331 
332 	if (tegra_key_is_reserved(key2_id))
333 		tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
334 
335 out_finalize:
336 	crypto_finalize_skcipher_request(se->engine, req, ret);
337 
338 	return 0;
339 }
340 
341 static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
342 {
343 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
344 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
345 	struct tegra_se_alg *se_alg;
346 	const char *algname;
347 	int ret;
348 
349 	se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
350 
351 	crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
352 
353 	ctx->ivsize = crypto_skcipher_ivsize(tfm);
354 	ctx->se = se_alg->se_dev;
355 	ctx->key1_id = 0;
356 	ctx->key2_id = 0;
357 	ctx->keylen = 0;
358 
359 	algname = crypto_tfm_alg_name(&tfm->base);
360 	ret = se_algname_to_algid(algname);
361 	if (ret < 0) {
362 		dev_err(ctx->se->dev, "invalid algorithm\n");
363 		return ret;
364 	}
365 
366 	ctx->alg = ret;
367 
368 	return 0;
369 }
370 
371 static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
372 {
373 	struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
374 
375 	if (ctx->key1_id)
376 		tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
377 
378 	if (ctx->key2_id)
379 		tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
380 }
381 
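/*
 * If tegra_key_submit() cannot place the key into a hardware keyslot at
 * setkey time, the key is cached in the transform context and loaded
 * into a reserved keyslot for the duration of each request instead
 * (see tegra_aes_do_one_req()).
 */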
382 static int tegra_aes_setkey(struct crypto_skcipher *tfm,
383 			    const u8 *key, u32 keylen)
384 {
385 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
386 	int ret;
387 
388 	if (aes_check_keylen(keylen)) {
389 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
390 		return -EINVAL;
391 	}
392 
393 	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
394 	if (ret) {
395 		ctx->keylen = keylen;
396 		memcpy(ctx->key1, key, keylen);
397 	}
398 
399 	return 0;
400 }
401 
402 static int tegra_xts_setkey(struct crypto_skcipher *tfm,
403 			    const u8 *key, u32 keylen)
404 {
405 	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
406 	u32 len = keylen / 2;
407 	int ret;
408 
409 	ret = xts_verify_key(tfm, key, keylen);
410 	if (ret || aes_check_keylen(len)) {
411 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
412 		return -EINVAL;
413 	}
414 
415 	ret = tegra_key_submit(ctx->se, key, len,
416 			       ctx->alg, &ctx->key1_id);
417 	if (ret) {
418 		ctx->keylen = len;
419 		memcpy(ctx->key1, key, len);
420 	}
421 
422 	ret = tegra_key_submit(ctx->se, key + len, len,
423 			       ctx->alg, &ctx->key2_id);
424 	if (ret) {
425 		ctx->keylen = len;
426 		memcpy(ctx->key2, key + len, len);
427 	}
428 
429 	return 0;
430 }
431 
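/*
 * Build the key access control (KAC) manifest word for a key, encoding
 * the user, the purpose derived from the algorithm and the key size.
 * This is installed as the se->manifest callback in tegra_init_aes().
 */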
432 static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
433 {
434 	int manifest;
435 
436 	manifest = SE_KAC_USER_NS;
437 
438 	switch (alg) {
439 	case SE_ALG_CBC:
440 	case SE_ALG_ECB:
441 	case SE_ALG_CTR:
442 		manifest |= SE_KAC_ENC;
443 		break;
444 	case SE_ALG_XTS:
445 		manifest |= SE_KAC_XTS;
446 		break;
447 	case SE_ALG_GCM:
448 		manifest |= SE_KAC_GCM;
449 		break;
450 	case SE_ALG_CMAC:
451 		manifest |= SE_KAC_CMAC;
452 		break;
453 	case SE_ALG_CBC_MAC:
454 		manifest |= SE_KAC_ENC;
455 		break;
456 	default:
457 		return -EINVAL;
458 	}
459 
460 	switch (keylen) {
461 	case AES_KEYSIZE_128:
462 		manifest |= SE_KAC_SIZE_128;
463 		break;
464 	case AES_KEYSIZE_192:
465 		manifest |= SE_KAC_SIZE_192;
466 		break;
467 	case AES_KEYSIZE_256:
468 		manifest |= SE_KAC_SIZE_256;
469 		break;
470 	default:
471 		return -EINVAL;
472 	}
473 
474 	return manifest;
475 }
476 
477 static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
479 {
480 	struct crypto_skcipher *tfm;
481 	struct tegra_aes_ctx *ctx;
482 	struct tegra_aes_reqctx *rctx;
483 
484 	tfm = crypto_skcipher_reqtfm(req);
485 	ctx  = crypto_skcipher_ctx(tfm);
486 	rctx = skcipher_request_ctx(req);
487 
488 	if (ctx->alg != SE_ALG_XTS) {
489 		if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
490 			dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
491 			return -EINVAL;
492 		}
493 	} else if (req->cryptlen < XTS_BLOCK_SIZE) {
494 		dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
495 		return -EINVAL;
496 	}
497 
498 	if (!req->cryptlen)
499 		return 0;
500 
501 	if (ctx->alg == SE_ALG_ECB)
502 		req->iv = NULL;
503 
504 	rctx->encrypt = encrypt;
505 
506 	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
507 }
508 
509 static int tegra_aes_encrypt(struct skcipher_request *req)
510 {
511 	return tegra_aes_crypt(req, true);
512 }
513 
514 static int tegra_aes_decrypt(struct skcipher_request *req)
515 {
516 	return tegra_aes_crypt(req, false);
517 }
518 
519 static struct tegra_se_alg tegra_aes_algs[] = {
520 	{
521 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
522 		.alg.skcipher.base = {
523 			.init = tegra_aes_cra_init,
524 			.exit = tegra_aes_cra_exit,
525 			.setkey	= tegra_aes_setkey,
526 			.encrypt = tegra_aes_encrypt,
527 			.decrypt = tegra_aes_decrypt,
528 			.min_keysize = AES_MIN_KEY_SIZE,
529 			.max_keysize = AES_MAX_KEY_SIZE,
530 			.ivsize	= AES_BLOCK_SIZE,
531 			.base = {
532 				.cra_name = "cbc(aes)",
533 				.cra_driver_name = "cbc-aes-tegra",
534 				.cra_priority = 500,
535 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
536 				.cra_blocksize = AES_BLOCK_SIZE,
537 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
538 				.cra_alignmask = 0xf,
539 				.cra_module = THIS_MODULE,
540 			},
541 		}
542 	}, {
543 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
544 		.alg.skcipher.base = {
545 			.init = tegra_aes_cra_init,
546 			.exit = tegra_aes_cra_exit,
547 			.setkey	= tegra_aes_setkey,
548 			.encrypt = tegra_aes_encrypt,
549 			.decrypt = tegra_aes_decrypt,
550 			.min_keysize = AES_MIN_KEY_SIZE,
551 			.max_keysize = AES_MAX_KEY_SIZE,
552 			.base = {
553 				.cra_name = "ecb(aes)",
554 				.cra_driver_name = "ecb-aes-tegra",
555 				.cra_priority = 500,
556 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
557 				.cra_blocksize = AES_BLOCK_SIZE,
558 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
559 				.cra_alignmask = 0xf,
560 				.cra_module = THIS_MODULE,
561 			},
562 		}
563 	}, {
564 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
565 		.alg.skcipher.base = {
566 			.init = tegra_aes_cra_init,
567 			.exit = tegra_aes_cra_exit,
568 			.setkey = tegra_aes_setkey,
569 			.encrypt = tegra_aes_encrypt,
570 			.decrypt = tegra_aes_decrypt,
571 			.min_keysize = AES_MIN_KEY_SIZE,
572 			.max_keysize = AES_MAX_KEY_SIZE,
573 			.ivsize	= AES_BLOCK_SIZE,
574 			.base = {
575 				.cra_name = "ctr(aes)",
576 				.cra_driver_name = "ctr-aes-tegra",
577 				.cra_priority = 500,
578 				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
579 				.cra_blocksize = 1,
580 				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
581 				.cra_alignmask = 0xf,
582 				.cra_module = THIS_MODULE,
583 			},
584 		}
585 	}, {
586 		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
587 		.alg.skcipher.base = {
588 			.init = tegra_aes_cra_init,
589 			.exit = tegra_aes_cra_exit,
590 			.setkey	= tegra_xts_setkey,
591 			.encrypt = tegra_aes_encrypt,
592 			.decrypt = tegra_aes_decrypt,
593 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
594 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
595 			.ivsize	= AES_BLOCK_SIZE,
596 			.base = {
597 				.cra_name = "xts(aes)",
598 				.cra_driver_name = "xts-aes-tegra",
599 				.cra_priority = 500,
600 				.cra_blocksize = AES_BLOCK_SIZE,
601 				.cra_ctxsize	   = sizeof(struct tegra_aes_ctx),
602 				.cra_alignmask	   = (__alignof__(u64) - 1),
603 				.cra_module	   = THIS_MODULE,
604 			},
605 		}
606 	},
607 };
608 
609 static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
610 					struct tegra_aead_reqctx *rctx)
611 {
612 	unsigned int data_count, res_bits, i = 0;
613 	struct tegra_se *se = ctx->se;
614 	u32 *cpuvaddr = se->cmdbuf->addr;
615 
616 	data_count = (rctx->assoclen / AES_BLOCK_SIZE);
617 	res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;
618 
619 	/*
620 	 * The hardware processes data_count + 1 blocks, so subtract
621 	 * one block when there is no residue.
622 	 */
623 	if (!res_bits)
624 		data_count--;
625 
626 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
627 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
628 			SE_LAST_BLOCK_RES_BITS(res_bits);
629 
630 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
631 	cpuvaddr[i++] = rctx->config;
632 	cpuvaddr[i++] = rctx->crypto_config;
633 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
634 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
635 			SE_ADDR_HI_SZ(rctx->assoclen);
636 
637 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
638 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
639 			SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
640 			SE_AES_OP_START;
641 
642 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
643 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
644 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
645 
646 	return i;
647 }
648 
649 static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
650 					     struct tegra_aead_reqctx *rctx)
651 {
652 	unsigned int data_count, res_bits, i = 0, j;
653 	struct tegra_se *se = ctx->se;
654 	u32 *cpuvaddr = se->cmdbuf->addr, op;
655 
656 	data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
657 	res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
658 	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
659 	     SE_AES_OP_LASTBUF | SE_AES_OP_START;
660 
661 	/*
662 	 * If there is no assoc data,
663 	 * this will be the init command
664 	 */
665 	if (!rctx->assoclen)
666 		op |= SE_AES_OP_INIT;
667 
668 	/*
669 	 * The hardware processes data_count + 1 blocks, so subtract
670 	 * one block when there is no residue.
671 	 */
672 	if (!res_bits)
673 		data_count--;
674 
675 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
676 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
677 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
678 		cpuvaddr[i++] = rctx->iv[j];
679 
680 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
681 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
682 			SE_LAST_BLOCK_RES_BITS(res_bits);
683 
684 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
685 	cpuvaddr[i++] = rctx->config;
686 	cpuvaddr[i++] = rctx->crypto_config;
687 
688 	/* Source Address */
689 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
690 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
691 			SE_ADDR_HI_SZ(rctx->cryptlen);
692 
693 	/* Destination Address */
694 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
695 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
696 			SE_ADDR_HI_SZ(rctx->cryptlen);
697 
698 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
699 	cpuvaddr[i++] = op;
700 
701 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
702 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
703 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
704 
705 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
706 	return i;
707 }
708 
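/*
 * Build the command stream for the GCM_FINAL pass: program the
 * associated-data and message lengths (in bits), reload the counter,
 * and have the hardware write the 16-byte tag to the output buffer.
 */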
709 static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
710 				    struct tegra_aead_reqctx *rctx)
711 {
712 	unsigned int i = 0, j;
713 	u32 op;
714 
715 	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
716 	     SE_AES_OP_LASTBUF | SE_AES_OP_START;
717 
718 	/*
719 	 * Set INIT for a zero-sized vector.
720 	 */
721 	if (!rctx->assoclen && !rctx->cryptlen)
722 		op |= SE_AES_OP_INIT;
723 
724 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
725 	cpuvaddr[i++] = rctx->assoclen * 8;
726 	cpuvaddr[i++] = 0;
727 
728 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
729 	cpuvaddr[i++] = rctx->cryptlen * 8;
730 	cpuvaddr[i++] = 0;
731 
732 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
733 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
734 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
735 		cpuvaddr[i++] = rctx->iv[j];
736 
737 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
738 	cpuvaddr[i++] = rctx->config;
739 	cpuvaddr[i++] = rctx->crypto_config;
740 	cpuvaddr[i++] = 0;
741 	cpuvaddr[i++] = 0;
742 
743 	/* Destination Address */
744 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
745 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
746 			SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */
747 
748 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
749 	cpuvaddr[i++] = op;
750 
751 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
752 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
753 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
754 
755 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
756 
757 	return i;
758 }
759 
760 static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
761 {
762 	struct tegra_se *se = ctx->se;
763 	unsigned int cmdlen;
764 
765 	scatterwalk_map_and_copy(rctx->inbuf.buf,
766 				 rctx->src_sg, 0, rctx->assoclen, 0);
767 
768 	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
769 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
770 			      SE_AES_KEY_INDEX(rctx->key_id);
771 
772 	cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
773 
774 	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
775 }
776 
777 static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
778 {
779 	struct tegra_se *se = ctx->se;
780 	int cmdlen, ret;
781 
782 	scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
783 				 rctx->assoclen, rctx->cryptlen, 0);
784 
785 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
786 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
787 			      SE_AES_KEY_INDEX(rctx->key_id);
788 
789 	/* Prepare command and submit */
790 	cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
791 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
792 	if (ret)
793 		return ret;
794 
795 	/* Copy the result */
796 	scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
797 				 rctx->assoclen, rctx->cryptlen, 1);
798 
799 	return 0;
800 }
801 
802 static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
803 {
804 	struct tegra_se *se = ctx->se;
805 	u32 *cpuvaddr = se->cmdbuf->addr;
806 	int cmdlen, ret, offset;
807 
808 	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
809 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
810 			      SE_AES_KEY_INDEX(rctx->key_id);
811 
812 	/* Prepare command and submit */
813 	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
814 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
815 	if (ret)
816 		return ret;
817 
818 	if (rctx->encrypt) {
819 		/* Copy the result */
820 		offset = rctx->assoclen + rctx->cryptlen;
821 		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
822 					 offset, rctx->authsize, 1);
823 	}
824 
825 	return 0;
826 }
827 
828 static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
829 {
830 	unsigned int offset;
831 	u8 mac[16];
832 
833 	offset = rctx->assoclen + rctx->cryptlen;
834 	scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
835 
836 	if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
837 		return -EBADMSG;
838 
839 	return 0;
840 }
841 
842 static inline int tegra_ccm_check_iv(const u8 *iv)
843 {
844 	/*
845 	 * iv[0] gives the value of q - 1:
846 	 * 2 <= q <= 8 as per NIST SP 800-38C notation, i.e.
847 	 * 2 <= L <= 8 and 1 <= L' <= 7 as per RFC 3610 notation.
	 */
848 	if (iv[0] < 1 || iv[0] > 7) {
849 		pr_debug("ccm_check_iv failed %d\n", iv[0]);
850 		return -EINVAL;
851 	}
852 
853 	return 0;
854 }
855 
856 static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
857 					  struct tegra_aead_reqctx *rctx)
858 {
859 	unsigned int data_count, i = 0;
860 	struct tegra_se *se = ctx->se;
861 	u32 *cpuvaddr = se->cmdbuf->addr;
862 
863 	data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
864 
865 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
866 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);
867 
868 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
869 	cpuvaddr[i++] = rctx->config;
870 	cpuvaddr[i++] = rctx->crypto_config;
871 
872 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
873 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
874 			SE_ADDR_HI_SZ(rctx->inbuf.size);
875 
876 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
877 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
878 			SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */
879 
880 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
881 	cpuvaddr[i++] = SE_AES_OP_WRSTALL |
882 			SE_AES_OP_LASTBUF | SE_AES_OP_START;
883 
884 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
885 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
886 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
887 
888 	return i;
889 }
890 
891 static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
892 				       struct tegra_aead_reqctx *rctx)
893 {
894 	unsigned int i = 0, j;
895 	struct tegra_se *se = ctx->se;
896 	u32 *cpuvaddr = se->cmdbuf->addr;
897 
898 	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
899 	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
900 	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
901 		cpuvaddr[i++] = rctx->iv[j];
902 
903 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
904 	cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
905 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
906 	cpuvaddr[i++] = rctx->config;
907 	cpuvaddr[i++] = rctx->crypto_config;
908 
909 	/* Source address setting */
910 	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
911 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
912 			SE_ADDR_HI_SZ(rctx->inbuf.size);
913 
914 	/* Destination address setting */
915 	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
916 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
917 			SE_ADDR_HI_SZ(rctx->inbuf.size);
918 
919 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
920 	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
921 			SE_AES_OP_START;
922 
923 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
924 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
925 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
926 
927 	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
928 		rctx->config, rctx->crypto_config);
929 
930 	return i;
931 }
932 
933 static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
934 {
935 	struct tegra_se *se = ctx->se;
936 	int cmdlen;
937 
938 	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
939 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
940 						      rctx->encrypt) |
941 						      SE_AES_KEY_INDEX(rctx->key_id);
942 
943 	/* Prepare command and submit */
944 	cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
945 
946 	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
947 }
948 
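/*
 * Write the message length, big-endian, into the last csize bytes of
 * the block (the Q field of B_0), failing if the length does not fit.
 */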
949 static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
950 {
951 	__be32 data;
952 
953 	memset(block, 0, csize);
954 	block += csize;
955 
956 	if (csize >= 4)
957 		csize = 4;
958 	else if (msglen > (1 << (8 * csize)))
959 		return -EOVERFLOW;
960 
961 	data = cpu_to_be32(msglen);
962 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
963 
964 	return 0;
965 }
966 
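/*
 * Build the B_0 block from the request IV as per NIST SP 800-38C: the
 * flags octet encodes the tag length t and the Adata bit, and the
 * trailing q bytes carry the message length. For example, with
 * authsize 8 and a non-zero assoclen, nonce[0] becomes
 * 0x40 | ((8 - 2) / 2) << 3 | (q - 1).
 */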
967 static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
968 {
969 	unsigned int q, t;
970 	u8 *q_ptr, *iv = (u8 *)rctx->iv;
971 
972 	memcpy(nonce, rctx->iv, 16);
973 
974 	/*** 1. Prepare Flags Octet ***/
975 
976 	/* Encode t (mac length) */
977 	t = rctx->authsize;
978 	nonce[0] |= (((t - 2) / 2) << 3);
979 
980 	/* Adata */
981 	if (rctx->assoclen)
982 		nonce[0] |= (1 << 6);
983 
984 	/*** Encode Q - message length ***/
985 	q = iv[0] + 1;
986 	q_ptr = nonce + 16 - q;
987 
988 	return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
989 }
990 
991 static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
992 {
993 	int len = 0;
994 
995 	/*
996 	 * Add the control info for the associated data as per
997 	 * RFC 3610 and NIST Special Publication 800-38C.
	 */
998 	if (a < 65280) {
999 		*(__be16 *)adata = cpu_to_be16(a);
1000 		len = 2;
1001 	} else	{
1002 		*(__be16 *)adata = cpu_to_be16(0xfffe);
1003 		*(__be32 *)&adata[2] = cpu_to_be32(a);
1004 		len = 6;
1005 	}
1006 
1007 	return len;
1008 }
1009 
1010 static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
1011 {
1012 	unsigned int padlen = 16 - (len % 16);
1013 	u8 padding[16] = {0};
1014 
1015 	if (padlen == 16)
1016 		return 0;
1017 
1018 	memcpy(buf, padding, padlen);
1019 
1020 	return padlen;
1021 }
1022 
1023 static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
1024 {
1025 	unsigned int alen = 0, offset = 0;
1026 	u8 nonce[16], adata[16];
1027 	int ret;
1028 
1029 	ret = tegra_ccm_format_nonce(rctx, nonce);
1030 	if (ret)
1031 		return ret;
1032 
1033 	memcpy(rctx->inbuf.buf, nonce, 16);
1034 	offset = 16;
1035 
1036 	if (rctx->assoclen) {
1037 		alen = tegra_ccm_format_adata(adata, rctx->assoclen);
1038 		memcpy(rctx->inbuf.buf + offset, adata, alen);
1039 		offset += alen;
1040 
1041 		scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
1042 					 rctx->src_sg, 0, rctx->assoclen, 0);
1043 
1044 		offset += rctx->assoclen;
1045 		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
1046 					 rctx->assoclen + alen);
1047 	}
1048 
1049 	return offset;
1050 }
1051 
1052 static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1053 {
1054 	u32 result[16];
1055 	int i, ret;
1056 
1057 	/* Read and clear Result */
1058 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1059 		result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1060 
1061 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1062 		writel(0, se->base + se->hw->regs->result + (i * 4));
1063 
1064 	if (rctx->encrypt) {
1065 		memcpy(rctx->authdata, result, rctx->authsize);
1066 	} else {
1067 		ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
1068 		if (ret)
1069 			return -EBADMSG;
1070 	}
1071 
1072 	return 0;
1073 }
1074 
1075 static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
1076 {
1077 	/* Copy result */
1078 	scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
1079 				 rctx->assoclen, rctx->cryptlen, 1);
1080 
1081 	if (rctx->encrypt)
1082 		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
1083 					 rctx->assoclen + rctx->cryptlen,
1084 					 rctx->authsize, 1);
1085 	else
1086 		memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);
1087 
1088 	return 0;
1089 }
1090 
1091 static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1092 {
1093 	struct tegra_se *se = ctx->se;
1094 	struct scatterlist *sg;
1095 	int offset, ret;
1096 
1097 	offset = tegra_ccm_format_blocks(rctx);
1098 	if (offset < 0)
1099 		return -EINVAL;
1100 
1101 	/* Copy plain text to the buffer */
1102 	sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;
1103 
1104 	scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
1105 				 sg, rctx->assoclen,
1106 				 rctx->cryptlen, 0);
1107 	offset += rctx->cryptlen;
1108 	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1109 
1110 	rctx->inbuf.size = offset;
1111 
1112 	ret = tegra_ccm_do_cbcmac(ctx, rctx);
1113 	if (ret)
1114 		return ret;
1115 
1116 	return tegra_ccm_mac_result(se, rctx);
1117 }
1118 
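/*
 * CTR pass of CCM. The MAC is placed at the start of the input buffer
 * so it is encrypted (or decrypted) together with the payload: on
 * encryption the resulting tag is copied out after the ciphertext, on
 * decryption the recovered tag is kept in rctx->authdata for the
 * comparison done in tegra_ccm_mac_result().
 */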
1119 static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
1120 {
1121 	struct tegra_se *se = ctx->se;
1122 	unsigned int cmdlen, offset = 0;
1123 	struct scatterlist *sg = rctx->src_sg;
1124 	int ret;
1125 
1126 	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
1127 	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
1128 			      SE_AES_KEY_INDEX(rctx->key_id);
1129 
1130 	/* Copy the authdata to the top of the buffer for encryption/decryption */
1131 	if (rctx->encrypt)
1132 		memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
1133 	else
1134 		scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
1135 					 rctx->assoclen + rctx->cryptlen,
1136 					 rctx->authsize, 0);
1137 
1138 	offset += rctx->authsize;
1139 	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);
1140 
1141 	/* Copy the payload; if there is no cryptlen, proceed straight to submission */
1142 	if (rctx->cryptlen) {
1143 		scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
1144 					 rctx->assoclen, rctx->cryptlen, 0);
1145 		offset += rctx->cryptlen;
1146 		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
1147 	}
1148 
1149 	rctx->inbuf.size = offset;
1150 
1151 	/* Prepare command and submit */
1152 	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
1153 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1154 	if (ret)
1155 		return ret;
1156 
1157 	return tegra_ccm_ctr_result(se, rctx);
1158 }
1159 
1160 static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
1161 				struct tegra_aead_reqctx *rctx)
1162 {
1163 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1164 	u8 *iv = (u8 *)rctx->iv;
1165 	int ret, i;
1166 
1167 	rctx->src_sg = req->src;
1168 	rctx->dst_sg = req->dst;
1169 	rctx->assoclen = req->assoclen;
1170 	rctx->authsize = crypto_aead_authsize(tfm);
1171 
1172 	if (rctx->encrypt)
1173 		rctx->cryptlen = req->cryptlen;
1174 	else
1175 		rctx->cryptlen = req->cryptlen - rctx->authsize;
1176 
1177 	memcpy(iv, req->iv, 16);
1178 
1179 	ret = tegra_ccm_check_iv(iv);
1180 	if (ret)
1181 		return ret;
1182 
1183 	/* Note: rfc 3610 and NIST 800-38C require counter (ctr_0) of
1184 	 * zero to encrypt auth tag.
1185 	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
1186 	 */
1187 	memset(iv + 15 - iv[0], 0, iv[0] + 1);
1188 
1189 	/* Clear any previous result */
1190 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1191 		writel(0, se->base + se->hw->regs->result + (i * 4));
1192 
1193 	return 0;
1194 }
1195 
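/*
 * Engine callback for CCM: encryption runs the CBC-MAC pass over B_0,
 * the formatted associated data and the plaintext first and then the
 * CTR pass (which also encrypts the MAC into the tag); decryption runs
 * the CTR pass first to recover the plaintext and the tag, then the
 * CBC-MAC pass to verify it.
 */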
1196 static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
1197 {
1198 	struct aead_request *req = container_of(areq, struct aead_request, base);
1199 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1200 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1201 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1202 	struct tegra_se *se = ctx->se;
1203 	int ret;
1204 
1205 	ret = tegra_ccm_crypt_init(req, se, rctx);
1206 	if (ret)
1207 		goto out_finalize;
1208 
1209 	rctx->key_id = ctx->key_id;
1210 
1211 	/* Allocate buffers required */
1212 	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
1213 	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
1214 					     &rctx->inbuf.addr, GFP_KERNEL);
1215 	if (!rctx->inbuf.buf) {
		ret = -ENOMEM;
1216 		goto out_finalize;
	}
1217 
1218 	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
1219 	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
1220 					      &rctx->outbuf.addr, GFP_KERNEL);
1221 	if (!rctx->outbuf.buf) {
1222 		ret = -ENOMEM;
1223 		goto out_free_inbuf;
1224 	}
1225 
1226 	if (!ctx->key_id) {
1227 		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
1228 						    ctx->keylen, ctx->alg, &rctx->key_id);
1229 		if (ret)
1230 			goto out;
1231 	}
1232 
1233 	if (rctx->encrypt) {
1234 		/* CBC MAC Operation */
1235 		ret = tegra_ccm_compute_auth(ctx, rctx);
1236 		if (ret)
1237 			goto out;
1238 
1239 		/* CTR operation */
1240 		ret = tegra_ccm_do_ctr(ctx, rctx);
1241 		if (ret)
1242 			goto out;
1243 	} else {
1244 		/* CTR operation */
1245 		ret = tegra_ccm_do_ctr(ctx, rctx);
1246 		if (ret)
1247 			goto out;
1248 
1249 		/* CBC MAC Operation */
1250 		ret = tegra_ccm_compute_auth(ctx, rctx);
1251 		if (ret)
1252 			goto out;
1253 	}
1254 
1255 out:
1256 	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
1257 			  rctx->outbuf.buf, rctx->outbuf.addr);
1258 
1259 out_free_inbuf:
1260 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
1261 			  rctx->inbuf.buf, rctx->inbuf.addr);
1262 
1263 	if (tegra_key_is_reserved(rctx->key_id))
1264 		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
1265 
1266 out_finalize:
1267 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
1268 
1269 	return 0;
1270 }
1271 
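/*
 * Engine callback for GCM, run as up to three hardware passes: GMAC
 * over the associated data (if any), GCM encrypt/decrypt over the
 * payload (if any) and a GCM_FINAL pass that folds in the lengths and
 * produces the tag. On decryption the computed tag is compared against
 * the one supplied in the source scatterlist.
 */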
1272 static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
1273 {
1274 	struct aead_request *req = container_of(areq, struct aead_request, base);
1275 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1276 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1277 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1278 	int ret;
1279 
1280 	rctx->src_sg = req->src;
1281 	rctx->dst_sg = req->dst;
1282 	rctx->assoclen = req->assoclen;
1283 	rctx->authsize = crypto_aead_authsize(tfm);
1284 
1285 	if (rctx->encrypt)
1286 		rctx->cryptlen = req->cryptlen;
1287 	else
1288 		rctx->cryptlen = req->cryptlen - ctx->authsize;
1289 
1290 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
1291 	rctx->iv[3] = (1 << 24);
1292 
1293 	rctx->key_id = ctx->key_id;
1294 
1295 	/* Allocate buffers required */
1296 	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
1297 	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
1298 					     &rctx->inbuf.addr, GFP_KERNEL);
1299 	if (!rctx->inbuf.buf) {
1300 		ret = -ENOMEM;
1301 		goto out_finalize;
1302 	}
1303 
1304 	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
1305 	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
1306 					      &rctx->outbuf.addr, GFP_KERNEL);
1307 	if (!rctx->outbuf.buf) {
1308 		ret = -ENOMEM;
1309 		goto out_free_inbuf;
1310 	}
1311 
1312 	if (!ctx->key_id) {
1313 		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
1314 						    ctx->keylen, ctx->alg, &rctx->key_id);
1315 		if (ret)
1316 			goto out;
1317 	}
1318 
1319 	/* If there is associated data, perform the GMAC operation */
1320 	if (rctx->assoclen) {
1321 		ret = tegra_gcm_do_gmac(ctx, rctx);
1322 		if (ret)
1323 			goto out;
1324 	}
1325 
1326 	/* GCM Encryption/Decryption operation */
1327 	if (rctx->cryptlen) {
1328 		ret = tegra_gcm_do_crypt(ctx, rctx);
1329 		if (ret)
1330 			goto out;
1331 	}
1332 
1333 	/* GCM_FINAL operation */
1334 	ret = tegra_gcm_do_final(ctx, rctx);
1335 	if (ret)
1336 		goto out;
1337 
1338 	if (!rctx->encrypt)
1339 		ret = tegra_gcm_do_verify(ctx->se, rctx);
1340 
1341 out:
1342 	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
1343 			  rctx->outbuf.buf, rctx->outbuf.addr);
1344 
1345 out_free_inbuf:
1346 	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
1347 			  rctx->inbuf.buf, rctx->inbuf.addr);
1348 
1349 	if (tegra_key_is_reserved(rctx->key_id))
1350 		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
1351 
1352 out_finalize:
1353 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
1354 
1355 	return 0;
1356 }
1357 
1358 static int tegra_aead_cra_init(struct crypto_aead *tfm)
1359 {
1360 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1361 	struct aead_alg *alg = crypto_aead_alg(tfm);
1362 	struct tegra_se_alg *se_alg;
1363 	const char *algname;
1364 	int ret;
1365 
1366 	algname = crypto_tfm_alg_name(&tfm->base);
1367 
1368 	se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
1369 
1370 	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
1371 
1372 	ctx->se = se_alg->se_dev;
1373 	ctx->key_id = 0;
1374 	ctx->keylen = 0;
1375 
1376 	ret = se_algname_to_algid(algname);
1377 	if (ret < 0) {
1378 		dev_err(ctx->se->dev, "invalid algorithm\n");
1379 		return ret;
1380 	}
1381 
1382 	ctx->alg = ret;
1383 
1384 	return 0;
1385 }
1386 
1387 static int tegra_ccm_setauthsize(struct crypto_aead *tfm,  unsigned int authsize)
1388 {
1389 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1390 
1391 	switch (authsize) {
1392 	case 4:
1393 	case 6:
1394 	case 8:
1395 	case 10:
1396 	case 12:
1397 	case 14:
1398 	case 16:
1399 		break;
1400 	default:
1401 		return -EINVAL;
1402 	}
1403 
1404 	ctx->authsize = authsize;
1405 
1406 	return 0;
1407 }
1408 
1409 static int tegra_gcm_setauthsize(struct crypto_aead *tfm,  unsigned int authsize)
1410 {
1411 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1412 	int ret;
1413 
1414 	ret = crypto_gcm_check_authsize(authsize);
1415 	if (ret)
1416 		return ret;
1417 
1418 	ctx->authsize = authsize;
1419 
1420 	return 0;
1421 }
1422 
1423 static void tegra_aead_cra_exit(struct crypto_aead *tfm)
1424 {
1425 	struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);
1426 
1427 	if (ctx->key_id)
1428 		tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1429 }
1430 
1431 static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
1432 {
1433 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1434 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1435 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
1436 
1437 	rctx->encrypt = encrypt;
1438 
1439 	return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
1440 }
1441 
1442 static int tegra_aead_encrypt(struct aead_request *req)
1443 {
1444 	return tegra_aead_crypt(req, true);
1445 }
1446 
1447 static int tegra_aead_decrypt(struct aead_request *req)
1448 {
1449 	return tegra_aead_crypt(req, false);
1450 }
1451 
1452 static int tegra_aead_setkey(struct crypto_aead *tfm,
1453 			     const u8 *key, u32 keylen)
1454 {
1455 	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
1456 	int ret;
1457 
1458 	if (aes_check_keylen(keylen)) {
1459 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1460 		return -EINVAL;
1461 	}
1462 
1463 	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1464 	if (ret) {
1465 		ctx->keylen = keylen;
1466 		memcpy(ctx->key, key, keylen);
1467 	}
1468 
1469 	return 0;
1470 }
1471 
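/*
 * Build the command stream for one CMAC pass: load a zero IV on the
 * first task, program the last-block count and residual bits (the
 * FINAL bit is set only when this is not an update), the config words
 * and the source buffer address, then start the operation and request
 * a syncpoint increment.
 */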
1472 static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
1473 					struct tegra_cmac_reqctx *rctx)
1474 {
1475 	unsigned int data_count, res_bits = 0, i = 0, j;
1476 	struct tegra_se *se = ctx->se;
1477 	u32 *cpuvaddr = se->cmdbuf->addr, op;
1478 
1479 	data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);
1480 
1481 	op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;
1482 
1483 	if (!(rctx->task & SHA_UPDATE)) {
1484 		op |= SE_AES_OP_FINAL;
1485 		res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
1486 	}
1487 
1488 	if (!res_bits && data_count)
1489 		data_count--;
1490 
1491 	if (rctx->task & SHA_FIRST) {
1492 		rctx->task &= ~SHA_FIRST;
1493 
1494 		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
1495 		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
1496 		/* Load 0 IV */
1497 		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
1498 			cpuvaddr[i++] = 0;
1499 	}
1500 
1501 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
1502 	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
1503 			SE_LAST_BLOCK_RES_BITS(res_bits);
1504 
1505 	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
1506 	cpuvaddr[i++] = rctx->config;
1507 	cpuvaddr[i++] = rctx->crypto_config;
1508 
1509 	/* Source Address */
1510 	cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
1511 	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
1512 			SE_ADDR_HI_SZ(rctx->datbuf.size);
1513 	cpuvaddr[i++] = 0;
1514 	cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);
1515 
1516 	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
1517 	cpuvaddr[i++] = op;
1518 
1519 	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
1520 	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
1521 			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
1522 
1523 	return i;
1524 }
1525 
1526 static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1527 {
1528 	int i;
1529 
1530 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1531 		rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1532 }
1533 
1534 static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
1535 {
1536 	int i;
1537 
1538 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1539 		writel(rctx->result[i],
1540 		       se->base + se->hw->regs->result + (i * 4));
1541 }
1542 
1543 static int tegra_cmac_do_init(struct ahash_request *req)
1544 {
1545 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1546 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1547 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1548 	struct tegra_se *se = ctx->se;
1549 	int i;
1550 
1551 	rctx->total_len = 0;
1552 	rctx->datbuf.size = 0;
1553 	rctx->residue.size = 0;
1554 	rctx->key_id = ctx->key_id;
1555 	rctx->task |= SHA_FIRST;
1556 	rctx->blk_size = crypto_ahash_blocksize(tfm);
1557 
1558 	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
1559 					       &rctx->residue.addr, GFP_KERNEL);
1560 	if (!rctx->residue.buf)
1561 		return -ENOMEM;
1562 
1563 	rctx->residue.size = 0;
1564 
1565 	/* Clear any previous result */
1566 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1567 		writel(0, se->base + se->hw->regs->result + (i * 4));
1568 
1569 	return 0;
1570 }
1571 
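/*
 * Process the full blocks of this update, always holding back the last
 * block plus any remainder in the residue buffer so that final() has
 * data for the FINAL operation. The intermediate result is read back
 * from the result registers after each submission and restored before
 * the next one.
 */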
1572 static int tegra_cmac_do_update(struct ahash_request *req)
1573 {
1574 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1575 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1576 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1577 	struct tegra_se *se = ctx->se;
1578 	unsigned int nblks, nresidue, cmdlen;
1579 	int ret;
1580 
1581 	if (!req->nbytes)
1582 		return 0;
1583 
1584 	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
1585 	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
1586 
1587 	/*
1588 	 * Reserve the last block as residue, to be processed during final().
1589 	 */
1590 	if (!nresidue && nblks) {
1591 		nresidue += rctx->blk_size;
1592 		nblks--;
1593 	}
1594 
1595 	rctx->src_sg = req->src;
1596 	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
1597 	rctx->total_len += rctx->datbuf.size;
1598 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1599 	rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
1600 
1601 	/*
1602 	 * If there is less than one full block, keep the bytes in the
1603 	 * residue buffer and return. They will be processed in final().
1604 	 */
1605 	if (nblks < 1) {
1606 		scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
1607 					 rctx->src_sg, 0, req->nbytes, 0);
1608 
1609 		rctx->residue.size += req->nbytes;
1610 		return 0;
1611 	}
1612 
1613 	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
1614 					      &rctx->datbuf.addr, GFP_KERNEL);
1615 	if (!rctx->datbuf.buf)
1616 		return -ENOMEM;
1617 
1618 	/* Copy the previous residue first */
1619 	if (rctx->residue.size)
1620 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1621 
1622 	scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
1623 				 rctx->src_sg, 0, req->nbytes - nresidue, 0);
1624 
1625 	scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
1626 				 req->nbytes - nresidue, nresidue, 0);
1627 
1628 	/* Update residue value with the residue after current block */
1629 	rctx->residue.size = nresidue;
1630 
1631 	/*
1632 	 * If this is not the first task, restore the previously saved
1633 	 * intermediate result to the registers so that it gets picked up.
1634 	 */
1635 	if (!(rctx->task & SHA_FIRST))
1636 		tegra_cmac_paste_result(ctx->se, rctx);
1637 
1638 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1639 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1640 
1641 	tegra_cmac_copy_result(ctx->se, rctx);
1642 
1643 	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
1644 			  rctx->datbuf.buf, rctx->datbuf.addr);
1645 
1646 	return ret;
1647 }
1648 
1649 static int tegra_cmac_do_final(struct ahash_request *req)
1650 {
1651 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1652 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1653 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1654 	struct tegra_se *se = ctx->se;
1655 	u32 *result = (u32 *)req->result;
1656 	int ret = 0, i, cmdlen;
1657 
1658 	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
1659 		ret = crypto_shash_tfm_digest(ctx->fallback_tfm,
1660 					      NULL, 0, req->result);
		goto out_free;
1661 	}
1662 
1663 	if (rctx->residue.size) {
1664 		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
1665 						      &rctx->datbuf.addr, GFP_KERNEL);
1666 		if (!rctx->datbuf.buf) {
1667 			ret = -ENOMEM;
1668 			goto out_free;
1669 		}
1670 
1671 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
1672 	}
1673 
1674 	rctx->datbuf.size = rctx->residue.size;
1675 	rctx->total_len += rctx->residue.size;
1676 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
1677 
1678 	/*
1679 	 * If this is not the first task, restore the previously saved
1680 	 * intermediate result to the registers so that it gets picked up.
1681 	 */
1682 	if (!(rctx->task & SHA_FIRST))
1683 		tegra_cmac_paste_result(ctx->se, rctx);
1684 
1685 	/* Prepare command and submit */
1686 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
1687 	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
1688 	if (ret)
1689 		goto out;
1690 
1691 	/* Read and clear Result register */
1692 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1693 		result[i] = readl(se->base + se->hw->regs->result + (i * 4));
1694 
1695 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
1696 		writel(0, se->base + se->hw->regs->result + (i * 4));
1697 
1698 out:
1699 	if (rctx->residue.size)
1700 		dma_free_coherent(se->dev, rctx->datbuf.size,
1701 				  rctx->datbuf.buf, rctx->datbuf.addr);
1702 out_free:
1703 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
1704 			  rctx->residue.buf, rctx->residue.addr);
1705 	return ret;
1706 }
1707 
1708 static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
1709 {
1710 	struct ahash_request *req = ahash_request_cast(areq);
1711 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1712 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1713 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1714 	struct tegra_se *se = ctx->se;
1715 	int ret = 0;
1716 
1717 	if (rctx->task & SHA_INIT) {
1718 		ret = tegra_cmac_do_init(req);
1719 		if (ret)
1720 			goto out;
1721 
1722 		rctx->task &= ~SHA_INIT;
1723 	}
1724 
1725 	if (!ctx->key_id) {
1726 		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
1727 						    ctx->keylen, ctx->alg, &rctx->key_id);
1728 		if (ret)
1729 			goto out;
1730 	}
1731 
1732 	if (rctx->task & SHA_UPDATE) {
1733 		ret = tegra_cmac_do_update(req);
1734 		if (ret)
1735 			goto out;
1736 
1737 		rctx->task &= ~SHA_UPDATE;
1738 	}
1739 
1740 	if (rctx->task & SHA_FINAL) {
1741 		ret = tegra_cmac_do_final(req);
1742 		if (ret)
1743 			goto out;
1744 
1745 		rctx->task &= ~SHA_FINAL;
1746 	}
1747 out:
1748 	if (tegra_key_is_reserved(rctx->key_id))
1749 		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
1750 
1751 	crypto_finalize_hash_request(se->engine, req, ret);
1752 
1753 	return 0;
1754 }
1755 
1756 static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
1757 				     const char *algname)
1758 {
1759 	unsigned int statesize;
1760 
1761 	ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
1762 
1763 	if (IS_ERR(ctx->fallback_tfm)) {
1764 		dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
1765 		ctx->fallback_tfm = NULL;
1766 		return;
1767 	}
1768 
1769 	statesize = crypto_shash_statesize(ctx->fallback_tfm);
1770 
1771 	if (statesize > sizeof(struct tegra_cmac_reqctx))
1772 		crypto_ahash_set_statesize(tfm, statesize);
1773 }
1774 
1775 static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
1776 {
1777 	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1778 	struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
1779 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
1780 	struct tegra_se_alg *se_alg;
1781 	const char *algname;
1782 	int ret;
1783 
1784 	algname = crypto_tfm_alg_name(tfm);
1785 	se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
1786 
1787 	crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));
1788 
1789 	ctx->se = se_alg->se_dev;
1790 	ctx->key_id = 0;
1791 	ctx->keylen = 0;
1792 
1793 	ret = se_algname_to_algid(algname);
1794 	if (ret < 0) {
1795 		dev_err(ctx->se->dev, "invalid algorithm\n");
1796 		return ret;
1797 	}
1798 
1799 	ctx->alg = ret;
1800 
1801 	tegra_cmac_init_fallback(ahash_tfm, ctx, algname);
1802 
1803 	return 0;
1804 }
1805 
1806 static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
1807 {
1808 	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
1809 
1810 	if (ctx->fallback_tfm)
1811 		crypto_free_shash(ctx->fallback_tfm);
1812 
1813 	tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
1814 }
1815 
1816 static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1817 			     unsigned int keylen)
1818 {
1819 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1820 	int ret;
1821 
1822 	if (aes_check_keylen(keylen)) {
1823 		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
1824 		return -EINVAL;
1825 	}
1826 
1827 	if (ctx->fallback_tfm)
1828 		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
1829 
1830 	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
1831 	if (ret) {
1832 		ctx->keylen = keylen;
1833 		memcpy(ctx->key, key, keylen);
1834 	}
1835 
1836 	return 0;
1837 }
1838 
1839 static int tegra_cmac_init(struct ahash_request *req)
1840 {
1841 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1842 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1843 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1844 
1845 	rctx->task = SHA_INIT;
1846 
1847 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1848 }
1849 
1850 static int tegra_cmac_update(struct ahash_request *req)
1851 {
1852 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1853 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1854 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1855 
1856 	rctx->task |= SHA_UPDATE;
1857 
1858 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1859 }
1860 
1861 static int tegra_cmac_final(struct ahash_request *req)
1862 {
1863 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1864 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1865 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1866 
1867 	rctx->task |= SHA_FINAL;
1868 
1869 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1870 }
1871 
1872 static int tegra_cmac_finup(struct ahash_request *req)
1873 {
1874 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1875 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1876 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1877 
1878 	rctx->task |= SHA_UPDATE | SHA_FINAL;
1879 
1880 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1881 }
1882 
1883 static int tegra_cmac_digest(struct ahash_request *req)
1884 {
1885 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1886 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
1887 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1888 
1889 	rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
1890 
1891 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
1892 }
1893 
1894 static int tegra_cmac_export(struct ahash_request *req, void *out)
1895 {
1896 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1897 
1898 	memcpy(out, rctx, sizeof(*rctx));
1899 
1900 	return 0;
1901 }
1902 
1903 static int tegra_cmac_import(struct ahash_request *req, const void *in)
1904 {
1905 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
1906 
1907 	memcpy(rctx, in, sizeof(*rctx));
1908 
1909 	return 0;
1910 }
1911 
1912 static struct tegra_se_alg tegra_aead_algs[] = {
1913 	{
1914 		.alg.aead.op.do_one_request = tegra_gcm_do_one_req,
1915 		.alg.aead.base = {
1916 			.init = tegra_aead_cra_init,
1917 			.exit = tegra_aead_cra_exit,
1918 			.setkey = tegra_aead_setkey,
1919 			.setauthsize = tegra_gcm_setauthsize,
1920 			.encrypt = tegra_aead_encrypt,
1921 			.decrypt = tegra_aead_decrypt,
1922 			.maxauthsize = AES_BLOCK_SIZE,
1923 			.ivsize	= GCM_AES_IV_SIZE,
1924 			.base = {
1925 				.cra_name = "gcm(aes)",
1926 				.cra_driver_name = "gcm-aes-tegra",
1927 				.cra_priority = 500,
1928 				.cra_blocksize = 1,
1929 				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
1930 				.cra_alignmask = 0xf,
1931 				.cra_module = THIS_MODULE,
1932 			},
1933 		}
1934 	}, {
1935 		.alg.aead.op.do_one_request = tegra_ccm_do_one_req,
1936 		.alg.aead.base = {
1937 			.init = tegra_aead_cra_init,
1938 			.exit = tegra_aead_cra_exit,
1939 			.setkey	= tegra_aead_setkey,
1940 			.setauthsize = tegra_ccm_setauthsize,
1941 			.encrypt = tegra_aead_encrypt,
1942 			.decrypt = tegra_aead_decrypt,
1943 			.maxauthsize = AES_BLOCK_SIZE,
1944 			.ivsize	= AES_BLOCK_SIZE,
1945 			.chunksize = AES_BLOCK_SIZE,
1946 			.base = {
1947 				.cra_name = "ccm(aes)",
1948 				.cra_driver_name = "ccm-aes-tegra",
1949 				.cra_priority = 500,
1950 				.cra_blocksize = 1,
1951 				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
1952 				.cra_alignmask = 0xf,
1953 				.cra_module = THIS_MODULE,
1954 			},
1955 		}
1956 	}
1957 };
1958 
1959 static struct tegra_se_alg tegra_cmac_algs[] = {
1960 	{
1961 		.alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
1962 		.alg.ahash.base = {
1963 			.init = tegra_cmac_init,
1964 			.setkey	= tegra_cmac_setkey,
1965 			.update = tegra_cmac_update,
1966 			.final = tegra_cmac_final,
1967 			.finup = tegra_cmac_finup,
1968 			.digest = tegra_cmac_digest,
1969 			.export = tegra_cmac_export,
1970 			.import = tegra_cmac_import,
1971 			.halg.digestsize = AES_BLOCK_SIZE,
1972 			.halg.statesize = sizeof(struct tegra_cmac_reqctx),
1973 			.halg.base = {
1974 				.cra_name = "cmac(aes)",
1975 				.cra_driver_name = "tegra-se-cmac",
1976 				.cra_priority = 300,
1977 				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
1978 				.cra_blocksize = AES_BLOCK_SIZE,
1979 				.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
1980 				.cra_alignmask = 0,
1981 				.cra_module = THIS_MODULE,
1982 				.cra_init = tegra_cmac_cra_init,
1983 				.cra_exit = tegra_cmac_cra_exit,
1984 			}
1985 		}
1986 	}
1987 };
1988 
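/*
 * Register the AES-based skcipher, AEAD and CMAC algorithms with the
 * crypto engine, unwinding the registrations in reverse order if any
 * of them fails.
 */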
1989 int tegra_init_aes(struct tegra_se *se)
1990 {
1991 	struct aead_engine_alg *aead_alg;
1992 	struct ahash_engine_alg *ahash_alg;
1993 	struct skcipher_engine_alg *sk_alg;
1994 	int i, ret;
1995 
1996 	se->manifest = tegra_aes_kac_manifest;
1997 
1998 	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
1999 		sk_alg = &tegra_aes_algs[i].alg.skcipher;
2000 		tegra_aes_algs[i].se_dev = se;
2001 
2002 		ret = crypto_engine_register_skcipher(sk_alg);
2003 		if (ret) {
2004 			dev_err(se->dev, "failed to register %s\n",
2005 				sk_alg->base.base.cra_name);
2006 			goto err_aes;
2007 		}
2008 	}
2009 
2010 	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
2011 		aead_alg = &tegra_aead_algs[i].alg.aead;
2012 		tegra_aead_algs[i].se_dev = se;
2013 
2014 		ret = crypto_engine_register_aead(aead_alg);
2015 		if (ret) {
2016 			dev_err(se->dev, "failed to register %s\n",
2017 				aead_alg->base.base.cra_name);
2018 			goto err_aead;
2019 		}
2020 	}
2021 
2022 	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
2023 		ahash_alg = &tegra_cmac_algs[i].alg.ahash;
2024 		tegra_cmac_algs[i].se_dev = se;
2025 
2026 		ret = crypto_engine_register_ahash(ahash_alg);
2027 		if (ret) {
2028 			dev_err(se->dev, "failed to register %s\n",
2029 				ahash_alg->base.halg.base.cra_name);
2030 			goto err_cmac;
2031 		}
2032 	}
2033 
2034 	return 0;
2035 
2036 err_cmac:
2037 	while (i--)
2038 		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
2039 
2040 	i = ARRAY_SIZE(tegra_aead_algs);
2041 err_aead:
2042 	while (i--)
2043 		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
2044 
2045 	i = ARRAY_SIZE(tegra_aes_algs);
2046 err_aes:
2047 	while (i--)
2048 		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
2049 
2050 	return ret;
2051 }
2052 
2053 void tegra_deinit_aes(struct tegra_se *se)
2054 {
2055 	int i;
2056 
2057 	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
2058 		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
2059 
2060 	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
2061 		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
2062 
2063 	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
2064 		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
2065 }
2066