// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
 * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
 */

#include <linux/bottom_half.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "tegra-se.h"

struct tegra_aes_ctx {
	struct tegra_se *se;
	u32 alg;
	u32 ivsize;
	u32 key1_id;
	u32 key2_id;
	u32 keylen;
	u8 key1[AES_MAX_KEY_SIZE];
	u8 key2[AES_MAX_KEY_SIZE];
};

struct tegra_aes_reqctx {
	struct tegra_se_datbuf datbuf;
	bool encrypt;
	u32 config;
	u32 crypto_config;
	u32 len;
	u32 *iv;
};

struct tegra_aead_ctx {
	struct tegra_se *se;
	unsigned int authsize;
	u32 alg;
	u32 key_id;
	u32 keylen;
	u8 key[AES_MAX_KEY_SIZE];
};

struct tegra_aead_reqctx {
	struct tegra_se_datbuf inbuf;
	struct tegra_se_datbuf outbuf;
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	unsigned int assoclen;
	unsigned int cryptlen;
	unsigned int authsize;
	bool encrypt;
	u32 crypto_config;
	u32 config;
	u32 key_id;
	u32 iv[4];
	u8 authdata[16];
};

struct tegra_cmac_ctx {
	struct tegra_se *se;
	unsigned int alg;
	u32 key_id;
	u32 keylen;
	u8 key[AES_MAX_KEY_SIZE];
	struct crypto_shash *fallback_tfm;
};

struct tegra_cmac_reqctx {
	struct scatterlist *src_sg;
	struct tegra_se_datbuf datbuf;
	struct tegra_se_datbuf residue;
	unsigned int total_len;
	unsigned int blk_size;
	unsigned int task;
	u32 crypto_config;
	u32 config;
	u32 key_id;
	u32 *iv;
	u32 result[CMAC_RESULT_REG_COUNT];
};

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & 0xff;
		nums >>= 8;
	} while (bits && nums);
}

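/*
 * For CBC, the IV to chain into the next request is the last ciphertext
 * block: on encryption it sits at the tail of the bounce buffer, on
 * decryption it is read from the source scatterlist (still intact, since
 * the decrypted result is copied out afterwards).
 */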
static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
	unsigned int offset;

	offset = req->cryptlen - ctx->ivsize;

	if (rctx->encrypt)
		memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
	else
		scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
}

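/*
 * Update req->iv after an operation so that chained requests continue
 * correctly: CBC copies back the last ciphertext block, CTR advances the
 * counter by the number of blocks processed (rounded up).
 */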
static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
	int num;

	if (ctx->alg == SE_ALG_CBC) {
		tegra_cbc_iv_copyback(req, ctx);
	} else if (ctx->alg == SE_ALG_CTR) {
		num = req->cryptlen / ctx->ivsize;
		if (req->cryptlen % ctx->ivsize)
			num++;

		ctr_iv_inc(req->iv, ctx->ivsize, num);
	}
}

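/*
 * Map an SE algorithm ID to the Tegra234 CRYPTO_CONFIG register value.
 * The MAC and GCM modes need no extra bits here and return 0; unknown
 * algorithms return -EINVAL.
 */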
static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
{
	switch (alg) {
	case SE_ALG_CMAC:
	case SE_ALG_GMAC:
	case SE_ALG_GCM:
	case SE_ALG_GCM_FINAL:
		return 0;
	case SE_ALG_CBC:
		if (encrypt)
			return SE_CRYPTO_CFG_CBC_ENCRYPT;
		else
			return SE_CRYPTO_CFG_CBC_DECRYPT;
	case SE_ALG_ECB:
		if (encrypt)
			return SE_CRYPTO_CFG_ECB_ENCRYPT;
		else
			return SE_CRYPTO_CFG_ECB_DECRYPT;
	case SE_ALG_XTS:
		if (encrypt)
			return SE_CRYPTO_CFG_XTS_ENCRYPT;
		else
			return SE_CRYPTO_CFG_XTS_DECRYPT;
	case SE_ALG_CTR:
		return SE_CRYPTO_CFG_CTR;
	case SE_ALG_CBC_MAC:
		return SE_CRYPTO_CFG_CBC_MAC;
	default:
		break;
	}

	return -EINVAL;
}

static int tegra234_aes_cfg(u32 alg, bool encrypt)
{
	switch (alg) {
	case SE_ALG_CBC:
	case SE_ALG_ECB:
	case SE_ALG_XTS:
	case SE_ALG_CTR:
		if (encrypt)
			return SE_CFG_AES_ENCRYPT;
		else
			return SE_CFG_AES_DECRYPT;

	case SE_ALG_GMAC:
		if (encrypt)
			return SE_CFG_GMAC_ENCRYPT;
		else
			return SE_CFG_GMAC_DECRYPT;

	case SE_ALG_GCM:
		if (encrypt)
			return SE_CFG_GCM_ENCRYPT;
		else
			return SE_CFG_GCM_DECRYPT;

	case SE_ALG_GCM_FINAL:
		if (encrypt)
			return SE_CFG_GCM_FINAL_ENCRYPT;
		else
			return SE_CFG_GCM_FINAL_DECRYPT;

	case SE_ALG_CMAC:
		return SE_CFG_CMAC;

	case SE_ALG_CBC_MAC:
		return SE_AES_ENC_ALG_AES_ENC |
		       SE_AES_DST_HASH_REG;
	}
	return -EINVAL;
}

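/*
 * Build the host1x opcode stream for one AES operation: program the
 * linear counter (IV) if present, the last-block/residual-bit info,
 * config and crypto_config, the source and destination DMA addresses
 * (in place on the bounce buffer), then the START operation and a
 * syncpoint increment to signal completion. Returns the number of
 * command words written.
 */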
static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
				       struct tegra_aes_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;
	dma_addr_t addr = rctx->datbuf.addr;

	data_count = rctx->len / AES_BLOCK_SIZE;
	res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;

	/*
	 * The hardware processes data_count + 1 blocks, so reduce the
	 * count by one block if there is no residue.
	 */
	if (!res_bits)
		data_count--;

	if (rctx->iv) {
		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
			cpuvaddr[i++] = rctx->iv[j];
	}

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address setting */
	cpuvaddr[i++] = lower_32_bits(addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);

	/* Destination address setting */
	cpuvaddr[i++] = lower_32_bits(addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
			SE_ADDR_HI_SZ(rctx->len);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

	return i;
}

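/*
 * crypto_engine callback: runs one skcipher request. The data is copied
 * into a DMA-coherent bounce buffer, padded to the AES block size for
 * non-XTS modes (XTS passes the residual bits to the hardware instead),
 * processed in place by the SE, and copied back to the destination
 * scatterlist.
 */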
static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen, key1_id, key2_id;
	int ret;

	rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
	rctx->len = req->cryptlen;
	key1_id = ctx->key1_id;
	key2_id = ctx->key2_id;

	/* Pad input to AES block size */
	if (ctx->alg != SE_ALG_XTS) {
		if (rctx->len % AES_BLOCK_SIZE)
			rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
	}

	rctx->datbuf.size = rctx->len;
	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf) {
		ret = -ENOMEM;
		goto out_finalize;
	}

	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);

	rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);

	if (!key1_id) {
		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
						    ctx->keylen, ctx->alg, &key1_id);
		if (ret)
			goto out;
	}

	rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);

	if (ctx->alg == SE_ALG_XTS) {
		if (!key2_id) {
			ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
							    ctx->keylen, ctx->alg, &key2_id);
			if (ret)
				goto out;
		}

		rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
	}

	/* Prepare the command and submit for execution */
	cmdlen = tegra_aes_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);

	/* Copy the result */
	tegra_aes_update_iv(req, ctx);
	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);

out:
	/* Free the buffer */
	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
			  rctx->datbuf.buf, rctx->datbuf.addr);

	if (tegra_key_is_reserved(key1_id))
		tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);

	if (tegra_key_is_reserved(key2_id))
		tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);

out_finalize:
	local_bh_disable();
	crypto_finalize_skcipher_request(se->engine, req, ret);
	local_bh_enable();

	return 0;
}

static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));

	ctx->ivsize = crypto_skcipher_ivsize(tfm);
	ctx->se = se_alg->se_dev;
	ctx->key1_id = 0;
	ctx->key2_id = 0;
	ctx->keylen = 0;

	algname = crypto_tfm_alg_name(&tfm->base);
	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	return 0;
}

static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
{
	struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	if (ctx->key1_id)
		tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);

	if (ctx->key2_id)
		tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
}

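/*
 * Load the key into an SE keyslot. If tegra_key_submit() cannot place
 * the key in a hardware keyslot, the key is kept in the context instead
 * and loaded into a reserved keyslot for the duration of each request
 * (see tegra_aes_do_one_req()).
 */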
static int tegra_aes_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
	if (ret) {
		ctx->keylen = keylen;
		memcpy(ctx->key1, key, keylen);
	}

	return 0;
}

static int tegra_xts_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 len = keylen / 2;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret || aes_check_keylen(len)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	ret = tegra_key_submit(ctx->se, key, len,
			       ctx->alg, &ctx->key1_id);
	if (ret) {
		ctx->keylen = len;
		memcpy(ctx->key1, key, len);
	}

	ret = tegra_key_submit(ctx->se, key + len, len,
			       ctx->alg, &ctx->key2_id);
	if (ret) {
		ctx->keylen = len;
		memcpy(ctx->key2, key + len, len);
	}

	return 0;
}

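/*
 * Build the key access control (KAC) manifest word describing how a key
 * may be used: owner (non-secure user), purpose (encrypt, XTS, GCM or
 * CMAC) and key size.
 */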
static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
{
	int manifest;

	manifest = SE_KAC_USER_NS;

	switch (alg) {
	case SE_ALG_CBC:
	case SE_ALG_ECB:
	case SE_ALG_CTR:
		manifest |= SE_KAC_ENC;
		break;
	case SE_ALG_XTS:
		manifest |= SE_KAC_XTS;
		break;
	case SE_ALG_GCM:
		manifest |= SE_KAC_GCM;
		break;
	case SE_ALG_CMAC:
		manifest |= SE_KAC_CMAC;
		break;
	case SE_ALG_CBC_MAC:
		manifest |= SE_KAC_ENC;
		break;
	default:
		return -EINVAL;
	}

	switch (keylen) {
	case AES_KEYSIZE_128:
		manifest |= SE_KAC_SIZE_128;
		break;
	case AES_KEYSIZE_192:
		manifest |= SE_KAC_SIZE_192;
		break;
	case AES_KEYSIZE_256:
		manifest |= SE_KAC_SIZE_256;
		break;
	default:
		return -EINVAL;
	}

	return manifest;
}

static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm;
	struct tegra_aes_ctx *ctx;
	struct tegra_aes_reqctx *rctx;

	tfm = crypto_skcipher_reqtfm(req);
	ctx = crypto_skcipher_ctx(tfm);
	rctx = skcipher_request_ctx(req);

	if (ctx->alg != SE_ALG_XTS) {
		if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
			dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
			return -EINVAL;
		}
	} else if (req->cryptlen < XTS_BLOCK_SIZE) {
		dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
		return -EINVAL;
	}

	if (!req->cryptlen)
		return 0;

	rctx->encrypt = encrypt;

	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}

static int tegra_aes_encrypt(struct skcipher_request *req)
{
	return tegra_aes_crypt(req, true);
}

static int tegra_aes_decrypt(struct skcipher_request *req)
{
	return tegra_aes_crypt(req, false);
}

static struct tegra_se_alg tegra_aes_algs[] = {
	{
		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey	= tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize	= AES_BLOCK_SIZE,
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey	= tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.base = {
				.cra_name = "ecb(aes)",
				.cra_driver_name = "ecb-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize	= AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request	= tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey	= tegra_xts_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize	= AES_BLOCK_SIZE,
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize	   = sizeof(struct tegra_aes_ctx),
				.cra_alignmask	   = (__alignof__(u64) - 1),
				.cra_module	   = THIS_MODULE,
			},
		}
	},
};

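/*
 * GCM is executed in up to three hardware passes: GMAC over the
 * associated data, GCM encrypt/decrypt over the payload, and a final
 * pass that folds in the lengths and produces the authentication tag.
 * The helpers below emit the opcode stream for each pass.
 */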
static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
					struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	data_count = (rctx->assoclen / AES_BLOCK_SIZE);
	res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;

	/*
	 * The hardware processes data_count + 1 blocks, so reduce the
	 * count by one block if there is no residue.
	 */
	if (!res_bits)
		data_count--;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->assoclen);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
			SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
					     struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr, op;

	data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
	res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
	     SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/*
	 * If there is no associated data, this is the first pass,
	 * so it must carry the INIT command.
	 */
	if (!rctx->assoclen)
		op |= SE_AES_OP_INIT;

	/*
	 * The hardware processes data_count + 1 blocks, so reduce the
	 * count by one block if there is no residue.
	 */
	if (!res_bits)
		data_count--;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source Address */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->cryptlen);

	/* Destination Address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->cryptlen);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
	return i;
}

static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
				    struct tegra_aead_reqctx *rctx)
{
	unsigned int i = 0, j;
	u32 op;

	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
	     SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/*
	 * Set INIT for a zero-sized vector, since no earlier pass has run.
	 */
	if (!rctx->assoclen && !rctx->cryptlen)
		op |= SE_AES_OP_INIT;

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
	cpuvaddr[i++] = rctx->assoclen * 8;
	cpuvaddr[i++] = 0;

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
	cpuvaddr[i++] = rctx->cryptlen * 8;
	cpuvaddr[i++] = 0;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;
	cpuvaddr[i++] = 0;
	cpuvaddr[i++] = 0;

	/* Destination Address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

	return i;
}

static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen;

	scatterwalk_map_and_copy(rctx->inbuf.buf,
				 rctx->src_sg, 0, rctx->assoclen, 0);

	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
			      SE_AES_KEY_INDEX(rctx->key_id);

	cmdlen = tegra_gmac_prep_cmd(ctx, rctx);

	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}

static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	int cmdlen, ret;

	scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
				 rctx->assoclen, rctx->cryptlen, 0);

	rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
			      SE_AES_KEY_INDEX(rctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
	if (ret)
		return ret;

	/* Copy the result */
	scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
				 rctx->assoclen, rctx->cryptlen, 1);

	return 0;
}

static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;
	int cmdlen, ret, offset;

	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
			      SE_AES_KEY_INDEX(rctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
	if (ret)
		return ret;

	if (rctx->encrypt) {
		/* Copy the result */
		offset = rctx->assoclen + rctx->cryptlen;
		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
					 offset, rctx->authsize, 1);
	}

	return 0;
}

static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	unsigned int offset;
	u8 mac[16];

	offset = rctx->assoclen + rctx->cryptlen;
	scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);

	if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
		return -EBADMSG;

	return 0;
}

static inline int tegra_ccm_check_iv(const u8 *iv)
{
	/*
	 * iv[0] gives the value of q - 1.
	 * 2 <= q <= 8 as per NIST 800-38C notation;
	 * 2 <= L <= 8, so 1 <= L' <= 7 as per RFC 3610 notation.
	 */
	if (iv[0] < 1 || iv[0] > 7) {
		pr_debug("ccm_check_iv failed %d\n", iv[0]);
		return -EINVAL;
	}

	return 0;
}

static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
					  struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, i = 0;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL |
			SE_AES_OP_LASTBUF | SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
				       struct tegra_aead_reqctx *rctx)
{
	unsigned int i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address setting */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	/* Destination address setting */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
		rctx->config, rctx->crypto_config);

	return i;
}

static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	int cmdlen;

	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
						      rctx->encrypt) |
						      SE_AES_KEY_INDEX(rctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);

	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}

static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

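/*
 * Build the B0 block per NIST SP 800-38C / RFC 3610. Layout of the
 * 16 bytes: flags octet (Adata bit, encoded tag length t, and the
 * length-field size q carried in iv[0]), followed by the nonce N,
 * followed by the message length Q in the trailing q bytes:
 *
 *   B0 = Flags(1) || N(15 - q) || Q(q)
 */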
static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
{
	unsigned int q, t;
	u8 *q_ptr, *iv = (u8 *)rctx->iv;

	memcpy(nonce, rctx->iv, 16);

	/*** 1. Prepare Flags Octet ***/

	/* Encode t (mac length) */
	t = rctx->authsize;
	nonce[0] |= (((t - 2) / 2) << 3);

	/* Adata */
	if (rctx->assoclen)
		nonce[0] |= (1 << 6);

	/*** Encode Q - message length ***/
	q = iv[0] + 1;
	q_ptr = nonce + 16 - q;

	return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
}

static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
{
	int len = 0;

	/*
	 * Add control info for associated data as per
	 * RFC 3610 and NIST Special Publication 800-38C.
	 */
	if (a < 65280) {
		*(__be16 *)adata = cpu_to_be16(a);
		len = 2;
	} else {
		*(__be16 *)adata = cpu_to_be16(0xfffe);
		*(__be32 *)&adata[2] = cpu_to_be32(a);
		len = 6;
	}

	return len;
}

static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
{
	unsigned int padlen = 16 - (len % 16);
	u8 padding[16] = {0};

	if (padlen == 16)
		return 0;

	memcpy(buf, padding, padlen);

	return padlen;
}

static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
{
	unsigned int alen = 0, offset = 0;
	u8 nonce[16], adata[16];
	int ret;

	ret = tegra_ccm_format_nonce(rctx, nonce);
	if (ret)
		return ret;

	memcpy(rctx->inbuf.buf, nonce, 16);
	offset = 16;

	if (rctx->assoclen) {
		alen = tegra_ccm_format_adata(adata, rctx->assoclen);
		memcpy(rctx->inbuf.buf + offset, adata, alen);
		offset += alen;

		scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
					 rctx->src_sg, 0, rctx->assoclen, 0);

		offset += rctx->assoclen;
		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
						rctx->assoclen + alen);
	}

	return offset;
}

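/*
 * Read the CBC-MAC result from the SE result registers and clear them.
 * For encryption the tag is stashed in rctx->authdata for the CTR pass;
 * for decryption it is compared, in constant time, against the tag
 * recovered by the CTR pass.
 */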
static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	u32 result[16];
	int i, ret;

	/* Read and clear Result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		result[i] = readl(se->base + se->hw->regs->result + (i * 4));

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	if (rctx->encrypt) {
		memcpy(rctx->authdata, result, rctx->authsize);
	} else {
		ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
		if (ret)
			return -EBADMSG;
	}

	return 0;
}

static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	/* Copy result */
	scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
				 rctx->assoclen, rctx->cryptlen, 1);

	if (rctx->encrypt)
		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
					 rctx->assoclen + rctx->cryptlen,
					 rctx->authsize, 1);
	else
		memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);

	return 0;
}

static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	struct scatterlist *sg;
	int offset, ret;

	offset = tegra_ccm_format_blocks(rctx);
	if (offset < 0)
		return -EINVAL;

	/* Copy plain text to the buffer */
	sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;

	scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
				 sg, rctx->assoclen,
				 rctx->cryptlen, 0);
	offset += rctx->cryptlen;
	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);

	rctx->inbuf.size = offset;

	ret = tegra_ccm_do_cbcmac(ctx, rctx);
	if (ret)
		return ret;

	return tegra_ccm_mac_result(se, rctx);
}

static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen, offset = 0;
	struct scatterlist *sg = rctx->src_sg;
	int ret;

	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
			      SE_AES_KEY_INDEX(rctx->key_id);

	/* Copy authdata to the top of the buffer for encryption/decryption */
	if (rctx->encrypt)
		memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
	else
		scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
					 rctx->assoclen + rctx->cryptlen,
					 rctx->authsize, 0);

	offset += rctx->authsize;
	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);

	/* If there is no cryptlen, proceed straight to submitting the task */
	if (rctx->cryptlen) {
		scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
					 rctx->assoclen, rctx->cryptlen, 0);
		offset += rctx->cryptlen;
		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
	}

	rctx->inbuf.size = offset;

	/* Prepare command and submit */
	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
	if (ret)
		return ret;

	return tegra_ccm_ctr_result(se, rctx);
}

static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
				struct tegra_aead_reqctx *rctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u8 *iv = (u8 *)rctx->iv;
	int ret, i;

	rctx->src_sg = req->src;
	rctx->dst_sg = req->dst;
	rctx->assoclen = req->assoclen;
	rctx->authsize = crypto_aead_authsize(tfm);

	if (rctx->encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - rctx->authsize;

	memcpy(iv, req->iv, 16);

	ret = tegra_ccm_check_iv(iv);
	if (ret)
		return ret;

	/*
	 * Note: RFC 3610 and NIST 800-38C require a counter (ctr_0) of
	 * zero to encrypt the auth tag.
	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* Clear any previous result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	return 0;
}

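/*
 * CCM is performed as two dependent passes: CBC-MAC for authentication
 * and CTR for confidentiality. Encryption MACs the plaintext first and
 * then encrypts (tag included); decryption must decrypt first and then
 * verify the MAC over the recovered plaintext.
 */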
static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int ret;

	ret = tegra_ccm_crypt_init(req, se, rctx);
	if (ret)
		goto out_finalize;

	rctx->key_id = ctx->key_id;

	/* Allocate buffers required */
	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
					     &rctx->inbuf.addr, GFP_KERNEL);
	if (!rctx->inbuf.buf) {
		ret = -ENOMEM;
		goto out_finalize;
	}

	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
					      &rctx->outbuf.addr, GFP_KERNEL);
	if (!rctx->outbuf.buf) {
		ret = -ENOMEM;
		goto out_free_inbuf;
	}

	if (!ctx->key_id) {
		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
						    ctx->keylen, ctx->alg, &rctx->key_id);
		if (ret)
			goto out;
	}

	if (rctx->encrypt) {
		/* CBC MAC Operation */
		ret = tegra_ccm_compute_auth(ctx, rctx);
		if (ret)
			goto out;

		/* CTR operation */
		ret = tegra_ccm_do_ctr(ctx, rctx);
		if (ret)
			goto out;
	} else {
		/* CTR operation */
		ret = tegra_ccm_do_ctr(ctx, rctx);
		if (ret)
			goto out;

		/* CBC MAC Operation */
		ret = tegra_ccm_compute_auth(ctx, rctx);
		if (ret)
			goto out;
	}

out:
	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
			  rctx->outbuf.buf, rctx->outbuf.addr);

out_free_inbuf:
	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
			  rctx->inbuf.buf, rctx->inbuf.addr);

	if (tegra_key_is_reserved(rctx->key_id))
		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);

out_finalize:
	local_bh_disable();
	crypto_finalize_aead_request(ctx->se->engine, req, ret);
	local_bh_enable();

	return 0;
}

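/*
 * crypto_engine callback for GCM: J0 is formed from the 96-bit IV with
 * a 32-bit counter of 1, then the GMAC (associated data), GCM (payload)
 * and GCM_FINAL (tag) passes are run in turn, skipping passes whose
 * input length is zero.
 */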
static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
	int ret;

	rctx->src_sg = req->src;
	rctx->dst_sg = req->dst;
	rctx->assoclen = req->assoclen;
	rctx->authsize = crypto_aead_authsize(tfm);

	if (rctx->encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - ctx->authsize;

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	rctx->iv[3] = (1 << 24);

	rctx->key_id = ctx->key_id;

	/* Allocate buffers required */
	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
					     &rctx->inbuf.addr, GFP_KERNEL);
	if (!rctx->inbuf.buf) {
		ret = -ENOMEM;
		goto out_finalize;
	}

	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
					      &rctx->outbuf.addr, GFP_KERNEL);
	if (!rctx->outbuf.buf) {
		ret = -ENOMEM;
		goto out_free_inbuf;
	}

	if (!ctx->key_id) {
		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
						    ctx->keylen, ctx->alg, &rctx->key_id);
		if (ret)
			goto out;
	}

	/* If there is associated data perform GMAC operation */
	if (rctx->assoclen) {
		ret = tegra_gcm_do_gmac(ctx, rctx);
		if (ret)
			goto out;
	}

	/* GCM Encryption/Decryption operation */
	if (rctx->cryptlen) {
		ret = tegra_gcm_do_crypt(ctx, rctx);
		if (ret)
			goto out;
	}

	/* GCM_FINAL operation */
	ret = tegra_gcm_do_final(ctx, rctx);
	if (ret)
		goto out;

	if (!rctx->encrypt)
		ret = tegra_gcm_do_verify(ctx->se, rctx);

out:
	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
			  rctx->outbuf.buf, rctx->outbuf.addr);

out_free_inbuf:
	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
			  rctx->inbuf.buf, rctx->inbuf.addr);

	if (tegra_key_is_reserved(rctx->key_id))
		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);

out_finalize:
	local_bh_disable();
	crypto_finalize_aead_request(ctx->se->engine, req, ret);
	local_bh_enable();

	return 0;
}

static int tegra_aead_cra_init(struct crypto_aead *tfm)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	algname = crypto_tfm_alg_name(&tfm->base);

	se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);

	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));

	ctx->se = se_alg->se_dev;
	ctx->key_id = 0;
	ctx->keylen = 0;

	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	return 0;
}

static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);

	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	ctx->authsize = authsize;

	return 0;
}

static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	ret = crypto_gcm_check_authsize(authsize);
	if (ret)
		return ret;

	ctx->authsize = authsize;

	return 0;
}

static void tegra_aead_cra_exit(struct crypto_aead *tfm)
{
	struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	if (ctx->key_id)
		tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);

	rctx->encrypt = encrypt;

	return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
}

static int tegra_aead_encrypt(struct aead_request *req)
{
	return tegra_aead_crypt(req, true);
}

static int tegra_aead_decrypt(struct aead_request *req)
{
	return tegra_aead_crypt(req, false);
}

static int tegra_aead_setkey(struct crypto_aead *tfm,
			     const u8 *key, u32 keylen)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
	if (ret) {
		ctx->keylen = keylen;
		memcpy(ctx->key, key, keylen);
	}

	return 0;
}

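/*
 * Build the opcode stream for one CMAC pass. The first task of a
 * request loads a zero IV; later tasks resume from the intermediate
 * result that was pasted back into the result registers.
 * SE_AES_OP_FINAL is set only for the closing (non-update) task, which
 * also permits a partial last block via the residual-bit count.
 */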
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
					struct tegra_cmac_reqctx *rctx)
{
	unsigned int data_count, res_bits = 0, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr, op;

	data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);

	op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;

	if (!(rctx->task & SHA_UPDATE)) {
		op |= SE_AES_OP_FINAL;
		res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
	}

	if (!res_bits && data_count)
		data_count--;

	if (rctx->task & SHA_FIRST) {
		rctx->task &= ~SHA_FIRST;

		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
		/* Load 0 IV */
		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
			cpuvaddr[i++] = 0;
	}

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source Address */
	cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->datbuf.size);
	cpuvaddr[i++] = 0;
	cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
	int i;

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
}

static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
	int i;

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(rctx->result[i],
		       se->base + se->hw->regs->result + (i * 4));
}

static int tegra_cmac_do_init(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int i;

	rctx->total_len = 0;
	rctx->datbuf.size = 0;
	rctx->residue.size = 0;
	rctx->key_id = ctx->key_id;
	rctx->task |= SHA_FIRST;
	rctx->blk_size = crypto_ahash_blocksize(tfm);

	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
					       &rctx->residue.addr, GFP_KERNEL);
	if (!rctx->residue.buf)
		return -ENOMEM;

	rctx->residue.size = 0;

	/* Clear any previous result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	return 0;
}

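/*
 * Process full blocks of input, always holding back the last block plus
 * any remainder in the residue buffer: the final block must go through
 * the FINAL operation in tegra_cmac_do_final() so that CMAC subkey
 * padding is applied there. Intermediate results are copied out of the
 * result registers after each task and pasted back before the next one.
 */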
static int tegra_cmac_do_update(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	unsigned int nblks, nresidue, cmdlen;
	int ret;

	if (!req->nbytes)
		return 0;

	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;

	/*
	 * Reserve the last block as residue, to be processed in final().
	 */
	if (!nresidue && nblks) {
		nresidue += rctx->blk_size;
		nblks--;
	}

	rctx->src_sg = req->src;
	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
	rctx->total_len += rctx->datbuf.size;
	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
	rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);

	/*
	 * Keep one block and the residue bytes in the residue buffer and
	 * return. These bytes will be processed in final().
	 */
	if (nblks < 1) {
		scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
					 rctx->src_sg, 0, req->nbytes, 0);

		rctx->residue.size += req->nbytes;
		return 0;
	}

	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		return -ENOMEM;

	/* Copy the previous residue first */
	if (rctx->residue.size)
		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);

	scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
				 rctx->src_sg, 0, req->nbytes - nresidue, 0);

	scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
				 req->nbytes - nresidue, nresidue, 0);

	/* Update residue value with the residue after current block */
	rctx->residue.size = nresidue;

	/*
	 * If this is not the first task, paste the previously copied
	 * intermediate results into the registers so that they get picked up.
	 */
	if (!(rctx->task & SHA_FIRST))
		tegra_cmac_paste_result(ctx->se, rctx);

	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);

	tegra_cmac_copy_result(ctx->se, rctx);

	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
			  rctx->datbuf.buf, rctx->datbuf.addr);

	return ret;
}

static int tegra_cmac_do_final(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	u32 *result = (u32 *)req->result;
	int ret = 0, i, cmdlen;

	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
		ret = crypto_shash_tfm_digest(ctx->fallback_tfm,
					      NULL, 0, req->result);
		goto out_free;
	}

	if (rctx->residue.size) {
		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
						      &rctx->datbuf.addr, GFP_KERNEL);
		if (!rctx->datbuf.buf) {
			ret = -ENOMEM;
			goto out_free;
		}

		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
	}

	rctx->datbuf.size = rctx->residue.size;
	rctx->total_len += rctx->residue.size;
	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);

	/*
	 * If this is not the first task, paste the previously copied
	 * intermediate results into the registers so that they get picked up.
	 */
	if (!(rctx->task & SHA_FIRST))
		tegra_cmac_paste_result(ctx->se, rctx);

	/* Prepare command and submit */
	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
	if (ret)
		goto out;

	/* Read and clear Result register */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		result[i] = readl(se->base + se->hw->regs->result + (i * 4));

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

out:
	if (rctx->residue.size)
		dma_free_coherent(se->dev, rctx->datbuf.size,
				  rctx->datbuf.buf, rctx->datbuf.addr);
out_free:
	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
			  rctx->residue.buf, rctx->residue.addr);
	return ret;
}

static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int ret = 0;

	if (rctx->task & SHA_INIT) {
		ret = tegra_cmac_do_init(req);
		if (ret)
			goto out;

		rctx->task &= ~SHA_INIT;
	}

	if (!ctx->key_id) {
		ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
						    ctx->keylen, ctx->alg, &rctx->key_id);
		if (ret)
			goto out;
	}

	if (rctx->task & SHA_UPDATE) {
		ret = tegra_cmac_do_update(req);
		if (ret)
			goto out;

		rctx->task &= ~SHA_UPDATE;
	}

	if (rctx->task & SHA_FINAL) {
		ret = tegra_cmac_do_final(req);
		if (ret)
			goto out;

		rctx->task &= ~SHA_FINAL;
	}
out:
	if (tegra_key_is_reserved(rctx->key_id))
		tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);

	local_bh_disable();
	crypto_finalize_hash_request(se->engine, req, ret);
	local_bh_enable();

	return 0;
}

static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
				     const char *algname)
{
	unsigned int statesize;

	ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback_tfm)) {
		dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
		ctx->fallback_tfm = NULL;
		return;
	}

	statesize = crypto_shash_statesize(ctx->fallback_tfm);

	if (statesize > sizeof(struct tegra_cmac_reqctx))
		crypto_ahash_set_statesize(tfm, statesize);
}

static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	algname = crypto_tfm_alg_name(tfm);
	se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);

	crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));

	ctx->se = se_alg->se_dev;
	ctx->key_id = 0;
	ctx->keylen = 0;

	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	tegra_cmac_init_fallback(ahash_tfm, ctx, algname);

	return 0;
}

static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback_tfm)
		crypto_free_shash(ctx->fallback_tfm);

	tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

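/*
 * Program the key into a hardware keyslot. If no slot can be claimed
 * now (e.g. all slots are in use), keep a software copy so that
 * tegra_cmac_do_one_req() can load it into a reserved slot for the
 * duration of each request.
 */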
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ret;

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	if (ctx->fallback_tfm)
		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);

	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
	if (ret) {
		ctx->keylen = keylen;
		memcpy(ctx->key, key, keylen);
	}

	return 0;
}

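/*
 * The ahash entry points below only record which phases a request
 * needs in rctx->task and hand the request to the crypto engine;
 * the actual hardware work happens in tegra_cmac_do_one_req().
 */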
static int tegra_cmac_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task = SHA_INIT;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_UPDATE;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_UPDATE | SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

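/*
 * Export/import serialize the whole request context, which is why
 * halg.statesize below is sizeof(struct tegra_cmac_reqctx) (or the
 * fallback's statesize, if tegra_cmac_init_fallback() found that to
 * be larger).
 */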
static int tegra_cmac_export(struct ahash_request *req, void *out)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int tegra_cmac_import(struct ahash_request *req, const void *in)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(*rctx));

	return 0;
}

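/*
 * Illustrative only (not part of this driver): once the arrays below
 * are registered, the algorithms are reached through the generic
 * crypto API, roughly as follows for "cmac(aes)". Error handling is
 * omitted; done_cb and priv are hypothetical caller-side names.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *
 *	tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				   CRYPTO_TFM_REQ_MAY_SLEEP, done_cb, priv);
 *	ahash_request_set_crypt(req, sg, mac, msglen);
 *	crypto_ahash_digest(req);
 *
 * Since these algs set CRYPTO_ALG_ASYNC, the digest completes
 * asynchronously via done_cb once the engine has run the request.
 */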
static struct tegra_se_alg tegra_aead_algs[] = {
	{
		.alg.aead.op.do_one_request = tegra_gcm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey = tegra_aead_setkey,
			.setauthsize = tegra_gcm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize	= GCM_AES_IV_SIZE,
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.aead.op.do_one_request = tegra_ccm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey	= tegra_aead_setkey,
			.setauthsize = tegra_ccm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize	= AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}
};

static struct tegra_se_alg tegra_cmac_algs[] = {
	{
		.alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
		.alg.ahash.base = {
			.init = tegra_cmac_init,
			.setkey	= tegra_cmac_setkey,
			.update = tegra_cmac_update,
			.final = tegra_cmac_final,
			.finup = tegra_cmac_finup,
			.digest = tegra_cmac_digest,
			.export = tegra_cmac_export,
			.import = tegra_cmac_import,
			.halg.digestsize = AES_BLOCK_SIZE,
			.halg.statesize = sizeof(struct tegra_cmac_reqctx),
			.halg.base = {
				.cra_name = "cmac(aes)",
				.cra_driver_name = "tegra-se-cmac",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = tegra_cmac_cra_init,
				.cra_exit = tegra_cmac_cra_exit,
			}
		}
	}
};

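/*
 * Register the skcipher, AEAD and CMAC algorithms with the crypto
 * engine. On failure, everything registered so far is unregistered
 * in reverse order through the chained error labels.
 */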
int tegra_init_aes(struct tegra_se *se)
{
	struct aead_engine_alg *aead_alg;
	struct ahash_engine_alg *ahash_alg;
	struct skcipher_engine_alg *sk_alg;
	int i, ret;

	se->manifest = tegra_aes_kac_manifest;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
		sk_alg = &tegra_aes_algs[i].alg.skcipher;
		tegra_aes_algs[i].se_dev = se;

		ret = crypto_engine_register_skcipher(sk_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				sk_alg->base.base.cra_name);
			goto err_aes;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
		aead_alg = &tegra_aead_algs[i].alg.aead;
		tegra_aead_algs[i].se_dev = se;

		ret = crypto_engine_register_aead(aead_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				aead_alg->base.base.cra_name);
			goto err_aead;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
		ahash_alg = &tegra_cmac_algs[i].alg.ahash;
		tegra_cmac_algs[i].se_dev = se;

		ret = crypto_engine_register_ahash(ahash_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				ahash_alg->base.halg.base.cra_name);
			goto err_cmac;
		}
	}

	return 0;

err_cmac:
	while (i--)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);

	i = ARRAY_SIZE(tegra_aead_algs);
err_aead:
	while (i--)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	i = ARRAY_SIZE(tegra_aes_algs);
err_aes:
	while (i--)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	return ret;
}

void tegra_deinit_aes(struct tegra_se *se)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
}