// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
 * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "tegra-se.h"

struct tegra_aes_ctx {
	struct tegra_se *se;
	u32 alg;
	u32 ivsize;
	u32 key1_id;
	u32 key2_id;
};

struct tegra_aes_reqctx {
	struct tegra_se_datbuf datbuf;
	bool encrypt;
	u32 config;
	u32 crypto_config;
	u32 len;
	u32 *iv;
};

struct tegra_aead_ctx {
	struct tegra_se *se;
	unsigned int authsize;
	u32 alg;
	u32 keylen;
	u32 key_id;
};

struct tegra_aead_reqctx {
	struct tegra_se_datbuf inbuf;
	struct tegra_se_datbuf outbuf;
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	unsigned int assoclen;
	unsigned int cryptlen;
	unsigned int authsize;
	bool encrypt;
	u32 config;
	u32 crypto_config;
	u32 key_id;
	u32 iv[4];
	u8 authdata[16];
};

struct tegra_cmac_ctx {
	struct tegra_se *se;
	unsigned int alg;
	u32 key_id;
	struct crypto_shash *fallback_tfm;
};

struct tegra_cmac_reqctx {
	struct scatterlist *src_sg;
	struct tegra_se_datbuf datbuf;
	struct tegra_se_datbuf residue;
	unsigned int total_len;
	unsigned int blk_size;
	unsigned int task;
	u32 crypto_config;
	u32 config;
	u32 key_id;
	u32 *iv;
	u32 result[CMAC_RESULT_REG_COUNT];
};

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & 0xff;
		nums >>= 8;
	} while (bits && nums);
}

static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
	unsigned int offset;

	offset = req->cryptlen - ctx->ivsize;

	if (rctx->encrypt)
		memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
	else
		scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
}

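/*
 * Propagate the IV back into the request, as the skcipher API expects:
 * CBC hands back the last ciphertext block, while CTR advances the
 * counter by the number of blocks just processed.
 */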
static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
{
	int num;

	if (ctx->alg == SE_ALG_CBC) {
		tegra_cbc_iv_copyback(req, ctx);
	} else if (ctx->alg == SE_ALG_CTR) {
		num = req->cryptlen / ctx->ivsize;
		if (req->cryptlen % ctx->ivsize)
			num++;

		ctr_iv_inc(req->iv, ctx->ivsize, num);
	}
}

static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
{
	switch (alg) {
	case SE_ALG_CMAC:
	case SE_ALG_GMAC:
	case SE_ALG_GCM:
	case SE_ALG_GCM_FINAL:
		return 0;
	case SE_ALG_CBC:
		if (encrypt)
			return SE_CRYPTO_CFG_CBC_ENCRYPT;
		else
			return SE_CRYPTO_CFG_CBC_DECRYPT;
	case SE_ALG_ECB:
		if (encrypt)
			return SE_CRYPTO_CFG_ECB_ENCRYPT;
		else
			return SE_CRYPTO_CFG_ECB_DECRYPT;
	case SE_ALG_XTS:
		if (encrypt)
			return SE_CRYPTO_CFG_XTS_ENCRYPT;
		else
			return SE_CRYPTO_CFG_XTS_DECRYPT;

	case SE_ALG_CTR:
		return SE_CRYPTO_CFG_CTR;
	case SE_ALG_CBC_MAC:
		return SE_CRYPTO_CFG_CBC_MAC;

	default:
		break;
	}

	return -EINVAL;
}

static int tegra234_aes_cfg(u32 alg, bool encrypt)
{
	switch (alg) {
	case SE_ALG_CBC:
	case SE_ALG_ECB:
	case SE_ALG_XTS:
	case SE_ALG_CTR:
		if (encrypt)
			return SE_CFG_AES_ENCRYPT;
		else
			return SE_CFG_AES_DECRYPT;

	case SE_ALG_GMAC:
		if (encrypt)
			return SE_CFG_GMAC_ENCRYPT;
		else
			return SE_CFG_GMAC_DECRYPT;

	case SE_ALG_GCM:
		if (encrypt)
			return SE_CFG_GCM_ENCRYPT;
		else
			return SE_CFG_GCM_DECRYPT;

	case SE_ALG_GCM_FINAL:
		if (encrypt)
			return SE_CFG_GCM_FINAL_ENCRYPT;
		else
			return SE_CFG_GCM_FINAL_DECRYPT;

	case SE_ALG_CMAC:
		return SE_CFG_CMAC;

	case SE_ALG_CBC_MAC:
		return SE_AES_ENC_ALG_AES_ENC |
		       SE_AES_DST_HASH_REG;
	}
	return -EINVAL;
}

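/*
 * Build the host1x command stream for one AES operation: program the
 * linear counter registers when an IV is present, set the last-block
 * count and residual bits, write config/crypto_config together with
 * the source and destination DMA addresses, trigger the engine and
 * finish with a syncpoint increment that signals completion.
 */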
static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
				       struct tegra_aes_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;
	dma_addr_t addr = rctx->datbuf.addr;

	data_count = rctx->len / AES_BLOCK_SIZE;
	res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;

	/*
	 * The hardware processes data_count + 1 blocks, so reduce the
	 * count by one block if there is no residue.
	 */
	if (!res_bits)
		data_count--;

	if (rctx->iv) {
		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
			cpuvaddr[i++] = rctx->iv[j];
	}

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address setting */
	cpuvaddr[i++] = lower_32_bits(addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);

	/* Destination address setting */
	cpuvaddr[i++] = lower_32_bits(addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
			SE_ADDR_HI_SZ(rctx->len);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

	return i;
}

static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen;
	int ret;

	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		return -ENOMEM;

	rctx->datbuf.size = SE_AES_BUFLEN;
	rctx->iv = (u32 *)req->iv;
	rctx->len = req->cryptlen;

	/* Pad input to AES block size */
	if (ctx->alg != SE_ALG_XTS) {
		if (rctx->len % AES_BLOCK_SIZE)
			rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
	}

	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);

	/* Prepare the command and submit for execution */
	cmdlen = tegra_aes_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);

	/* Copy the result */
	tegra_aes_update_iv(req, ctx);
	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);

	/* Free the buffer */
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->datbuf.buf, rctx->datbuf.addr);

	crypto_finalize_skcipher_request(se->engine, req, ret);

	return 0;
}

static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));

	ctx->ivsize = crypto_skcipher_ivsize(tfm);
	ctx->se = se_alg->se_dev;
	ctx->key1_id = 0;
	ctx->key2_id = 0;

	algname = crypto_tfm_alg_name(&tfm->base);
	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	return 0;
}

static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
{
	struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	if (ctx->key1_id)
		tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);

	if (ctx->key2_id)
		tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
}

static int tegra_aes_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
}

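/*
 * XTS takes a double-length key: the first half is the data-encryption
 * key and the second half the tweak key. Each half is validated and
 * loaded into its own hardware keyslot (key1_id/key2_id).
 */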
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
			    const u8 *key, u32 keylen)
{
	struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 len = keylen / 2;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret || aes_check_keylen(len)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	ret = tegra_key_submit(ctx->se, key, len,
			       ctx->alg, &ctx->key1_id);
	if (ret)
		return ret;

	return tegra_key_submit(ctx->se, key + len, len,
				ctx->alg, &ctx->key2_id);
}

static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
{
	int manifest;

	manifest = SE_KAC_USER_NS;

	switch (alg) {
	case SE_ALG_CBC:
	case SE_ALG_ECB:
	case SE_ALG_CTR:
		manifest |= SE_KAC_ENC;
		break;
	case SE_ALG_XTS:
		manifest |= SE_KAC_XTS;
		break;
	case SE_ALG_GCM:
		manifest |= SE_KAC_GCM;
		break;
	case SE_ALG_CMAC:
		manifest |= SE_KAC_CMAC;
		break;
	case SE_ALG_CBC_MAC:
		manifest |= SE_KAC_ENC;
		break;
	default:
		return -EINVAL;
	}

	switch (keylen) {
	case AES_KEYSIZE_128:
		manifest |= SE_KAC_SIZE_128;
		break;
	case AES_KEYSIZE_192:
		manifest |= SE_KAC_SIZE_192;
		break;
	case AES_KEYSIZE_256:
		manifest |= SE_KAC_SIZE_256;
		break;
	default:
		return -EINVAL;
	}

	return manifest;
}

static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm;
	struct tegra_aes_ctx *ctx;
	struct tegra_aes_reqctx *rctx;

	tfm = crypto_skcipher_reqtfm(req);
	ctx = crypto_skcipher_ctx(tfm);
	rctx = skcipher_request_ctx(req);

	if (ctx->alg != SE_ALG_XTS) {
		if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
			dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
			return -EINVAL;
		}
	} else if (req->cryptlen < XTS_BLOCK_SIZE) {
		dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
		return -EINVAL;
	}

	if (!req->cryptlen)
		return 0;

	rctx->encrypt = encrypt;
	rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
	rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);

	if (ctx->key2_id)
		rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);

	return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}

static int tegra_aes_encrypt(struct skcipher_request *req)
{
	return tegra_aes_crypt(req, true);
}

static int tegra_aes_decrypt(struct skcipher_request *req)
{
	return tegra_aes_crypt(req, false);
}

static struct tegra_se_alg tegra_aes_algs[] = {
	{
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.base = {
				.cra_name = "ecb(aes)",
				.cra_driver_name = "ecb-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_aes_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-tegra",
				.cra_priority = 500,
				.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
		.alg.skcipher.base = {
			.init = tegra_aes_cra_init,
			.exit = tegra_aes_cra_exit,
			.setkey = tegra_xts_setkey,
			.encrypt = tegra_aes_encrypt,
			.decrypt = tegra_aes_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-tegra",
				.cra_priority = 500,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_aes_ctx),
				.cra_alignmask = (__alignof__(u64) - 1),
				.cra_module = THIS_MODULE,
			},
		}
	},
};

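/*
 * GCM is handled as up to three hardware operations: GMAC over the
 * associated data, GCM encryption/decryption over the payload and a
 * final GCM_FINAL pass that produces the authentication tag. Whichever
 * operation runs first carries SE_AES_OP_INIT.
 */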
static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
					struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	data_count = (rctx->assoclen / AES_BLOCK_SIZE);
	res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;

	/*
	 * The hardware processes data_count + 1 blocks, so reduce the
	 * count by one block if there is no residue.
	 */
	if (!res_bits)
		data_count--;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->assoclen);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
			SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
					     struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, res_bits, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr, op;

	data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
	res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
	     SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/*
	 * If there is no associated data, this is the first hardware
	 * operation of the request, so it must carry the INIT flag.
	 */
	if (!rctx->assoclen)
		op |= SE_AES_OP_INIT;

	/*
	 * The hardware processes data_count + 1 blocks, so reduce the
	 * count by one block if there is no residue.
	 */
	if (!res_bits)
		data_count--;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source Address */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->cryptlen);

	/* Destination Address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->cryptlen);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
	return i;
}

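/*
 * Final GCM pass: program the associated-data and payload lengths (in
 * bits) and the initial counter, then run the engine to emit the tag.
 * The hardware always writes a full 128-bit tag; a shorter authsize is
 * truncated when the result is copied back.
 */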
static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
				    struct tegra_aead_reqctx *rctx)
{
	unsigned int i = 0, j;
	u32 op;

	op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
	     SE_AES_OP_LASTBUF | SE_AES_OP_START;

	/*
	 * Set init for zero sized vector
	 */
	if (!rctx->assoclen && !rctx->cryptlen)
		op |= SE_AES_OP_INIT;

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
	cpuvaddr[i++] = rctx->assoclen * 8;
	cpuvaddr[i++] = 0;

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
	cpuvaddr[i++] = rctx->cryptlen * 8;
	cpuvaddr[i++] = 0;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;
	cpuvaddr[i++] = 0;
	cpuvaddr[i++] = 0;

	/* Destination Address */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);

	return i;
}

static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen;

	scatterwalk_map_and_copy(rctx->inbuf.buf,
				 rctx->src_sg, 0, rctx->assoclen, 0);

	rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	cmdlen = tegra_gmac_prep_cmd(ctx, rctx);

	return tegra_se_host1x_submit(se, cmdlen);
}

static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	int cmdlen, ret;

	scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
				 rctx->assoclen, rctx->cryptlen, 0);

	rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		return ret;

	/* Copy the result */
	scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
				 rctx->assoclen, rctx->cryptlen, 1);

	return 0;
}

static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;
	int cmdlen, ret, offset;

	rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		return ret;

	if (rctx->encrypt) {
		/* Copy the result */
		offset = rctx->assoclen + rctx->cryptlen;
		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
					 offset, rctx->authsize, 1);
	}

	return 0;
}

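/*
 * Compare the tag computed by GCM_FINAL (left in outbuf) against the
 * tag appended to the source scatterlist, using a constant-time
 * comparison to avoid leaking timing information.
 */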
static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	unsigned int offset;
	u8 mac[16];

	offset = rctx->assoclen + rctx->cryptlen;
	scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);

	if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
		return -EBADMSG;

	return 0;
}

static inline int tegra_ccm_check_iv(const u8 *iv)
{
	/*
	 * iv[0] gives the value of q-1.
	 * 2 <= q <= 8 per NIST SP 800-38C notation;
	 * 2 <= L <= 8, so 1 <= L' <= 7 per RFC 3610 notation.
	 */
	if (iv[0] < 1 || iv[0] > 7) {
		pr_debug("ccm_check_iv failed %d\n", iv[0]);
		return -EINVAL;
	}

	return 0;
}

static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
					  struct tegra_aead_reqctx *rctx)
{
	unsigned int data_count, i = 0;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL |
			SE_AES_OP_LASTBUF | SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
				       struct tegra_aead_reqctx *rctx)
{
	unsigned int i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr;

	cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
	for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
		cpuvaddr[i++] = rctx->iv[j];

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source address setting */
	cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	/* Destination address setting */
	cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->inbuf.size);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
			SE_AES_OP_START;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
		rctx->config, rctx->crypto_config);

	return i;
}

static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	int cmdlen;

	rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
						      rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	/* Prepare command and submit */
	cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);

	return tegra_se_host1x_submit(se, cmdlen);
}

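/*
 * Encode the message length big-endian into the last csize octets of
 * the block, forming the Q field of the CCM B0 block per NIST SP
 * 800-38C. Returns -EOVERFLOW if the length does not fit in csize
 * octets.
 */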
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

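/*
 * Build the CCM B0 block from the request IV: the flags octet encodes
 * the MAC length t and the Adata bit, and the trailing q octets carry
 * the message length (NIST SP 800-38C / RFC 3610).
 */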
static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
{
	unsigned int q, t;
	u8 *q_ptr, *iv = (u8 *)rctx->iv;

	memcpy(nonce, rctx->iv, 16);

	/*** 1. Prepare Flags Octet ***/

	/* Encode t (mac length) */
	t = rctx->authsize;
	nonce[0] |= (((t - 2) / 2) << 3);

	/* Adata */
	if (rctx->assoclen)
		nonce[0] |= (1 << 6);

	/*** Encode Q - message length ***/
	q = iv[0] + 1;
	q_ptr = nonce + 16 - q;

	return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
}

static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
{
	int len = 0;

	/*
	 * Add control info for associated data, as per
	 * RFC 3610 and NIST Special Publication 800-38C.
	 */
	if (a < 65280) {
		*(__be16 *)adata = cpu_to_be16(a);
		len = 2;
	} else {
		*(__be16 *)adata = cpu_to_be16(0xfffe);
		*(__be32 *)&adata[2] = cpu_to_be32(a);
		len = 6;
	}

	return len;
}

static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
{
	unsigned int padlen = 16 - (len % 16);
	u8 padding[16] = {0};

	if (padlen == 16)
		return 0;

	memcpy(buf, padding, padlen);

	return padlen;
}

static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
{
	unsigned int alen = 0, offset = 0;
	u8 nonce[16], adata[16];
	int ret;

	ret = tegra_ccm_format_nonce(rctx, nonce);
	if (ret)
		return ret;

	memcpy(rctx->inbuf.buf, nonce, 16);
	offset = 16;

	if (rctx->assoclen) {
		alen = tegra_ccm_format_adata(adata, rctx->assoclen);
		memcpy(rctx->inbuf.buf + offset, adata, alen);
		offset += alen;

		scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
					 rctx->src_sg, 0, rctx->assoclen, 0);

		offset += rctx->assoclen;
		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
						rctx->assoclen + alen);
	}

	return offset;
}

static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	u32 result[16];
	int i, ret;

	/* Read and clear Result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		result[i] = readl(se->base + se->hw->regs->result + (i * 4));

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	if (rctx->encrypt) {
		memcpy(rctx->authdata, result, rctx->authsize);
	} else {
		ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
		if (ret)
			return -EBADMSG;
	}

	return 0;
}

static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
{
	/* Copy result */
	scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
				 rctx->assoclen, rctx->cryptlen, 1);

	if (rctx->encrypt)
		scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
					 rctx->assoclen + rctx->cryptlen,
					 rctx->authsize, 1);
	else
		memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);

	return 0;
}

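/*
 * CBC-MAC authentication step: lay out B0, the encoded associated data
 * and the zero-padded payload in the input buffer, run CBC-MAC over it,
 * then fetch (encrypt) or verify (decrypt) the resulting MAC.
 */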
static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	struct scatterlist *sg;
	int offset, ret;

	offset = tegra_ccm_format_blocks(rctx);
	if (offset < 0)
		return -EINVAL;

	/* Copy plain text to the buffer */
	sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;

	scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
				 sg, rctx->assoclen,
				 rctx->cryptlen, 0);
	offset += rctx->cryptlen;
	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);

	rctx->inbuf.size = offset;

	ret = tegra_ccm_do_cbcmac(ctx, rctx);
	if (ret)
		return ret;

	return tegra_ccm_mac_result(se, rctx);
}

static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
{
	struct tegra_se *se = ctx->se;
	unsigned int cmdlen, offset = 0;
	struct scatterlist *sg = rctx->src_sg;
	int ret;

	rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
	rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
			      SE_AES_KEY_INDEX(ctx->key_id);

	/* Copy authdata to the top of the buffer for encryption/decryption */
	if (rctx->encrypt)
		memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
	else
		scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
					 rctx->assoclen + rctx->cryptlen,
					 rctx->authsize, 0);

	offset += rctx->authsize;
	offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);

	/* Copy the payload, if any; with no payload, only the auth data is processed */
	if (rctx->cryptlen) {
		scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
					 rctx->assoclen, rctx->cryptlen, 0);
		offset += rctx->cryptlen;
		offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
	}

	rctx->inbuf.size = offset;

	/* Prepare command and submit */
	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		return ret;

	return tegra_ccm_ctr_result(se, rctx);
}

static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
				struct tegra_aead_reqctx *rctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u8 *iv = (u8 *)rctx->iv;
	int ret, i;

	rctx->src_sg = req->src;
	rctx->dst_sg = req->dst;
	rctx->assoclen = req->assoclen;
	rctx->authsize = crypto_aead_authsize(tfm);

	memcpy(iv, req->iv, 16);

	ret = tegra_ccm_check_iv(iv);
	if (ret)
		return ret;

	/*
	 * Note: RFC 3610 and NIST 800-38C require a counter (ctr_0) of
	 * zero to encrypt the auth tag.
	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* Clear any previous result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	return 0;
}

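/*
 * CCM request handler. The order of the two passes depends on the
 * direction: encryption MACs the plaintext first and then runs CTR,
 * while decryption recovers the plaintext with CTR first and verifies
 * the MAC afterwards.
 */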
static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int ret;

	/* Allocate buffers required */
	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
					     &rctx->inbuf.addr, GFP_KERNEL);
	if (!rctx->inbuf.buf)
		return -ENOMEM;

	rctx->inbuf.size = SE_AES_BUFLEN;

	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
					      &rctx->outbuf.addr, GFP_KERNEL);
	if (!rctx->outbuf.buf) {
		ret = -ENOMEM;
		goto outbuf_err;
	}

	rctx->outbuf.size = SE_AES_BUFLEN;

	ret = tegra_ccm_crypt_init(req, se, rctx);
	if (ret)
		goto out;

	if (rctx->encrypt) {
		rctx->cryptlen = req->cryptlen;

		/* CBC MAC Operation */
		ret = tegra_ccm_compute_auth(ctx, rctx);
		if (ret)
			goto out;

		/* CTR operation */
		ret = tegra_ccm_do_ctr(ctx, rctx);
		if (ret)
			goto out;
	} else {
		rctx->cryptlen = req->cryptlen - ctx->authsize;

		/* CTR operation */
		ret = tegra_ccm_do_ctr(ctx, rctx);
		if (ret)
			goto out;

		/* CBC MAC Operation */
		ret = tegra_ccm_compute_auth(ctx, rctx);
		if (ret)
			goto out;
	}

out:
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->outbuf.buf, rctx->outbuf.addr);

outbuf_err:
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->inbuf.buf, rctx->inbuf.addr);

	crypto_finalize_aead_request(ctx->se->engine, req, ret);

	return 0;
}

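/*
 * GCM request handler: run GMAC over the associated data (if any), GCM
 * over the payload (if any) and GCM_FINAL to produce the tag, verifying
 * it on decryption. The 96-bit IV is expanded to the initial counter
 * block J0 by appending a 32-bit block counter of one.
 */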
static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request, base);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
	int ret;

	/* Allocate buffers required */
	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
					     &rctx->inbuf.addr, GFP_KERNEL);
	if (!rctx->inbuf.buf)
		return -ENOMEM;

	rctx->inbuf.size = SE_AES_BUFLEN;

	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
					      &rctx->outbuf.addr, GFP_KERNEL);
	if (!rctx->outbuf.buf) {
		ret = -ENOMEM;
		goto outbuf_err;
	}

	rctx->outbuf.size = SE_AES_BUFLEN;

	rctx->src_sg = req->src;
	rctx->dst_sg = req->dst;
	rctx->assoclen = req->assoclen;
	rctx->authsize = crypto_aead_authsize(tfm);

	if (rctx->encrypt)
		rctx->cryptlen = req->cryptlen;
	else
		rctx->cryptlen = req->cryptlen - ctx->authsize;

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
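	/*
	 * GCM starts CTR mode from a counter value of 1; the counter is the
	 * big-endian last word of the IV, so (1 << 24) stores 1 there.
	 */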
	rctx->iv[3] = (1 << 24);

	/* If there is associated data perform GMAC operation */
	if (rctx->assoclen) {
		ret = tegra_gcm_do_gmac(ctx, rctx);
		if (ret)
			goto out;
	}

	/* GCM Encryption/Decryption operation */
	if (rctx->cryptlen) {
		ret = tegra_gcm_do_crypt(ctx, rctx);
		if (ret)
			goto out;
	}

	/* GCM_FINAL operation */
	ret = tegra_gcm_do_final(ctx, rctx);
	if (ret)
		goto out;

	if (!rctx->encrypt)
		ret = tegra_gcm_do_verify(ctx->se, rctx);

out:
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->outbuf.buf, rctx->outbuf.addr);

outbuf_err:
	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
			  rctx->inbuf.buf, rctx->inbuf.addr);

	/* Finalize the request, reporting the result to the caller */
	crypto_finalize_aead_request(ctx->se->engine, req, ret);

	return 0;
}

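/*
 * Bind the transform to its SE device, size the per-request context and
 * resolve the algorithm name to the SE algorithm ID.
 */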
static int tegra_aead_cra_init(struct crypto_aead *tfm)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	algname = crypto_tfm_alg_name(&tfm->base);

	se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);

	crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));

	ctx->se = se_alg->se_dev;
	ctx->key_id = 0;

	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	return 0;
}

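/* CCM only permits tag lengths of 4, 6, 8, 10, 12, 14 and 16 bytes */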
static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);

	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	ctx->authsize = authsize;

	return 0;
}

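/* Validate the requested tag length with the generic GCM helper */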
static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	ret = crypto_gcm_check_authsize(authsize);
	if (ret)
		return ret;

	ctx->authsize = authsize;

	return 0;
}

static void tegra_aead_cra_exit(struct crypto_aead *tfm)
{
	struct tegra_aead_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	if (ctx->key_id)
		tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

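/*
 * Record the direction and queue the request; the actual processing
 * happens asynchronously in tegra_gcm_do_one_req()/tegra_ccm_do_one_req().
 */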
static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);

	rctx->encrypt = encrypt;

	return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
}

static int tegra_aead_encrypt(struct aead_request *req)
{
	return tegra_aead_crypt(req, true);
}

static int tegra_aead_decrypt(struct aead_request *req)
{
	return tegra_aead_crypt(req, false);
}

static int tegra_aead_setkey(struct crypto_aead *tfm,
			     const u8 *key, u32 keylen)
{
	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}

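/*
 * Build the host1x command buffer for a CMAC operation: program a zero
 * IV on the first update, set the last-block and residual-bit counts,
 * the config and key index, the source address, and finally the
 * operation word plus a syncpoint increment to signal completion.
 */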
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
					struct tegra_cmac_reqctx *rctx)
{
	unsigned int data_count, res_bits = 0, i = 0, j;
	struct tegra_se *se = ctx->se;
	u32 *cpuvaddr = se->cmdbuf->addr, op;

	data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);

	op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;

	if (!(rctx->task & SHA_UPDATE)) {
		op |= SE_AES_OP_FINAL;
		res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
	}

	if (!res_bits && data_count)
		data_count--;

	if (rctx->task & SHA_FIRST) {
		rctx->task &= ~SHA_FIRST;

		cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
		cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
		/* Load 0 IV */
		for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
			cpuvaddr[i++] = 0;
	}

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
	cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
			SE_LAST_BLOCK_RES_BITS(res_bits);

	cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
	cpuvaddr[i++] = rctx->config;
	cpuvaddr[i++] = rctx->crypto_config;

	/* Source Address */
	cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
	cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
			SE_ADDR_HI_SZ(rctx->datbuf.size);
	cpuvaddr[i++] = 0;
	cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);

	cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
	cpuvaddr[i++] = op;

	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	return i;
}

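/*
 * Save/restore the intermediate CMAC state held in the hardware result
 * registers, so that a hash can continue across update() calls and
 * across import()/export().
 */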
static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
	int i;

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
}

static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
{
	int i;

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(rctx->result[i],
		       se->base + se->hw->regs->result + (i * 4));
}

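/*
 * Feed full blocks to the engine while holding back at least the last
 * block (plus any unaligned remainder) as residue for final().
 */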
static int tegra_cmac_do_update(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	unsigned int nblks, nresidue, cmdlen;
	int ret;

	if (!req->nbytes)
		return 0;

	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;

	/*
	 * Reserve the last block as residue, to be processed during final().
	 */
	if (!nresidue && nblks) {
		nresidue += rctx->blk_size;
		nblks--;
	}

	rctx->src_sg = req->src;
	rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
	rctx->total_len += rctx->datbuf.size;
	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
	rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);

	/*
	 * If there is less than one full block of data, keep the bytes in
	 * the residue buffer and return. They will be processed in final().
	 */
	if (nblks < 1) {
		scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
					 rctx->src_sg, 0, req->nbytes, 0);

		rctx->residue.size += req->nbytes;
		return 0;
	}

	/* Copy the previous residue first */
	if (rctx->residue.size)
		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);

	scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
				 rctx->src_sg, 0, req->nbytes - nresidue, 0);

	scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
				 req->nbytes - nresidue, nresidue, 0);

	/* Track the bytes left over after this update */
	rctx->residue.size = nresidue;

	/*
	 * If this is not the first 'update' call, restore the previously
	 * saved intermediate result into the result registers so that the
	 * engine picks it up. This supports the import/export functionality.
	 */
	if (!(rctx->task & SHA_FIRST))
		tegra_cmac_paste_result(ctx->se, rctx);

	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);

	ret = tegra_se_host1x_submit(se, cmdlen);
	/*
	 * If this is not the final update, copy the intermediate result
	 * from the registers so that it can be used in the next 'update'
	 * call. This supports the import/export functionality.
	 */
	if (!(rctx->task & SHA_FINAL))
		tegra_cmac_copy_result(ctx->se, rctx);

	return ret;
}

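/*
 * Process the residue as the final block and read back the MAC. Empty
 * messages are handed to the software fallback when one is available.
 */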
static int tegra_cmac_do_final(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	u32 *result = (u32 *)req->result;
	int ret = 0, i, cmdlen;

	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
		ret = crypto_shash_tfm_digest(ctx->fallback_tfm,
					      rctx->datbuf.buf, 0, req->result);
		goto out;
	}

	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
	rctx->datbuf.size = rctx->residue.size;
	rctx->total_len += rctx->residue.size;
	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);

	/* Prepare command and submit */
	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
	ret = tegra_se_host1x_submit(se, cmdlen);
	if (ret)
		goto out;

	/* Read the result, then clear the result registers */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		result[i] = readl(se->base + se->hw->regs->result + (i * 4));

	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

out:
	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
			  rctx->datbuf.buf, rctx->datbuf.addr);
	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
			  rctx->residue.buf, rctx->residue.addr);
	return ret;
}

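/* Engine callback: run the update and/or final steps requested in rctx->task */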
static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int ret = 0;

	if (rctx->task & SHA_UPDATE) {
		ret = tegra_cmac_do_update(req);
		rctx->task &= ~SHA_UPDATE;
	}

	if (rctx->task & SHA_FINAL) {
		ret = tegra_cmac_do_final(req);
		rctx->task &= ~SHA_FINAL;
	}

	crypto_finalize_hash_request(se->engine, req, ret);

	return 0;
}

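/*
 * Allocate a software CMAC fallback (used for empty messages) and widen
 * the advertised state size if the fallback needs more room.
 */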
static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
				     const char *algname)
{
	unsigned int statesize;

	ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback_tfm)) {
		dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
		ctx->fallback_tfm = NULL;
		return;
	}

	statesize = crypto_shash_statesize(ctx->fallback_tfm);

	if (statesize > sizeof(struct tegra_cmac_reqctx))
		crypto_ahash_set_statesize(tfm, statesize);
}

static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct tegra_se_alg *se_alg;
	const char *algname;
	int ret;

	algname = crypto_tfm_alg_name(tfm);
	se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);

	crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));

	ctx->se = se_alg->se_dev;
	ctx->key_id = 0;

	ret = se_algname_to_algid(algname);
	if (ret < 0) {
		dev_err(ctx->se->dev, "invalid algorithm\n");
		return ret;
	}

	ctx->alg = ret;

	tegra_cmac_init_fallback(ahash_tfm, ctx, algname);

	return 0;
}

static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback_tfm)
		crypto_free_shash(ctx->fallback_tfm);

	tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}

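/*
 * Reset the running state, allocate the DMA buffers for the residue and
 * the data to be processed, and clear any stale result registers.
 */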
static int tegra_cmac_init(struct ahash_request *req)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_se *se = ctx->se;
	int i;

	rctx->total_len = 0;
	rctx->datbuf.size = 0;
	rctx->residue.size = 0;
	rctx->task = SHA_FIRST;
	rctx->blk_size = crypto_ahash_blocksize(tfm);

	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
					       &rctx->residue.addr, GFP_KERNEL);
	if (!rctx->residue.buf)
		goto resbuf_fail;

	rctx->residue.size = 0;

	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		goto datbuf_fail;

	rctx->datbuf.size = 0;

	/* Clear any previous result */
	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
		writel(0, se->base + se->hw->regs->result + (i * 4));

	return 0;

datbuf_fail:
	dma_free_coherent(se->dev, rctx->blk_size * 2, rctx->residue.buf,
			  rctx->residue.addr);
resbuf_fail:
	return -ENOMEM;
}

static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);

	if (aes_check_keylen(keylen)) {
		dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
		return -EINVAL;
	}

	if (ctx->fallback_tfm)
		crypto_shash_setkey(ctx->fallback_tfm, key, keylen);

	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}

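/*
 * The update/final/finup/digest entry points only mark the requested
 * steps in rctx->task and queue the request; the work itself runs in
 * tegra_cmac_do_one_req() on the crypto engine.
 */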
static int tegra_cmac_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_UPDATE;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	rctx->task |= SHA_UPDATE | SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

static int tegra_cmac_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
	int ret;

	ret = tegra_cmac_init(req);
	if (ret)
		return ret;

	rctx->task |= SHA_UPDATE | SHA_FINAL;

	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}

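/*
 * Export/import simply copy the whole request context, which holds the
 * residue and the saved intermediate result.
 */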
static int tegra_cmac_export(struct ahash_request *req, void *out)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int tegra_cmac_import(struct ahash_request *req, const void *in)
{
	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(*rctx));

	return 0;
}

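/* AEAD algorithms (AES-GCM and AES-CCM) backed by the security engine */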
static struct tegra_se_alg tegra_aead_algs[] = {
	{
		.alg.aead.op.do_one_request = tegra_gcm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey = tegra_aead_setkey,
			.setauthsize = tegra_gcm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize = GCM_AES_IV_SIZE,
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-tegra",
				.cra_priority = 500,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}, {
		.alg.aead.op.do_one_request = tegra_ccm_do_one_req,
		.alg.aead.base = {
			.init = tegra_aead_cra_init,
			.exit = tegra_aead_cra_exit,
			.setkey = tegra_aead_setkey,
			.setauthsize = tegra_ccm_setauthsize,
			.encrypt = tegra_aead_encrypt,
			.decrypt = tegra_aead_decrypt,
			.maxauthsize = AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-tegra",
				.cra_priority = 500,
				.cra_blocksize = 1,
				.cra_ctxsize = sizeof(struct tegra_aead_ctx),
				.cra_alignmask = 0xf,
				.cra_module = THIS_MODULE,
			},
		}
	}
};

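/* CMAC(AES), exposed as an ahash backed by the security engine */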
static struct tegra_se_alg tegra_cmac_algs[] = {
	{
		.alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
		.alg.ahash.base = {
			.init = tegra_cmac_init,
			.setkey = tegra_cmac_setkey,
			.update = tegra_cmac_update,
			.final = tegra_cmac_final,
			.finup = tegra_cmac_finup,
			.digest = tegra_cmac_digest,
			.export = tegra_cmac_export,
			.import = tegra_cmac_import,
			.halg.digestsize = AES_BLOCK_SIZE,
			.halg.statesize = sizeof(struct tegra_cmac_reqctx),
			.halg.base = {
				.cra_name = "cmac(aes)",
				.cra_driver_name = "tegra-se-cmac",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct tegra_cmac_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = tegra_cmac_cra_init,
				.cra_exit = tegra_cmac_cra_exit,
			}
		}
	}
};

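/*
 * Register all AES-based algorithms (skcipher, AEAD and CMAC) for this
 * SE instance, unwinding any partial registration on failure.
 */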
int tegra_init_aes(struct tegra_se *se)
{
	struct aead_engine_alg *aead_alg;
	struct ahash_engine_alg *ahash_alg;
	struct skcipher_engine_alg *sk_alg;
	int i, ret;

	se->manifest = tegra_aes_kac_manifest;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
		sk_alg = &tegra_aes_algs[i].alg.skcipher;
		tegra_aes_algs[i].se_dev = se;

		ret = crypto_engine_register_skcipher(sk_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				sk_alg->base.base.cra_name);
			goto err_aes;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
		aead_alg = &tegra_aead_algs[i].alg.aead;
		tegra_aead_algs[i].se_dev = se;

		ret = crypto_engine_register_aead(aead_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				aead_alg->base.base.cra_name);
			goto err_aead;
		}
	}

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
		ahash_alg = &tegra_cmac_algs[i].alg.ahash;
		tegra_cmac_algs[i].se_dev = se;

		ret = crypto_engine_register_ahash(ahash_alg);
		if (ret) {
			dev_err(se->dev, "failed to register %s\n",
				ahash_alg->base.halg.base.cra_name);
			goto err_cmac;
		}
	}

	return 0;

err_cmac:
	while (i--)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);

	i = ARRAY_SIZE(tegra_aead_algs);
err_aead:
	while (i--)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	i = ARRAY_SIZE(tegra_aes_algs);
err_aes:
	while (i--)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	return ret;
}

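/* Unregister everything that tegra_init_aes() registered */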
void tegra_deinit_aes(struct tegra_se *se)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
		crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);

	for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
		crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);

	for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
		crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
}