// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
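 * For ahash, that context is the running digest plus an 8-byte message
 * length (see HASH_MSG_LEN / MAX_CTX_LEN below).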
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct crypto_engine_ctx enginectx;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
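	/* trailing input bytes carried over into buf[] when the job completes */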
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key down to digestsize if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
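		/* keys longer than the block size are first hashed down to digestsize */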
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, keylen, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}
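
/* cmac(aes) uses the raw key directly (no split key), as immediate data */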
static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
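	 * In that case the job was enqueued directly on the job ring, so
	 * complete the request right here.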
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);

}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;

	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	if (!edesc)
		return NULL;

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled by CAAM, if free, especially since JR has
	 * up to 1024 entries (more than the 10 entries from crypto-engine).
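	 * Non-backlog requests are enqueued directly and simply fail if the
	 * job ring has no free slot.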
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
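		/*
		 * link table layout: [running context] [buffered bytes, if
		 * any] [source scatterlist], padded per HW requirements
		 */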
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

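/* submit final job descriptor: hash buffered bytes on top of the context */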
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
1 : 0)); 977045e3678SYuan Kang 9781da2be33SRuchika Gupta edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 9791da2be33SRuchika Gupta sec4_sg_bytes, DMA_TO_DEVICE); 980ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 981ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 98232686d34SRussell King ret = -ENOMEM; 98358b0e5d0SMarkus Elfring goto unmap_ctx; 984ce572085SHoria Geanta } 9851da2be33SRuchika Gupta 986045e3678SYuan Kang append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 987045e3678SYuan Kang LDST_SGF); 988c19650d6SHoria Geantă append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 989045e3678SYuan Kang 9906e005503SSascha Hauer print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 9916e005503SSascha Hauer DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 9926e005503SSascha Hauer 1); 993045e3678SYuan Kang 99421b014f0SIuliana Prodan return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req, 99521b014f0SIuliana Prodan digestsize, DMA_BIDIRECTIONAL); 99658b0e5d0SMarkus Elfring unmap_ctx: 997c19650d6SHoria Geantă ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 998045e3678SYuan Kang kfree(edesc); 999045e3678SYuan Kang return ret; 1000045e3678SYuan Kang } 1001045e3678SYuan Kang 1002045e3678SYuan Kang static int ahash_finup_ctx(struct ahash_request *req) 1003045e3678SYuan Kang { 1004045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 10054cb4f7c1SHerbert Xu struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); 10064cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1007045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 100846b49abcSAndrei Botila int buflen = state->buflen; 100930a43b44SRussell King u32 *desc; 101065cf164aSRussell King int sec4_sg_src_index; 1011bc13c69eSRussell King int src_nents, mapped_nents; 1012045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 1013045e3678SYuan Kang struct ahash_edesc *edesc; 10149e6df0fdSMarkus Elfring int ret; 1015045e3678SYuan Kang 101613fb8fd7SLABBE Corentin src_nents = sg_nents_for_len(req->src, req->nbytes); 1017f9970c28SLABBE Corentin if (src_nents < 0) { 1018f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1019f9970c28SLABBE Corentin return src_nents; 1020f9970c28SLABBE Corentin } 1021bc13c69eSRussell King 1022bc13c69eSRussell King if (src_nents) { 1023bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1024bc13c69eSRussell King DMA_TO_DEVICE); 1025bc13c69eSRussell King if (!mapped_nents) { 1026bc13c69eSRussell King dev_err(jrdev, "unable to DMA map source\n"); 1027bc13c69eSRussell King return -ENOMEM; 1028bc13c69eSRussell King } 1029bc13c69eSRussell King } else { 1030bc13c69eSRussell King mapped_nents = 0; 1031bc13c69eSRussell King } 1032bc13c69eSRussell King 1033045e3678SYuan Kang sec4_sg_src_index = 1 + (buflen ? 

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
mapped_nents = 0; 1105bc13c69eSRussell King } 1106bc13c69eSRussell King 1107045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 11082ba1e798SIuliana Prodan edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0, 11092ba1e798SIuliana Prodan ctx->sh_desc_digest, ctx->sh_desc_digest_dma); 1110045e3678SYuan Kang if (!edesc) { 1111bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1112045e3678SYuan Kang return -ENOMEM; 1113045e3678SYuan Kang } 1114343e44b1SRussell King 1115045e3678SYuan Kang edesc->src_nents = src_nents; 1116045e3678SYuan Kang 111765cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 111865cf164aSRussell King req->nbytes); 111965cf164aSRussell King if (ret) { 112032686d34SRussell King ahash_unmap(jrdev, edesc, req, digestsize); 112132686d34SRussell King kfree(edesc); 112265cf164aSRussell King return ret; 1123ce572085SHoria Geanta } 112465cf164aSRussell King 112565cf164aSRussell King desc = edesc->hw_desc; 1126045e3678SYuan Kang 1127c19650d6SHoria Geantă ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1128c19650d6SHoria Geantă if (ret) { 112932686d34SRussell King ahash_unmap(jrdev, edesc, req, digestsize); 113032686d34SRussell King kfree(edesc); 1131ce572085SHoria Geanta return -ENOMEM; 1132ce572085SHoria Geanta } 1133045e3678SYuan Kang 11346e005503SSascha Hauer print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 11356e005503SSascha Hauer DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 11366e005503SSascha Hauer 1); 1137045e3678SYuan Kang 113821b014f0SIuliana Prodan return ahash_enqueue_req(jrdev, ahash_done, req, digestsize, 113921b014f0SIuliana Prodan DMA_FROM_DEVICE); 1140045e3678SYuan Kang } 1141045e3678SYuan Kang 1142045e3678SYuan Kang /* submit ahash final if it is the first job descriptor */ 1143045e3678SYuan Kang static int ahash_final_no_ctx(struct ahash_request *req) 1144045e3678SYuan Kang { 1145045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 11464cb4f7c1SHerbert Xu struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); 11474cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1148045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 114946b49abcSAndrei Botila u8 *buf = state->buf; 115046b49abcSAndrei Botila int buflen = state->buflen; 115130a43b44SRussell King u32 *desc; 1152045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 1153045e3678SYuan Kang struct ahash_edesc *edesc; 11549e6df0fdSMarkus Elfring int ret; 1155045e3678SYuan Kang 1156045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 11572ba1e798SIuliana Prodan edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest, 11582ba1e798SIuliana Prodan ctx->sh_desc_digest_dma); 11595588d039SRussell King if (!edesc) 1160045e3678SYuan Kang return -ENOMEM; 1161045e3678SYuan Kang 1162045e3678SYuan Kang desc = edesc->hw_desc; 1163045e3678SYuan Kang 116404e6d25cSAymen Sghaier if (buflen) { 116504e6d25cSAymen Sghaier state->buf_dma = dma_map_single(jrdev, buf, buflen, 116604e6d25cSAymen Sghaier DMA_TO_DEVICE); 1167ce572085SHoria Geanta if (dma_mapping_error(jrdev, state->buf_dma)) { 1168ce572085SHoria Geanta dev_err(jrdev, "unable to map src\n"); 116906435f34SMarkus Elfring goto unmap; 1170ce572085SHoria Geanta } 1171045e3678SYuan Kang 1172045e3678SYuan Kang append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 117304e6d25cSAymen Sghaier } 1174045e3678SYuan Kang 1175c19650d6SHoria Geantă ret =
map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1176c19650d6SHoria Geantă if (ret) 117706435f34SMarkus Elfring goto unmap; 1178045e3678SYuan Kang 11796e005503SSascha Hauer print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 11806e005503SSascha Hauer DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 11816e005503SSascha Hauer 1); 1182045e3678SYuan Kang 118321b014f0SIuliana Prodan return ahash_enqueue_req(jrdev, ahash_done, req, 118421b014f0SIuliana Prodan digestsize, DMA_FROM_DEVICE); 118506435f34SMarkus Elfring unmap: 118606435f34SMarkus Elfring ahash_unmap(jrdev, edesc, req, digestsize); 118706435f34SMarkus Elfring kfree(edesc); 118806435f34SMarkus Elfring return -ENOMEM; 1189045e3678SYuan Kang } 1190045e3678SYuan Kang 1191045e3678SYuan Kang /* submit ahash update if it is the first job descriptor after update */ 1192045e3678SYuan Kang static int ahash_update_no_ctx(struct ahash_request *req) 1193045e3678SYuan Kang { 1194045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 11954cb4f7c1SHerbert Xu struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); 11964cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1197045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 119846b49abcSAndrei Botila u8 *buf = state->buf; 119946b49abcSAndrei Botila int *buflen = &state->buflen; 120046b49abcSAndrei Botila int *next_buflen = &state->next_buflen; 120112b8567fSIuliana Prodan int blocksize = crypto_ahash_blocksize(ahash); 1202045e3678SYuan Kang int in_len = *buflen + req->nbytes, to_hash; 1203bc13c69eSRussell King int sec4_sg_bytes, src_nents, mapped_nents; 1204045e3678SYuan Kang struct ahash_edesc *edesc; 120530a43b44SRussell King u32 *desc; 1206045e3678SYuan Kang int ret = 0; 1207045e3678SYuan Kang 120812b8567fSIuliana Prodan *next_buflen = in_len & (blocksize - 1); 1209045e3678SYuan Kang to_hash = in_len - *next_buflen; 1210045e3678SYuan Kang 121112b8567fSIuliana Prodan /* 121287870cfbSIuliana Prodan * For XCBC and CMAC, if to_hash is multiple of block size, 121312b8567fSIuliana Prodan * keep last block in internal buffer 121412b8567fSIuliana Prodan */ 121587870cfbSIuliana Prodan if ((is_xcbc_aes(ctx->adata.algtype) || 121687870cfbSIuliana Prodan is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 121712b8567fSIuliana Prodan (*next_buflen == 0)) { 121812b8567fSIuliana Prodan *next_buflen = blocksize; 121912b8567fSIuliana Prodan to_hash -= blocksize; 122012b8567fSIuliana Prodan } 122112b8567fSIuliana Prodan 1222045e3678SYuan Kang if (to_hash) { 1223a5e5c133SHoria Geantă int pad_nents; 1224059d73eeSHoria Geantă int src_len = req->nbytes - *next_buflen; 1225a5e5c133SHoria Geantă 1226059d73eeSHoria Geantă src_nents = sg_nents_for_len(req->src, src_len); 1227f9970c28SLABBE Corentin if (src_nents < 0) { 1228f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1229f9970c28SLABBE Corentin return src_nents; 1230f9970c28SLABBE Corentin } 1231bc13c69eSRussell King 1232bc13c69eSRussell King if (src_nents) { 1233bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1234bc13c69eSRussell King DMA_TO_DEVICE); 1235bc13c69eSRussell King if (!mapped_nents) { 1236bc13c69eSRussell King dev_err(jrdev, "unable to DMA map source\n"); 1237bc13c69eSRussell King return -ENOMEM; 1238bc13c69eSRussell King } 1239bc13c69eSRussell King } else { 1240bc13c69eSRussell King mapped_nents = 0; 1241bc13c69eSRussell King } 1242bc13c69eSRussell King 1243a5e5c133SHoria Geantă pad_nents = pad_sg_nents(1 + mapped_nents); 1244a5e5c133SHoria
Geantă sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); 1245045e3678SYuan Kang 1246045e3678SYuan Kang /* 1247045e3678SYuan Kang * allocate space for base edesc and hw desc commands, 1248045e3678SYuan Kang * link tables 1249045e3678SYuan Kang */ 12502ba1e798SIuliana Prodan edesc = ahash_edesc_alloc(req, pad_nents, 125130a43b44SRussell King ctx->sh_desc_update_first, 12522ba1e798SIuliana Prodan ctx->sh_desc_update_first_dma); 1253045e3678SYuan Kang if (!edesc) { 1254bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1255045e3678SYuan Kang return -ENOMEM; 1256045e3678SYuan Kang } 1257045e3678SYuan Kang 1258045e3678SYuan Kang edesc->src_nents = src_nents; 1259045e3678SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 1260045e3678SYuan Kang 1261944c3d4dSHoria Geantă ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1262944c3d4dSHoria Geantă if (ret) 1263944c3d4dSHoria Geantă goto unmap_ctx; 1264944c3d4dSHoria Geantă 1265059d73eeSHoria Geantă sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); 1266bc13c69eSRussell King 1267045e3678SYuan Kang desc = edesc->hw_desc; 1268045e3678SYuan Kang 12691da2be33SRuchika Gupta edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 12701da2be33SRuchika Gupta sec4_sg_bytes, 12711da2be33SRuchika Gupta DMA_TO_DEVICE); 1272ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1273ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 127432686d34SRussell King ret = -ENOMEM; 127558b0e5d0SMarkus Elfring goto unmap_ctx; 1276ce572085SHoria Geanta } 12771da2be33SRuchika Gupta 1278045e3678SYuan Kang append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1279045e3678SYuan Kang 1280ce572085SHoria Geanta ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1281ce572085SHoria Geanta if (ret) 128258b0e5d0SMarkus Elfring goto unmap_ctx; 1283045e3678SYuan Kang 12846e005503SSascha Hauer print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1285045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 1286045e3678SYuan Kang desc_bytes(desc), 1); 1287045e3678SYuan Kang 128821b014f0SIuliana Prodan ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req, 128921b014f0SIuliana Prodan ctx->ctx_len, DMA_TO_DEVICE); 129021b014f0SIuliana Prodan if ((ret != -EINPROGRESS) && (ret != -EBUSY)) 129121b014f0SIuliana Prodan return ret; 1292045e3678SYuan Kang state->update = ahash_update_ctx; 1293045e3678SYuan Kang state->finup = ahash_finup_ctx; 1294045e3678SYuan Kang state->final = ahash_final_ctx; 1295045e3678SYuan Kang } else if (*next_buflen) { 1296307fd543SCristian Stoica scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1297307fd543SCristian Stoica req->nbytes, 0); 1298045e3678SYuan Kang *buflen = *next_buflen; 12996e005503SSascha Hauer 13006e005503SSascha Hauer print_hex_dump_debug("buf@" __stringify(__LINE__)": ", 130146b49abcSAndrei Botila DUMP_PREFIX_ADDRESS, 16, 4, buf, 130246b49abcSAndrei Botila *buflen, 1); 130346b49abcSAndrei Botila } 1304045e3678SYuan Kang 1305045e3678SYuan Kang return ret; 130658b0e5d0SMarkus Elfring unmap_ctx: 130732686d34SRussell King ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 130832686d34SRussell King kfree(edesc); 130932686d34SRussell King return ret; 1310045e3678SYuan Kang } 1311045e3678SYuan Kang 1312045e3678SYuan Kang /* submit ahash finup if it is the first job descriptor after update */ 1313045e3678SYuan Kang static int ahash_finup_no_ctx(struct ahash_request *req) 1314045e3678SYuan Kang { 1315045e3678SYuan Kang struct crypto_ahash
*ahash = crypto_ahash_reqtfm(req); 13164cb4f7c1SHerbert Xu struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); 13174cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1318045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 131946b49abcSAndrei Botila int buflen = state->buflen; 132030a43b44SRussell King u32 *desc; 1321bc13c69eSRussell King int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1322045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 1323045e3678SYuan Kang struct ahash_edesc *edesc; 13249e6df0fdSMarkus Elfring int ret; 1325045e3678SYuan Kang 132613fb8fd7SLABBE Corentin src_nents = sg_nents_for_len(req->src, req->nbytes); 1327f9970c28SLABBE Corentin if (src_nents < 0) { 1328f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1329f9970c28SLABBE Corentin return src_nents; 1330f9970c28SLABBE Corentin } 1331bc13c69eSRussell King 1332bc13c69eSRussell King if (src_nents) { 1333bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1334bc13c69eSRussell King DMA_TO_DEVICE); 1335bc13c69eSRussell King if (!mapped_nents) { 1336bc13c69eSRussell King dev_err(jrdev, "unable to DMA map source\n"); 1337bc13c69eSRussell King return -ENOMEM; 1338bc13c69eSRussell King } 1339bc13c69eSRussell King } else { 1340bc13c69eSRussell King mapped_nents = 0; 1341bc13c69eSRussell King } 1342bc13c69eSRussell King 1343045e3678SYuan Kang sec4_sg_src_index = 2; 1344bc13c69eSRussell King sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1345045e3678SYuan Kang sizeof(struct sec4_sg_entry); 1346045e3678SYuan Kang 1347045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 13482ba1e798SIuliana Prodan edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, 13492ba1e798SIuliana Prodan ctx->sh_desc_digest, ctx->sh_desc_digest_dma); 1350045e3678SYuan Kang if (!edesc) { 1351bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1352045e3678SYuan Kang return -ENOMEM; 1353045e3678SYuan Kang } 1354045e3678SYuan Kang 1355045e3678SYuan Kang desc = edesc->hw_desc; 1356045e3678SYuan Kang 1357045e3678SYuan Kang edesc->src_nents = src_nents; 1358045e3678SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 1359045e3678SYuan Kang 1360944c3d4dSHoria Geantă ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1361944c3d4dSHoria Geantă if (ret) 1362944c3d4dSHoria Geantă goto unmap; 1363045e3678SYuan Kang 136465cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 136565cf164aSRussell King req->nbytes); 136665cf164aSRussell King if (ret) { 1367ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 136806435f34SMarkus Elfring goto unmap; 1369ce572085SHoria Geanta } 13701da2be33SRuchika Gupta 1371c19650d6SHoria Geantă ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1372c19650d6SHoria Geantă if (ret) 137306435f34SMarkus Elfring goto unmap; 1374045e3678SYuan Kang 13756e005503SSascha Hauer print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 13766e005503SSascha Hauer DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 13776e005503SSascha Hauer 1); 1378045e3678SYuan Kang 137921b014f0SIuliana Prodan return ahash_enqueue_req(jrdev, ahash_done, req, 138021b014f0SIuliana Prodan digestsize, DMA_FROM_DEVICE); 138106435f34SMarkus Elfring unmap: 138206435f34SMarkus Elfring ahash_unmap(jrdev, edesc, req, digestsize); 138306435f34SMarkus Elfring kfree(edesc); 138406435f34SMarkus Elfring return -ENOMEM; 138506435f34SMarkus 
Elfring 1386045e3678SYuan Kang } 1387045e3678SYuan Kang 1388045e3678SYuan Kang /* submit first update job descriptor after init */ 1389045e3678SYuan Kang static int ahash_update_first(struct ahash_request *req) 1390045e3678SYuan Kang { 1391045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 13924cb4f7c1SHerbert Xu struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); 13934cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1394045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 139546b49abcSAndrei Botila u8 *buf = state->buf; 139646b49abcSAndrei Botila int *buflen = &state->buflen; 139746b49abcSAndrei Botila int *next_buflen = &state->next_buflen; 1398045e3678SYuan Kang int to_hash; 139912b8567fSIuliana Prodan int blocksize = crypto_ahash_blocksize(ahash); 140030a43b44SRussell King u32 *desc; 140165cf164aSRussell King int src_nents, mapped_nents; 1402045e3678SYuan Kang struct ahash_edesc *edesc; 1403045e3678SYuan Kang int ret = 0; 1404045e3678SYuan Kang 140512b8567fSIuliana Prodan *next_buflen = req->nbytes & (blocksize - 1); 1406045e3678SYuan Kang to_hash = req->nbytes - *next_buflen; 1407045e3678SYuan Kang 140812b8567fSIuliana Prodan /* 140987870cfbSIuliana Prodan * For XCBC and CMAC, if to_hash is multiple of block size, 141012b8567fSIuliana Prodan * keep last block in internal buffer 141112b8567fSIuliana Prodan */ 141287870cfbSIuliana Prodan if ((is_xcbc_aes(ctx->adata.algtype) || 141387870cfbSIuliana Prodan is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 141412b8567fSIuliana Prodan (*next_buflen == 0)) { 141512b8567fSIuliana Prodan *next_buflen = blocksize; 141612b8567fSIuliana Prodan to_hash -= blocksize; 141712b8567fSIuliana Prodan } 141812b8567fSIuliana Prodan 1419045e3678SYuan Kang if (to_hash) { 14203d5a2db6SRussell King src_nents = sg_nents_for_len(req->src, 14213d5a2db6SRussell King req->nbytes - *next_buflen); 1422f9970c28SLABBE Corentin if (src_nents < 0) { 1423f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1424f9970c28SLABBE Corentin return src_nents; 1425f9970c28SLABBE Corentin } 1426bc13c69eSRussell King 1427bc13c69eSRussell King if (src_nents) { 1428bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1429bc13c69eSRussell King DMA_TO_DEVICE); 1430bc13c69eSRussell King if (!mapped_nents) { 1431bc13c69eSRussell King dev_err(jrdev, "unable to map source for DMA\n"); 1432bc13c69eSRussell King return -ENOMEM; 1433bc13c69eSRussell King } 1434bc13c69eSRussell King } else { 1435bc13c69eSRussell King mapped_nents = 0; 1436bc13c69eSRussell King } 1437045e3678SYuan Kang 1438045e3678SYuan Kang /* 1439045e3678SYuan Kang * allocate space for base edesc and hw desc commands, 1440045e3678SYuan Kang * link tables 1441045e3678SYuan Kang */ 14422ba1e798SIuliana Prodan edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? 
144330a43b44SRussell King mapped_nents : 0, 144430a43b44SRussell King ctx->sh_desc_update_first, 14452ba1e798SIuliana Prodan ctx->sh_desc_update_first_dma); 1446045e3678SYuan Kang if (!edesc) { 1447bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1448045e3678SYuan Kang return -ENOMEM; 1449045e3678SYuan Kang } 1450045e3678SYuan Kang 1451045e3678SYuan Kang edesc->src_nents = src_nents; 1452045e3678SYuan Kang 145365cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 145465cf164aSRussell King to_hash); 145565cf164aSRussell King if (ret) 145658b0e5d0SMarkus Elfring goto unmap_ctx; 1457045e3678SYuan Kang 1458045e3678SYuan Kang desc = edesc->hw_desc; 1459045e3678SYuan Kang 1460ce572085SHoria Geanta ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1461ce572085SHoria Geanta if (ret) 146258b0e5d0SMarkus Elfring goto unmap_ctx; 1463045e3678SYuan Kang 14646e005503SSascha Hauer print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1465045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 1466045e3678SYuan Kang desc_bytes(desc), 1); 1467045e3678SYuan Kang 146821b014f0SIuliana Prodan ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req, 146921b014f0SIuliana Prodan ctx->ctx_len, DMA_TO_DEVICE); 147021b014f0SIuliana Prodan if ((ret != -EINPROGRESS) && (ret != -EBUSY)) 147121b014f0SIuliana Prodan return ret; 1472045e3678SYuan Kang state->update = ahash_update_ctx; 1473045e3678SYuan Kang state->finup = ahash_finup_ctx; 1474045e3678SYuan Kang state->final = ahash_final_ctx; 1475045e3678SYuan Kang } else if (*next_buflen) { 1476045e3678SYuan Kang state->update = ahash_update_no_ctx; 1477045e3678SYuan Kang state->finup = ahash_finup_no_ctx; 1478045e3678SYuan Kang state->final = ahash_final_no_ctx; 147946b49abcSAndrei Botila scatterwalk_map_and_copy(buf, req->src, 0, 1480307fd543SCristian Stoica req->nbytes, 0); 148146b49abcSAndrei Botila *buflen = *next_buflen; 14826e005503SSascha Hauer 148346b49abcSAndrei Botila print_hex_dump_debug("buf@" __stringify(__LINE__)": ", 148446b49abcSAndrei Botila DUMP_PREFIX_ADDRESS, 16, 4, buf, 148546b49abcSAndrei Botila *buflen, 1); 148646b49abcSAndrei Botila } 1487045e3678SYuan Kang 1488045e3678SYuan Kang return ret; 148958b0e5d0SMarkus Elfring unmap_ctx: 149032686d34SRussell King ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 149132686d34SRussell King kfree(edesc); 149232686d34SRussell King return ret; 1493045e3678SYuan Kang } 1494045e3678SYuan Kang 1495045e3678SYuan Kang static int ahash_finup_first(struct ahash_request *req) 1496045e3678SYuan Kang { 1497045e3678SYuan Kang return ahash_digest(req); 1498045e3678SYuan Kang } 1499045e3678SYuan Kang 1500045e3678SYuan Kang static int ahash_init(struct ahash_request *req) 1501045e3678SYuan Kang { 15024cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1503045e3678SYuan Kang 1504045e3678SYuan Kang state->update = ahash_update_first; 1505045e3678SYuan Kang state->finup = ahash_finup_first; 1506045e3678SYuan Kang state->final = ahash_final_no_ctx; 1507045e3678SYuan Kang 150887ec02e7SHoria Geantă state->ctx_dma = 0; 150965055e21SFranck LENORMAND state->ctx_dma_len = 0; 1510de0e35ecSHoria Geanta state->buf_dma = 0; 151146b49abcSAndrei Botila state->buflen = 0; 151246b49abcSAndrei Botila state->next_buflen = 0; 1513045e3678SYuan Kang 1514045e3678SYuan Kang return 0; 1515045e3678SYuan Kang } 1516045e3678SYuan Kang 1517045e3678SYuan Kang static int ahash_update(struct ahash_request *req) 1518045e3678SYuan Kang { 
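/*
 * Descriptive note on the dispatch below: ahash_update()/ahash_finup()/
 * ahash_final() call through per-request function pointers set up in
 * ahash_init(). They start at ahash_update_first / ahash_finup_first /
 * ahash_final_no_ctx, and once a first hashing job has been queued they are
 * switched to the *_ctx variants, which continue from the running digest
 * carried in state->ctx_dma.
 */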
15194cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1520045e3678SYuan Kang 1521045e3678SYuan Kang return state->update(req); 1522045e3678SYuan Kang } 1523045e3678SYuan Kang 1524045e3678SYuan Kang static int ahash_finup(struct ahash_request *req) 1525045e3678SYuan Kang { 15264cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1527045e3678SYuan Kang 1528045e3678SYuan Kang return state->finup(req); 1529045e3678SYuan Kang } 1530045e3678SYuan Kang 1531045e3678SYuan Kang static int ahash_final(struct ahash_request *req) 1532045e3678SYuan Kang { 15334cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 1534045e3678SYuan Kang 1535045e3678SYuan Kang return state->final(req); 1536045e3678SYuan Kang } 1537045e3678SYuan Kang 1538045e3678SYuan Kang static int ahash_export(struct ahash_request *req, void *out) 1539045e3678SYuan Kang { 15404cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 15415ec90831SRussell King struct caam_export_state *export = out; 154246b49abcSAndrei Botila u8 *buf = state->buf; 154346b49abcSAndrei Botila int len = state->buflen; 15445ec90831SRussell King 15455ec90831SRussell King memcpy(export->buf, buf, len); 15465ec90831SRussell King memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 15475ec90831SRussell King export->buflen = len; 15485ec90831SRussell King export->update = state->update; 15495ec90831SRussell King export->final = state->final; 15505ec90831SRussell King export->finup = state->finup; 1551434b4212SRussell King 1552045e3678SYuan Kang return 0; 1553045e3678SYuan Kang } 1554045e3678SYuan Kang 1555045e3678SYuan Kang static int ahash_import(struct ahash_request *req, const void *in) 1556045e3678SYuan Kang { 15574cb4f7c1SHerbert Xu struct caam_hash_state *state = ahash_request_ctx_dma(req); 15585ec90831SRussell King const struct caam_export_state *export = in; 1559045e3678SYuan Kang 15605ec90831SRussell King memset(state, 0, sizeof(*state)); 156146b49abcSAndrei Botila memcpy(state->buf, export->buf, export->buflen); 15625ec90831SRussell King memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 156346b49abcSAndrei Botila state->buflen = export->buflen; 15645ec90831SRussell King state->update = export->update; 15655ec90831SRussell King state->final = export->final; 15665ec90831SRussell King state->finup = export->finup; 1567434b4212SRussell King 1568045e3678SYuan Kang return 0; 1569045e3678SYuan Kang } 1570045e3678SYuan Kang 1571045e3678SYuan Kang struct caam_hash_template { 1572045e3678SYuan Kang char name[CRYPTO_MAX_ALG_NAME]; 1573045e3678SYuan Kang char driver_name[CRYPTO_MAX_ALG_NAME]; 1574b0e09baeSYuan Kang char hmac_name[CRYPTO_MAX_ALG_NAME]; 1575b0e09baeSYuan Kang char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1576045e3678SYuan Kang unsigned int blocksize; 1577045e3678SYuan Kang struct ahash_alg template_ahash; 1578045e3678SYuan Kang u32 alg_type; 1579045e3678SYuan Kang }; 1580045e3678SYuan Kang 1581045e3678SYuan Kang /* ahash descriptors */ 1582045e3678SYuan Kang static struct caam_hash_template driver_hash[] = { 1583045e3678SYuan Kang { 1584b0e09baeSYuan Kang .name = "sha1", 1585b0e09baeSYuan Kang .driver_name = "sha1-caam", 1586b0e09baeSYuan Kang .hmac_name = "hmac(sha1)", 1587b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha1-caam", 1588045e3678SYuan Kang .blocksize = SHA1_BLOCK_SIZE, 1589045e3678SYuan Kang .template_ahash = { 1590045e3678SYuan Kang .init = ahash_init, 1591045e3678SYuan Kang .update = ahash_update, 
1592045e3678SYuan Kang .final = ahash_final, 1593045e3678SYuan Kang .finup = ahash_finup, 1594045e3678SYuan Kang .digest = ahash_digest, 1595045e3678SYuan Kang .export = ahash_export, 1596045e3678SYuan Kang .import = ahash_import, 1597045e3678SYuan Kang .setkey = ahash_setkey, 1598045e3678SYuan Kang .halg = { 1599045e3678SYuan Kang .digestsize = SHA1_DIGEST_SIZE, 16005ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1601045e3678SYuan Kang }, 1602045e3678SYuan Kang }, 1603045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA1, 1604045e3678SYuan Kang }, { 1605b0e09baeSYuan Kang .name = "sha224", 1606b0e09baeSYuan Kang .driver_name = "sha224-caam", 1607b0e09baeSYuan Kang .hmac_name = "hmac(sha224)", 1608b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha224-caam", 1609045e3678SYuan Kang .blocksize = SHA224_BLOCK_SIZE, 1610045e3678SYuan Kang .template_ahash = { 1611045e3678SYuan Kang .init = ahash_init, 1612045e3678SYuan Kang .update = ahash_update, 1613045e3678SYuan Kang .final = ahash_final, 1614045e3678SYuan Kang .finup = ahash_finup, 1615045e3678SYuan Kang .digest = ahash_digest, 1616045e3678SYuan Kang .export = ahash_export, 1617045e3678SYuan Kang .import = ahash_import, 1618045e3678SYuan Kang .setkey = ahash_setkey, 1619045e3678SYuan Kang .halg = { 1620045e3678SYuan Kang .digestsize = SHA224_DIGEST_SIZE, 16215ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1622045e3678SYuan Kang }, 1623045e3678SYuan Kang }, 1624045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA224, 1625045e3678SYuan Kang }, { 1626b0e09baeSYuan Kang .name = "sha256", 1627b0e09baeSYuan Kang .driver_name = "sha256-caam", 1628b0e09baeSYuan Kang .hmac_name = "hmac(sha256)", 1629b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha256-caam", 1630045e3678SYuan Kang .blocksize = SHA256_BLOCK_SIZE, 1631045e3678SYuan Kang .template_ahash = { 1632045e3678SYuan Kang .init = ahash_init, 1633045e3678SYuan Kang .update = ahash_update, 1634045e3678SYuan Kang .final = ahash_final, 1635045e3678SYuan Kang .finup = ahash_finup, 1636045e3678SYuan Kang .digest = ahash_digest, 1637045e3678SYuan Kang .export = ahash_export, 1638045e3678SYuan Kang .import = ahash_import, 1639045e3678SYuan Kang .setkey = ahash_setkey, 1640045e3678SYuan Kang .halg = { 1641045e3678SYuan Kang .digestsize = SHA256_DIGEST_SIZE, 16425ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1643045e3678SYuan Kang }, 1644045e3678SYuan Kang }, 1645045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA256, 1646045e3678SYuan Kang }, { 1647b0e09baeSYuan Kang .name = "sha384", 1648b0e09baeSYuan Kang .driver_name = "sha384-caam", 1649b0e09baeSYuan Kang .hmac_name = "hmac(sha384)", 1650b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha384-caam", 1651045e3678SYuan Kang .blocksize = SHA384_BLOCK_SIZE, 1652045e3678SYuan Kang .template_ahash = { 1653045e3678SYuan Kang .init = ahash_init, 1654045e3678SYuan Kang .update = ahash_update, 1655045e3678SYuan Kang .final = ahash_final, 1656045e3678SYuan Kang .finup = ahash_finup, 1657045e3678SYuan Kang .digest = ahash_digest, 1658045e3678SYuan Kang .export = ahash_export, 1659045e3678SYuan Kang .import = ahash_import, 1660045e3678SYuan Kang .setkey = ahash_setkey, 1661045e3678SYuan Kang .halg = { 1662045e3678SYuan Kang .digestsize = SHA384_DIGEST_SIZE, 16635ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1664045e3678SYuan Kang }, 1665045e3678SYuan Kang }, 1666045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA384, 1667045e3678SYuan Kang }, { 1668b0e09baeSYuan Kang .name = "sha512", 
1669b0e09baeSYuan Kang .driver_name = "sha512-caam", 1670b0e09baeSYuan Kang .hmac_name = "hmac(sha512)", 1671b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha512-caam", 1672045e3678SYuan Kang .blocksize = SHA512_BLOCK_SIZE, 1673045e3678SYuan Kang .template_ahash = { 1674045e3678SYuan Kang .init = ahash_init, 1675045e3678SYuan Kang .update = ahash_update, 1676045e3678SYuan Kang .final = ahash_final, 1677045e3678SYuan Kang .finup = ahash_finup, 1678045e3678SYuan Kang .digest = ahash_digest, 1679045e3678SYuan Kang .export = ahash_export, 1680045e3678SYuan Kang .import = ahash_import, 1681045e3678SYuan Kang .setkey = ahash_setkey, 1682045e3678SYuan Kang .halg = { 1683045e3678SYuan Kang .digestsize = SHA512_DIGEST_SIZE, 16845ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1685045e3678SYuan Kang }, 1686045e3678SYuan Kang }, 1687045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA512, 1688045e3678SYuan Kang }, { 1689b0e09baeSYuan Kang .name = "md5", 1690b0e09baeSYuan Kang .driver_name = "md5-caam", 1691b0e09baeSYuan Kang .hmac_name = "hmac(md5)", 1692b0e09baeSYuan Kang .hmac_driver_name = "hmac-md5-caam", 1693045e3678SYuan Kang .blocksize = MD5_BLOCK_WORDS * 4, 1694045e3678SYuan Kang .template_ahash = { 1695045e3678SYuan Kang .init = ahash_init, 1696045e3678SYuan Kang .update = ahash_update, 1697045e3678SYuan Kang .final = ahash_final, 1698045e3678SYuan Kang .finup = ahash_finup, 1699045e3678SYuan Kang .digest = ahash_digest, 1700045e3678SYuan Kang .export = ahash_export, 1701045e3678SYuan Kang .import = ahash_import, 1702045e3678SYuan Kang .setkey = ahash_setkey, 1703045e3678SYuan Kang .halg = { 1704045e3678SYuan Kang .digestsize = MD5_DIGEST_SIZE, 17055ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1706045e3678SYuan Kang }, 1707045e3678SYuan Kang }, 1708045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_MD5, 170912b8567fSIuliana Prodan }, { 171012b8567fSIuliana Prodan .hmac_name = "xcbc(aes)", 171112b8567fSIuliana Prodan .hmac_driver_name = "xcbc-aes-caam", 171212b8567fSIuliana Prodan .blocksize = AES_BLOCK_SIZE, 171312b8567fSIuliana Prodan .template_ahash = { 171412b8567fSIuliana Prodan .init = ahash_init, 171512b8567fSIuliana Prodan .update = ahash_update, 171612b8567fSIuliana Prodan .final = ahash_final, 171712b8567fSIuliana Prodan .finup = ahash_finup, 171812b8567fSIuliana Prodan .digest = ahash_digest, 171912b8567fSIuliana Prodan .export = ahash_export, 172012b8567fSIuliana Prodan .import = ahash_import, 172112b8567fSIuliana Prodan .setkey = axcbc_setkey, 172212b8567fSIuliana Prodan .halg = { 172312b8567fSIuliana Prodan .digestsize = AES_BLOCK_SIZE, 172412b8567fSIuliana Prodan .statesize = sizeof(struct caam_export_state), 172512b8567fSIuliana Prodan }, 172612b8567fSIuliana Prodan }, 172712b8567fSIuliana Prodan .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, 172887870cfbSIuliana Prodan }, { 172987870cfbSIuliana Prodan .hmac_name = "cmac(aes)", 173087870cfbSIuliana Prodan .hmac_driver_name = "cmac-aes-caam", 173187870cfbSIuliana Prodan .blocksize = AES_BLOCK_SIZE, 173287870cfbSIuliana Prodan .template_ahash = { 173387870cfbSIuliana Prodan .init = ahash_init, 173487870cfbSIuliana Prodan .update = ahash_update, 173587870cfbSIuliana Prodan .final = ahash_final, 173687870cfbSIuliana Prodan .finup = ahash_finup, 173787870cfbSIuliana Prodan .digest = ahash_digest, 173887870cfbSIuliana Prodan .export = ahash_export, 173987870cfbSIuliana Prodan .import = ahash_import, 174087870cfbSIuliana Prodan .setkey = acmac_setkey, 174187870cfbSIuliana Prodan .halg = { 
174287870cfbSIuliana Prodan .digestsize = AES_BLOCK_SIZE, 174387870cfbSIuliana Prodan .statesize = sizeof(struct caam_export_state), 174487870cfbSIuliana Prodan }, 174587870cfbSIuliana Prodan }, 174687870cfbSIuliana Prodan .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, 1747045e3678SYuan Kang }, 1748045e3678SYuan Kang }; 1749045e3678SYuan Kang 1750045e3678SYuan Kang struct caam_hash_alg { 1751045e3678SYuan Kang struct list_head entry; 1752045e3678SYuan Kang int alg_type; 1753045e3678SYuan Kang struct ahash_alg ahash_alg; 1754045e3678SYuan Kang }; 1755045e3678SYuan Kang 1756045e3678SYuan Kang static int caam_hash_cra_init(struct crypto_tfm *tfm) 1757045e3678SYuan Kang { 1758045e3678SYuan Kang struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1759045e3678SYuan Kang struct crypto_alg *base = tfm->__crt_alg; 1760045e3678SYuan Kang struct hash_alg_common *halg = 1761045e3678SYuan Kang container_of(base, struct hash_alg_common, base); 1762045e3678SYuan Kang struct ahash_alg *alg = 1763045e3678SYuan Kang container_of(halg, struct ahash_alg, halg); 1764045e3678SYuan Kang struct caam_hash_alg *caam_hash = 1765045e3678SYuan Kang container_of(alg, struct caam_hash_alg, ahash_alg); 17664cb4f7c1SHerbert Xu struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); 1767045e3678SYuan Kang /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1768045e3678SYuan Kang static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1769045e3678SYuan Kang HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1770045e3678SYuan Kang HASH_MSG_LEN + 32, 1771045e3678SYuan Kang HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1772045e3678SYuan Kang HASH_MSG_LEN + 64, 1773045e3678SYuan Kang HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 177421b014f0SIuliana Prodan const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx, 177521b014f0SIuliana Prodan sh_desc_update); 1776bbf22344SHoria Geantă dma_addr_t dma_addr; 17777e0880b9SHoria Geantă struct caam_drv_private *priv; 1778045e3678SYuan Kang 1779045e3678SYuan Kang /* 1780cfc6f11bSRuchika Gupta * Get a Job ring from Job Ring driver to ensure in-order 1781045e3678SYuan Kang * crypto request processing per tfm 1782045e3678SYuan Kang */ 1783cfc6f11bSRuchika Gupta ctx->jrdev = caam_jr_alloc(); 1784cfc6f11bSRuchika Gupta if (IS_ERR(ctx->jrdev)) { 1785cfc6f11bSRuchika Gupta pr_err("Job Ring Device allocation for transform failed\n"); 1786cfc6f11bSRuchika Gupta return PTR_ERR(ctx->jrdev); 1787cfc6f11bSRuchika Gupta } 1788bbf22344SHoria Geantă 17897e0880b9SHoria Geantă priv = dev_get_drvdata(ctx->jrdev->parent); 179012b8567fSIuliana Prodan 179112b8567fSIuliana Prodan if (is_xcbc_aes(caam_hash->alg_type)) { 179212b8567fSIuliana Prodan ctx->dir = DMA_TO_DEVICE; 1793e9b4913aSHoria Geantă ctx->key_dir = DMA_BIDIRECTIONAL; 179412b8567fSIuliana Prodan ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 179512b8567fSIuliana Prodan ctx->ctx_len = 48; 1796e9b4913aSHoria Geantă } else if (is_cmac_aes(caam_hash->alg_type)) { 1797e9b4913aSHoria Geantă ctx->dir = DMA_TO_DEVICE; 1798e9b4913aSHoria Geantă ctx->key_dir = DMA_NONE; 1799e9b4913aSHoria Geantă ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1800e9b4913aSHoria Geantă ctx->ctx_len = 32; 1801e9b4913aSHoria Geantă } else { 1802e9b4913aSHoria Geantă if (priv->era >= 6) { 1803e9b4913aSHoria Geantă ctx->dir = DMA_BIDIRECTIONAL; 1804e9b4913aSHoria Geantă ctx->key_dir = alg->setkey ? 
DMA_TO_DEVICE : DMA_NONE; 1805e9b4913aSHoria Geantă } else { 1806e9b4913aSHoria Geantă ctx->dir = DMA_TO_DEVICE; 1807e9b4913aSHoria Geantă ctx->key_dir = DMA_NONE; 1808e9b4913aSHoria Geantă } 1809e9b4913aSHoria Geantă ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1810e9b4913aSHoria Geantă ctx->ctx_len = runninglen[(ctx->adata.algtype & 1811e9b4913aSHoria Geantă OP_ALG_ALGSEL_SUBMASK) >> 1812e9b4913aSHoria Geantă OP_ALG_ALGSEL_SHIFT]; 1813e9b4913aSHoria Geantă } 181412b8567fSIuliana Prodan 1815e9b4913aSHoria Geantă if (ctx->key_dir != DMA_NONE) { 1816a2fb864cSHoria Geantă ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, 181712b8567fSIuliana Prodan ARRAY_SIZE(ctx->key), 1818e9b4913aSHoria Geantă ctx->key_dir, 181912b8567fSIuliana Prodan DMA_ATTR_SKIP_CPU_SYNC); 1820a2fb864cSHoria Geantă if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { 182112b8567fSIuliana Prodan dev_err(ctx->jrdev, "unable to map key\n"); 182212b8567fSIuliana Prodan caam_jr_free(ctx->jrdev); 182312b8567fSIuliana Prodan return -ENOMEM; 182412b8567fSIuliana Prodan } 182512b8567fSIuliana Prodan } 18267e0880b9SHoria Geantă 1827bbf22344SHoria Geantă dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 182821b014f0SIuliana Prodan offsetof(struct caam_hash_ctx, key) - 182921b014f0SIuliana Prodan sh_desc_update_offset, 18307e0880b9SHoria Geantă ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1831bbf22344SHoria Geantă if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1832bbf22344SHoria Geantă dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 183312b8567fSIuliana Prodan 1834e9b4913aSHoria Geantă if (ctx->key_dir != DMA_NONE) 1835a2fb864cSHoria Geantă dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, 183612b8567fSIuliana Prodan ARRAY_SIZE(ctx->key), 1837e9b4913aSHoria Geantă ctx->key_dir, 183812b8567fSIuliana Prodan DMA_ATTR_SKIP_CPU_SYNC); 183912b8567fSIuliana Prodan 1840bbf22344SHoria Geantă caam_jr_free(ctx->jrdev); 1841bbf22344SHoria Geantă return -ENOMEM; 1842bbf22344SHoria Geantă } 1843bbf22344SHoria Geantă 1844bbf22344SHoria Geantă ctx->sh_desc_update_dma = dma_addr; 1845bbf22344SHoria Geantă ctx->sh_desc_update_first_dma = dma_addr + 1846bbf22344SHoria Geantă offsetof(struct caam_hash_ctx, 184721b014f0SIuliana Prodan sh_desc_update_first) - 184821b014f0SIuliana Prodan sh_desc_update_offset; 1849bbf22344SHoria Geantă ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, 185021b014f0SIuliana Prodan sh_desc_fin) - 185121b014f0SIuliana Prodan sh_desc_update_offset; 1852bbf22344SHoria Geantă ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, 185321b014f0SIuliana Prodan sh_desc_digest) - 185421b014f0SIuliana Prodan sh_desc_update_offset; 185521b014f0SIuliana Prodan 185621b014f0SIuliana Prodan ctx->enginectx.op.do_one_request = ahash_do_one_req; 1857bbf22344SHoria Geantă 18584cb4f7c1SHerbert Xu crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state)); 18599a2537d0SIuliana Prodan 18609a2537d0SIuliana Prodan /* 18619a2537d0SIuliana Prodan * For keyed hash algorithms shared descriptors 18629a2537d0SIuliana Prodan * will be created later in setkey() callback 18639a2537d0SIuliana Prodan */ 18649a2537d0SIuliana Prodan return alg->setkey ? 
0 : ahash_set_sh_desc(ahash); 1865045e3678SYuan Kang } 1866045e3678SYuan Kang 1867045e3678SYuan Kang static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1868045e3678SYuan Kang { 18694cb4f7c1SHerbert Xu struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); 1870045e3678SYuan Kang 1871bbf22344SHoria Geantă dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 187221b014f0SIuliana Prodan offsetof(struct caam_hash_ctx, key) - 187321b014f0SIuliana Prodan offsetof(struct caam_hash_ctx, sh_desc_update), 18747e0880b9SHoria Geantă ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1875e9b4913aSHoria Geantă if (ctx->key_dir != DMA_NONE) 1876a2fb864cSHoria Geantă dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, 1877e9b4913aSHoria Geantă ARRAY_SIZE(ctx->key), ctx->key_dir, 187812b8567fSIuliana Prodan DMA_ATTR_SKIP_CPU_SYNC); 1879cfc6f11bSRuchika Gupta caam_jr_free(ctx->jrdev); 1880045e3678SYuan Kang } 1881045e3678SYuan Kang 18821b46c90cSHoria Geantă void caam_algapi_hash_exit(void) 1883045e3678SYuan Kang { 1884045e3678SYuan Kang struct caam_hash_alg *t_alg, *n; 1885045e3678SYuan Kang 1886cfc6f11bSRuchika Gupta if (!hash_list.next) 1887045e3678SYuan Kang return; 1888045e3678SYuan Kang 1889cfc6f11bSRuchika Gupta list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1890045e3678SYuan Kang crypto_unregister_ahash(&t_alg->ahash_alg); 1891045e3678SYuan Kang list_del(&t_alg->entry); 1892045e3678SYuan Kang kfree(t_alg); 1893045e3678SYuan Kang } 1894045e3678SYuan Kang } 1895045e3678SYuan Kang 1896045e3678SYuan Kang static struct caam_hash_alg * 1897cfc6f11bSRuchika Gupta caam_hash_alloc(struct caam_hash_template *template, 1898b0e09baeSYuan Kang bool keyed) 1899045e3678SYuan Kang { 1900045e3678SYuan Kang struct caam_hash_alg *t_alg; 1901045e3678SYuan Kang struct ahash_alg *halg; 1902045e3678SYuan Kang struct crypto_alg *alg; 1903045e3678SYuan Kang 19049c4f9733SFabio Estevam t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 1905*3de0152bSChristophe JAILLET if (!t_alg) 1906045e3678SYuan Kang return ERR_PTR(-ENOMEM); 1907045e3678SYuan Kang 1908045e3678SYuan Kang t_alg->ahash_alg = template->template_ahash; 1909045e3678SYuan Kang halg = &t_alg->ahash_alg; 1910045e3678SYuan Kang alg = &halg->halg.base; 1911045e3678SYuan Kang 1912b0e09baeSYuan Kang if (keyed) { 1913b0e09baeSYuan Kang snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1914b0e09baeSYuan Kang template->hmac_name); 1915b0e09baeSYuan Kang snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1916b0e09baeSYuan Kang template->hmac_driver_name); 1917b0e09baeSYuan Kang } else { 1918b0e09baeSYuan Kang snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1919b0e09baeSYuan Kang template->name); 1920045e3678SYuan Kang snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1921045e3678SYuan Kang template->driver_name); 1922a0118c8bSRussell King t_alg->ahash_alg.setkey = NULL; 1923b0e09baeSYuan Kang } 1924045e3678SYuan Kang alg->cra_module = THIS_MODULE; 1925045e3678SYuan Kang alg->cra_init = caam_hash_cra_init; 1926045e3678SYuan Kang alg->cra_exit = caam_hash_cra_exit; 19274cb4f7c1SHerbert Xu alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); 1928045e3678SYuan Kang alg->cra_priority = CAAM_CRA_PRIORITY; 1929045e3678SYuan Kang alg->cra_blocksize = template->blocksize; 1930045e3678SYuan Kang alg->cra_alignmask = 0; 1931b8aa7dc5SMikulas Patocka alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; 1932045e3678SYuan Kang 1933045e3678SYuan Kang t_alg->alg_type = template->alg_type; 1934045e3678SYuan Kang 1935045e3678SYuan Kang return 
t_alg; 1936045e3678SYuan Kang } 1937045e3678SYuan Kang 19381b46c90cSHoria Geantă int caam_algapi_hash_init(struct device *ctrldev) 1939045e3678SYuan Kang { 1940045e3678SYuan Kang int i = 0, err = 0; 19411b46c90cSHoria Geantă struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 1942bf83490eSVictoria Milhoan unsigned int md_limit = SHA512_DIGEST_SIZE; 1943d239b10dSHoria Geantă u32 md_inst, md_vid; 1944045e3678SYuan Kang 1945bf83490eSVictoria Milhoan /* 1946bf83490eSVictoria Milhoan * Register crypto algorithms the device supports. First, identify 1947bf83490eSVictoria Milhoan * presence and attributes of MD block. 1948bf83490eSVictoria Milhoan */ 1949d239b10dSHoria Geantă if (priv->era < 10) { 1950ae1dd17dSHoria GeantA struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon; 1951ae1dd17dSHoria GeantA 1952ae1dd17dSHoria GeantA md_vid = (rd_reg32(&perfmon->cha_id_ls) & 1953d239b10dSHoria Geantă CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 1954ae1dd17dSHoria GeantA md_inst = (rd_reg32(&perfmon->cha_num_ls) & 1955d239b10dSHoria Geantă CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 1956d239b10dSHoria Geantă } else { 1957ae1dd17dSHoria GeantA u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha); 1958d239b10dSHoria Geantă 1959d239b10dSHoria Geantă md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 1960d239b10dSHoria Geantă md_inst = mdha & CHA_VER_NUM_MASK; 1961d239b10dSHoria Geantă } 1962bf83490eSVictoria Milhoan 1963bf83490eSVictoria Milhoan /* 1964bf83490eSVictoria Milhoan * Skip registration of any hashing algorithms if MD block 1965bf83490eSVictoria Milhoan * is not present. 1966bf83490eSVictoria Milhoan */ 19671b46c90cSHoria Geantă if (!md_inst) 19680435d47eSIuliana Prodan return 0; 1969bf83490eSVictoria Milhoan 1970bf83490eSVictoria Milhoan /* Limit digest size based on LP256 */ 1971d239b10dSHoria Geantă if (md_vid == CHA_VER_VID_MD_LP256) 1972bf83490eSVictoria Milhoan md_limit = SHA256_DIGEST_SIZE; 1973bf83490eSVictoria Milhoan 1974cfc6f11bSRuchika Gupta INIT_LIST_HEAD(&hash_list); 1975045e3678SYuan Kang 1976045e3678SYuan Kang /* register crypto algorithms the device supports */ 1977045e3678SYuan Kang for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 1978045e3678SYuan Kang struct caam_hash_alg *t_alg; 1979bf83490eSVictoria Milhoan struct caam_hash_template *alg = driver_hash + i; 1980bf83490eSVictoria Milhoan 1981bf83490eSVictoria Milhoan /* If MD size is not supported by device, skip registration */ 198212b8567fSIuliana Prodan if (is_mdha(alg->alg_type) && 198312b8567fSIuliana Prodan alg->template_ahash.halg.digestsize > md_limit) 1984bf83490eSVictoria Milhoan continue; 1985045e3678SYuan Kang 1986b0e09baeSYuan Kang /* register hmac version */ 1987bf83490eSVictoria Milhoan t_alg = caam_hash_alloc(alg, true); 1988b0e09baeSYuan Kang if (IS_ERR(t_alg)) { 1989b0e09baeSYuan Kang err = PTR_ERR(t_alg); 19900f103b37SIuliana Prodan pr_warn("%s alg allocation failed\n", 19910f103b37SIuliana Prodan alg->hmac_driver_name); 1992b0e09baeSYuan Kang continue; 1993b0e09baeSYuan Kang } 1994b0e09baeSYuan Kang 1995b0e09baeSYuan Kang err = crypto_register_ahash(&t_alg->ahash_alg); 1996b0e09baeSYuan Kang if (err) { 19976ea30f0aSRussell King pr_warn("%s alg registration failed: %d\n", 19986ea30f0aSRussell King t_alg->ahash_alg.halg.base.cra_driver_name, 19996ea30f0aSRussell King err); 2000b0e09baeSYuan Kang kfree(t_alg); 2001b0e09baeSYuan Kang } else 2002cfc6f11bSRuchika Gupta list_add_tail(&t_alg->entry, &hash_list); 2003b0e09baeSYuan Kang 200412b8567fSIuliana Prodan if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == 
OP_ALG_ALGSEL_AES) 200512b8567fSIuliana Prodan continue; 200612b8567fSIuliana Prodan 2007b0e09baeSYuan Kang /* register unkeyed version */ 2008bf83490eSVictoria Milhoan t_alg = caam_hash_alloc(alg, false); 2009045e3678SYuan Kang if (IS_ERR(t_alg)) { 2010045e3678SYuan Kang err = PTR_ERR(t_alg); 2011bf83490eSVictoria Milhoan pr_warn("%s alg allocation failed\n", alg->driver_name); 2012045e3678SYuan Kang continue; 2013045e3678SYuan Kang } 2014045e3678SYuan Kang 2015045e3678SYuan Kang err = crypto_register_ahash(&t_alg->ahash_alg); 2016045e3678SYuan Kang if (err) { 20176ea30f0aSRussell King pr_warn("%s alg registration failed: %d\n", 20186ea30f0aSRussell King t_alg->ahash_alg.halg.base.cra_driver_name, 20196ea30f0aSRussell King err); 2020045e3678SYuan Kang kfree(t_alg); 2021045e3678SYuan Kang } else 2022cfc6f11bSRuchika Gupta list_add_tail(&t_alg->entry, &hash_list); 2023045e3678SYuan Kang } 2024045e3678SYuan Kang 2025045e3678SYuan Kang return err; 2026045e3678SYuan Kang } 2027
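/*
 * Illustrative sketch (not part of this driver; assumptions noted): one way a
 * kernel caller might exercise an ahash algorithm registered above, e.g. the
 * unkeyed "sha256-caam" instance. The function example_caam_sha256() and its
 * parameters are hypothetical; the calls are the standard <crypto/hash.h>
 * ahash API. data and out are assumed to be DMA-able (e.g. kmalloc'd)
 * buffers, with out at least crypto_ahash_digestsize(tfm) bytes long.
 *
 *	static int example_caam_sha256(u8 *data, unsigned int len, u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int ret;
 *
 *		tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			crypto_free_ahash(tfm);
 *			return -ENOMEM;
 *		}
 *
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					   CRYPTO_TFM_REQ_MAY_SLEEP,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *
 *		single-shot digest: serviced in this driver by ahash_digest()
 *		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *		ahash_request_free(req);
 *		crypto_free_ahash(tfm);
 *		return ret;
 *	}
 */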