// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 *         ---------------                     ---------------
 *         | JobDesc #1  |-------------------->|  ShareDesc  |
 *         | *(packet 1) |                     |  (hashKey)  |
 *         ---------------                     | (operation) |
 *                                             ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 *         ---------------                     ---------------
 *         | JobDesc #2  |-------------------->|  ShareDesc  |
 *         | *(packet 2) |      |------------->|  (hashKey)  |
 *         ---------------      |     |------->| (operation) |
 *               .              |     |        | (load ctx2) |
 *               .              |     |        ---------------
 *         ---------------      |     |
 *         | JobDesc #3  |------|     |
 *         | *(packet 3) |            |
 *         ---------------            |
 *               .                    |
 *               .                    |
 *         ---------------            |
 *         | JobDesc #4  |------------|
 *         | *(packet 4) |
 *         ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
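
/*
 * Illustrative sketch only (not used by the driver, hence the #if 0):
 * roughly how the job descriptor pictured above is strung together with
 * the desc_constr.h helpers used throughout this file.  The function
 * name and the dma addresses/lengths are made up for the example, and
 * error handling is omitted.
 */
#if 0
static void example_build_hash_jobdesc(u32 *desc, u32 *sh_desc,
					dma_addr_t sh_desc_dma,
					dma_addr_t src_dma, u32 src_len,
					dma_addr_t dst_dma, u32 dst_len)
{
	/* Header plus pointer to the shared (hashKey/operation) descriptor */
	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	/* SEQ_IN_PTR: where this packet's input lives */
	append_seq_in_ptr(desc, src_dma, src_len, 0);

	/* SEQ_OUT_PTR: where the digest or running context is written */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
}
#endif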

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
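
/*
 * Illustrative sketch only (not part of the driver, hence the #if 0): how a
 * kernel-side user of the generic ahash API ends up exercising the shared
 * descriptors built above - a one-shot crypto_ahash_digest() maps to
 * sh_desc_digest, init+update to sh_desc_update_first/sh_desc_update, and
 * final to sh_desc_fin.  "sha256" is just an example algorithm name; whether
 * this driver actually services it depends on priority and registration.
 */
#if 0
static int example_ahash_digest_once(const void *data, unsigned int len,
				     u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* data must be linearly addressable (not on the stack) for DMA */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
#endif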

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* key is loaded from memory for UPDATE and FINALIZE states */
	ctx->adata.key_dma = ctx->key_dma;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, ctx->key_dma);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;

}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src, 1296307fd543SCristian Stoica to_hash - *buflen, 1297307fd543SCristian Stoica *next_buflen, 0); 1298045e3678SYuan Kang } 1299045e3678SYuan Kang 1300045e3678SYuan Kang desc = edesc->hw_desc; 1301045e3678SYuan Kang 13021da2be33SRuchika Gupta edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 13031da2be33SRuchika Gupta sec4_sg_bytes, 13041da2be33SRuchika Gupta DMA_TO_DEVICE); 1305ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1306ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 130732686d34SRussell King ret = -ENOMEM; 130858b0e5d0SMarkus Elfring goto unmap_ctx; 1309ce572085SHoria Geanta } 13101da2be33SRuchika Gupta 1311045e3678SYuan Kang append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1312045e3678SYuan Kang 1313ce572085SHoria Geanta ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1314ce572085SHoria Geanta if (ret) 131558b0e5d0SMarkus Elfring goto unmap_ctx; 1316045e3678SYuan Kang 1317045e3678SYuan Kang #ifdef DEBUG 1318514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1319045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 1320045e3678SYuan Kang desc_bytes(desc), 1); 1321045e3678SYuan Kang #endif 1322045e3678SYuan Kang 1323045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 132432686d34SRussell King if (ret) 132558b0e5d0SMarkus Elfring goto unmap_ctx; 132632686d34SRussell King 1327045e3678SYuan Kang ret = -EINPROGRESS; 1328045e3678SYuan Kang state->update = ahash_update_ctx; 1329045e3678SYuan Kang state->finup = ahash_finup_ctx; 1330045e3678SYuan Kang state->final = ahash_final_ctx; 1331045e3678SYuan Kang } else if (*next_buflen) { 1332307fd543SCristian Stoica scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1333307fd543SCristian Stoica req->nbytes, 0); 1334045e3678SYuan Kang *buflen = *next_buflen; 1335045e3678SYuan Kang *next_buflen = 0; 1336045e3678SYuan Kang } 1337045e3678SYuan Kang #ifdef DEBUG 1338514df281SAlex Porosanu print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1339045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1340514df281SAlex Porosanu print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1341045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1342045e3678SYuan Kang *next_buflen, 1); 1343045e3678SYuan Kang #endif 1344045e3678SYuan Kang 1345045e3678SYuan Kang return ret; 134658b0e5d0SMarkus Elfring unmap_ctx: 134732686d34SRussell King ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 134832686d34SRussell King kfree(edesc); 134932686d34SRussell King return ret; 1350045e3678SYuan Kang } 1351045e3678SYuan Kang 1352045e3678SYuan Kang /* submit ahash finup if it the first job descriptor after update */ 1353045e3678SYuan Kang static int ahash_finup_no_ctx(struct ahash_request *req) 1354045e3678SYuan Kang { 1355045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1356045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1357045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1358045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 1359019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
1360019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 13610355d23dSHoria Geantă int buflen = *current_buflen(state); 136230a43b44SRussell King u32 *desc; 1363bc13c69eSRussell King int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1364045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 1365045e3678SYuan Kang struct ahash_edesc *edesc; 13669e6df0fdSMarkus Elfring int ret; 1367045e3678SYuan Kang 136813fb8fd7SLABBE Corentin src_nents = sg_nents_for_len(req->src, req->nbytes); 1369f9970c28SLABBE Corentin if (src_nents < 0) { 1370f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1371f9970c28SLABBE Corentin return src_nents; 1372f9970c28SLABBE Corentin } 1373bc13c69eSRussell King 1374bc13c69eSRussell King if (src_nents) { 1375bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1376bc13c69eSRussell King DMA_TO_DEVICE); 1377bc13c69eSRussell King if (!mapped_nents) { 1378bc13c69eSRussell King dev_err(jrdev, "unable to DMA map source\n"); 1379bc13c69eSRussell King return -ENOMEM; 1380bc13c69eSRussell King } 1381bc13c69eSRussell King } else { 1382bc13c69eSRussell King mapped_nents = 0; 1383bc13c69eSRussell King } 1384bc13c69eSRussell King 1385045e3678SYuan Kang sec4_sg_src_index = 2; 1386bc13c69eSRussell King sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1387045e3678SYuan Kang sizeof(struct sec4_sg_entry); 1388045e3678SYuan Kang 1389045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 139030a43b44SRussell King edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 139130a43b44SRussell King ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 139230a43b44SRussell King flags); 1393045e3678SYuan Kang if (!edesc) { 1394bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1395045e3678SYuan Kang return -ENOMEM; 1396045e3678SYuan Kang } 1397045e3678SYuan Kang 1398045e3678SYuan Kang desc = edesc->hw_desc; 1399045e3678SYuan Kang 1400045e3678SYuan Kang edesc->src_nents = src_nents; 1401045e3678SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 1402045e3678SYuan Kang 1403944c3d4dSHoria Geantă ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1404944c3d4dSHoria Geantă if (ret) 1405944c3d4dSHoria Geantă goto unmap; 1406045e3678SYuan Kang 140765cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 140865cf164aSRussell King req->nbytes); 140965cf164aSRussell King if (ret) { 1410ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 141106435f34SMarkus Elfring goto unmap; 1412ce572085SHoria Geanta } 14131da2be33SRuchika Gupta 1414c19650d6SHoria Geantă ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1415c19650d6SHoria Geantă if (ret) 141606435f34SMarkus Elfring goto unmap; 1417045e3678SYuan Kang 1418045e3678SYuan Kang #ifdef DEBUG 1419514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1420045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1421045e3678SYuan Kang #endif 1422045e3678SYuan Kang 1423045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1424045e3678SYuan Kang if (!ret) { 1425045e3678SYuan Kang ret = -EINPROGRESS; 1426045e3678SYuan Kang } else { 1427c19650d6SHoria Geantă ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1428045e3678SYuan Kang kfree(edesc); 1429045e3678SYuan Kang } 1430045e3678SYuan Kang 1431045e3678SYuan Kang return ret; 143206435f34SMarkus Elfring unmap: 143306435f34SMarkus Elfring ahash_unmap(jrdev, 
edesc, req, digestsize); 143406435f34SMarkus Elfring kfree(edesc); 143506435f34SMarkus Elfring return -ENOMEM; 143606435f34SMarkus Elfring 1437045e3678SYuan Kang } 1438045e3678SYuan Kang 1439045e3678SYuan Kang /* submit first update job descriptor after init */ 1440045e3678SYuan Kang static int ahash_update_first(struct ahash_request *req) 1441045e3678SYuan Kang { 1442045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1443045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1444045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1445045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 1446019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1447019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 1448944c3d4dSHoria Geantă u8 *next_buf = alt_buf(state); 1449944c3d4dSHoria Geantă int *next_buflen = alt_buflen(state); 1450045e3678SYuan Kang int to_hash; 145112b8567fSIuliana Prodan int blocksize = crypto_ahash_blocksize(ahash); 145230a43b44SRussell King u32 *desc; 145365cf164aSRussell King int src_nents, mapped_nents; 1454045e3678SYuan Kang struct ahash_edesc *edesc; 1455045e3678SYuan Kang int ret = 0; 1456045e3678SYuan Kang 145712b8567fSIuliana Prodan *next_buflen = req->nbytes & (blocksize - 1); 1458045e3678SYuan Kang to_hash = req->nbytes - *next_buflen; 1459045e3678SYuan Kang 146012b8567fSIuliana Prodan /* 146187870cfbSIuliana Prodan * For XCBC and CMAC, if to_hash is multiple of block size, 146212b8567fSIuliana Prodan * keep last block in internal buffer 146312b8567fSIuliana Prodan */ 146487870cfbSIuliana Prodan if ((is_xcbc_aes(ctx->adata.algtype) || 146587870cfbSIuliana Prodan is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 146612b8567fSIuliana Prodan (*next_buflen == 0)) { 146712b8567fSIuliana Prodan *next_buflen = blocksize; 146812b8567fSIuliana Prodan to_hash -= blocksize; 146912b8567fSIuliana Prodan } 147012b8567fSIuliana Prodan 1471045e3678SYuan Kang if (to_hash) { 14723d5a2db6SRussell King src_nents = sg_nents_for_len(req->src, 14733d5a2db6SRussell King req->nbytes - *next_buflen); 1474f9970c28SLABBE Corentin if (src_nents < 0) { 1475f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1476f9970c28SLABBE Corentin return src_nents; 1477f9970c28SLABBE Corentin } 1478bc13c69eSRussell King 1479bc13c69eSRussell King if (src_nents) { 1480bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1481bc13c69eSRussell King DMA_TO_DEVICE); 1482bc13c69eSRussell King if (!mapped_nents) { 1483bc13c69eSRussell King dev_err(jrdev, "unable to map source for DMA\n"); 1484bc13c69eSRussell King return -ENOMEM; 1485bc13c69eSRussell King } 1486bc13c69eSRussell King } else { 1487bc13c69eSRussell King mapped_nents = 0; 1488bc13c69eSRussell King } 1489045e3678SYuan Kang 1490045e3678SYuan Kang /* 1491045e3678SYuan Kang * allocate space for base edesc and hw desc commands, 1492045e3678SYuan Kang * link tables 1493045e3678SYuan Kang */ 14945588d039SRussell King edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 
149530a43b44SRussell King mapped_nents : 0, 149630a43b44SRussell King ctx->sh_desc_update_first, 149730a43b44SRussell King ctx->sh_desc_update_first_dma, 149830a43b44SRussell King flags); 1499045e3678SYuan Kang if (!edesc) { 1500bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1501045e3678SYuan Kang return -ENOMEM; 1502045e3678SYuan Kang } 1503045e3678SYuan Kang 1504045e3678SYuan Kang edesc->src_nents = src_nents; 1505045e3678SYuan Kang 150665cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 150765cf164aSRussell King to_hash); 150865cf164aSRussell King if (ret) 150958b0e5d0SMarkus Elfring goto unmap_ctx; 1510045e3678SYuan Kang 1511045e3678SYuan Kang if (*next_buflen) 1512307fd543SCristian Stoica scatterwalk_map_and_copy(next_buf, req->src, to_hash, 1513307fd543SCristian Stoica *next_buflen, 0); 1514045e3678SYuan Kang 1515045e3678SYuan Kang desc = edesc->hw_desc; 1516045e3678SYuan Kang 1517ce572085SHoria Geanta ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1518ce572085SHoria Geanta if (ret) 151958b0e5d0SMarkus Elfring goto unmap_ctx; 1520045e3678SYuan Kang 1521045e3678SYuan Kang #ifdef DEBUG 1522514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1523045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 1524045e3678SYuan Kang desc_bytes(desc), 1); 1525045e3678SYuan Kang #endif 1526045e3678SYuan Kang 152732686d34SRussell King ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 152832686d34SRussell King if (ret) 152958b0e5d0SMarkus Elfring goto unmap_ctx; 153032686d34SRussell King 1531045e3678SYuan Kang ret = -EINPROGRESS; 1532045e3678SYuan Kang state->update = ahash_update_ctx; 1533045e3678SYuan Kang state->finup = ahash_finup_ctx; 1534045e3678SYuan Kang state->final = ahash_final_ctx; 1535045e3678SYuan Kang } else if (*next_buflen) { 1536045e3678SYuan Kang state->update = ahash_update_no_ctx; 1537045e3678SYuan Kang state->finup = ahash_finup_no_ctx; 1538045e3678SYuan Kang state->final = ahash_final_no_ctx; 1539307fd543SCristian Stoica scatterwalk_map_and_copy(next_buf, req->src, 0, 1540307fd543SCristian Stoica req->nbytes, 0); 1541944c3d4dSHoria Geantă switch_buf(state); 1542045e3678SYuan Kang } 1543045e3678SYuan Kang #ifdef DEBUG 1544514df281SAlex Porosanu print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1545045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1546045e3678SYuan Kang *next_buflen, 1); 1547045e3678SYuan Kang #endif 1548045e3678SYuan Kang 1549045e3678SYuan Kang return ret; 155058b0e5d0SMarkus Elfring unmap_ctx: 155132686d34SRussell King ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 155232686d34SRussell King kfree(edesc); 155332686d34SRussell King return ret; 1554045e3678SYuan Kang } 1555045e3678SYuan Kang 1556045e3678SYuan Kang static int ahash_finup_first(struct ahash_request *req) 1557045e3678SYuan Kang { 1558045e3678SYuan Kang return ahash_digest(req); 1559045e3678SYuan Kang } 1560045e3678SYuan Kang 1561045e3678SYuan Kang static int ahash_init(struct ahash_request *req) 1562045e3678SYuan Kang { 1563045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1564045e3678SYuan Kang 1565045e3678SYuan Kang state->update = ahash_update_first; 1566045e3678SYuan Kang state->finup = ahash_finup_first; 1567045e3678SYuan Kang state->final = ahash_final_no_ctx; 1568045e3678SYuan Kang 156987ec02e7SHoria Geantă state->ctx_dma = 0; 157065055e21SFranck LENORMAND state->ctx_dma_len = 0; 1571045e3678SYuan Kang 
state->current_buf = 0; 1572de0e35ecSHoria Geanta state->buf_dma = 0; 15736fd4b156SSteve Cornelius state->buflen_0 = 0; 15746fd4b156SSteve Cornelius state->buflen_1 = 0; 1575045e3678SYuan Kang 1576045e3678SYuan Kang return 0; 1577045e3678SYuan Kang } 1578045e3678SYuan Kang 1579045e3678SYuan Kang static int ahash_update(struct ahash_request *req) 1580045e3678SYuan Kang { 1581045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1582045e3678SYuan Kang 1583045e3678SYuan Kang return state->update(req); 1584045e3678SYuan Kang } 1585045e3678SYuan Kang 1586045e3678SYuan Kang static int ahash_finup(struct ahash_request *req) 1587045e3678SYuan Kang { 1588045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1589045e3678SYuan Kang 1590045e3678SYuan Kang return state->finup(req); 1591045e3678SYuan Kang } 1592045e3678SYuan Kang 1593045e3678SYuan Kang static int ahash_final(struct ahash_request *req) 1594045e3678SYuan Kang { 1595045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1596045e3678SYuan Kang 1597045e3678SYuan Kang return state->final(req); 1598045e3678SYuan Kang } 1599045e3678SYuan Kang 1600045e3678SYuan Kang static int ahash_export(struct ahash_request *req, void *out) 1601045e3678SYuan Kang { 1602045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 16035ec90831SRussell King struct caam_export_state *export = out; 16045ec90831SRussell King int len; 16055ec90831SRussell King u8 *buf; 1606045e3678SYuan Kang 16075ec90831SRussell King if (state->current_buf) { 16085ec90831SRussell King buf = state->buf_1; 16095ec90831SRussell King len = state->buflen_1; 16105ec90831SRussell King } else { 16115ec90831SRussell King buf = state->buf_0; 1612f456cd2dSFabio Estevam len = state->buflen_0; 16135ec90831SRussell King } 16145ec90831SRussell King 16155ec90831SRussell King memcpy(export->buf, buf, len); 16165ec90831SRussell King memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 16175ec90831SRussell King export->buflen = len; 16185ec90831SRussell King export->update = state->update; 16195ec90831SRussell King export->final = state->final; 16205ec90831SRussell King export->finup = state->finup; 1621434b4212SRussell King 1622045e3678SYuan Kang return 0; 1623045e3678SYuan Kang } 1624045e3678SYuan Kang 1625045e3678SYuan Kang static int ahash_import(struct ahash_request *req, const void *in) 1626045e3678SYuan Kang { 1627045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 16285ec90831SRussell King const struct caam_export_state *export = in; 1629045e3678SYuan Kang 16305ec90831SRussell King memset(state, 0, sizeof(*state)); 16315ec90831SRussell King memcpy(state->buf_0, export->buf, export->buflen); 16325ec90831SRussell King memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 16335ec90831SRussell King state->buflen_0 = export->buflen; 16345ec90831SRussell King state->update = export->update; 16355ec90831SRussell King state->final = export->final; 16365ec90831SRussell King state->finup = export->finup; 1637434b4212SRussell King 1638045e3678SYuan Kang return 0; 1639045e3678SYuan Kang } 1640045e3678SYuan Kang 1641045e3678SYuan Kang struct caam_hash_template { 1642045e3678SYuan Kang char name[CRYPTO_MAX_ALG_NAME]; 1643045e3678SYuan Kang char driver_name[CRYPTO_MAX_ALG_NAME]; 1644b0e09baeSYuan Kang char hmac_name[CRYPTO_MAX_ALG_NAME]; 1645b0e09baeSYuan Kang char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1646045e3678SYuan Kang unsigned int blocksize; 1647045e3678SYuan Kang struct 
ahash_alg template_ahash; 1648045e3678SYuan Kang u32 alg_type; 1649045e3678SYuan Kang }; 1650045e3678SYuan Kang 1651045e3678SYuan Kang /* ahash descriptors */ 1652045e3678SYuan Kang static struct caam_hash_template driver_hash[] = { 1653045e3678SYuan Kang { 1654b0e09baeSYuan Kang .name = "sha1", 1655b0e09baeSYuan Kang .driver_name = "sha1-caam", 1656b0e09baeSYuan Kang .hmac_name = "hmac(sha1)", 1657b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha1-caam", 1658045e3678SYuan Kang .blocksize = SHA1_BLOCK_SIZE, 1659045e3678SYuan Kang .template_ahash = { 1660045e3678SYuan Kang .init = ahash_init, 1661045e3678SYuan Kang .update = ahash_update, 1662045e3678SYuan Kang .final = ahash_final, 1663045e3678SYuan Kang .finup = ahash_finup, 1664045e3678SYuan Kang .digest = ahash_digest, 1665045e3678SYuan Kang .export = ahash_export, 1666045e3678SYuan Kang .import = ahash_import, 1667045e3678SYuan Kang .setkey = ahash_setkey, 1668045e3678SYuan Kang .halg = { 1669045e3678SYuan Kang .digestsize = SHA1_DIGEST_SIZE, 16705ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1671045e3678SYuan Kang }, 1672045e3678SYuan Kang }, 1673045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA1, 1674045e3678SYuan Kang }, { 1675b0e09baeSYuan Kang .name = "sha224", 1676b0e09baeSYuan Kang .driver_name = "sha224-caam", 1677b0e09baeSYuan Kang .hmac_name = "hmac(sha224)", 1678b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha224-caam", 1679045e3678SYuan Kang .blocksize = SHA224_BLOCK_SIZE, 1680045e3678SYuan Kang .template_ahash = { 1681045e3678SYuan Kang .init = ahash_init, 1682045e3678SYuan Kang .update = ahash_update, 1683045e3678SYuan Kang .final = ahash_final, 1684045e3678SYuan Kang .finup = ahash_finup, 1685045e3678SYuan Kang .digest = ahash_digest, 1686045e3678SYuan Kang .export = ahash_export, 1687045e3678SYuan Kang .import = ahash_import, 1688045e3678SYuan Kang .setkey = ahash_setkey, 1689045e3678SYuan Kang .halg = { 1690045e3678SYuan Kang .digestsize = SHA224_DIGEST_SIZE, 16915ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1692045e3678SYuan Kang }, 1693045e3678SYuan Kang }, 1694045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA224, 1695045e3678SYuan Kang }, { 1696b0e09baeSYuan Kang .name = "sha256", 1697b0e09baeSYuan Kang .driver_name = "sha256-caam", 1698b0e09baeSYuan Kang .hmac_name = "hmac(sha256)", 1699b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha256-caam", 1700045e3678SYuan Kang .blocksize = SHA256_BLOCK_SIZE, 1701045e3678SYuan Kang .template_ahash = { 1702045e3678SYuan Kang .init = ahash_init, 1703045e3678SYuan Kang .update = ahash_update, 1704045e3678SYuan Kang .final = ahash_final, 1705045e3678SYuan Kang .finup = ahash_finup, 1706045e3678SYuan Kang .digest = ahash_digest, 1707045e3678SYuan Kang .export = ahash_export, 1708045e3678SYuan Kang .import = ahash_import, 1709045e3678SYuan Kang .setkey = ahash_setkey, 1710045e3678SYuan Kang .halg = { 1711045e3678SYuan Kang .digestsize = SHA256_DIGEST_SIZE, 17125ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1713045e3678SYuan Kang }, 1714045e3678SYuan Kang }, 1715045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA256, 1716045e3678SYuan Kang }, { 1717b0e09baeSYuan Kang .name = "sha384", 1718b0e09baeSYuan Kang .driver_name = "sha384-caam", 1719b0e09baeSYuan Kang .hmac_name = "hmac(sha384)", 1720b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha384-caam", 1721045e3678SYuan Kang .blocksize = SHA384_BLOCK_SIZE, 1722045e3678SYuan Kang .template_ahash = { 1723045e3678SYuan Kang .init = ahash_init, 1724045e3678SYuan Kang .update = 
ahash_update, 1725045e3678SYuan Kang .final = ahash_final, 1726045e3678SYuan Kang .finup = ahash_finup, 1727045e3678SYuan Kang .digest = ahash_digest, 1728045e3678SYuan Kang .export = ahash_export, 1729045e3678SYuan Kang .import = ahash_import, 1730045e3678SYuan Kang .setkey = ahash_setkey, 1731045e3678SYuan Kang .halg = { 1732045e3678SYuan Kang .digestsize = SHA384_DIGEST_SIZE, 17335ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1734045e3678SYuan Kang }, 1735045e3678SYuan Kang }, 1736045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA384, 1737045e3678SYuan Kang }, { 1738b0e09baeSYuan Kang .name = "sha512", 1739b0e09baeSYuan Kang .driver_name = "sha512-caam", 1740b0e09baeSYuan Kang .hmac_name = "hmac(sha512)", 1741b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha512-caam", 1742045e3678SYuan Kang .blocksize = SHA512_BLOCK_SIZE, 1743045e3678SYuan Kang .template_ahash = { 1744045e3678SYuan Kang .init = ahash_init, 1745045e3678SYuan Kang .update = ahash_update, 1746045e3678SYuan Kang .final = ahash_final, 1747045e3678SYuan Kang .finup = ahash_finup, 1748045e3678SYuan Kang .digest = ahash_digest, 1749045e3678SYuan Kang .export = ahash_export, 1750045e3678SYuan Kang .import = ahash_import, 1751045e3678SYuan Kang .setkey = ahash_setkey, 1752045e3678SYuan Kang .halg = { 1753045e3678SYuan Kang .digestsize = SHA512_DIGEST_SIZE, 17545ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1755045e3678SYuan Kang }, 1756045e3678SYuan Kang }, 1757045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA512, 1758045e3678SYuan Kang }, { 1759b0e09baeSYuan Kang .name = "md5", 1760b0e09baeSYuan Kang .driver_name = "md5-caam", 1761b0e09baeSYuan Kang .hmac_name = "hmac(md5)", 1762b0e09baeSYuan Kang .hmac_driver_name = "hmac-md5-caam", 1763045e3678SYuan Kang .blocksize = MD5_BLOCK_WORDS * 4, 1764045e3678SYuan Kang .template_ahash = { 1765045e3678SYuan Kang .init = ahash_init, 1766045e3678SYuan Kang .update = ahash_update, 1767045e3678SYuan Kang .final = ahash_final, 1768045e3678SYuan Kang .finup = ahash_finup, 1769045e3678SYuan Kang .digest = ahash_digest, 1770045e3678SYuan Kang .export = ahash_export, 1771045e3678SYuan Kang .import = ahash_import, 1772045e3678SYuan Kang .setkey = ahash_setkey, 1773045e3678SYuan Kang .halg = { 1774045e3678SYuan Kang .digestsize = MD5_DIGEST_SIZE, 17755ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1776045e3678SYuan Kang }, 1777045e3678SYuan Kang }, 1778045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_MD5, 177912b8567fSIuliana Prodan }, { 178012b8567fSIuliana Prodan .hmac_name = "xcbc(aes)", 178112b8567fSIuliana Prodan .hmac_driver_name = "xcbc-aes-caam", 178212b8567fSIuliana Prodan .blocksize = AES_BLOCK_SIZE, 178312b8567fSIuliana Prodan .template_ahash = { 178412b8567fSIuliana Prodan .init = ahash_init, 178512b8567fSIuliana Prodan .update = ahash_update, 178612b8567fSIuliana Prodan .final = ahash_final, 178712b8567fSIuliana Prodan .finup = ahash_finup, 178812b8567fSIuliana Prodan .digest = ahash_digest, 178912b8567fSIuliana Prodan .export = ahash_export, 179012b8567fSIuliana Prodan .import = ahash_import, 179112b8567fSIuliana Prodan .setkey = axcbc_setkey, 179212b8567fSIuliana Prodan .halg = { 179312b8567fSIuliana Prodan .digestsize = AES_BLOCK_SIZE, 179412b8567fSIuliana Prodan .statesize = sizeof(struct caam_export_state), 179512b8567fSIuliana Prodan }, 179612b8567fSIuliana Prodan }, 179712b8567fSIuliana Prodan .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, 179887870cfbSIuliana Prodan }, { 179987870cfbSIuliana Prodan .hmac_name = 
"cmac(aes)", 180087870cfbSIuliana Prodan .hmac_driver_name = "cmac-aes-caam", 180187870cfbSIuliana Prodan .blocksize = AES_BLOCK_SIZE, 180287870cfbSIuliana Prodan .template_ahash = { 180387870cfbSIuliana Prodan .init = ahash_init, 180487870cfbSIuliana Prodan .update = ahash_update, 180587870cfbSIuliana Prodan .final = ahash_final, 180687870cfbSIuliana Prodan .finup = ahash_finup, 180787870cfbSIuliana Prodan .digest = ahash_digest, 180887870cfbSIuliana Prodan .export = ahash_export, 180987870cfbSIuliana Prodan .import = ahash_import, 181087870cfbSIuliana Prodan .setkey = acmac_setkey, 181187870cfbSIuliana Prodan .halg = { 181287870cfbSIuliana Prodan .digestsize = AES_BLOCK_SIZE, 181387870cfbSIuliana Prodan .statesize = sizeof(struct caam_export_state), 181487870cfbSIuliana Prodan }, 181587870cfbSIuliana Prodan }, 181687870cfbSIuliana Prodan .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, 1817045e3678SYuan Kang }, 1818045e3678SYuan Kang }; 1819045e3678SYuan Kang 1820045e3678SYuan Kang struct caam_hash_alg { 1821045e3678SYuan Kang struct list_head entry; 1822045e3678SYuan Kang int alg_type; 1823045e3678SYuan Kang struct ahash_alg ahash_alg; 1824045e3678SYuan Kang }; 1825045e3678SYuan Kang 1826045e3678SYuan Kang static int caam_hash_cra_init(struct crypto_tfm *tfm) 1827045e3678SYuan Kang { 1828045e3678SYuan Kang struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1829045e3678SYuan Kang struct crypto_alg *base = tfm->__crt_alg; 1830045e3678SYuan Kang struct hash_alg_common *halg = 1831045e3678SYuan Kang container_of(base, struct hash_alg_common, base); 1832045e3678SYuan Kang struct ahash_alg *alg = 1833045e3678SYuan Kang container_of(halg, struct ahash_alg, halg); 1834045e3678SYuan Kang struct caam_hash_alg *caam_hash = 1835045e3678SYuan Kang container_of(alg, struct caam_hash_alg, ahash_alg); 1836045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1837045e3678SYuan Kang /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1838045e3678SYuan Kang static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1839045e3678SYuan Kang HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1840045e3678SYuan Kang HASH_MSG_LEN + 32, 1841045e3678SYuan Kang HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1842045e3678SYuan Kang HASH_MSG_LEN + 64, 1843045e3678SYuan Kang HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1844bbf22344SHoria Geantă dma_addr_t dma_addr; 18457e0880b9SHoria Geantă struct caam_drv_private *priv; 1846045e3678SYuan Kang 1847045e3678SYuan Kang /* 1848cfc6f11bSRuchika Gupta * Get a Job ring from Job Ring driver to ensure in-order 1849045e3678SYuan Kang * crypto request processing per tfm 1850045e3678SYuan Kang */ 1851cfc6f11bSRuchika Gupta ctx->jrdev = caam_jr_alloc(); 1852cfc6f11bSRuchika Gupta if (IS_ERR(ctx->jrdev)) { 1853cfc6f11bSRuchika Gupta pr_err("Job Ring Device allocation for transform failed\n"); 1854cfc6f11bSRuchika Gupta return PTR_ERR(ctx->jrdev); 1855cfc6f11bSRuchika Gupta } 1856bbf22344SHoria Geantă 18577e0880b9SHoria Geantă priv = dev_get_drvdata(ctx->jrdev->parent); 185812b8567fSIuliana Prodan 185912b8567fSIuliana Prodan if (is_xcbc_aes(caam_hash->alg_type)) { 186012b8567fSIuliana Prodan ctx->dir = DMA_TO_DEVICE; 186112b8567fSIuliana Prodan ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 186212b8567fSIuliana Prodan ctx->ctx_len = 48; 186312b8567fSIuliana Prodan 186412b8567fSIuliana Prodan ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, 186512b8567fSIuliana Prodan ARRAY_SIZE(ctx->key), 186612b8567fSIuliana Prodan DMA_BIDIRECTIONAL, 186712b8567fSIuliana 
Prodan DMA_ATTR_SKIP_CPU_SYNC); 186812b8567fSIuliana Prodan if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { 186912b8567fSIuliana Prodan dev_err(ctx->jrdev, "unable to map key\n"); 187012b8567fSIuliana Prodan caam_jr_free(ctx->jrdev); 187112b8567fSIuliana Prodan return -ENOMEM; 187212b8567fSIuliana Prodan } 187387870cfbSIuliana Prodan } else if (is_cmac_aes(caam_hash->alg_type)) { 187487870cfbSIuliana Prodan ctx->dir = DMA_TO_DEVICE; 187587870cfbSIuliana Prodan ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 187687870cfbSIuliana Prodan ctx->ctx_len = 32; 187712b8567fSIuliana Prodan } else { 18787e0880b9SHoria Geantă ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 187912b8567fSIuliana Prodan ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 188012b8567fSIuliana Prodan ctx->ctx_len = runninglen[(ctx->adata.algtype & 188112b8567fSIuliana Prodan OP_ALG_ALGSEL_SUBMASK) >> 188212b8567fSIuliana Prodan OP_ALG_ALGSEL_SHIFT]; 188312b8567fSIuliana Prodan } 18847e0880b9SHoria Geantă 1885bbf22344SHoria Geantă dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 18868e731ee5SHoria Geantă offsetof(struct caam_hash_ctx, key), 18877e0880b9SHoria Geantă ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1888bbf22344SHoria Geantă if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1889bbf22344SHoria Geantă dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 189012b8567fSIuliana Prodan 189112b8567fSIuliana Prodan if (is_xcbc_aes(caam_hash->alg_type)) 189212b8567fSIuliana Prodan dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, 189312b8567fSIuliana Prodan ARRAY_SIZE(ctx->key), 189412b8567fSIuliana Prodan DMA_BIDIRECTIONAL, 189512b8567fSIuliana Prodan DMA_ATTR_SKIP_CPU_SYNC); 189612b8567fSIuliana Prodan 1897bbf22344SHoria Geantă caam_jr_free(ctx->jrdev); 1898bbf22344SHoria Geantă return -ENOMEM; 1899bbf22344SHoria Geantă } 1900bbf22344SHoria Geantă 1901bbf22344SHoria Geantă ctx->sh_desc_update_dma = dma_addr; 1902bbf22344SHoria Geantă ctx->sh_desc_update_first_dma = dma_addr + 1903bbf22344SHoria Geantă offsetof(struct caam_hash_ctx, 1904bbf22344SHoria Geantă sh_desc_update_first); 1905bbf22344SHoria Geantă ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, 1906bbf22344SHoria Geantă sh_desc_fin); 1907bbf22344SHoria Geantă ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, 1908bbf22344SHoria Geantă sh_desc_digest); 1909bbf22344SHoria Geantă 1910045e3678SYuan Kang crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1911045e3678SYuan Kang sizeof(struct caam_hash_state)); 19129a2537d0SIuliana Prodan 19139a2537d0SIuliana Prodan /* 19149a2537d0SIuliana Prodan * For keyed hash algorithms shared descriptors 19159a2537d0SIuliana Prodan * will be created later in setkey() callback 19169a2537d0SIuliana Prodan */ 19179a2537d0SIuliana Prodan return alg->setkey ? 
0 : ahash_set_sh_desc(ahash); 1918045e3678SYuan Kang } 1919045e3678SYuan Kang 1920045e3678SYuan Kang static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1921045e3678SYuan Kang { 1922045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1923045e3678SYuan Kang 1924bbf22344SHoria Geantă dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 192512b8567fSIuliana Prodan offsetof(struct caam_hash_ctx, key), 19267e0880b9SHoria Geantă ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 192712b8567fSIuliana Prodan if (is_xcbc_aes(ctx->adata.algtype)) 192812b8567fSIuliana Prodan dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, 192912b8567fSIuliana Prodan ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, 193012b8567fSIuliana Prodan DMA_ATTR_SKIP_CPU_SYNC); 1931cfc6f11bSRuchika Gupta caam_jr_free(ctx->jrdev); 1932045e3678SYuan Kang } 1933045e3678SYuan Kang 1934045e3678SYuan Kang static void __exit caam_algapi_hash_exit(void) 1935045e3678SYuan Kang { 1936045e3678SYuan Kang struct caam_hash_alg *t_alg, *n; 1937045e3678SYuan Kang 1938cfc6f11bSRuchika Gupta if (!hash_list.next) 1939045e3678SYuan Kang return; 1940045e3678SYuan Kang 1941cfc6f11bSRuchika Gupta list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1942045e3678SYuan Kang crypto_unregister_ahash(&t_alg->ahash_alg); 1943045e3678SYuan Kang list_del(&t_alg->entry); 1944045e3678SYuan Kang kfree(t_alg); 1945045e3678SYuan Kang } 1946045e3678SYuan Kang } 1947045e3678SYuan Kang 1948045e3678SYuan Kang static struct caam_hash_alg * 1949cfc6f11bSRuchika Gupta caam_hash_alloc(struct caam_hash_template *template, 1950b0e09baeSYuan Kang bool keyed) 1951045e3678SYuan Kang { 1952045e3678SYuan Kang struct caam_hash_alg *t_alg; 1953045e3678SYuan Kang struct ahash_alg *halg; 1954045e3678SYuan Kang struct crypto_alg *alg; 1955045e3678SYuan Kang 19569c4f9733SFabio Estevam t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 1957045e3678SYuan Kang if (!t_alg) { 1958cfc6f11bSRuchika Gupta pr_err("failed to allocate t_alg\n"); 1959045e3678SYuan Kang return ERR_PTR(-ENOMEM); 1960045e3678SYuan Kang } 1961045e3678SYuan Kang 1962045e3678SYuan Kang t_alg->ahash_alg = template->template_ahash; 1963045e3678SYuan Kang halg = &t_alg->ahash_alg; 1964045e3678SYuan Kang alg = &halg->halg.base; 1965045e3678SYuan Kang 1966b0e09baeSYuan Kang if (keyed) { 1967b0e09baeSYuan Kang snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1968b0e09baeSYuan Kang template->hmac_name); 1969b0e09baeSYuan Kang snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1970b0e09baeSYuan Kang template->hmac_driver_name); 1971b0e09baeSYuan Kang } else { 1972b0e09baeSYuan Kang snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1973b0e09baeSYuan Kang template->name); 1974045e3678SYuan Kang snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1975045e3678SYuan Kang template->driver_name); 1976a0118c8bSRussell King t_alg->ahash_alg.setkey = NULL; 1977b0e09baeSYuan Kang } 1978045e3678SYuan Kang alg->cra_module = THIS_MODULE; 1979045e3678SYuan Kang alg->cra_init = caam_hash_cra_init; 1980045e3678SYuan Kang alg->cra_exit = caam_hash_cra_exit; 1981045e3678SYuan Kang alg->cra_ctxsize = sizeof(struct caam_hash_ctx); 1982045e3678SYuan Kang alg->cra_priority = CAAM_CRA_PRIORITY; 1983045e3678SYuan Kang alg->cra_blocksize = template->blocksize; 1984045e3678SYuan Kang alg->cra_alignmask = 0; 19856a38f622SEric Biggers alg->cra_flags = CRYPTO_ALG_ASYNC; 1986045e3678SYuan Kang 1987045e3678SYuan Kang t_alg->alg_type = template->alg_type; 1988045e3678SYuan Kang 1989045e3678SYuan Kang return t_alg; 1990045e3678SYuan Kang } 
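/*
 * Editor's note: illustrative sketch only, not part of this driver. The
 * ahash_export()/ahash_import() callbacks above, together with .statesize in
 * the templates above, let a caller snapshot a partially hashed request and
 * resume it later through the generic crypto ahash API. The function name,
 * parameters and error handling below are hypothetical; only standard
 * crypto API calls are used.
 *
 *	static int example_split_digest(struct crypto_ahash *tfm,
 *					struct scatterlist *part1,
 *					unsigned int len1,
 *					struct scatterlist *part2,
 *					unsigned int len2, u8 *digest)
 *	{
 *		DECLARE_CRYPTO_WAIT(wait);
 *		struct ahash_request *req;
 *		void *state;
 *		int ret;
 *
 *		state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!state || !req) {
 *			ret = -ENOMEM;
 *			goto out;
 *		}
 *
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					   CRYPTO_TFM_REQ_MAY_SLEEP,
 *					   crypto_req_done, &wait);
 *
 *		// hash the first part, then snapshot the running state
 *		ahash_request_set_crypt(req, part1, NULL, len1);
 *		ret = crypto_wait_req(crypto_ahash_init(req), &wait);
 *		if (!ret)
 *			ret = crypto_wait_req(crypto_ahash_update(req), &wait);
 *		if (!ret)
 *			ret = crypto_ahash_export(req, state);
 *
 *		// later: restore the snapshot and finish with the second part
 *		if (!ret)
 *			ret = crypto_ahash_import(req, state);
 *		if (!ret) {
 *			ahash_request_set_crypt(req, part2, digest, len2);
 *			ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
 *		}
 *	out:
 *		ahash_request_free(req);
 *		kfree(state);
 *		return ret;
 *	}
 */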
1991045e3678SYuan Kang 1992045e3678SYuan Kang static int __init caam_algapi_hash_init(void) 1993045e3678SYuan Kang { 199435af6403SRuchika Gupta struct device_node *dev_node; 199535af6403SRuchika Gupta struct platform_device *pdev; 199635af6403SRuchika Gupta struct device *ctrldev; 1997045e3678SYuan Kang int i = 0, err = 0; 1998bf83490eSVictoria Milhoan struct caam_drv_private *priv; 1999bf83490eSVictoria Milhoan unsigned int md_limit = SHA512_DIGEST_SIZE; 2000d239b10dSHoria Geantă u32 md_inst, md_vid; 2001045e3678SYuan Kang 200235af6403SRuchika Gupta dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 200335af6403SRuchika Gupta if (!dev_node) { 200435af6403SRuchika Gupta dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 200535af6403SRuchika Gupta if (!dev_node) 200635af6403SRuchika Gupta return -ENODEV; 200735af6403SRuchika Gupta } 200835af6403SRuchika Gupta 200935af6403SRuchika Gupta pdev = of_find_device_by_node(dev_node); 201035af6403SRuchika Gupta if (!pdev) { 201135af6403SRuchika Gupta of_node_put(dev_node); 201235af6403SRuchika Gupta return -ENODEV; 201335af6403SRuchika Gupta } 201435af6403SRuchika Gupta 201535af6403SRuchika Gupta ctrldev = &pdev->dev; 201635af6403SRuchika Gupta priv = dev_get_drvdata(ctrldev); 201735af6403SRuchika Gupta of_node_put(dev_node); 201835af6403SRuchika Gupta 201935af6403SRuchika Gupta /* 202035af6403SRuchika Gupta * If priv is NULL, it's probably because the caam driver wasn't 202135af6403SRuchika Gupta * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 202235af6403SRuchika Gupta */ 202335af6403SRuchika Gupta if (!priv) 202435af6403SRuchika Gupta return -ENODEV; 202535af6403SRuchika Gupta 2026bf83490eSVictoria Milhoan /* 2027bf83490eSVictoria Milhoan * Register crypto algorithms the device supports. First, identify 2028bf83490eSVictoria Milhoan * presence and attributes of MD block. 2029bf83490eSVictoria Milhoan */ 2030d239b10dSHoria Geantă if (priv->era < 10) { 2031d239b10dSHoria Geantă md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) & 2032d239b10dSHoria Geantă CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2033d239b10dSHoria Geantă md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & 2034d239b10dSHoria Geantă CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2035d239b10dSHoria Geantă } else { 2036d239b10dSHoria Geantă u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha); 2037d239b10dSHoria Geantă 2038d239b10dSHoria Geantă md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2039d239b10dSHoria Geantă md_inst = mdha & CHA_VER_NUM_MASK; 2040d239b10dSHoria Geantă } 2041bf83490eSVictoria Milhoan 2042bf83490eSVictoria Milhoan /* 2043bf83490eSVictoria Milhoan * Skip registration of any hashing algorithms if MD block 2044bf83490eSVictoria Milhoan * is not present. 
2045bf83490eSVictoria Milhoan */ 2046d239b10dSHoria Geantă if (!md_inst) 2047bf83490eSVictoria Milhoan return -ENODEV; 2048bf83490eSVictoria Milhoan 2049bf83490eSVictoria Milhoan /* Limit digest size based on LP256 */ 2050d239b10dSHoria Geantă if (md_vid == CHA_VER_VID_MD_LP256) 2051bf83490eSVictoria Milhoan md_limit = SHA256_DIGEST_SIZE; 2052bf83490eSVictoria Milhoan 2053cfc6f11bSRuchika Gupta INIT_LIST_HEAD(&hash_list); 2054045e3678SYuan Kang 2055045e3678SYuan Kang /* register crypto algorithms the device supports */ 2056045e3678SYuan Kang for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 2057045e3678SYuan Kang struct caam_hash_alg *t_alg; 2058bf83490eSVictoria Milhoan struct caam_hash_template *alg = driver_hash + i; 2059bf83490eSVictoria Milhoan 2060bf83490eSVictoria Milhoan /* If MD size is not supported by device, skip registration */ 206112b8567fSIuliana Prodan if (is_mdha(alg->alg_type) && 206212b8567fSIuliana Prodan alg->template_ahash.halg.digestsize > md_limit) 2063bf83490eSVictoria Milhoan continue; 2064045e3678SYuan Kang 2065b0e09baeSYuan Kang /* register hmac version */ 2066bf83490eSVictoria Milhoan t_alg = caam_hash_alloc(alg, true); 2067b0e09baeSYuan Kang if (IS_ERR(t_alg)) { 2068b0e09baeSYuan Kang err = PTR_ERR(t_alg); 20690f103b37SIuliana Prodan pr_warn("%s alg allocation failed\n", 20700f103b37SIuliana Prodan alg->hmac_driver_name); 2071b0e09baeSYuan Kang continue; 2072b0e09baeSYuan Kang } 2073b0e09baeSYuan Kang 2074b0e09baeSYuan Kang err = crypto_register_ahash(&t_alg->ahash_alg); 2075b0e09baeSYuan Kang if (err) { 20766ea30f0aSRussell King pr_warn("%s alg registration failed: %d\n", 20776ea30f0aSRussell King t_alg->ahash_alg.halg.base.cra_driver_name, 20786ea30f0aSRussell King err); 2079b0e09baeSYuan Kang kfree(t_alg); 2080b0e09baeSYuan Kang } else 2081cfc6f11bSRuchika Gupta list_add_tail(&t_alg->entry, &hash_list); 2082b0e09baeSYuan Kang 208312b8567fSIuliana Prodan if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) 208412b8567fSIuliana Prodan continue; 208512b8567fSIuliana Prodan 2086b0e09baeSYuan Kang /* register unkeyed version */ 2087bf83490eSVictoria Milhoan t_alg = caam_hash_alloc(alg, false); 2088045e3678SYuan Kang if (IS_ERR(t_alg)) { 2089045e3678SYuan Kang err = PTR_ERR(t_alg); 2090bf83490eSVictoria Milhoan pr_warn("%s alg allocation failed\n", alg->driver_name); 2091045e3678SYuan Kang continue; 2092045e3678SYuan Kang } 2093045e3678SYuan Kang 2094045e3678SYuan Kang err = crypto_register_ahash(&t_alg->ahash_alg); 2095045e3678SYuan Kang if (err) { 20966ea30f0aSRussell King pr_warn("%s alg registration failed: %d\n", 20976ea30f0aSRussell King t_alg->ahash_alg.halg.base.cra_driver_name, 20986ea30f0aSRussell King err); 2099045e3678SYuan Kang kfree(t_alg); 2100045e3678SYuan Kang } else 2101cfc6f11bSRuchika Gupta list_add_tail(&t_alg->entry, &hash_list); 2102045e3678SYuan Kang } 2103045e3678SYuan Kang 2104045e3678SYuan Kang return err; 2105045e3678SYuan Kang } 2106045e3678SYuan Kang 2107045e3678SYuan Kang module_init(caam_algapi_hash_init); 2108045e3678SYuan Kang module_exit(caam_algapi_hash_exit); 2109045e3678SYuan Kang 2110045e3678SYuan Kang MODULE_LICENSE("GPL"); 2111045e3678SYuan Kang MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); 2112045e3678SYuan Kang MODULE_AUTHOR("Freescale Semiconductor - NMG"); 2113
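/*
 * Editor's note: usage sketch, for illustration only. A kernel consumer
 * reaches the hashes registered above through the generic crypto API; with
 * CAAM_CRA_PRIORITY 3000 this driver is normally chosen over the software
 * implementation, or a specific instance such as "sha256-caam" can be
 * requested by driver name. The function name below is hypothetical, and
 * the data buffer must be DMA-able (not on the stack), since the digest
 * path maps req->src for DMA. The digest buffer must hold at least
 * crypto_ahash_digestsize(tfm) bytes.
 *
 *	#include <crypto/hash.h>
 *	#include <linux/scatterlist.h>
 *
 *	static int example_ahash_digest(const u8 *data, unsigned int len,
 *					u8 *digest)
 *	{
 *		DECLARE_CRYPTO_WAIT(wait);
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		int ret;
 *
 *		tfm = crypto_alloc_ahash("sha256", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			crypto_free_ahash(tfm);
 *			return -ENOMEM;
 *		}
 *
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					   CRYPTO_TFM_REQ_MAY_SLEEP,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, &sg, digest, len);
 *
 *		// one-shot digest; waits for the job ring completion callback
 *		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *		ahash_request_free(req);
 *		crypto_free_ahash(tfm);
 *		return ret;
 *	}
 */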