1045e3678SYuan Kang /* 2045e3678SYuan Kang * caam - Freescale FSL CAAM support for ahash functions of crypto API 3045e3678SYuan Kang * 4045e3678SYuan Kang * Copyright 2011 Freescale Semiconductor, Inc. 5045e3678SYuan Kang * 6045e3678SYuan Kang * Based on caamalg.c crypto API driver. 7045e3678SYuan Kang * 8045e3678SYuan Kang * relationship of digest job descriptor or first job descriptor after init to 9045e3678SYuan Kang * shared descriptors: 10045e3678SYuan Kang * 11045e3678SYuan Kang * --------------- --------------- 12045e3678SYuan Kang * | JobDesc #1 |-------------------->| ShareDesc | 13045e3678SYuan Kang * | *(packet 1) | | (hashKey) | 14045e3678SYuan Kang * --------------- | (operation) | 15045e3678SYuan Kang * --------------- 16045e3678SYuan Kang * 17045e3678SYuan Kang * relationship of subsequent job descriptors to shared descriptors: 18045e3678SYuan Kang * 19045e3678SYuan Kang * --------------- --------------- 20045e3678SYuan Kang * | JobDesc #2 |-------------------->| ShareDesc | 21045e3678SYuan Kang * | *(packet 2) | |------------->| (hashKey) | 22045e3678SYuan Kang * --------------- | |-------->| (operation) | 23045e3678SYuan Kang * . | | | (load ctx2) | 24045e3678SYuan Kang * . | | --------------- 25045e3678SYuan Kang * --------------- | | 26045e3678SYuan Kang * | JobDesc #3 |------| | 27045e3678SYuan Kang * | *(packet 3) | | 28045e3678SYuan Kang * --------------- | 29045e3678SYuan Kang * . | 30045e3678SYuan Kang * . | 31045e3678SYuan Kang * --------------- | 32045e3678SYuan Kang * | JobDesc #4 |------------ 33045e3678SYuan Kang * | *(packet 4) | 34045e3678SYuan Kang * --------------- 35045e3678SYuan Kang * 36045e3678SYuan Kang * The SharedDesc never changes for a connection unless rekeyed, but 37045e3678SYuan Kang * each packet will likely be in a different place. So all we need 38045e3678SYuan Kang * to know to process the packet is where the input is, where the 39045e3678SYuan Kang * output goes, and what context we want to process with. Context is 40045e3678SYuan Kang * in the SharedDesc, packet references in the JobDesc. 41045e3678SYuan Kang * 42045e3678SYuan Kang * So, a job desc looks like: 43045e3678SYuan Kang * 44045e3678SYuan Kang * --------------------- 45045e3678SYuan Kang * | Header | 46045e3678SYuan Kang * | ShareDesc Pointer | 47045e3678SYuan Kang * | SEQ_OUT_PTR | 48045e3678SYuan Kang * | (output buffer) | 49045e3678SYuan Kang * | (output length) | 50045e3678SYuan Kang * | SEQ_IN_PTR | 51045e3678SYuan Kang * | (input buffer) | 52045e3678SYuan Kang * | (input length) | 53045e3678SYuan Kang * --------------------- 54045e3678SYuan Kang */ 55045e3678SYuan Kang 56045e3678SYuan Kang #include "compat.h" 57045e3678SYuan Kang 58045e3678SYuan Kang #include "regs.h" 59045e3678SYuan Kang #include "intern.h" 60045e3678SYuan Kang #include "desc_constr.h" 61045e3678SYuan Kang #include "jr.h" 62045e3678SYuan Kang #include "error.h" 63045e3678SYuan Kang #include "sg_sw_sec4.h" 64045e3678SYuan Kang #include "key_gen.h" 65*0efa7579SHoria Geantă #include "caamhash_desc.h" 66045e3678SYuan Kang 67045e3678SYuan Kang #define CAAM_CRA_PRIORITY 3000 68045e3678SYuan Kang 69045e3678SYuan Kang /* max hash key is max split key size */ 70045e3678SYuan Kang #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) 71045e3678SYuan Kang 72045e3678SYuan Kang #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE 73045e3678SYuan Kang #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 74045e3678SYuan Kang 75045e3678SYuan Kang #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ 76045e3678SYuan Kang CAAM_MAX_HASH_KEY_SIZE) 77045e3678SYuan Kang #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) 78045e3678SYuan Kang 79045e3678SYuan Kang /* caam context sizes for hashes: running digest + 8 */ 80045e3678SYuan Kang #define HASH_MSG_LEN 8 81045e3678SYuan Kang #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 82045e3678SYuan Kang 83045e3678SYuan Kang #ifdef DEBUG 84045e3678SYuan Kang /* for print_hex_dumps with line references */ 85045e3678SYuan Kang #define debug(format, arg...) printk(format, arg) 86045e3678SYuan Kang #else 87045e3678SYuan Kang #define debug(format, arg...) 88045e3678SYuan Kang #endif 89045e3678SYuan Kang 90cfc6f11bSRuchika Gupta 91cfc6f11bSRuchika Gupta static struct list_head hash_list; 92cfc6f11bSRuchika Gupta 93045e3678SYuan Kang /* ahash per-session context */ 94045e3678SYuan Kang struct caam_hash_ctx { 95e11793f5SRussell King u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 96e11793f5SRussell King u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 97e11793f5SRussell King u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 98e11793f5SRussell King u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 99e11793f5SRussell King dma_addr_t sh_desc_update_dma ____cacheline_aligned; 100045e3678SYuan Kang dma_addr_t sh_desc_update_first_dma; 101045e3678SYuan Kang dma_addr_t sh_desc_fin_dma; 102045e3678SYuan Kang dma_addr_t sh_desc_digest_dma; 1037e0880b9SHoria Geantă enum dma_data_direction dir; 104e11793f5SRussell King struct device *jrdev; 105045e3678SYuan Kang u8 key[CAAM_MAX_HASH_KEY_SIZE]; 106045e3678SYuan Kang int ctx_len; 107db57656bSHoria Geantă struct alginfo adata; 108045e3678SYuan Kang }; 109045e3678SYuan Kang 110045e3678SYuan Kang /* ahash state */ 111045e3678SYuan Kang struct caam_hash_state { 112045e3678SYuan Kang dma_addr_t buf_dma; 113045e3678SYuan Kang dma_addr_t ctx_dma; 114045e3678SYuan Kang u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 115045e3678SYuan Kang int buflen_0; 116045e3678SYuan Kang u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 117045e3678SYuan Kang int buflen_1; 118e7472422SVictoria Milhoan u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; 119045e3678SYuan Kang int (*update)(struct ahash_request *req); 120045e3678SYuan Kang int (*final)(struct ahash_request *req); 121045e3678SYuan Kang int (*finup)(struct ahash_request *req); 122045e3678SYuan Kang int current_buf; 123045e3678SYuan Kang }; 124045e3678SYuan Kang 1255ec90831SRussell King struct caam_export_state { 1265ec90831SRussell King u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; 1275ec90831SRussell King u8 caam_ctx[MAX_CTX_LEN]; 1285ec90831SRussell King int buflen; 1295ec90831SRussell King int (*update)(struct ahash_request *req); 1305ec90831SRussell King int (*final)(struct ahash_request *req); 1315ec90831SRussell King int (*finup)(struct ahash_request *req); 1325ec90831SRussell King }; 1335ec90831SRussell King 1340355d23dSHoria Geantă static inline void switch_buf(struct caam_hash_state *state) 1350355d23dSHoria Geantă { 1360355d23dSHoria Geantă state->current_buf ^= 1; 1370355d23dSHoria Geantă } 1380355d23dSHoria Geantă 1390355d23dSHoria Geantă static inline u8 *current_buf(struct caam_hash_state *state) 1400355d23dSHoria Geantă { 1410355d23dSHoria Geantă return state->current_buf ? state->buf_1 : state->buf_0; 1420355d23dSHoria Geantă } 1430355d23dSHoria Geantă 1440355d23dSHoria Geantă static inline u8 *alt_buf(struct caam_hash_state *state) 1450355d23dSHoria Geantă { 1460355d23dSHoria Geantă return state->current_buf ? state->buf_0 : state->buf_1; 1470355d23dSHoria Geantă } 1480355d23dSHoria Geantă 1490355d23dSHoria Geantă static inline int *current_buflen(struct caam_hash_state *state) 1500355d23dSHoria Geantă { 1510355d23dSHoria Geantă return state->current_buf ? &state->buflen_1 : &state->buflen_0; 1520355d23dSHoria Geantă } 1530355d23dSHoria Geantă 1540355d23dSHoria Geantă static inline int *alt_buflen(struct caam_hash_state *state) 1550355d23dSHoria Geantă { 1560355d23dSHoria Geantă return state->current_buf ? &state->buflen_0 : &state->buflen_1; 1570355d23dSHoria Geantă } 1580355d23dSHoria Geantă 159045e3678SYuan Kang /* Common job descriptor seq in/out ptr routines */ 160045e3678SYuan Kang 161045e3678SYuan Kang /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 162ce572085SHoria Geanta static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, 163045e3678SYuan Kang struct caam_hash_state *state, 164045e3678SYuan Kang int ctx_len) 165045e3678SYuan Kang { 166045e3678SYuan Kang state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, 167045e3678SYuan Kang ctx_len, DMA_FROM_DEVICE); 168ce572085SHoria Geanta if (dma_mapping_error(jrdev, state->ctx_dma)) { 169ce572085SHoria Geanta dev_err(jrdev, "unable to map ctx\n"); 17087ec02e7SHoria Geantă state->ctx_dma = 0; 171ce572085SHoria Geanta return -ENOMEM; 172ce572085SHoria Geanta } 173ce572085SHoria Geanta 174045e3678SYuan Kang append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); 175ce572085SHoria Geanta 176ce572085SHoria Geanta return 0; 177045e3678SYuan Kang } 178045e3678SYuan Kang 179045e3678SYuan Kang /* Map req->result, and append seq_out_ptr command that points to it */ 180045e3678SYuan Kang static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, 181045e3678SYuan Kang u8 *result, int digestsize) 182045e3678SYuan Kang { 183045e3678SYuan Kang dma_addr_t dst_dma; 184045e3678SYuan Kang 185045e3678SYuan Kang dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); 186045e3678SYuan Kang append_seq_out_ptr(desc, dst_dma, digestsize, 0); 187045e3678SYuan Kang 188045e3678SYuan Kang return dst_dma; 189045e3678SYuan Kang } 190045e3678SYuan Kang 191944c3d4dSHoria Geantă /* Map current buffer in state (if length > 0) and put it in link table */ 192944c3d4dSHoria Geantă static inline int buf_map_to_sec4_sg(struct device *jrdev, 193045e3678SYuan Kang struct sec4_sg_entry *sec4_sg, 194944c3d4dSHoria Geantă struct caam_hash_state *state) 195045e3678SYuan Kang { 196944c3d4dSHoria Geantă int buflen = *current_buflen(state); 197045e3678SYuan Kang 198944c3d4dSHoria Geantă if (!buflen) 199944c3d4dSHoria Geantă return 0; 200045e3678SYuan Kang 201944c3d4dSHoria Geantă state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen, 202944c3d4dSHoria Geantă DMA_TO_DEVICE); 203944c3d4dSHoria Geantă if (dma_mapping_error(jrdev, state->buf_dma)) { 204944c3d4dSHoria Geantă dev_err(jrdev, "unable to map buf\n"); 205944c3d4dSHoria Geantă state->buf_dma = 0; 206944c3d4dSHoria Geantă return -ENOMEM; 207045e3678SYuan Kang } 208045e3678SYuan Kang 209944c3d4dSHoria Geantă dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); 210045e3678SYuan Kang 211944c3d4dSHoria Geantă return 0; 212045e3678SYuan Kang } 213045e3678SYuan Kang 214045e3678SYuan Kang /* Map state->caam_ctx, and add it to link table */ 215dfcd8393SHoria Geantă static inline int ctx_map_to_sec4_sg(struct device *jrdev, 216ce572085SHoria Geanta struct caam_hash_state *state, int ctx_len, 217ce572085SHoria Geanta struct sec4_sg_entry *sec4_sg, u32 flag) 218045e3678SYuan Kang { 219045e3678SYuan Kang state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); 220ce572085SHoria Geanta if (dma_mapping_error(jrdev, state->ctx_dma)) { 221ce572085SHoria Geanta dev_err(jrdev, "unable to map ctx\n"); 22287ec02e7SHoria Geantă state->ctx_dma = 0; 223ce572085SHoria Geanta return -ENOMEM; 224ce572085SHoria Geanta } 225ce572085SHoria Geanta 226045e3678SYuan Kang dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); 227ce572085SHoria Geanta 228ce572085SHoria Geanta return 0; 229045e3678SYuan Kang } 230045e3678SYuan Kang 231045e3678SYuan Kang static int ahash_set_sh_desc(struct crypto_ahash *ahash) 232045e3678SYuan Kang { 233045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 234045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 235045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 2367e0880b9SHoria Geantă struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 237045e3678SYuan Kang u32 *desc; 238045e3678SYuan Kang 2397e0880b9SHoria Geantă ctx->adata.key_virt = ctx->key; 2407e0880b9SHoria Geantă 241045e3678SYuan Kang /* ahash_update shared descriptor */ 242045e3678SYuan Kang desc = ctx->sh_desc_update; 243*0efa7579SHoria Geantă cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, 244*0efa7579SHoria Geantă ctx->ctx_len, true, ctrlpriv->era); 245bbf22344SHoria Geantă dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 2467e0880b9SHoria Geantă desc_bytes(desc), ctx->dir); 247045e3678SYuan Kang #ifdef DEBUG 248514df281SAlex Porosanu print_hex_dump(KERN_ERR, 249514df281SAlex Porosanu "ahash update shdesc@"__stringify(__LINE__)": ", 250045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 251045e3678SYuan Kang #endif 252045e3678SYuan Kang 253045e3678SYuan Kang /* ahash_update_first shared descriptor */ 254045e3678SYuan Kang desc = ctx->sh_desc_update_first; 255*0efa7579SHoria Geantă cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 256*0efa7579SHoria Geantă ctx->ctx_len, false, ctrlpriv->era); 257bbf22344SHoria Geantă dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 2587e0880b9SHoria Geantă desc_bytes(desc), ctx->dir); 259045e3678SYuan Kang #ifdef DEBUG 260514df281SAlex Porosanu print_hex_dump(KERN_ERR, 261514df281SAlex Porosanu "ahash update first shdesc@"__stringify(__LINE__)": ", 262045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 263045e3678SYuan Kang #endif 264045e3678SYuan Kang 265045e3678SYuan Kang /* ahash_final shared descriptor */ 266045e3678SYuan Kang desc = ctx->sh_desc_fin; 267*0efa7579SHoria Geantă cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, 268*0efa7579SHoria Geantă ctx->ctx_len, true, ctrlpriv->era); 269bbf22344SHoria Geantă dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 2707e0880b9SHoria Geantă desc_bytes(desc), ctx->dir); 271045e3678SYuan Kang #ifdef DEBUG 272514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 273045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 274045e3678SYuan Kang desc_bytes(desc), 1); 275045e3678SYuan Kang #endif 276045e3678SYuan Kang 277045e3678SYuan Kang /* ahash_digest shared descriptor */ 278045e3678SYuan Kang desc = ctx->sh_desc_digest; 279*0efa7579SHoria Geantă cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, 280*0efa7579SHoria Geantă ctx->ctx_len, false, ctrlpriv->era); 281bbf22344SHoria Geantă dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 2827e0880b9SHoria Geantă desc_bytes(desc), ctx->dir); 283045e3678SYuan Kang #ifdef DEBUG 284514df281SAlex Porosanu print_hex_dump(KERN_ERR, 285514df281SAlex Porosanu "ahash digest shdesc@"__stringify(__LINE__)": ", 286045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 287045e3678SYuan Kang desc_bytes(desc), 1); 288045e3678SYuan Kang #endif 289045e3678SYuan Kang 290045e3678SYuan Kang return 0; 291045e3678SYuan Kang } 292045e3678SYuan Kang 293045e3678SYuan Kang /* Digest hash size if it is too large */ 29466b3e887SKim Phillips static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, 295045e3678SYuan Kang u32 *keylen, u8 *key_out, u32 digestsize) 296045e3678SYuan Kang { 297045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 298045e3678SYuan Kang u32 *desc; 299045e3678SYuan Kang struct split_key_result result; 300045e3678SYuan Kang dma_addr_t src_dma, dst_dma; 3019e6df0fdSMarkus Elfring int ret; 302045e3678SYuan Kang 3039c23b7d3SVakul Garg desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 3042af8f4a2SKim Phillips if (!desc) { 3052af8f4a2SKim Phillips dev_err(jrdev, "unable to allocate key input memory\n"); 3062af8f4a2SKim Phillips return -ENOMEM; 3072af8f4a2SKim Phillips } 308045e3678SYuan Kang 309045e3678SYuan Kang init_job_desc(desc, 0); 310045e3678SYuan Kang 311045e3678SYuan Kang src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, 312045e3678SYuan Kang DMA_TO_DEVICE); 313045e3678SYuan Kang if (dma_mapping_error(jrdev, src_dma)) { 314045e3678SYuan Kang dev_err(jrdev, "unable to map key input memory\n"); 315045e3678SYuan Kang kfree(desc); 316045e3678SYuan Kang return -ENOMEM; 317045e3678SYuan Kang } 318045e3678SYuan Kang dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, 319045e3678SYuan Kang DMA_FROM_DEVICE); 320045e3678SYuan Kang if (dma_mapping_error(jrdev, dst_dma)) { 321045e3678SYuan Kang dev_err(jrdev, "unable to map key output memory\n"); 322045e3678SYuan Kang dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 323045e3678SYuan Kang kfree(desc); 324045e3678SYuan Kang return -ENOMEM; 325045e3678SYuan Kang } 326045e3678SYuan Kang 327045e3678SYuan Kang /* Job descriptor to perform unkeyed hash on key_in */ 328db57656bSHoria Geantă append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | 329045e3678SYuan Kang OP_ALG_AS_INITFINAL); 330045e3678SYuan Kang append_seq_in_ptr(desc, src_dma, *keylen, 0); 331045e3678SYuan Kang append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | 332045e3678SYuan Kang FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); 333045e3678SYuan Kang append_seq_out_ptr(desc, dst_dma, digestsize, 0); 334045e3678SYuan Kang append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 335045e3678SYuan Kang LDST_SRCDST_BYTE_CONTEXT); 336045e3678SYuan Kang 337045e3678SYuan Kang #ifdef DEBUG 338514df281SAlex Porosanu print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 339045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); 340514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 341045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 342045e3678SYuan Kang #endif 343045e3678SYuan Kang 344045e3678SYuan Kang result.err = 0; 345045e3678SYuan Kang init_completion(&result.completion); 346045e3678SYuan Kang 347045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 348045e3678SYuan Kang if (!ret) { 349045e3678SYuan Kang /* in progress */ 3507459e1d2SHoria Geantă wait_for_completion(&result.completion); 351045e3678SYuan Kang ret = result.err; 352045e3678SYuan Kang #ifdef DEBUG 353514df281SAlex Porosanu print_hex_dump(KERN_ERR, 354514df281SAlex Porosanu "digested key@"__stringify(__LINE__)": ", 355045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, key_in, 356045e3678SYuan Kang digestsize, 1); 357045e3678SYuan Kang #endif 358045e3678SYuan Kang } 359045e3678SYuan Kang dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 360045e3678SYuan Kang dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); 361045e3678SYuan Kang 362e11aa9f1SHoria Geanta *keylen = digestsize; 363e11aa9f1SHoria Geanta 364045e3678SYuan Kang kfree(desc); 365045e3678SYuan Kang 366045e3678SYuan Kang return ret; 367045e3678SYuan Kang } 368045e3678SYuan Kang 369045e3678SYuan Kang static int ahash_setkey(struct crypto_ahash *ahash, 370045e3678SYuan Kang const u8 *key, unsigned int keylen) 371045e3678SYuan Kang { 372045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 373045e3678SYuan Kang int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 374045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 3757e0880b9SHoria Geantă struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 3769e6df0fdSMarkus Elfring int ret; 377045e3678SYuan Kang u8 *hashed_key = NULL; 378045e3678SYuan Kang 379045e3678SYuan Kang #ifdef DEBUG 380045e3678SYuan Kang printk(KERN_ERR "keylen %d\n", keylen); 381045e3678SYuan Kang #endif 382045e3678SYuan Kang 383045e3678SYuan Kang if (keylen > blocksize) { 384e7a33c4dSMarkus Elfring hashed_key = kmalloc_array(digestsize, 385e7a33c4dSMarkus Elfring sizeof(*hashed_key), 386e7a33c4dSMarkus Elfring GFP_KERNEL | GFP_DMA); 387045e3678SYuan Kang if (!hashed_key) 388045e3678SYuan Kang return -ENOMEM; 389045e3678SYuan Kang ret = hash_digest_key(ctx, key, &keylen, hashed_key, 390045e3678SYuan Kang digestsize); 391045e3678SYuan Kang if (ret) 392d6e7a7d0SMarkus Elfring goto bad_free_key; 393045e3678SYuan Kang key = hashed_key; 394045e3678SYuan Kang } 395045e3678SYuan Kang 3967e0880b9SHoria Geantă /* 3977e0880b9SHoria Geantă * If DKP is supported, use it in the shared descriptor to generate 3987e0880b9SHoria Geantă * the split key. 3997e0880b9SHoria Geantă */ 4007e0880b9SHoria Geantă if (ctrlpriv->era >= 6) { 4017e0880b9SHoria Geantă ctx->adata.key_inline = true; 4027e0880b9SHoria Geantă ctx->adata.keylen = keylen; 4037e0880b9SHoria Geantă ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 4047e0880b9SHoria Geantă OP_ALG_ALGSEL_MASK); 4057e0880b9SHoria Geantă 4067e0880b9SHoria Geantă if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) 407d6e7a7d0SMarkus Elfring goto bad_free_key; 408045e3678SYuan Kang 4097e0880b9SHoria Geantă memcpy(ctx->key, key, keylen); 4107e0880b9SHoria Geantă } else { 4117e0880b9SHoria Geantă ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, 4127e0880b9SHoria Geantă keylen, CAAM_MAX_HASH_KEY_SIZE); 4137e0880b9SHoria Geantă if (ret) 4147e0880b9SHoria Geantă goto bad_free_key; 4157e0880b9SHoria Geantă } 416045e3678SYuan Kang 417045e3678SYuan Kang kfree(hashed_key); 418cfb725f6SHoria Geantă return ahash_set_sh_desc(ahash); 419d6e7a7d0SMarkus Elfring bad_free_key: 420045e3678SYuan Kang kfree(hashed_key); 421045e3678SYuan Kang crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); 422045e3678SYuan Kang return -EINVAL; 423045e3678SYuan Kang } 424045e3678SYuan Kang 425045e3678SYuan Kang /* 426045e3678SYuan Kang * ahash_edesc - s/w-extended ahash descriptor 427045e3678SYuan Kang * @dst_dma: physical mapped address of req->result 428045e3678SYuan Kang * @sec4_sg_dma: physical mapped address of h/w link table 429045e3678SYuan Kang * @src_nents: number of segments in input scatterlist 430045e3678SYuan Kang * @sec4_sg_bytes: length of dma mapped sec4_sg space 431045e3678SYuan Kang * @hw_desc: the h/w job descriptor followed by any referenced link tables 432343e44b1SRussell King * @sec4_sg: h/w link table 433045e3678SYuan Kang */ 434045e3678SYuan Kang struct ahash_edesc { 435045e3678SYuan Kang dma_addr_t dst_dma; 436045e3678SYuan Kang dma_addr_t sec4_sg_dma; 437045e3678SYuan Kang int src_nents; 438045e3678SYuan Kang int sec4_sg_bytes; 439d7b24ed4SRussell King u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; 440343e44b1SRussell King struct sec4_sg_entry sec4_sg[0]; 441045e3678SYuan Kang }; 442045e3678SYuan Kang 443045e3678SYuan Kang static inline void ahash_unmap(struct device *dev, 444045e3678SYuan Kang struct ahash_edesc *edesc, 445045e3678SYuan Kang struct ahash_request *req, int dst_len) 446045e3678SYuan Kang { 447944c3d4dSHoria Geantă struct caam_hash_state *state = ahash_request_ctx(req); 448944c3d4dSHoria Geantă 449045e3678SYuan Kang if (edesc->src_nents) 45013fb8fd7SLABBE Corentin dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 451045e3678SYuan Kang if (edesc->dst_dma) 452045e3678SYuan Kang dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); 453045e3678SYuan Kang 454045e3678SYuan Kang if (edesc->sec4_sg_bytes) 455045e3678SYuan Kang dma_unmap_single(dev, edesc->sec4_sg_dma, 456045e3678SYuan Kang edesc->sec4_sg_bytes, DMA_TO_DEVICE); 457944c3d4dSHoria Geantă 458944c3d4dSHoria Geantă if (state->buf_dma) { 459944c3d4dSHoria Geantă dma_unmap_single(dev, state->buf_dma, *current_buflen(state), 460944c3d4dSHoria Geantă DMA_TO_DEVICE); 461944c3d4dSHoria Geantă state->buf_dma = 0; 462944c3d4dSHoria Geantă } 463045e3678SYuan Kang } 464045e3678SYuan Kang 465045e3678SYuan Kang static inline void ahash_unmap_ctx(struct device *dev, 466045e3678SYuan Kang struct ahash_edesc *edesc, 467045e3678SYuan Kang struct ahash_request *req, int dst_len, u32 flag) 468045e3678SYuan Kang { 469045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 470045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 471045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 472045e3678SYuan Kang 47387ec02e7SHoria Geantă if (state->ctx_dma) { 474045e3678SYuan Kang dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); 47587ec02e7SHoria Geantă state->ctx_dma = 0; 47687ec02e7SHoria Geantă } 477045e3678SYuan Kang ahash_unmap(dev, edesc, req, dst_len); 478045e3678SYuan Kang } 479045e3678SYuan Kang 480045e3678SYuan Kang static void ahash_done(struct device *jrdev, u32 *desc, u32 err, 481045e3678SYuan Kang void *context) 482045e3678SYuan Kang { 483045e3678SYuan Kang struct ahash_request *req = context; 484045e3678SYuan Kang struct ahash_edesc *edesc; 485045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 486045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 487045e3678SYuan Kang #ifdef DEBUG 488045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 489045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 490045e3678SYuan Kang 491045e3678SYuan Kang dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 492045e3678SYuan Kang #endif 493045e3678SYuan Kang 4944ca7c7d8SHoria Geantă edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 495fa9659cdSMarek Vasut if (err) 496fa9659cdSMarek Vasut caam_jr_strstatus(jrdev, err); 497045e3678SYuan Kang 498045e3678SYuan Kang ahash_unmap(jrdev, edesc, req, digestsize); 499045e3678SYuan Kang kfree(edesc); 500045e3678SYuan Kang 501045e3678SYuan Kang #ifdef DEBUG 502514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 503045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 504045e3678SYuan Kang ctx->ctx_len, 1); 505045e3678SYuan Kang if (req->result) 506514df281SAlex Porosanu print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 507045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->result, 508045e3678SYuan Kang digestsize, 1); 509045e3678SYuan Kang #endif 510045e3678SYuan Kang 511045e3678SYuan Kang req->base.complete(&req->base, err); 512045e3678SYuan Kang } 513045e3678SYuan Kang 514045e3678SYuan Kang static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 515045e3678SYuan Kang void *context) 516045e3678SYuan Kang { 517045e3678SYuan Kang struct ahash_request *req = context; 518045e3678SYuan Kang struct ahash_edesc *edesc; 519045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 520045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 521045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 522944c3d4dSHoria Geantă #ifdef DEBUG 523045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 524045e3678SYuan Kang 525045e3678SYuan Kang dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 526045e3678SYuan Kang #endif 527045e3678SYuan Kang 5284ca7c7d8SHoria Geantă edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 529fa9659cdSMarek Vasut if (err) 530fa9659cdSMarek Vasut caam_jr_strstatus(jrdev, err); 531045e3678SYuan Kang 532045e3678SYuan Kang ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 533944c3d4dSHoria Geantă switch_buf(state); 534045e3678SYuan Kang kfree(edesc); 535045e3678SYuan Kang 536045e3678SYuan Kang #ifdef DEBUG 537514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 538045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 539045e3678SYuan Kang ctx->ctx_len, 1); 540045e3678SYuan Kang if (req->result) 541514df281SAlex Porosanu print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 542045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->result, 543045e3678SYuan Kang digestsize, 1); 544045e3678SYuan Kang #endif 545045e3678SYuan Kang 546045e3678SYuan Kang req->base.complete(&req->base, err); 547045e3678SYuan Kang } 548045e3678SYuan Kang 549045e3678SYuan Kang static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 550045e3678SYuan Kang void *context) 551045e3678SYuan Kang { 552045e3678SYuan Kang struct ahash_request *req = context; 553045e3678SYuan Kang struct ahash_edesc *edesc; 554045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 555045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 556045e3678SYuan Kang #ifdef DEBUG 557045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 558045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 559045e3678SYuan Kang 560045e3678SYuan Kang dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 561045e3678SYuan Kang #endif 562045e3678SYuan Kang 5634ca7c7d8SHoria Geantă edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 564fa9659cdSMarek Vasut if (err) 565fa9659cdSMarek Vasut caam_jr_strstatus(jrdev, err); 566045e3678SYuan Kang 567bc9e05f9SHoria Geanta ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); 568045e3678SYuan Kang kfree(edesc); 569045e3678SYuan Kang 570045e3678SYuan Kang #ifdef DEBUG 571514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 572045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 573045e3678SYuan Kang ctx->ctx_len, 1); 574045e3678SYuan Kang if (req->result) 575514df281SAlex Porosanu print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 576045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->result, 577045e3678SYuan Kang digestsize, 1); 578045e3678SYuan Kang #endif 579045e3678SYuan Kang 580045e3678SYuan Kang req->base.complete(&req->base, err); 581045e3678SYuan Kang } 582045e3678SYuan Kang 583045e3678SYuan Kang static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 584045e3678SYuan Kang void *context) 585045e3678SYuan Kang { 586045e3678SYuan Kang struct ahash_request *req = context; 587045e3678SYuan Kang struct ahash_edesc *edesc; 588045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 589045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 590045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 591944c3d4dSHoria Geantă #ifdef DEBUG 592045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 593045e3678SYuan Kang 594045e3678SYuan Kang dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 595045e3678SYuan Kang #endif 596045e3678SYuan Kang 5974ca7c7d8SHoria Geantă edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 598fa9659cdSMarek Vasut if (err) 599fa9659cdSMarek Vasut caam_jr_strstatus(jrdev, err); 600045e3678SYuan Kang 601ef62b231SHoria Geanta ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 602944c3d4dSHoria Geantă switch_buf(state); 603045e3678SYuan Kang kfree(edesc); 604045e3678SYuan Kang 605045e3678SYuan Kang #ifdef DEBUG 606514df281SAlex Porosanu print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 607045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 608045e3678SYuan Kang ctx->ctx_len, 1); 609045e3678SYuan Kang if (req->result) 610514df281SAlex Porosanu print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 611045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, req->result, 612045e3678SYuan Kang digestsize, 1); 613045e3678SYuan Kang #endif 614045e3678SYuan Kang 615045e3678SYuan Kang req->base.complete(&req->base, err); 616045e3678SYuan Kang } 617045e3678SYuan Kang 6185588d039SRussell King /* 6195588d039SRussell King * Allocate an enhanced descriptor, which contains the hardware descriptor 6205588d039SRussell King * and space for hardware scatter table containing sg_num entries. 6215588d039SRussell King */ 6225588d039SRussell King static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, 62330a43b44SRussell King int sg_num, u32 *sh_desc, 62430a43b44SRussell King dma_addr_t sh_desc_dma, 62530a43b44SRussell King gfp_t flags) 6265588d039SRussell King { 6275588d039SRussell King struct ahash_edesc *edesc; 6285588d039SRussell King unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); 6295588d039SRussell King 6305588d039SRussell King edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); 6315588d039SRussell King if (!edesc) { 6325588d039SRussell King dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); 6335588d039SRussell King return NULL; 6345588d039SRussell King } 6355588d039SRussell King 63630a43b44SRussell King init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), 63730a43b44SRussell King HDR_SHARE_DEFER | HDR_REVERSE); 63830a43b44SRussell King 6395588d039SRussell King return edesc; 6405588d039SRussell King } 6415588d039SRussell King 64265cf164aSRussell King static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, 64365cf164aSRussell King struct ahash_edesc *edesc, 64465cf164aSRussell King struct ahash_request *req, int nents, 64565cf164aSRussell King unsigned int first_sg, 64665cf164aSRussell King unsigned int first_bytes, size_t to_hash) 64765cf164aSRussell King { 64865cf164aSRussell King dma_addr_t src_dma; 64965cf164aSRussell King u32 options; 65065cf164aSRussell King 65165cf164aSRussell King if (nents > 1 || first_sg) { 65265cf164aSRussell King struct sec4_sg_entry *sg = edesc->sec4_sg; 65365cf164aSRussell King unsigned int sgsize = sizeof(*sg) * (first_sg + nents); 65465cf164aSRussell King 65565cf164aSRussell King sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); 65665cf164aSRussell King 65765cf164aSRussell King src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 65865cf164aSRussell King if (dma_mapping_error(ctx->jrdev, src_dma)) { 65965cf164aSRussell King dev_err(ctx->jrdev, "unable to map S/G table\n"); 66065cf164aSRussell King return -ENOMEM; 66165cf164aSRussell King } 66265cf164aSRussell King 66365cf164aSRussell King edesc->sec4_sg_bytes = sgsize; 66465cf164aSRussell King edesc->sec4_sg_dma = src_dma; 66565cf164aSRussell King options = LDST_SGF; 66665cf164aSRussell King } else { 66765cf164aSRussell King src_dma = sg_dma_address(req->src); 66865cf164aSRussell King options = 0; 66965cf164aSRussell King } 67065cf164aSRussell King 67165cf164aSRussell King append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, 67265cf164aSRussell King options); 67365cf164aSRussell King 67465cf164aSRussell King return 0; 67565cf164aSRussell King } 67665cf164aSRussell King 677045e3678SYuan Kang /* submit update job descriptor */ 678045e3678SYuan Kang static int ahash_update_ctx(struct ahash_request *req) 679045e3678SYuan Kang { 680045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 681045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 682045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 683045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 684019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 685019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 6860355d23dSHoria Geantă u8 *buf = current_buf(state); 6870355d23dSHoria Geantă int *buflen = current_buflen(state); 6880355d23dSHoria Geantă u8 *next_buf = alt_buf(state); 6890355d23dSHoria Geantă int *next_buflen = alt_buflen(state), last_buflen; 690045e3678SYuan Kang int in_len = *buflen + req->nbytes, to_hash; 69130a43b44SRussell King u32 *desc; 692bc13c69eSRussell King int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; 693045e3678SYuan Kang struct ahash_edesc *edesc; 694045e3678SYuan Kang int ret = 0; 695045e3678SYuan Kang 696045e3678SYuan Kang last_buflen = *next_buflen; 697045e3678SYuan Kang *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 698045e3678SYuan Kang to_hash = in_len - *next_buflen; 699045e3678SYuan Kang 700045e3678SYuan Kang if (to_hash) { 70113fb8fd7SLABBE Corentin src_nents = sg_nents_for_len(req->src, 70213fb8fd7SLABBE Corentin req->nbytes - (*next_buflen)); 703f9970c28SLABBE Corentin if (src_nents < 0) { 704f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 705f9970c28SLABBE Corentin return src_nents; 706f9970c28SLABBE Corentin } 707bc13c69eSRussell King 708bc13c69eSRussell King if (src_nents) { 709bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 710bc13c69eSRussell King DMA_TO_DEVICE); 711bc13c69eSRussell King if (!mapped_nents) { 712bc13c69eSRussell King dev_err(jrdev, "unable to DMA map source\n"); 713bc13c69eSRussell King return -ENOMEM; 714bc13c69eSRussell King } 715bc13c69eSRussell King } else { 716bc13c69eSRussell King mapped_nents = 0; 717bc13c69eSRussell King } 718bc13c69eSRussell King 719045e3678SYuan Kang sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 720bc13c69eSRussell King sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 721045e3678SYuan Kang sizeof(struct sec4_sg_entry); 722045e3678SYuan Kang 723045e3678SYuan Kang /* 724045e3678SYuan Kang * allocate space for base edesc and hw desc commands, 725045e3678SYuan Kang * link tables 726045e3678SYuan Kang */ 7275588d039SRussell King edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 72830a43b44SRussell King ctx->sh_desc_update, 72930a43b44SRussell King ctx->sh_desc_update_dma, flags); 730045e3678SYuan Kang if (!edesc) { 731bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 732045e3678SYuan Kang return -ENOMEM; 733045e3678SYuan Kang } 734045e3678SYuan Kang 735045e3678SYuan Kang edesc->src_nents = src_nents; 736045e3678SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 737045e3678SYuan Kang 738dfcd8393SHoria Geantă ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 739045e3678SYuan Kang edesc->sec4_sg, DMA_BIDIRECTIONAL); 740ce572085SHoria Geanta if (ret) 74158b0e5d0SMarkus Elfring goto unmap_ctx; 742045e3678SYuan Kang 743944c3d4dSHoria Geantă ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 744944c3d4dSHoria Geantă if (ret) 745944c3d4dSHoria Geantă goto unmap_ctx; 746045e3678SYuan Kang 747bc13c69eSRussell King if (mapped_nents) { 748bc13c69eSRussell King sg_to_sec4_sg_last(req->src, mapped_nents, 749bc13c69eSRussell King edesc->sec4_sg + sec4_sg_src_index, 750bc13c69eSRussell King 0); 7518af7b0f8SVictoria Milhoan if (*next_buflen) 752307fd543SCristian Stoica scatterwalk_map_and_copy(next_buf, req->src, 753307fd543SCristian Stoica to_hash - *buflen, 754307fd543SCristian Stoica *next_buflen, 0); 755045e3678SYuan Kang } else { 756297b9cebSHoria Geantă sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 757297b9cebSHoria Geantă 1); 758045e3678SYuan Kang } 759045e3678SYuan Kang 760045e3678SYuan Kang desc = edesc->hw_desc; 761045e3678SYuan Kang 7621da2be33SRuchika Gupta edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 7631da2be33SRuchika Gupta sec4_sg_bytes, 7641da2be33SRuchika Gupta DMA_TO_DEVICE); 765ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 766ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 76732686d34SRussell King ret = -ENOMEM; 76858b0e5d0SMarkus Elfring goto unmap_ctx; 769ce572085SHoria Geanta } 7701da2be33SRuchika Gupta 771045e3678SYuan Kang append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 772045e3678SYuan Kang to_hash, LDST_SGF); 773045e3678SYuan Kang 774045e3678SYuan Kang append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 775045e3678SYuan Kang 776045e3678SYuan Kang #ifdef DEBUG 777514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 778045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 779045e3678SYuan Kang desc_bytes(desc), 1); 780045e3678SYuan Kang #endif 781045e3678SYuan Kang 782045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 78332686d34SRussell King if (ret) 78458b0e5d0SMarkus Elfring goto unmap_ctx; 78532686d34SRussell King 786045e3678SYuan Kang ret = -EINPROGRESS; 787045e3678SYuan Kang } else if (*next_buflen) { 788307fd543SCristian Stoica scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 789307fd543SCristian Stoica req->nbytes, 0); 790045e3678SYuan Kang *buflen = *next_buflen; 791045e3678SYuan Kang *next_buflen = last_buflen; 792045e3678SYuan Kang } 793045e3678SYuan Kang #ifdef DEBUG 794514df281SAlex Porosanu print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 795045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 796514df281SAlex Porosanu print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 797045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 798045e3678SYuan Kang *next_buflen, 1); 799045e3678SYuan Kang #endif 800045e3678SYuan Kang 801045e3678SYuan Kang return ret; 80258b0e5d0SMarkus Elfring unmap_ctx: 80332686d34SRussell King ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 80432686d34SRussell King kfree(edesc); 80532686d34SRussell King return ret; 806045e3678SYuan Kang } 807045e3678SYuan Kang 808045e3678SYuan Kang static int ahash_final_ctx(struct ahash_request *req) 809045e3678SYuan Kang { 810045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 811045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 812045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 813045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 814019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 815019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 8160355d23dSHoria Geantă int buflen = *current_buflen(state); 81730a43b44SRussell King u32 *desc; 818b310c178SHoria Geant? int sec4_sg_bytes, sec4_sg_src_index; 819045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 820045e3678SYuan Kang struct ahash_edesc *edesc; 8219e6df0fdSMarkus Elfring int ret; 822045e3678SYuan Kang 823b310c178SHoria Geant? sec4_sg_src_index = 1 + (buflen ? 1 : 0); 824b310c178SHoria Geant? sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); 825045e3678SYuan Kang 826045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 82730a43b44SRussell King edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, 82830a43b44SRussell King ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 82930a43b44SRussell King flags); 8305588d039SRussell King if (!edesc) 831045e3678SYuan Kang return -ENOMEM; 832045e3678SYuan Kang 833045e3678SYuan Kang desc = edesc->hw_desc; 834045e3678SYuan Kang 835045e3678SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 836045e3678SYuan Kang 837dfcd8393SHoria Geantă ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 838ce572085SHoria Geanta edesc->sec4_sg, DMA_TO_DEVICE); 839ce572085SHoria Geanta if (ret) 84058b0e5d0SMarkus Elfring goto unmap_ctx; 841045e3678SYuan Kang 842944c3d4dSHoria Geantă ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 843944c3d4dSHoria Geantă if (ret) 844944c3d4dSHoria Geantă goto unmap_ctx; 845944c3d4dSHoria Geantă 846297b9cebSHoria Geantă sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); 847045e3678SYuan Kang 8481da2be33SRuchika Gupta edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 8491da2be33SRuchika Gupta sec4_sg_bytes, DMA_TO_DEVICE); 850ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 851ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 85232686d34SRussell King ret = -ENOMEM; 85358b0e5d0SMarkus Elfring goto unmap_ctx; 854ce572085SHoria Geanta } 8551da2be33SRuchika Gupta 856045e3678SYuan Kang append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 857045e3678SYuan Kang LDST_SGF); 858045e3678SYuan Kang 859045e3678SYuan Kang edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 860045e3678SYuan Kang digestsize); 861ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->dst_dma)) { 862ce572085SHoria Geanta dev_err(jrdev, "unable to map dst\n"); 86332686d34SRussell King ret = -ENOMEM; 86458b0e5d0SMarkus Elfring goto unmap_ctx; 865ce572085SHoria Geanta } 866045e3678SYuan Kang 867045e3678SYuan Kang #ifdef DEBUG 868514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 869045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 870045e3678SYuan Kang #endif 871045e3678SYuan Kang 872045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 87332686d34SRussell King if (ret) 87458b0e5d0SMarkus Elfring goto unmap_ctx; 87532686d34SRussell King 87632686d34SRussell King return -EINPROGRESS; 87758b0e5d0SMarkus Elfring unmap_ctx: 878045e3678SYuan Kang ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 879045e3678SYuan Kang kfree(edesc); 880045e3678SYuan Kang return ret; 881045e3678SYuan Kang } 882045e3678SYuan Kang 883045e3678SYuan Kang static int ahash_finup_ctx(struct ahash_request *req) 884045e3678SYuan Kang { 885045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 886045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 887045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 888045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 889019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 890019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 8910355d23dSHoria Geantă int buflen = *current_buflen(state); 89230a43b44SRussell King u32 *desc; 89365cf164aSRussell King int sec4_sg_src_index; 894bc13c69eSRussell King int src_nents, mapped_nents; 895045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 896045e3678SYuan Kang struct ahash_edesc *edesc; 8979e6df0fdSMarkus Elfring int ret; 898045e3678SYuan Kang 89913fb8fd7SLABBE Corentin src_nents = sg_nents_for_len(req->src, req->nbytes); 900f9970c28SLABBE Corentin if (src_nents < 0) { 901f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 902f9970c28SLABBE Corentin return src_nents; 903f9970c28SLABBE Corentin } 904bc13c69eSRussell King 905bc13c69eSRussell King if (src_nents) { 906bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 907bc13c69eSRussell King DMA_TO_DEVICE); 908bc13c69eSRussell King if (!mapped_nents) { 909bc13c69eSRussell King dev_err(jrdev, "unable to DMA map source\n"); 910bc13c69eSRussell King return -ENOMEM; 911bc13c69eSRussell King } 912bc13c69eSRussell King } else { 913bc13c69eSRussell King mapped_nents = 0; 914bc13c69eSRussell King } 915bc13c69eSRussell King 916045e3678SYuan Kang sec4_sg_src_index = 1 + (buflen ? 1 : 0); 917045e3678SYuan Kang 918045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 9195588d039SRussell King edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 9209a1a1c08SHoria Geantă ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 9215588d039SRussell King flags); 922045e3678SYuan Kang if (!edesc) { 923bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 924045e3678SYuan Kang return -ENOMEM; 925045e3678SYuan Kang } 926045e3678SYuan Kang 927045e3678SYuan Kang desc = edesc->hw_desc; 928045e3678SYuan Kang 929045e3678SYuan Kang edesc->src_nents = src_nents; 930045e3678SYuan Kang 931dfcd8393SHoria Geantă ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 932ce572085SHoria Geanta edesc->sec4_sg, DMA_TO_DEVICE); 933ce572085SHoria Geanta if (ret) 93458b0e5d0SMarkus Elfring goto unmap_ctx; 935045e3678SYuan Kang 936944c3d4dSHoria Geantă ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 937944c3d4dSHoria Geantă if (ret) 938944c3d4dSHoria Geantă goto unmap_ctx; 939045e3678SYuan Kang 94065cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 94165cf164aSRussell King sec4_sg_src_index, ctx->ctx_len + buflen, 94265cf164aSRussell King req->nbytes); 94365cf164aSRussell King if (ret) 94458b0e5d0SMarkus Elfring goto unmap_ctx; 945045e3678SYuan Kang 946045e3678SYuan Kang edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 947045e3678SYuan Kang digestsize); 948ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->dst_dma)) { 949ce572085SHoria Geanta dev_err(jrdev, "unable to map dst\n"); 95032686d34SRussell King ret = -ENOMEM; 95158b0e5d0SMarkus Elfring goto unmap_ctx; 952ce572085SHoria Geanta } 953045e3678SYuan Kang 954045e3678SYuan Kang #ifdef DEBUG 955514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 956045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 957045e3678SYuan Kang #endif 958045e3678SYuan Kang 959045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 96032686d34SRussell King if (ret) 96158b0e5d0SMarkus Elfring goto unmap_ctx; 96232686d34SRussell King 96332686d34SRussell King return -EINPROGRESS; 96458b0e5d0SMarkus Elfring unmap_ctx: 965045e3678SYuan Kang ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 966045e3678SYuan Kang kfree(edesc); 967045e3678SYuan Kang return ret; 968045e3678SYuan Kang } 969045e3678SYuan Kang 970045e3678SYuan Kang static int ahash_digest(struct ahash_request *req) 971045e3678SYuan Kang { 972045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 973045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 974944c3d4dSHoria Geantă struct caam_hash_state *state = ahash_request_ctx(req); 975045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 976019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 977019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 97830a43b44SRussell King u32 *desc; 979045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 98065cf164aSRussell King int src_nents, mapped_nents; 981045e3678SYuan Kang struct ahash_edesc *edesc; 9829e6df0fdSMarkus Elfring int ret; 983045e3678SYuan Kang 984944c3d4dSHoria Geantă state->buf_dma = 0; 985944c3d4dSHoria Geantă 9863d5a2db6SRussell King src_nents = sg_nents_for_len(req->src, req->nbytes); 987f9970c28SLABBE Corentin if (src_nents < 0) { 988f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 989f9970c28SLABBE Corentin return src_nents; 990f9970c28SLABBE Corentin } 991bc13c69eSRussell King 992bc13c69eSRussell King if (src_nents) { 993bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 994bc13c69eSRussell King DMA_TO_DEVICE); 995bc13c69eSRussell King if (!mapped_nents) { 996bc13c69eSRussell King dev_err(jrdev, "unable to map source for DMA\n"); 997bc13c69eSRussell King return -ENOMEM; 998bc13c69eSRussell King } 999bc13c69eSRussell King } else { 1000bc13c69eSRussell King mapped_nents = 0; 1001bc13c69eSRussell King } 1002bc13c69eSRussell King 1003045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 10045588d039SRussell King edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0, 100530a43b44SRussell King ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 10065588d039SRussell King flags); 1007045e3678SYuan Kang if (!edesc) { 1008bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1009045e3678SYuan Kang return -ENOMEM; 1010045e3678SYuan Kang } 1011343e44b1SRussell King 1012045e3678SYuan Kang edesc->src_nents = src_nents; 1013045e3678SYuan Kang 101465cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 101565cf164aSRussell King req->nbytes); 101665cf164aSRussell King if (ret) { 101732686d34SRussell King ahash_unmap(jrdev, edesc, req, digestsize); 101832686d34SRussell King kfree(edesc); 101965cf164aSRussell King return ret; 1020ce572085SHoria Geanta } 102165cf164aSRussell King 102265cf164aSRussell King desc = edesc->hw_desc; 1023045e3678SYuan Kang 1024045e3678SYuan Kang edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1025045e3678SYuan Kang digestsize); 1026ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1027ce572085SHoria Geanta dev_err(jrdev, "unable to map dst\n"); 102832686d34SRussell King ahash_unmap(jrdev, edesc, req, digestsize); 102932686d34SRussell King kfree(edesc); 1030ce572085SHoria Geanta return -ENOMEM; 1031ce572085SHoria Geanta } 1032045e3678SYuan Kang 1033045e3678SYuan Kang #ifdef DEBUG 1034514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1035045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1036045e3678SYuan Kang #endif 1037045e3678SYuan Kang 1038045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1039045e3678SYuan Kang if (!ret) { 1040045e3678SYuan Kang ret = -EINPROGRESS; 1041045e3678SYuan Kang } else { 1042045e3678SYuan Kang ahash_unmap(jrdev, edesc, req, digestsize); 1043045e3678SYuan Kang kfree(edesc); 1044045e3678SYuan Kang } 1045045e3678SYuan Kang 1046045e3678SYuan Kang return ret; 1047045e3678SYuan Kang } 1048045e3678SYuan Kang 1049045e3678SYuan Kang /* submit ahash final if it the first job descriptor */ 1050045e3678SYuan Kang static int ahash_final_no_ctx(struct ahash_request *req) 1051045e3678SYuan Kang { 1052045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1053045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1054045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1055045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 1056019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1057019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 10580355d23dSHoria Geantă u8 *buf = current_buf(state); 10590355d23dSHoria Geantă int buflen = *current_buflen(state); 106030a43b44SRussell King u32 *desc; 1061045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 1062045e3678SYuan Kang struct ahash_edesc *edesc; 10639e6df0fdSMarkus Elfring int ret; 1064045e3678SYuan Kang 1065045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 106630a43b44SRussell King edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, 106730a43b44SRussell King ctx->sh_desc_digest_dma, flags); 10685588d039SRussell King if (!edesc) 1069045e3678SYuan Kang return -ENOMEM; 1070045e3678SYuan Kang 1071045e3678SYuan Kang desc = edesc->hw_desc; 1072045e3678SYuan Kang 1073045e3678SYuan Kang state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1074ce572085SHoria Geanta if (dma_mapping_error(jrdev, state->buf_dma)) { 1075ce572085SHoria Geanta dev_err(jrdev, "unable to map src\n"); 107606435f34SMarkus Elfring goto unmap; 1077ce572085SHoria Geanta } 1078045e3678SYuan Kang 1079045e3678SYuan Kang append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1080045e3678SYuan Kang 1081045e3678SYuan Kang edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1082045e3678SYuan Kang digestsize); 1083ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1084ce572085SHoria Geanta dev_err(jrdev, "unable to map dst\n"); 108506435f34SMarkus Elfring goto unmap; 1086ce572085SHoria Geanta } 1087045e3678SYuan Kang 1088045e3678SYuan Kang #ifdef DEBUG 1089514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1090045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1091045e3678SYuan Kang #endif 1092045e3678SYuan Kang 1093045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1094045e3678SYuan Kang if (!ret) { 1095045e3678SYuan Kang ret = -EINPROGRESS; 1096045e3678SYuan Kang } else { 1097045e3678SYuan Kang ahash_unmap(jrdev, edesc, req, digestsize); 1098045e3678SYuan Kang kfree(edesc); 1099045e3678SYuan Kang } 1100045e3678SYuan Kang 1101045e3678SYuan Kang return ret; 110206435f34SMarkus Elfring unmap: 110306435f34SMarkus Elfring ahash_unmap(jrdev, edesc, req, digestsize); 110406435f34SMarkus Elfring kfree(edesc); 110506435f34SMarkus Elfring return -ENOMEM; 110606435f34SMarkus Elfring 1107045e3678SYuan Kang } 1108045e3678SYuan Kang 1109045e3678SYuan Kang /* submit ahash update if it the first job descriptor after update */ 1110045e3678SYuan Kang static int ahash_update_no_ctx(struct ahash_request *req) 1111045e3678SYuan Kang { 1112045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1113045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1114045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1115045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 1116019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1117019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 11180355d23dSHoria Geantă u8 *buf = current_buf(state); 11190355d23dSHoria Geantă int *buflen = current_buflen(state); 11200355d23dSHoria Geantă u8 *next_buf = alt_buf(state); 11210355d23dSHoria Geantă int *next_buflen = alt_buflen(state); 1122045e3678SYuan Kang int in_len = *buflen + req->nbytes, to_hash; 1123bc13c69eSRussell King int sec4_sg_bytes, src_nents, mapped_nents; 1124045e3678SYuan Kang struct ahash_edesc *edesc; 112530a43b44SRussell King u32 *desc; 1126045e3678SYuan Kang int ret = 0; 1127045e3678SYuan Kang 1128045e3678SYuan Kang *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 1129045e3678SYuan Kang to_hash = in_len - *next_buflen; 1130045e3678SYuan Kang 1131045e3678SYuan Kang if (to_hash) { 113213fb8fd7SLABBE Corentin src_nents = sg_nents_for_len(req->src, 11333d5a2db6SRussell King req->nbytes - *next_buflen); 1134f9970c28SLABBE Corentin if (src_nents < 0) { 1135f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1136f9970c28SLABBE Corentin return src_nents; 1137f9970c28SLABBE Corentin } 1138bc13c69eSRussell King 1139bc13c69eSRussell King if (src_nents) { 1140bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1141bc13c69eSRussell King DMA_TO_DEVICE); 1142bc13c69eSRussell King if (!mapped_nents) { 1143bc13c69eSRussell King dev_err(jrdev, "unable to DMA map source\n"); 1144bc13c69eSRussell King return -ENOMEM; 1145bc13c69eSRussell King } 1146bc13c69eSRussell King } else { 1147bc13c69eSRussell King mapped_nents = 0; 1148bc13c69eSRussell King } 1149bc13c69eSRussell King 1150bc13c69eSRussell King sec4_sg_bytes = (1 + mapped_nents) * 1151045e3678SYuan Kang sizeof(struct sec4_sg_entry); 1152045e3678SYuan Kang 1153045e3678SYuan Kang /* 1154045e3678SYuan Kang * allocate space for base edesc and hw desc commands, 1155045e3678SYuan Kang * link tables 1156045e3678SYuan Kang */ 115730a43b44SRussell King edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, 115830a43b44SRussell King ctx->sh_desc_update_first, 115930a43b44SRussell King ctx->sh_desc_update_first_dma, 116030a43b44SRussell King flags); 1161045e3678SYuan Kang if (!edesc) { 1162bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1163045e3678SYuan Kang return -ENOMEM; 1164045e3678SYuan Kang } 1165045e3678SYuan Kang 1166045e3678SYuan Kang edesc->src_nents = src_nents; 1167045e3678SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 1168045e3678SYuan Kang 1169944c3d4dSHoria Geantă ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1170944c3d4dSHoria Geantă if (ret) 1171944c3d4dSHoria Geantă goto unmap_ctx; 1172944c3d4dSHoria Geantă 1173bc13c69eSRussell King sg_to_sec4_sg_last(req->src, mapped_nents, 1174bc13c69eSRussell King edesc->sec4_sg + 1, 0); 1175bc13c69eSRussell King 1176045e3678SYuan Kang if (*next_buflen) { 1177307fd543SCristian Stoica scatterwalk_map_and_copy(next_buf, req->src, 1178307fd543SCristian Stoica to_hash - *buflen, 1179307fd543SCristian Stoica *next_buflen, 0); 1180045e3678SYuan Kang } 1181045e3678SYuan Kang 1182045e3678SYuan Kang desc = edesc->hw_desc; 1183045e3678SYuan Kang 11841da2be33SRuchika Gupta edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 11851da2be33SRuchika Gupta sec4_sg_bytes, 11861da2be33SRuchika Gupta DMA_TO_DEVICE); 1187ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1188ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 118932686d34SRussell King ret = -ENOMEM; 119058b0e5d0SMarkus Elfring goto unmap_ctx; 1191ce572085SHoria Geanta } 11921da2be33SRuchika Gupta 1193045e3678SYuan Kang append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1194045e3678SYuan Kang 1195ce572085SHoria Geanta ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1196ce572085SHoria Geanta if (ret) 119758b0e5d0SMarkus Elfring goto unmap_ctx; 1198045e3678SYuan Kang 1199045e3678SYuan Kang #ifdef DEBUG 1200514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1201045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 1202045e3678SYuan Kang desc_bytes(desc), 1); 1203045e3678SYuan Kang #endif 1204045e3678SYuan Kang 1205045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 120632686d34SRussell King if (ret) 120758b0e5d0SMarkus Elfring goto unmap_ctx; 120832686d34SRussell King 1209045e3678SYuan Kang ret = -EINPROGRESS; 1210045e3678SYuan Kang state->update = ahash_update_ctx; 1211045e3678SYuan Kang state->finup = ahash_finup_ctx; 1212045e3678SYuan Kang state->final = ahash_final_ctx; 1213045e3678SYuan Kang } else if (*next_buflen) { 1214307fd543SCristian Stoica scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1215307fd543SCristian Stoica req->nbytes, 0); 1216045e3678SYuan Kang *buflen = *next_buflen; 1217045e3678SYuan Kang *next_buflen = 0; 1218045e3678SYuan Kang } 1219045e3678SYuan Kang #ifdef DEBUG 1220514df281SAlex Porosanu print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1221045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1222514df281SAlex Porosanu print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1223045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1224045e3678SYuan Kang *next_buflen, 1); 1225045e3678SYuan Kang #endif 1226045e3678SYuan Kang 1227045e3678SYuan Kang return ret; 122858b0e5d0SMarkus Elfring unmap_ctx: 122932686d34SRussell King ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 123032686d34SRussell King kfree(edesc); 123132686d34SRussell King return ret; 1232045e3678SYuan Kang } 1233045e3678SYuan Kang 1234045e3678SYuan Kang /* submit ahash finup if it the first job descriptor after update */ 1235045e3678SYuan Kang static int ahash_finup_no_ctx(struct ahash_request *req) 1236045e3678SYuan Kang { 1237045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1238045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1239045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1240045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 1241019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1242019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 12430355d23dSHoria Geantă int buflen = *current_buflen(state); 124430a43b44SRussell King u32 *desc; 1245bc13c69eSRussell King int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1246045e3678SYuan Kang int digestsize = crypto_ahash_digestsize(ahash); 1247045e3678SYuan Kang struct ahash_edesc *edesc; 12489e6df0fdSMarkus Elfring int ret; 1249045e3678SYuan Kang 125013fb8fd7SLABBE Corentin src_nents = sg_nents_for_len(req->src, req->nbytes); 1251f9970c28SLABBE Corentin if (src_nents < 0) { 1252f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1253f9970c28SLABBE Corentin return src_nents; 1254f9970c28SLABBE Corentin } 1255bc13c69eSRussell King 1256bc13c69eSRussell King if (src_nents) { 1257bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1258bc13c69eSRussell King DMA_TO_DEVICE); 1259bc13c69eSRussell King if (!mapped_nents) { 1260bc13c69eSRussell King dev_err(jrdev, "unable to DMA map source\n"); 1261bc13c69eSRussell King return -ENOMEM; 1262bc13c69eSRussell King } 1263bc13c69eSRussell King } else { 1264bc13c69eSRussell King mapped_nents = 0; 1265bc13c69eSRussell King } 1266bc13c69eSRussell King 1267045e3678SYuan Kang sec4_sg_src_index = 2; 1268bc13c69eSRussell King sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1269045e3678SYuan Kang sizeof(struct sec4_sg_entry); 1270045e3678SYuan Kang 1271045e3678SYuan Kang /* allocate space for base edesc and hw desc commands, link tables */ 127230a43b44SRussell King edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 127330a43b44SRussell King ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 127430a43b44SRussell King flags); 1275045e3678SYuan Kang if (!edesc) { 1276bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1277045e3678SYuan Kang return -ENOMEM; 1278045e3678SYuan Kang } 1279045e3678SYuan Kang 1280045e3678SYuan Kang desc = edesc->hw_desc; 1281045e3678SYuan Kang 1282045e3678SYuan Kang edesc->src_nents = src_nents; 1283045e3678SYuan Kang edesc->sec4_sg_bytes = sec4_sg_bytes; 1284045e3678SYuan Kang 1285944c3d4dSHoria Geantă ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1286944c3d4dSHoria Geantă if (ret) 1287944c3d4dSHoria Geantă goto unmap; 1288045e3678SYuan Kang 128965cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 129065cf164aSRussell King req->nbytes); 129165cf164aSRussell King if (ret) { 1292ce572085SHoria Geanta dev_err(jrdev, "unable to map S/G table\n"); 129306435f34SMarkus Elfring goto unmap; 1294ce572085SHoria Geanta } 12951da2be33SRuchika Gupta 1296045e3678SYuan Kang edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1297045e3678SYuan Kang digestsize); 1298ce572085SHoria Geanta if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1299ce572085SHoria Geanta dev_err(jrdev, "unable to map dst\n"); 130006435f34SMarkus Elfring goto unmap; 1301ce572085SHoria Geanta } 1302045e3678SYuan Kang 1303045e3678SYuan Kang #ifdef DEBUG 1304514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1305045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1306045e3678SYuan Kang #endif 1307045e3678SYuan Kang 1308045e3678SYuan Kang ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1309045e3678SYuan Kang if (!ret) { 1310045e3678SYuan Kang ret = -EINPROGRESS; 1311045e3678SYuan Kang } else { 1312045e3678SYuan Kang ahash_unmap(jrdev, edesc, req, digestsize); 1313045e3678SYuan Kang kfree(edesc); 1314045e3678SYuan Kang } 1315045e3678SYuan Kang 1316045e3678SYuan Kang return ret; 131706435f34SMarkus Elfring unmap: 131806435f34SMarkus Elfring ahash_unmap(jrdev, edesc, req, digestsize); 131906435f34SMarkus Elfring kfree(edesc); 132006435f34SMarkus Elfring return -ENOMEM; 132106435f34SMarkus Elfring 1322045e3678SYuan Kang } 1323045e3678SYuan Kang 1324045e3678SYuan Kang /* submit first update job descriptor after init */ 1325045e3678SYuan Kang static int ahash_update_first(struct ahash_request *req) 1326045e3678SYuan Kang { 1327045e3678SYuan Kang struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1328045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1329045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1330045e3678SYuan Kang struct device *jrdev = ctx->jrdev; 1331019d62dbSHoria Geantă gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1332019d62dbSHoria Geantă GFP_KERNEL : GFP_ATOMIC; 1333944c3d4dSHoria Geantă u8 *next_buf = alt_buf(state); 1334944c3d4dSHoria Geantă int *next_buflen = alt_buflen(state); 1335045e3678SYuan Kang int to_hash; 133630a43b44SRussell King u32 *desc; 133765cf164aSRussell King int src_nents, mapped_nents; 1338045e3678SYuan Kang struct ahash_edesc *edesc; 1339045e3678SYuan Kang int ret = 0; 1340045e3678SYuan Kang 1341045e3678SYuan Kang *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - 1342045e3678SYuan Kang 1); 1343045e3678SYuan Kang to_hash = req->nbytes - *next_buflen; 1344045e3678SYuan Kang 1345045e3678SYuan Kang if (to_hash) { 13463d5a2db6SRussell King src_nents = sg_nents_for_len(req->src, 13473d5a2db6SRussell King req->nbytes - *next_buflen); 1348f9970c28SLABBE Corentin if (src_nents < 0) { 1349f9970c28SLABBE Corentin dev_err(jrdev, "Invalid number of src SG.\n"); 1350f9970c28SLABBE Corentin return src_nents; 1351f9970c28SLABBE Corentin } 1352bc13c69eSRussell King 1353bc13c69eSRussell King if (src_nents) { 1354bc13c69eSRussell King mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1355bc13c69eSRussell King DMA_TO_DEVICE); 1356bc13c69eSRussell King if (!mapped_nents) { 1357bc13c69eSRussell King dev_err(jrdev, "unable to map source for DMA\n"); 1358bc13c69eSRussell King return -ENOMEM; 1359bc13c69eSRussell King } 1360bc13c69eSRussell King } else { 1361bc13c69eSRussell King mapped_nents = 0; 1362bc13c69eSRussell King } 1363045e3678SYuan Kang 1364045e3678SYuan Kang /* 1365045e3678SYuan Kang * allocate space for base edesc and hw desc commands, 1366045e3678SYuan Kang * link tables 1367045e3678SYuan Kang */ 13685588d039SRussell King edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 136930a43b44SRussell King mapped_nents : 0, 137030a43b44SRussell King ctx->sh_desc_update_first, 137130a43b44SRussell King ctx->sh_desc_update_first_dma, 137230a43b44SRussell King flags); 1373045e3678SYuan Kang if (!edesc) { 1374bc13c69eSRussell King dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1375045e3678SYuan Kang return -ENOMEM; 1376045e3678SYuan Kang } 1377045e3678SYuan Kang 1378045e3678SYuan Kang edesc->src_nents = src_nents; 1379045e3678SYuan Kang 138065cf164aSRussell King ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 138165cf164aSRussell King to_hash); 138265cf164aSRussell King if (ret) 138358b0e5d0SMarkus Elfring goto unmap_ctx; 1384045e3678SYuan Kang 1385045e3678SYuan Kang if (*next_buflen) 1386307fd543SCristian Stoica scatterwalk_map_and_copy(next_buf, req->src, to_hash, 1387307fd543SCristian Stoica *next_buflen, 0); 1388045e3678SYuan Kang 1389045e3678SYuan Kang desc = edesc->hw_desc; 1390045e3678SYuan Kang 1391ce572085SHoria Geanta ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1392ce572085SHoria Geanta if (ret) 139358b0e5d0SMarkus Elfring goto unmap_ctx; 1394045e3678SYuan Kang 1395045e3678SYuan Kang #ifdef DEBUG 1396514df281SAlex Porosanu print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1397045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, desc, 1398045e3678SYuan Kang desc_bytes(desc), 1); 1399045e3678SYuan Kang #endif 1400045e3678SYuan Kang 140132686d34SRussell King ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 140232686d34SRussell King if (ret) 140358b0e5d0SMarkus Elfring goto unmap_ctx; 140432686d34SRussell King 1405045e3678SYuan Kang ret = -EINPROGRESS; 1406045e3678SYuan Kang state->update = ahash_update_ctx; 1407045e3678SYuan Kang state->finup = ahash_finup_ctx; 1408045e3678SYuan Kang state->final = ahash_final_ctx; 1409045e3678SYuan Kang } else if (*next_buflen) { 1410045e3678SYuan Kang state->update = ahash_update_no_ctx; 1411045e3678SYuan Kang state->finup = ahash_finup_no_ctx; 1412045e3678SYuan Kang state->final = ahash_final_no_ctx; 1413307fd543SCristian Stoica scatterwalk_map_and_copy(next_buf, req->src, 0, 1414307fd543SCristian Stoica req->nbytes, 0); 1415944c3d4dSHoria Geantă switch_buf(state); 1416045e3678SYuan Kang } 1417045e3678SYuan Kang #ifdef DEBUG 1418514df281SAlex Porosanu print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1419045e3678SYuan Kang DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1420045e3678SYuan Kang *next_buflen, 1); 1421045e3678SYuan Kang #endif 1422045e3678SYuan Kang 1423045e3678SYuan Kang return ret; 142458b0e5d0SMarkus Elfring unmap_ctx: 142532686d34SRussell King ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 142632686d34SRussell King kfree(edesc); 142732686d34SRussell King return ret; 1428045e3678SYuan Kang } 1429045e3678SYuan Kang 1430045e3678SYuan Kang static int ahash_finup_first(struct ahash_request *req) 1431045e3678SYuan Kang { 1432045e3678SYuan Kang return ahash_digest(req); 1433045e3678SYuan Kang } 1434045e3678SYuan Kang 1435045e3678SYuan Kang static int ahash_init(struct ahash_request *req) 1436045e3678SYuan Kang { 1437045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1438045e3678SYuan Kang 1439045e3678SYuan Kang state->update = ahash_update_first; 1440045e3678SYuan Kang state->finup = ahash_finup_first; 1441045e3678SYuan Kang state->final = ahash_final_no_ctx; 1442045e3678SYuan Kang 144387ec02e7SHoria Geantă state->ctx_dma = 0; 1444045e3678SYuan Kang state->current_buf = 0; 1445de0e35ecSHoria Geanta state->buf_dma = 0; 14466fd4b156SSteve Cornelius state->buflen_0 = 0; 14476fd4b156SSteve Cornelius state->buflen_1 = 0; 1448045e3678SYuan Kang 1449045e3678SYuan Kang return 0; 1450045e3678SYuan Kang } 1451045e3678SYuan Kang 1452045e3678SYuan Kang static int ahash_update(struct ahash_request *req) 1453045e3678SYuan Kang { 1454045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1455045e3678SYuan Kang 1456045e3678SYuan Kang return state->update(req); 1457045e3678SYuan Kang } 1458045e3678SYuan Kang 1459045e3678SYuan Kang static int ahash_finup(struct ahash_request *req) 1460045e3678SYuan Kang { 1461045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1462045e3678SYuan Kang 1463045e3678SYuan Kang return state->finup(req); 1464045e3678SYuan Kang } 1465045e3678SYuan Kang 1466045e3678SYuan Kang static int ahash_final(struct ahash_request *req) 1467045e3678SYuan Kang { 1468045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 1469045e3678SYuan Kang 1470045e3678SYuan Kang return state->final(req); 1471045e3678SYuan Kang } 1472045e3678SYuan Kang 1473045e3678SYuan Kang static int ahash_export(struct ahash_request *req, void *out) 1474045e3678SYuan Kang { 1475045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 14765ec90831SRussell King struct caam_export_state *export = out; 14775ec90831SRussell King int len; 14785ec90831SRussell King u8 *buf; 1479045e3678SYuan Kang 14805ec90831SRussell King if (state->current_buf) { 14815ec90831SRussell King buf = state->buf_1; 14825ec90831SRussell King len = state->buflen_1; 14835ec90831SRussell King } else { 14845ec90831SRussell King buf = state->buf_0; 1485f456cd2dSFabio Estevam len = state->buflen_0; 14865ec90831SRussell King } 14875ec90831SRussell King 14885ec90831SRussell King memcpy(export->buf, buf, len); 14895ec90831SRussell King memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 14905ec90831SRussell King export->buflen = len; 14915ec90831SRussell King export->update = state->update; 14925ec90831SRussell King export->final = state->final; 14935ec90831SRussell King export->finup = state->finup; 1494434b4212SRussell King 1495045e3678SYuan Kang return 0; 1496045e3678SYuan Kang } 1497045e3678SYuan Kang 1498045e3678SYuan Kang static int ahash_import(struct ahash_request *req, const void *in) 1499045e3678SYuan Kang { 1500045e3678SYuan Kang struct caam_hash_state *state = ahash_request_ctx(req); 15015ec90831SRussell King const struct caam_export_state *export = in; 1502045e3678SYuan Kang 15035ec90831SRussell King memset(state, 0, sizeof(*state)); 15045ec90831SRussell King memcpy(state->buf_0, export->buf, export->buflen); 15055ec90831SRussell King memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 15065ec90831SRussell King state->buflen_0 = export->buflen; 15075ec90831SRussell King state->update = export->update; 15085ec90831SRussell King state->final = export->final; 15095ec90831SRussell King state->finup = export->finup; 1510434b4212SRussell King 1511045e3678SYuan Kang return 0; 1512045e3678SYuan Kang } 1513045e3678SYuan Kang 1514045e3678SYuan Kang struct caam_hash_template { 1515045e3678SYuan Kang char name[CRYPTO_MAX_ALG_NAME]; 1516045e3678SYuan Kang char driver_name[CRYPTO_MAX_ALG_NAME]; 1517b0e09baeSYuan Kang char hmac_name[CRYPTO_MAX_ALG_NAME]; 1518b0e09baeSYuan Kang char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1519045e3678SYuan Kang unsigned int blocksize; 1520045e3678SYuan Kang struct ahash_alg template_ahash; 1521045e3678SYuan Kang u32 alg_type; 1522045e3678SYuan Kang }; 1523045e3678SYuan Kang 1524045e3678SYuan Kang /* ahash descriptors */ 1525045e3678SYuan Kang static struct caam_hash_template driver_hash[] = { 1526045e3678SYuan Kang { 1527b0e09baeSYuan Kang .name = "sha1", 1528b0e09baeSYuan Kang .driver_name = "sha1-caam", 1529b0e09baeSYuan Kang .hmac_name = "hmac(sha1)", 1530b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha1-caam", 1531045e3678SYuan Kang .blocksize = SHA1_BLOCK_SIZE, 1532045e3678SYuan Kang .template_ahash = { 1533045e3678SYuan Kang .init = ahash_init, 1534045e3678SYuan Kang .update = ahash_update, 1535045e3678SYuan Kang .final = ahash_final, 1536045e3678SYuan Kang .finup = ahash_finup, 1537045e3678SYuan Kang .digest = ahash_digest, 1538045e3678SYuan Kang .export = ahash_export, 1539045e3678SYuan Kang .import = ahash_import, 1540045e3678SYuan Kang .setkey = ahash_setkey, 1541045e3678SYuan Kang .halg = { 1542045e3678SYuan Kang .digestsize = SHA1_DIGEST_SIZE, 15435ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1544045e3678SYuan Kang }, 1545045e3678SYuan Kang }, 1546045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA1, 1547045e3678SYuan Kang }, { 1548b0e09baeSYuan Kang .name = "sha224", 1549b0e09baeSYuan Kang .driver_name = "sha224-caam", 1550b0e09baeSYuan Kang .hmac_name = "hmac(sha224)", 1551b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha224-caam", 1552045e3678SYuan Kang .blocksize = SHA224_BLOCK_SIZE, 1553045e3678SYuan Kang .template_ahash = { 1554045e3678SYuan Kang .init = ahash_init, 1555045e3678SYuan Kang .update = ahash_update, 1556045e3678SYuan Kang .final = ahash_final, 1557045e3678SYuan Kang .finup = ahash_finup, 1558045e3678SYuan Kang .digest = ahash_digest, 1559045e3678SYuan Kang .export = ahash_export, 1560045e3678SYuan Kang .import = ahash_import, 1561045e3678SYuan Kang .setkey = ahash_setkey, 1562045e3678SYuan Kang .halg = { 1563045e3678SYuan Kang .digestsize = SHA224_DIGEST_SIZE, 15645ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1565045e3678SYuan Kang }, 1566045e3678SYuan Kang }, 1567045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA224, 1568045e3678SYuan Kang }, { 1569b0e09baeSYuan Kang .name = "sha256", 1570b0e09baeSYuan Kang .driver_name = "sha256-caam", 1571b0e09baeSYuan Kang .hmac_name = "hmac(sha256)", 1572b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha256-caam", 1573045e3678SYuan Kang .blocksize = SHA256_BLOCK_SIZE, 1574045e3678SYuan Kang .template_ahash = { 1575045e3678SYuan Kang .init = ahash_init, 1576045e3678SYuan Kang .update = ahash_update, 1577045e3678SYuan Kang .final = ahash_final, 1578045e3678SYuan Kang .finup = ahash_finup, 1579045e3678SYuan Kang .digest = ahash_digest, 1580045e3678SYuan Kang .export = ahash_export, 1581045e3678SYuan Kang .import = ahash_import, 1582045e3678SYuan Kang .setkey = ahash_setkey, 1583045e3678SYuan Kang .halg = { 1584045e3678SYuan Kang .digestsize = SHA256_DIGEST_SIZE, 15855ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1586045e3678SYuan Kang }, 1587045e3678SYuan Kang }, 1588045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA256, 1589045e3678SYuan Kang }, { 1590b0e09baeSYuan Kang .name = "sha384", 1591b0e09baeSYuan Kang .driver_name = "sha384-caam", 1592b0e09baeSYuan Kang .hmac_name = "hmac(sha384)", 1593b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha384-caam", 1594045e3678SYuan Kang .blocksize = SHA384_BLOCK_SIZE, 1595045e3678SYuan Kang .template_ahash = { 1596045e3678SYuan Kang .init = ahash_init, 1597045e3678SYuan Kang .update = ahash_update, 1598045e3678SYuan Kang .final = ahash_final, 1599045e3678SYuan Kang .finup = ahash_finup, 1600045e3678SYuan Kang .digest = ahash_digest, 1601045e3678SYuan Kang .export = ahash_export, 1602045e3678SYuan Kang .import = ahash_import, 1603045e3678SYuan Kang .setkey = ahash_setkey, 1604045e3678SYuan Kang .halg = { 1605045e3678SYuan Kang .digestsize = SHA384_DIGEST_SIZE, 16065ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1607045e3678SYuan Kang }, 1608045e3678SYuan Kang }, 1609045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA384, 1610045e3678SYuan Kang }, { 1611b0e09baeSYuan Kang .name = "sha512", 1612b0e09baeSYuan Kang .driver_name = "sha512-caam", 1613b0e09baeSYuan Kang .hmac_name = "hmac(sha512)", 1614b0e09baeSYuan Kang .hmac_driver_name = "hmac-sha512-caam", 1615045e3678SYuan Kang .blocksize = SHA512_BLOCK_SIZE, 1616045e3678SYuan Kang .template_ahash = { 1617045e3678SYuan Kang .init = ahash_init, 1618045e3678SYuan Kang .update = ahash_update, 1619045e3678SYuan Kang .final = ahash_final, 1620045e3678SYuan Kang .finup = ahash_finup, 1621045e3678SYuan Kang .digest = ahash_digest, 1622045e3678SYuan Kang .export = ahash_export, 1623045e3678SYuan Kang .import = ahash_import, 1624045e3678SYuan Kang .setkey = ahash_setkey, 1625045e3678SYuan Kang .halg = { 1626045e3678SYuan Kang .digestsize = SHA512_DIGEST_SIZE, 16275ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1628045e3678SYuan Kang }, 1629045e3678SYuan Kang }, 1630045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_SHA512, 1631045e3678SYuan Kang }, { 1632b0e09baeSYuan Kang .name = "md5", 1633b0e09baeSYuan Kang .driver_name = "md5-caam", 1634b0e09baeSYuan Kang .hmac_name = "hmac(md5)", 1635b0e09baeSYuan Kang .hmac_driver_name = "hmac-md5-caam", 1636045e3678SYuan Kang .blocksize = MD5_BLOCK_WORDS * 4, 1637045e3678SYuan Kang .template_ahash = { 1638045e3678SYuan Kang .init = ahash_init, 1639045e3678SYuan Kang .update = ahash_update, 1640045e3678SYuan Kang .final = ahash_final, 1641045e3678SYuan Kang .finup = ahash_finup, 1642045e3678SYuan Kang .digest = ahash_digest, 1643045e3678SYuan Kang .export = ahash_export, 1644045e3678SYuan Kang .import = ahash_import, 1645045e3678SYuan Kang .setkey = ahash_setkey, 1646045e3678SYuan Kang .halg = { 1647045e3678SYuan Kang .digestsize = MD5_DIGEST_SIZE, 16485ec90831SRussell King .statesize = sizeof(struct caam_export_state), 1649045e3678SYuan Kang }, 1650045e3678SYuan Kang }, 1651045e3678SYuan Kang .alg_type = OP_ALG_ALGSEL_MD5, 1652045e3678SYuan Kang }, 1653045e3678SYuan Kang }; 1654045e3678SYuan Kang 1655045e3678SYuan Kang struct caam_hash_alg { 1656045e3678SYuan Kang struct list_head entry; 1657045e3678SYuan Kang int alg_type; 1658045e3678SYuan Kang struct ahash_alg ahash_alg; 1659045e3678SYuan Kang }; 1660045e3678SYuan Kang 1661045e3678SYuan Kang static int caam_hash_cra_init(struct crypto_tfm *tfm) 1662045e3678SYuan Kang { 1663045e3678SYuan Kang struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1664045e3678SYuan Kang struct crypto_alg *base = tfm->__crt_alg; 1665045e3678SYuan Kang struct hash_alg_common *halg = 1666045e3678SYuan Kang container_of(base, struct hash_alg_common, base); 1667045e3678SYuan Kang struct ahash_alg *alg = 1668045e3678SYuan Kang container_of(halg, struct ahash_alg, halg); 1669045e3678SYuan Kang struct caam_hash_alg *caam_hash = 1670045e3678SYuan Kang container_of(alg, struct caam_hash_alg, ahash_alg); 1671045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1672045e3678SYuan Kang /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1673045e3678SYuan Kang static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1674045e3678SYuan Kang HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1675045e3678SYuan Kang HASH_MSG_LEN + 32, 1676045e3678SYuan Kang HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1677045e3678SYuan Kang HASH_MSG_LEN + 64, 1678045e3678SYuan Kang HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1679bbf22344SHoria Geantă dma_addr_t dma_addr; 16807e0880b9SHoria Geantă struct caam_drv_private *priv; 1681045e3678SYuan Kang 1682045e3678SYuan Kang /* 1683cfc6f11bSRuchika Gupta * Get a Job ring from Job Ring driver to ensure in-order 1684045e3678SYuan Kang * crypto request processing per tfm 1685045e3678SYuan Kang */ 1686cfc6f11bSRuchika Gupta ctx->jrdev = caam_jr_alloc(); 1687cfc6f11bSRuchika Gupta if (IS_ERR(ctx->jrdev)) { 1688cfc6f11bSRuchika Gupta pr_err("Job Ring Device allocation for transform failed\n"); 1689cfc6f11bSRuchika Gupta return PTR_ERR(ctx->jrdev); 1690cfc6f11bSRuchika Gupta } 1691bbf22344SHoria Geantă 16927e0880b9SHoria Geantă priv = dev_get_drvdata(ctx->jrdev->parent); 16937e0880b9SHoria Geantă ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 16947e0880b9SHoria Geantă 1695bbf22344SHoria Geantă dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 1696bbf22344SHoria Geantă offsetof(struct caam_hash_ctx, 1697bbf22344SHoria Geantă sh_desc_update_dma), 16987e0880b9SHoria Geantă ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1699bbf22344SHoria Geantă if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1700bbf22344SHoria Geantă dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 1701bbf22344SHoria Geantă caam_jr_free(ctx->jrdev); 1702bbf22344SHoria Geantă return -ENOMEM; 1703bbf22344SHoria Geantă } 1704bbf22344SHoria Geantă 1705bbf22344SHoria Geantă ctx->sh_desc_update_dma = dma_addr; 1706bbf22344SHoria Geantă ctx->sh_desc_update_first_dma = dma_addr + 1707bbf22344SHoria Geantă offsetof(struct caam_hash_ctx, 1708bbf22344SHoria Geantă sh_desc_update_first); 1709bbf22344SHoria Geantă ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, 1710bbf22344SHoria Geantă sh_desc_fin); 1711bbf22344SHoria Geantă ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, 1712bbf22344SHoria Geantă sh_desc_digest); 1713bbf22344SHoria Geantă 1714045e3678SYuan Kang /* copy descriptor header template value */ 1715db57656bSHoria Geantă ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1716045e3678SYuan Kang 1717488ebc3aSHoria Geantă ctx->ctx_len = runninglen[(ctx->adata.algtype & 1718488ebc3aSHoria Geantă OP_ALG_ALGSEL_SUBMASK) >> 1719045e3678SYuan Kang OP_ALG_ALGSEL_SHIFT]; 1720045e3678SYuan Kang 1721045e3678SYuan Kang crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1722045e3678SYuan Kang sizeof(struct caam_hash_state)); 1723e6cc5b8dSMarkus Elfring return ahash_set_sh_desc(ahash); 1724045e3678SYuan Kang } 1725045e3678SYuan Kang 1726045e3678SYuan Kang static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1727045e3678SYuan Kang { 1728045e3678SYuan Kang struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1729045e3678SYuan Kang 1730bbf22344SHoria Geantă dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 1731bbf22344SHoria Geantă offsetof(struct caam_hash_ctx, 1732bbf22344SHoria Geantă sh_desc_update_dma), 17337e0880b9SHoria Geantă ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1734cfc6f11bSRuchika Gupta caam_jr_free(ctx->jrdev); 1735045e3678SYuan Kang } 1736045e3678SYuan Kang 1737045e3678SYuan Kang static void __exit caam_algapi_hash_exit(void) 1738045e3678SYuan Kang { 1739045e3678SYuan Kang struct caam_hash_alg *t_alg, *n; 1740045e3678SYuan Kang 1741cfc6f11bSRuchika Gupta if (!hash_list.next) 1742045e3678SYuan Kang return; 1743045e3678SYuan Kang 1744cfc6f11bSRuchika Gupta list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1745045e3678SYuan Kang crypto_unregister_ahash(&t_alg->ahash_alg); 1746045e3678SYuan Kang list_del(&t_alg->entry); 1747045e3678SYuan Kang kfree(t_alg); 1748045e3678SYuan Kang } 1749045e3678SYuan Kang } 1750045e3678SYuan Kang 1751045e3678SYuan Kang static struct caam_hash_alg * 1752cfc6f11bSRuchika Gupta caam_hash_alloc(struct caam_hash_template *template, 1753b0e09baeSYuan Kang bool keyed) 1754045e3678SYuan Kang { 1755045e3678SYuan Kang struct caam_hash_alg *t_alg; 1756045e3678SYuan Kang struct ahash_alg *halg; 1757045e3678SYuan Kang struct crypto_alg *alg; 1758045e3678SYuan Kang 17599c4f9733SFabio Estevam t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 1760045e3678SYuan Kang if (!t_alg) { 1761cfc6f11bSRuchika Gupta pr_err("failed to allocate t_alg\n"); 1762045e3678SYuan Kang return ERR_PTR(-ENOMEM); 1763045e3678SYuan Kang } 1764045e3678SYuan Kang 1765045e3678SYuan Kang t_alg->ahash_alg = template->template_ahash; 1766045e3678SYuan Kang halg = &t_alg->ahash_alg; 1767045e3678SYuan Kang alg = &halg->halg.base; 1768045e3678SYuan Kang 1769b0e09baeSYuan Kang if (keyed) { 1770b0e09baeSYuan Kang snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1771b0e09baeSYuan Kang template->hmac_name); 1772b0e09baeSYuan Kang snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1773b0e09baeSYuan Kang template->hmac_driver_name); 1774b0e09baeSYuan Kang } else { 1775b0e09baeSYuan Kang snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1776b0e09baeSYuan Kang template->name); 1777045e3678SYuan Kang snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1778045e3678SYuan Kang template->driver_name); 1779a0118c8bSRussell King t_alg->ahash_alg.setkey = NULL; 1780b0e09baeSYuan Kang } 1781045e3678SYuan Kang alg->cra_module = THIS_MODULE; 1782045e3678SYuan Kang alg->cra_init = caam_hash_cra_init; 1783045e3678SYuan Kang alg->cra_exit = caam_hash_cra_exit; 1784045e3678SYuan Kang alg->cra_ctxsize = sizeof(struct caam_hash_ctx); 1785045e3678SYuan Kang alg->cra_priority = CAAM_CRA_PRIORITY; 1786045e3678SYuan Kang alg->cra_blocksize = template->blocksize; 1787045e3678SYuan Kang alg->cra_alignmask = 0; 17886a38f622SEric Biggers alg->cra_flags = CRYPTO_ALG_ASYNC; 1789045e3678SYuan Kang 1790045e3678SYuan Kang t_alg->alg_type = template->alg_type; 1791045e3678SYuan Kang 1792045e3678SYuan Kang return t_alg; 1793045e3678SYuan Kang } 1794045e3678SYuan Kang 1795045e3678SYuan Kang static int __init caam_algapi_hash_init(void) 1796045e3678SYuan Kang { 179735af6403SRuchika Gupta struct device_node *dev_node; 179835af6403SRuchika Gupta struct platform_device *pdev; 179935af6403SRuchika Gupta struct device *ctrldev; 1800045e3678SYuan Kang int i = 0, err = 0; 1801bf83490eSVictoria Milhoan struct caam_drv_private *priv; 1802bf83490eSVictoria Milhoan unsigned int md_limit = SHA512_DIGEST_SIZE; 1803bf83490eSVictoria Milhoan u32 cha_inst, cha_vid; 1804045e3678SYuan Kang 180535af6403SRuchika Gupta dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 180635af6403SRuchika Gupta if (!dev_node) { 180735af6403SRuchika Gupta dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 180835af6403SRuchika Gupta if (!dev_node) 180935af6403SRuchika Gupta return -ENODEV; 181035af6403SRuchika Gupta } 181135af6403SRuchika Gupta 181235af6403SRuchika Gupta pdev = of_find_device_by_node(dev_node); 181335af6403SRuchika Gupta if (!pdev) { 181435af6403SRuchika Gupta of_node_put(dev_node); 181535af6403SRuchika Gupta return -ENODEV; 181635af6403SRuchika Gupta } 181735af6403SRuchika Gupta 181835af6403SRuchika Gupta ctrldev = &pdev->dev; 181935af6403SRuchika Gupta priv = dev_get_drvdata(ctrldev); 182035af6403SRuchika Gupta of_node_put(dev_node); 182135af6403SRuchika Gupta 182235af6403SRuchika Gupta /* 182335af6403SRuchika Gupta * If priv is NULL, it's probably because the caam driver wasn't 182435af6403SRuchika Gupta * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 182535af6403SRuchika Gupta */ 182635af6403SRuchika Gupta if (!priv) 182735af6403SRuchika Gupta return -ENODEV; 182835af6403SRuchika Gupta 1829bf83490eSVictoria Milhoan /* 1830bf83490eSVictoria Milhoan * Register crypto algorithms the device supports. First, identify 1831bf83490eSVictoria Milhoan * presence and attributes of MD block. 1832bf83490eSVictoria Milhoan */ 1833bf83490eSVictoria Milhoan cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 1834bf83490eSVictoria Milhoan cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 1835bf83490eSVictoria Milhoan 1836bf83490eSVictoria Milhoan /* 1837bf83490eSVictoria Milhoan * Skip registration of any hashing algorithms if MD block 1838bf83490eSVictoria Milhoan * is not present. 1839bf83490eSVictoria Milhoan */ 1840bf83490eSVictoria Milhoan if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT)) 1841bf83490eSVictoria Milhoan return -ENODEV; 1842bf83490eSVictoria Milhoan 1843bf83490eSVictoria Milhoan /* Limit digest size based on LP256 */ 1844bf83490eSVictoria Milhoan if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256) 1845bf83490eSVictoria Milhoan md_limit = SHA256_DIGEST_SIZE; 1846bf83490eSVictoria Milhoan 1847cfc6f11bSRuchika Gupta INIT_LIST_HEAD(&hash_list); 1848045e3678SYuan Kang 1849045e3678SYuan Kang /* register crypto algorithms the device supports */ 1850045e3678SYuan Kang for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 1851045e3678SYuan Kang struct caam_hash_alg *t_alg; 1852bf83490eSVictoria Milhoan struct caam_hash_template *alg = driver_hash + i; 1853bf83490eSVictoria Milhoan 1854bf83490eSVictoria Milhoan /* If MD size is not supported by device, skip registration */ 1855bf83490eSVictoria Milhoan if (alg->template_ahash.halg.digestsize > md_limit) 1856bf83490eSVictoria Milhoan continue; 1857045e3678SYuan Kang 1858b0e09baeSYuan Kang /* register hmac version */ 1859bf83490eSVictoria Milhoan t_alg = caam_hash_alloc(alg, true); 1860b0e09baeSYuan Kang if (IS_ERR(t_alg)) { 1861b0e09baeSYuan Kang err = PTR_ERR(t_alg); 1862bf83490eSVictoria Milhoan pr_warn("%s alg allocation failed\n", alg->driver_name); 1863b0e09baeSYuan Kang continue; 1864b0e09baeSYuan Kang } 1865b0e09baeSYuan Kang 1866b0e09baeSYuan Kang err = crypto_register_ahash(&t_alg->ahash_alg); 1867b0e09baeSYuan Kang if (err) { 18686ea30f0aSRussell King pr_warn("%s alg registration failed: %d\n", 18696ea30f0aSRussell King t_alg->ahash_alg.halg.base.cra_driver_name, 18706ea30f0aSRussell King err); 1871b0e09baeSYuan Kang kfree(t_alg); 1872b0e09baeSYuan Kang } else 1873cfc6f11bSRuchika Gupta list_add_tail(&t_alg->entry, &hash_list); 1874b0e09baeSYuan Kang 1875b0e09baeSYuan Kang /* register unkeyed version */ 1876bf83490eSVictoria Milhoan t_alg = caam_hash_alloc(alg, false); 1877045e3678SYuan Kang if (IS_ERR(t_alg)) { 1878045e3678SYuan Kang err = PTR_ERR(t_alg); 1879bf83490eSVictoria Milhoan pr_warn("%s alg allocation failed\n", alg->driver_name); 1880045e3678SYuan Kang continue; 1881045e3678SYuan Kang } 1882045e3678SYuan Kang 1883045e3678SYuan Kang err = crypto_register_ahash(&t_alg->ahash_alg); 1884045e3678SYuan Kang if (err) { 18856ea30f0aSRussell King pr_warn("%s alg registration failed: %d\n", 18866ea30f0aSRussell King t_alg->ahash_alg.halg.base.cra_driver_name, 18876ea30f0aSRussell King err); 1888045e3678SYuan Kang kfree(t_alg); 1889045e3678SYuan Kang } else 1890cfc6f11bSRuchika Gupta list_add_tail(&t_alg->entry, &hash_list); 1891045e3678SYuan Kang } 1892045e3678SYuan Kang 1893045e3678SYuan Kang return err; 1894045e3678SYuan Kang } 1895045e3678SYuan Kang 1896045e3678SYuan Kang module_init(caam_algapi_hash_init); 1897045e3678SYuan Kang module_exit(caam_algapi_hash_exit); 1898045e3678SYuan Kang 1899045e3678SYuan Kang MODULE_LICENSE("GPL"); 1900045e3678SYuan Kang MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); 1901045e3678SYuan Kang MODULE_AUTHOR("Freescale Semiconductor - NMG"); 1902