xref: /linux/drivers/crypto/caam/caamhash.c (revision 944c3d4dca34403e802287a1e7e9d02c06dce0d5)
1045e3678SYuan Kang /*
2045e3678SYuan Kang  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3045e3678SYuan Kang  *
4045e3678SYuan Kang  * Copyright 2011 Freescale Semiconductor, Inc.
5045e3678SYuan Kang  *
6045e3678SYuan Kang  * Based on caamalg.c crypto API driver.
7045e3678SYuan Kang  *
8045e3678SYuan Kang  * relationship of digest job descriptor or first job descriptor after init to
9045e3678SYuan Kang  * shared descriptors:
10045e3678SYuan Kang  *
11045e3678SYuan Kang  * ---------------                     ---------------
12045e3678SYuan Kang  * | JobDesc #1  |-------------------->|  ShareDesc  |
13045e3678SYuan Kang  * | *(packet 1) |                     |  (hashKey)  |
14045e3678SYuan Kang  * ---------------                     | (operation) |
15045e3678SYuan Kang  *                                     ---------------
16045e3678SYuan Kang  *
17045e3678SYuan Kang  * relationship of subsequent job descriptors to shared descriptors:
18045e3678SYuan Kang  *
19045e3678SYuan Kang  * ---------------                     ---------------
20045e3678SYuan Kang  * | JobDesc #2  |-------------------->|  ShareDesc  |
21045e3678SYuan Kang  * | *(packet 2) |      |------------->|  (hashKey)  |
22045e3678SYuan Kang  * ---------------      |    |-------->| (operation) |
23045e3678SYuan Kang  *       .              |    |         | (load ctx2) |
24045e3678SYuan Kang  *       .              |    |         ---------------
25045e3678SYuan Kang  * ---------------      |    |
26045e3678SYuan Kang  * | JobDesc #3  |------|    |
27045e3678SYuan Kang  * | *(packet 3) |           |
28045e3678SYuan Kang  * ---------------           |
29045e3678SYuan Kang  *       .                   |
30045e3678SYuan Kang  *       .                   |
31045e3678SYuan Kang  * ---------------           |
32045e3678SYuan Kang  * | JobDesc #4  |------------
33045e3678SYuan Kang  * | *(packet 4) |
34045e3678SYuan Kang  * ---------------
35045e3678SYuan Kang  *
36045e3678SYuan Kang  * The SharedDesc never changes for a connection unless rekeyed, but
37045e3678SYuan Kang  * each packet will likely be in a different place. So all we need
38045e3678SYuan Kang  * to know to process the packet is where the input is, where the
39045e3678SYuan Kang  * output goes, and what context we want to process with. Context is
40045e3678SYuan Kang  * in the SharedDesc, packet references in the JobDesc.
41045e3678SYuan Kang  *
42045e3678SYuan Kang  * So, a job desc looks like:
43045e3678SYuan Kang  *
44045e3678SYuan Kang  * ---------------------
45045e3678SYuan Kang  * | Header            |
46045e3678SYuan Kang  * | ShareDesc Pointer |
47045e3678SYuan Kang  * | SEQ_OUT_PTR       |
48045e3678SYuan Kang  * | (output buffer)   |
49045e3678SYuan Kang  * | (output length)   |
50045e3678SYuan Kang  * | SEQ_IN_PTR        |
51045e3678SYuan Kang  * | (input buffer)    |
52045e3678SYuan Kang  * | (input length)    |
53045e3678SYuan Kang  * ---------------------
54045e3678SYuan Kang  */
55045e3678SYuan Kang 
56045e3678SYuan Kang #include "compat.h"
57045e3678SYuan Kang 
58045e3678SYuan Kang #include "regs.h"
59045e3678SYuan Kang #include "intern.h"
60045e3678SYuan Kang #include "desc_constr.h"
61045e3678SYuan Kang #include "jr.h"
62045e3678SYuan Kang #include "error.h"
63045e3678SYuan Kang #include "sg_sw_sec4.h"
64045e3678SYuan Kang #include "key_gen.h"
65045e3678SYuan Kang 
/* crypto API registration priority for the CAAM ahash algorithms */
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

/*
 * Worst-case shared descriptor size: largest descriptor text plus an
 * immediate split key; used to size the per-context descriptor buffers.
 */
#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


/* presumably the list of registered hash algorithms — populated elsewhere in this file */
static struct list_head hash_list;
99cfc6f11bSRuchika Gupta 
/* ahash per-session context */
struct caam_hash_ctx {
	/*
	 * One pre-built shared descriptor per operation type; each is
	 * cacheline-aligned so its DMA mapping does not share a line
	 * with the neighboring descriptor buffers.
	 */
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	/* DMA addresses of the shared descriptors above */
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	/* job ring device used for all DMA mappings and job submission */
	struct device *jrdev;
	/* (split) key material, appended as immediate data to descriptors */
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	/* size of the hash context (running digest + message length) */
	int ctx_len;
	/* algorithm details: algtype, keylen, keylen_pad */
	struct alginfo adata;
};
115045e3678SYuan Kang 
/* ahash state: per-request context, double-buffered for partial blocks */
struct caam_hash_state {
	/* DMA address of the currently mapped partial-block buffer */
	dma_addr_t buf_dma;
	/* DMA address of caam_ctx; 0 when not mapped */
	dma_addr_t ctx_dma;
	/* two buffers for data not yet consumed by the engine; current_buf
	 * selects the active one (see current_buf()/alt_buf() helpers) */
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	/* hardware hash context (running digest + message length) */
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	/* per-state operation hooks, switched as the hash progresses */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	/* 0 => buf_0/buflen_0 active, 1 => buf_1/buflen_1 active */
	int current_buf;
};
130045e3678SYuan Kang 
/* serialized form of caam_hash_state for ahash export/import */
struct caam_export_state {
	/* the active partial-block buffer (only one is exported) */
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	/* hardware hash context snapshot */
	u8 caam_ctx[MAX_CTX_LEN];
	/* number of valid bytes in buf */
	int buflen;
	/* operation hooks to restore on import */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
1395ec90831SRussell King 
1400355d23dSHoria Geantă static inline void switch_buf(struct caam_hash_state *state)
1410355d23dSHoria Geantă {
1420355d23dSHoria Geantă 	state->current_buf ^= 1;
1430355d23dSHoria Geantă }
1440355d23dSHoria Geantă 
1450355d23dSHoria Geantă static inline u8 *current_buf(struct caam_hash_state *state)
1460355d23dSHoria Geantă {
1470355d23dSHoria Geantă 	return state->current_buf ? state->buf_1 : state->buf_0;
1480355d23dSHoria Geantă }
1490355d23dSHoria Geantă 
1500355d23dSHoria Geantă static inline u8 *alt_buf(struct caam_hash_state *state)
1510355d23dSHoria Geantă {
1520355d23dSHoria Geantă 	return state->current_buf ? state->buf_0 : state->buf_1;
1530355d23dSHoria Geantă }
1540355d23dSHoria Geantă 
1550355d23dSHoria Geantă static inline int *current_buflen(struct caam_hash_state *state)
1560355d23dSHoria Geantă {
1570355d23dSHoria Geantă 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
1580355d23dSHoria Geantă }
1590355d23dSHoria Geantă 
1600355d23dSHoria Geantă static inline int *alt_buflen(struct caam_hash_state *state)
1610355d23dSHoria Geantă {
1620355d23dSHoria Geantă 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
1630355d23dSHoria Geantă }
1640355d23dSHoria Geantă 
/* Common job descriptor seq in/out ptr routines */

/*
 * Map state->caam_ctx, and append seq_out_ptr command that points to it.
 * Returns 0 on success or -ENOMEM if the DMA mapping fails.
 */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	/* DMA_FROM_DEVICE: the engine writes the running context back here */
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		/* reset so unmap paths can tell nothing is mapped */
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
184045e3678SYuan Kang 
/*
 * Map req->result, and append seq_out_ptr command that points to it.
 *
 * NOTE(review): unlike the other mapping helpers in this file, the
 * dma_map_single() result is not checked with dma_mapping_error() before
 * being appended to the descriptor; presumably callers are expected to
 * validate the returned dst_dma — verify at the call sites.
 */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
196045e3678SYuan Kang 
/*
 * Map current buffer in state (if length > 0) and put it in link table.
 * Returns 0 on success (including the empty-buffer no-op case) or
 * -ENOMEM if the DMA mapping fails.
 */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	/* nothing buffered: leave the link table entry untouched */
	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		/* reset so ahash_unmap() can tell nothing is mapped */
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
219045e3678SYuan Kang 
/*
 * Map state->caam_ctx, and add it to link table.
 * @flag is the DMA direction for the mapping (also used at unmap time).
 * Returns 0 on success or -ENOMEM if the DMA mapping fails.
 */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		/* reset so unmap paths can tell nothing is mapped */
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
236045e3678SYuan Kang 
/*
 * Build an ahash shared descriptor.
 *
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read and write to seqout
 * For ahash first and digest (import_ctx = false)
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		/* load the split key from ctx->key as immediate data */
		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
				  ctx->adata.keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

		set_jump_tgt_here(desc, skip_key_load);

		/* split key implies precomputed HMAC ipad/opad */
		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
286045e3678SYuan Kang 
/*
 * (Re)generate the four shared descriptors for this tfm and push them to
 * the device with dma_sync_single_for_device() (the descriptor buffers
 * were DMA-mapped elsewhere — their *_dma handles already exist in ctx).
 * Always returns 0.
 */
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* ahash_update shared descriptor: imports ctx, writes ctx_len bytes */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor: no ctx import */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor: imports ctx, writes digestsize bytes */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor: one-shot, no ctx import */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
341045e3678SYuan Kang 
342045e3678SYuan Kang /* Digest hash size if it is too large */
34366b3e887SKim Phillips static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
344045e3678SYuan Kang 			   u32 *keylen, u8 *key_out, u32 digestsize)
345045e3678SYuan Kang {
346045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
347045e3678SYuan Kang 	u32 *desc;
348045e3678SYuan Kang 	struct split_key_result result;
349045e3678SYuan Kang 	dma_addr_t src_dma, dst_dma;
3509e6df0fdSMarkus Elfring 	int ret;
351045e3678SYuan Kang 
3529c23b7d3SVakul Garg 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
3532af8f4a2SKim Phillips 	if (!desc) {
3542af8f4a2SKim Phillips 		dev_err(jrdev, "unable to allocate key input memory\n");
3552af8f4a2SKim Phillips 		return -ENOMEM;
3562af8f4a2SKim Phillips 	}
357045e3678SYuan Kang 
358045e3678SYuan Kang 	init_job_desc(desc, 0);
359045e3678SYuan Kang 
360045e3678SYuan Kang 	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
361045e3678SYuan Kang 				 DMA_TO_DEVICE);
362045e3678SYuan Kang 	if (dma_mapping_error(jrdev, src_dma)) {
363045e3678SYuan Kang 		dev_err(jrdev, "unable to map key input memory\n");
364045e3678SYuan Kang 		kfree(desc);
365045e3678SYuan Kang 		return -ENOMEM;
366045e3678SYuan Kang 	}
367045e3678SYuan Kang 	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
368045e3678SYuan Kang 				 DMA_FROM_DEVICE);
369045e3678SYuan Kang 	if (dma_mapping_error(jrdev, dst_dma)) {
370045e3678SYuan Kang 		dev_err(jrdev, "unable to map key output memory\n");
371045e3678SYuan Kang 		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
372045e3678SYuan Kang 		kfree(desc);
373045e3678SYuan Kang 		return -ENOMEM;
374045e3678SYuan Kang 	}
375045e3678SYuan Kang 
376045e3678SYuan Kang 	/* Job descriptor to perform unkeyed hash on key_in */
377db57656bSHoria Geantă 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
378045e3678SYuan Kang 			 OP_ALG_AS_INITFINAL);
379045e3678SYuan Kang 	append_seq_in_ptr(desc, src_dma, *keylen, 0);
380045e3678SYuan Kang 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
381045e3678SYuan Kang 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
382045e3678SYuan Kang 	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
383045e3678SYuan Kang 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
384045e3678SYuan Kang 			 LDST_SRCDST_BYTE_CONTEXT);
385045e3678SYuan Kang 
386045e3678SYuan Kang #ifdef DEBUG
387514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
388045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
389514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
390045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
391045e3678SYuan Kang #endif
392045e3678SYuan Kang 
393045e3678SYuan Kang 	result.err = 0;
394045e3678SYuan Kang 	init_completion(&result.completion);
395045e3678SYuan Kang 
396045e3678SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
397045e3678SYuan Kang 	if (!ret) {
398045e3678SYuan Kang 		/* in progress */
399045e3678SYuan Kang 		wait_for_completion_interruptible(&result.completion);
400045e3678SYuan Kang 		ret = result.err;
401045e3678SYuan Kang #ifdef DEBUG
402514df281SAlex Porosanu 		print_hex_dump(KERN_ERR,
403514df281SAlex Porosanu 			       "digested key@"__stringify(__LINE__)": ",
404045e3678SYuan Kang 			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
405045e3678SYuan Kang 			       digestsize, 1);
406045e3678SYuan Kang #endif
407045e3678SYuan Kang 	}
408045e3678SYuan Kang 	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
409045e3678SYuan Kang 	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
410045e3678SYuan Kang 
411e11aa9f1SHoria Geanta 	*keylen = digestsize;
412e11aa9f1SHoria Geanta 
413045e3678SYuan Kang 	kfree(desc);
414045e3678SYuan Kang 
415045e3678SYuan Kang 	return ret;
416045e3678SYuan Kang }
417045e3678SYuan Kang 
/*
 * crypto API .setkey for keyed (HMAC) ahash algorithms.
 *
 * Keys longer than the block size are first digested down to digestsize
 * (per the HMAC convention), then a split key is generated into ctx->key
 * and the shared descriptors are rebuilt. Returns 0 or -ENOMEM/-EINVAL;
 * on failure CRYPTO_TFM_RES_BAD_KEY_LEN is set on the tfm.
 */
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		/* oversized key: replace it with its digest */
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		/* updates keylen to digestsize on success */
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		goto bad_free_key;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	kfree(hashed_key);
	/* descriptors embed the key, so they must be regenerated */
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
462045e3678SYuan Kang 
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *           cacheline-aligned since it is handed to the engine
 * @sec4_sg: h/w link table (flexible trailing array, allocated past the
 *           end of the struct)
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
480045e3678SYuan Kang 
/*
 * Undo the DMA mappings held by an extended descriptor: input scatterlist,
 * result buffer (dst_len bytes), the sec4 link table, and the request
 * state's partial-block buffer (buf_dma is reset so a second unmap is a
 * no-op).
 */
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
502045e3678SYuan Kang 
503045e3678SYuan Kang static inline void ahash_unmap_ctx(struct device *dev,
504045e3678SYuan Kang 			struct ahash_edesc *edesc,
505045e3678SYuan Kang 			struct ahash_request *req, int dst_len, u32 flag)
506045e3678SYuan Kang {
507045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
508045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
509045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
510045e3678SYuan Kang 
51187ec02e7SHoria Geantă 	if (state->ctx_dma) {
512045e3678SYuan Kang 		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
51387ec02e7SHoria Geantă 		state->ctx_dma = 0;
51487ec02e7SHoria Geantă 	}
515045e3678SYuan Kang 	ahash_unmap(dev, edesc, req, dst_len);
516045e3678SYuan Kang }
517045e3678SYuan Kang 
/*
 * Job-ring completion callback: recover the edesc embedding the hw
 * descriptor, report any CAAM status error, unmap DMA (digestsize bytes
 * of result), free the edesc and complete the request.
 */
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* desc is hw_desc[0], so this recovers the enclosing edesc */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
551045e3678SYuan Kang 
/*
 * Completion callback for jobs whose context was mapped bidirectionally
 * (read and written by the engine): unmap ctx_len bytes of context,
 * switch the active partial-block buffer, free the edesc and complete
 * the request.
 */
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* desc is hw_desc[0], so this recovers the enclosing edesc */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	/* flip buffers now that the old one has been consumed */
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
586045e3678SYuan Kang 
/*
 * Completion callback for jobs where the context was an input
 * (DMA_TO_DEVICE) and the engine wrote digestsize bytes of result:
 * unmap, free the edesc and complete the request.
 */
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* desc is hw_desc[0], so this recovers the enclosing edesc */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
620045e3678SYuan Kang 
/*
 * Job ring completion callback for descriptors that write an updated
 * hash context back to memory (used by ahash_update_no_ctx and similar
 * "first job after init" paths via caam_jr_enqueue).
 *
 * @jrdev:   job ring device that ran the descriptor
 * @desc:    hw descriptor, embedded in the extended descriptor
 * @err:     CAAM job status word; 0 on success
 * @context: the originating ahash_request
 *
 * Unmaps all DMA resources, flips the double-buffered partial-block
 * buffer (switch_buf) so the next update uses the freshly filled one,
 * frees the extended descriptor and completes the request.
 */
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* recover the extended descriptor wrapping this hw descriptor */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		/* decode and log the CAAM status word */
		caam_jr_strstatus(jrdev, err);

	/* context was an output here, hence DMA_FROM_DEVICE */
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
655045e3678SYuan Kang 
6565588d039SRussell King /*
6575588d039SRussell King  * Allocate an enhanced descriptor, which contains the hardware descriptor
6585588d039SRussell King  * and space for hardware scatter table containing sg_num entries.
6595588d039SRussell King  */
6605588d039SRussell King static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
66130a43b44SRussell King 					     int sg_num, u32 *sh_desc,
66230a43b44SRussell King 					     dma_addr_t sh_desc_dma,
66330a43b44SRussell King 					     gfp_t flags)
6645588d039SRussell King {
6655588d039SRussell King 	struct ahash_edesc *edesc;
6665588d039SRussell King 	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
6675588d039SRussell King 
6685588d039SRussell King 	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
6695588d039SRussell King 	if (!edesc) {
6705588d039SRussell King 		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
6715588d039SRussell King 		return NULL;
6725588d039SRussell King 	}
6735588d039SRussell King 
67430a43b44SRussell King 	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
67530a43b44SRussell King 			     HDR_SHARE_DEFER | HDR_REVERSE);
67630a43b44SRussell King 
6775588d039SRussell King 	return edesc;
6785588d039SRussell King }
6795588d039SRussell King 
68065cf164aSRussell King static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
68165cf164aSRussell King 			       struct ahash_edesc *edesc,
68265cf164aSRussell King 			       struct ahash_request *req, int nents,
68365cf164aSRussell King 			       unsigned int first_sg,
68465cf164aSRussell King 			       unsigned int first_bytes, size_t to_hash)
68565cf164aSRussell King {
68665cf164aSRussell King 	dma_addr_t src_dma;
68765cf164aSRussell King 	u32 options;
68865cf164aSRussell King 
68965cf164aSRussell King 	if (nents > 1 || first_sg) {
69065cf164aSRussell King 		struct sec4_sg_entry *sg = edesc->sec4_sg;
69165cf164aSRussell King 		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
69265cf164aSRussell King 
69365cf164aSRussell King 		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
69465cf164aSRussell King 
69565cf164aSRussell King 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
69665cf164aSRussell King 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
69765cf164aSRussell King 			dev_err(ctx->jrdev, "unable to map S/G table\n");
69865cf164aSRussell King 			return -ENOMEM;
69965cf164aSRussell King 		}
70065cf164aSRussell King 
70165cf164aSRussell King 		edesc->sec4_sg_bytes = sgsize;
70265cf164aSRussell King 		edesc->sec4_sg_dma = src_dma;
70365cf164aSRussell King 		options = LDST_SGF;
70465cf164aSRussell King 	} else {
70565cf164aSRussell King 		src_dma = sg_dma_address(req->src);
70665cf164aSRussell King 		options = 0;
70765cf164aSRussell King 	}
70865cf164aSRussell King 
70965cf164aSRussell King 	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
71065cf164aSRussell King 			  options);
71165cf164aSRussell King 
71265cf164aSRussell King 	return 0;
71365cf164aSRussell King }
71465cf164aSRussell King 
715045e3678SYuan Kang /* submit update job descriptor */
716045e3678SYuan Kang static int ahash_update_ctx(struct ahash_request *req)
717045e3678SYuan Kang {
718045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
719045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
720045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
721045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
722045e3678SYuan Kang 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
723045e3678SYuan Kang 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
7240355d23dSHoria Geantă 	u8 *buf = current_buf(state);
7250355d23dSHoria Geantă 	int *buflen = current_buflen(state);
7260355d23dSHoria Geantă 	u8 *next_buf = alt_buf(state);
7270355d23dSHoria Geantă 	int *next_buflen = alt_buflen(state), last_buflen;
728045e3678SYuan Kang 	int in_len = *buflen + req->nbytes, to_hash;
72930a43b44SRussell King 	u32 *desc;
730bc13c69eSRussell King 	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
731045e3678SYuan Kang 	struct ahash_edesc *edesc;
732045e3678SYuan Kang 	int ret = 0;
733045e3678SYuan Kang 
734045e3678SYuan Kang 	last_buflen = *next_buflen;
735045e3678SYuan Kang 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
736045e3678SYuan Kang 	to_hash = in_len - *next_buflen;
737045e3678SYuan Kang 
738045e3678SYuan Kang 	if (to_hash) {
73913fb8fd7SLABBE Corentin 		src_nents = sg_nents_for_len(req->src,
74013fb8fd7SLABBE Corentin 					     req->nbytes - (*next_buflen));
741f9970c28SLABBE Corentin 		if (src_nents < 0) {
742f9970c28SLABBE Corentin 			dev_err(jrdev, "Invalid number of src SG.\n");
743f9970c28SLABBE Corentin 			return src_nents;
744f9970c28SLABBE Corentin 		}
745bc13c69eSRussell King 
746bc13c69eSRussell King 		if (src_nents) {
747bc13c69eSRussell King 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
748bc13c69eSRussell King 						  DMA_TO_DEVICE);
749bc13c69eSRussell King 			if (!mapped_nents) {
750bc13c69eSRussell King 				dev_err(jrdev, "unable to DMA map source\n");
751bc13c69eSRussell King 				return -ENOMEM;
752bc13c69eSRussell King 			}
753bc13c69eSRussell King 		} else {
754bc13c69eSRussell King 			mapped_nents = 0;
755bc13c69eSRussell King 		}
756bc13c69eSRussell King 
757045e3678SYuan Kang 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
758bc13c69eSRussell King 		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
759045e3678SYuan Kang 				 sizeof(struct sec4_sg_entry);
760045e3678SYuan Kang 
761045e3678SYuan Kang 		/*
762045e3678SYuan Kang 		 * allocate space for base edesc and hw desc commands,
763045e3678SYuan Kang 		 * link tables
764045e3678SYuan Kang 		 */
7655588d039SRussell King 		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
76630a43b44SRussell King 					  ctx->sh_desc_update,
76730a43b44SRussell King 					  ctx->sh_desc_update_dma, flags);
768045e3678SYuan Kang 		if (!edesc) {
769bc13c69eSRussell King 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
770045e3678SYuan Kang 			return -ENOMEM;
771045e3678SYuan Kang 		}
772045e3678SYuan Kang 
773045e3678SYuan Kang 		edesc->src_nents = src_nents;
774045e3678SYuan Kang 		edesc->sec4_sg_bytes = sec4_sg_bytes;
775045e3678SYuan Kang 
776ce572085SHoria Geanta 		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
777045e3678SYuan Kang 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
778ce572085SHoria Geanta 		if (ret)
77958b0e5d0SMarkus Elfring 			goto unmap_ctx;
780045e3678SYuan Kang 
781*944c3d4dSHoria Geantă 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
782*944c3d4dSHoria Geantă 		if (ret)
783*944c3d4dSHoria Geantă 			goto unmap_ctx;
784045e3678SYuan Kang 
785bc13c69eSRussell King 		if (mapped_nents) {
786bc13c69eSRussell King 			sg_to_sec4_sg_last(req->src, mapped_nents,
787bc13c69eSRussell King 					   edesc->sec4_sg + sec4_sg_src_index,
788bc13c69eSRussell King 					   0);
7898af7b0f8SVictoria Milhoan 			if (*next_buflen)
790307fd543SCristian Stoica 				scatterwalk_map_and_copy(next_buf, req->src,
791307fd543SCristian Stoica 							 to_hash - *buflen,
792307fd543SCristian Stoica 							 *next_buflen, 0);
793045e3678SYuan Kang 		} else {
794045e3678SYuan Kang 			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
795261ea058SHoria Geantă 				cpu_to_caam32(SEC4_SG_LEN_FIN);
796045e3678SYuan Kang 		}
797045e3678SYuan Kang 
798045e3678SYuan Kang 		desc = edesc->hw_desc;
799045e3678SYuan Kang 
8001da2be33SRuchika Gupta 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
8011da2be33SRuchika Gupta 						     sec4_sg_bytes,
8021da2be33SRuchika Gupta 						     DMA_TO_DEVICE);
803ce572085SHoria Geanta 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
804ce572085SHoria Geanta 			dev_err(jrdev, "unable to map S/G table\n");
80532686d34SRussell King 			ret = -ENOMEM;
80658b0e5d0SMarkus Elfring 			goto unmap_ctx;
807ce572085SHoria Geanta 		}
8081da2be33SRuchika Gupta 
809045e3678SYuan Kang 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
810045e3678SYuan Kang 				       to_hash, LDST_SGF);
811045e3678SYuan Kang 
812045e3678SYuan Kang 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
813045e3678SYuan Kang 
814045e3678SYuan Kang #ifdef DEBUG
815514df281SAlex Porosanu 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
816045e3678SYuan Kang 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
817045e3678SYuan Kang 			       desc_bytes(desc), 1);
818045e3678SYuan Kang #endif
819045e3678SYuan Kang 
820045e3678SYuan Kang 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
82132686d34SRussell King 		if (ret)
82258b0e5d0SMarkus Elfring 			goto unmap_ctx;
82332686d34SRussell King 
824045e3678SYuan Kang 		ret = -EINPROGRESS;
825045e3678SYuan Kang 	} else if (*next_buflen) {
826307fd543SCristian Stoica 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
827307fd543SCristian Stoica 					 req->nbytes, 0);
828045e3678SYuan Kang 		*buflen = *next_buflen;
829045e3678SYuan Kang 		*next_buflen = last_buflen;
830045e3678SYuan Kang 	}
831045e3678SYuan Kang #ifdef DEBUG
832514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
833045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
834514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
835045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
836045e3678SYuan Kang 		       *next_buflen, 1);
837045e3678SYuan Kang #endif
838045e3678SYuan Kang 
839045e3678SYuan Kang 	return ret;
84058b0e5d0SMarkus Elfring  unmap_ctx:
84132686d34SRussell King 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
84232686d34SRussell King 	kfree(edesc);
84332686d34SRussell King 	return ret;
844045e3678SYuan Kang }
845045e3678SYuan Kang 
/*
 * Submit the "final" job: hash the running context plus any buffered
 * partial block through the shared "fin" descriptor and write the
 * digest to req->result.
 *
 * Returns -EINPROGRESS when the job was enqueued (completed via
 * ahash_done_ctx_src), or a negative error code; all DMA mappings are
 * released via the unmap_ctx cleanup path on error.
 */
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* one S/G entry for the context, one more if data is buffered */
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	/* context is input-only for the final operation */
	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	/* flag the last S/G entry so the engine knows input ends there */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
922045e3678SYuan Kang 
/*
 * Submit a "finup" job: hash the running context, the buffered partial
 * block and the remaining request data in one pass through the shared
 * "fin" descriptor, writing the digest to req->result.
 *
 * Returns -EINPROGRESS when the job was enqueued (completed via
 * ahash_done_ctx_src), or a negative error code; DMA mappings are
 * released via the unmap_ctx cleanup path on error.
 */
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* one S/G entry for the context, one more if data is buffered */
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	/* context is input-only for the finup operation */
	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	/* append req->src after context+buffer entries and set SEQ IN */
	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
1009045e3678SYuan Kang 
/*
 * Submit a one-shot digest job: hash all of req->src through the shared
 * "digest" descriptor (no running context) and write the digest to
 * req->result.
 *
 * Returns -EINPROGRESS when the job was enqueued (completed via
 * ahash_done), or a negative error code with all mappings released.
 */
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	/* no buffered data is involved in a one-shot digest */
	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	/* a single segment needs no S/G table, hence 0 entries */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
1088045e3678SYuan Kang 
1089045e3678SYuan Kang /* submit ahash final if it the first job descriptor */
1090045e3678SYuan Kang static int ahash_final_no_ctx(struct ahash_request *req)
1091045e3678SYuan Kang {
1092045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1093045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1094045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1095045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
1096045e3678SYuan Kang 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1097045e3678SYuan Kang 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
10980355d23dSHoria Geantă 	u8 *buf = current_buf(state);
10990355d23dSHoria Geantă 	int buflen = *current_buflen(state);
110030a43b44SRussell King 	u32 *desc;
1101045e3678SYuan Kang 	int digestsize = crypto_ahash_digestsize(ahash);
1102045e3678SYuan Kang 	struct ahash_edesc *edesc;
11039e6df0fdSMarkus Elfring 	int ret;
1104045e3678SYuan Kang 
1105045e3678SYuan Kang 	/* allocate space for base edesc and hw desc commands, link tables */
110630a43b44SRussell King 	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
110730a43b44SRussell King 				  ctx->sh_desc_digest_dma, flags);
11085588d039SRussell King 	if (!edesc)
1109045e3678SYuan Kang 		return -ENOMEM;
1110045e3678SYuan Kang 
1111045e3678SYuan Kang 	desc = edesc->hw_desc;
1112045e3678SYuan Kang 
1113045e3678SYuan Kang 	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1114ce572085SHoria Geanta 	if (dma_mapping_error(jrdev, state->buf_dma)) {
1115ce572085SHoria Geanta 		dev_err(jrdev, "unable to map src\n");
111606435f34SMarkus Elfring 		goto unmap;
1117ce572085SHoria Geanta 	}
1118045e3678SYuan Kang 
1119045e3678SYuan Kang 	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1120045e3678SYuan Kang 
1121045e3678SYuan Kang 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1122045e3678SYuan Kang 						digestsize);
1123ce572085SHoria Geanta 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1124ce572085SHoria Geanta 		dev_err(jrdev, "unable to map dst\n");
112506435f34SMarkus Elfring 		goto unmap;
1126ce572085SHoria Geanta 	}
1127045e3678SYuan Kang 	edesc->src_nents = 0;
1128045e3678SYuan Kang 
1129045e3678SYuan Kang #ifdef DEBUG
1130514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1131045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1132045e3678SYuan Kang #endif
1133045e3678SYuan Kang 
1134045e3678SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1135045e3678SYuan Kang 	if (!ret) {
1136045e3678SYuan Kang 		ret = -EINPROGRESS;
1137045e3678SYuan Kang 	} else {
1138045e3678SYuan Kang 		ahash_unmap(jrdev, edesc, req, digestsize);
1139045e3678SYuan Kang 		kfree(edesc);
1140045e3678SYuan Kang 	}
1141045e3678SYuan Kang 
1142045e3678SYuan Kang 	return ret;
114306435f34SMarkus Elfring  unmap:
114406435f34SMarkus Elfring 	ahash_unmap(jrdev, edesc, req, digestsize);
114506435f34SMarkus Elfring 	kfree(edesc);
114606435f34SMarkus Elfring 	return -ENOMEM;
114706435f34SMarkus Elfring 
1148045e3678SYuan Kang }
1149045e3678SYuan Kang 
1150045e3678SYuan Kang /* submit ahash update if it the first job descriptor after update */
1151045e3678SYuan Kang static int ahash_update_no_ctx(struct ahash_request *req)
1152045e3678SYuan Kang {
1153045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1154045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1155045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1156045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
1157045e3678SYuan Kang 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1158045e3678SYuan Kang 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
11590355d23dSHoria Geantă 	u8 *buf = current_buf(state);
11600355d23dSHoria Geantă 	int *buflen = current_buflen(state);
11610355d23dSHoria Geantă 	u8 *next_buf = alt_buf(state);
11620355d23dSHoria Geantă 	int *next_buflen = alt_buflen(state);
1163045e3678SYuan Kang 	int in_len = *buflen + req->nbytes, to_hash;
1164bc13c69eSRussell King 	int sec4_sg_bytes, src_nents, mapped_nents;
1165045e3678SYuan Kang 	struct ahash_edesc *edesc;
116630a43b44SRussell King 	u32 *desc;
1167045e3678SYuan Kang 	int ret = 0;
1168045e3678SYuan Kang 
1169045e3678SYuan Kang 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1170045e3678SYuan Kang 	to_hash = in_len - *next_buflen;
1171045e3678SYuan Kang 
1172045e3678SYuan Kang 	if (to_hash) {
117313fb8fd7SLABBE Corentin 		src_nents = sg_nents_for_len(req->src,
11743d5a2db6SRussell King 					     req->nbytes - *next_buflen);
1175f9970c28SLABBE Corentin 		if (src_nents < 0) {
1176f9970c28SLABBE Corentin 			dev_err(jrdev, "Invalid number of src SG.\n");
1177f9970c28SLABBE Corentin 			return src_nents;
1178f9970c28SLABBE Corentin 		}
1179bc13c69eSRussell King 
1180bc13c69eSRussell King 		if (src_nents) {
1181bc13c69eSRussell King 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1182bc13c69eSRussell King 						  DMA_TO_DEVICE);
1183bc13c69eSRussell King 			if (!mapped_nents) {
1184bc13c69eSRussell King 				dev_err(jrdev, "unable to DMA map source\n");
1185bc13c69eSRussell King 				return -ENOMEM;
1186bc13c69eSRussell King 			}
1187bc13c69eSRussell King 		} else {
1188bc13c69eSRussell King 			mapped_nents = 0;
1189bc13c69eSRussell King 		}
1190bc13c69eSRussell King 
1191bc13c69eSRussell King 		sec4_sg_bytes = (1 + mapped_nents) *
1192045e3678SYuan Kang 				sizeof(struct sec4_sg_entry);
1193045e3678SYuan Kang 
1194045e3678SYuan Kang 		/*
1195045e3678SYuan Kang 		 * allocate space for base edesc and hw desc commands,
1196045e3678SYuan Kang 		 * link tables
1197045e3678SYuan Kang 		 */
119830a43b44SRussell King 		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
119930a43b44SRussell King 					  ctx->sh_desc_update_first,
120030a43b44SRussell King 					  ctx->sh_desc_update_first_dma,
120130a43b44SRussell King 					  flags);
1202045e3678SYuan Kang 		if (!edesc) {
1203bc13c69eSRussell King 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1204045e3678SYuan Kang 			return -ENOMEM;
1205045e3678SYuan Kang 		}
1206045e3678SYuan Kang 
1207045e3678SYuan Kang 		edesc->src_nents = src_nents;
1208045e3678SYuan Kang 		edesc->sec4_sg_bytes = sec4_sg_bytes;
120976b99080SHoria Geanta 		edesc->dst_dma = 0;
1210045e3678SYuan Kang 
1211*944c3d4dSHoria Geantă 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1212*944c3d4dSHoria Geantă 		if (ret)
1213*944c3d4dSHoria Geantă 			goto unmap_ctx;
1214*944c3d4dSHoria Geantă 
1215bc13c69eSRussell King 		sg_to_sec4_sg_last(req->src, mapped_nents,
1216bc13c69eSRussell King 				   edesc->sec4_sg + 1, 0);
1217bc13c69eSRussell King 
1218045e3678SYuan Kang 		if (*next_buflen) {
1219307fd543SCristian Stoica 			scatterwalk_map_and_copy(next_buf, req->src,
1220307fd543SCristian Stoica 						 to_hash - *buflen,
1221307fd543SCristian Stoica 						 *next_buflen, 0);
1222045e3678SYuan Kang 		}
1223045e3678SYuan Kang 
1224045e3678SYuan Kang 		desc = edesc->hw_desc;
1225045e3678SYuan Kang 
12261da2be33SRuchika Gupta 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
12271da2be33SRuchika Gupta 						    sec4_sg_bytes,
12281da2be33SRuchika Gupta 						    DMA_TO_DEVICE);
1229ce572085SHoria Geanta 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1230ce572085SHoria Geanta 			dev_err(jrdev, "unable to map S/G table\n");
123132686d34SRussell King 			ret = -ENOMEM;
123258b0e5d0SMarkus Elfring 			goto unmap_ctx;
1233ce572085SHoria Geanta 		}
12341da2be33SRuchika Gupta 
1235045e3678SYuan Kang 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1236045e3678SYuan Kang 
1237ce572085SHoria Geanta 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1238ce572085SHoria Geanta 		if (ret)
123958b0e5d0SMarkus Elfring 			goto unmap_ctx;
1240045e3678SYuan Kang 
1241045e3678SYuan Kang #ifdef DEBUG
1242514df281SAlex Porosanu 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1243045e3678SYuan Kang 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1244045e3678SYuan Kang 			       desc_bytes(desc), 1);
1245045e3678SYuan Kang #endif
1246045e3678SYuan Kang 
1247045e3678SYuan Kang 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
124832686d34SRussell King 		if (ret)
124958b0e5d0SMarkus Elfring 			goto unmap_ctx;
125032686d34SRussell King 
1251045e3678SYuan Kang 		ret = -EINPROGRESS;
1252045e3678SYuan Kang 		state->update = ahash_update_ctx;
1253045e3678SYuan Kang 		state->finup = ahash_finup_ctx;
1254045e3678SYuan Kang 		state->final = ahash_final_ctx;
1255045e3678SYuan Kang 	} else if (*next_buflen) {
1256307fd543SCristian Stoica 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1257307fd543SCristian Stoica 					 req->nbytes, 0);
1258045e3678SYuan Kang 		*buflen = *next_buflen;
1259045e3678SYuan Kang 		*next_buflen = 0;
1260045e3678SYuan Kang 	}
1261045e3678SYuan Kang #ifdef DEBUG
1262514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1263045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1264514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1265045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1266045e3678SYuan Kang 		       *next_buflen, 1);
1267045e3678SYuan Kang #endif
1268045e3678SYuan Kang 
1269045e3678SYuan Kang 	return ret;
127058b0e5d0SMarkus Elfring  unmap_ctx:
127132686d34SRussell King 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
127232686d34SRussell King 	kfree(edesc);
127332686d34SRussell King 	return ret;
1274045e3678SYuan Kang }
1275045e3678SYuan Kang 
1276045e3678SYuan Kang /* submit ahash finup if it the first job descriptor after update */
1277045e3678SYuan Kang static int ahash_finup_no_ctx(struct ahash_request *req)
1278045e3678SYuan Kang {
1279045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1280045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1281045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1282045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
1283045e3678SYuan Kang 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1284045e3678SYuan Kang 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
12850355d23dSHoria Geantă 	int buflen = *current_buflen(state);
128630a43b44SRussell King 	u32 *desc;
1287bc13c69eSRussell King 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1288045e3678SYuan Kang 	int digestsize = crypto_ahash_digestsize(ahash);
1289045e3678SYuan Kang 	struct ahash_edesc *edesc;
12909e6df0fdSMarkus Elfring 	int ret;
1291045e3678SYuan Kang 
129213fb8fd7SLABBE Corentin 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1293f9970c28SLABBE Corentin 	if (src_nents < 0) {
1294f9970c28SLABBE Corentin 		dev_err(jrdev, "Invalid number of src SG.\n");
1295f9970c28SLABBE Corentin 		return src_nents;
1296f9970c28SLABBE Corentin 	}
1297bc13c69eSRussell King 
1298bc13c69eSRussell King 	if (src_nents) {
1299bc13c69eSRussell King 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1300bc13c69eSRussell King 					  DMA_TO_DEVICE);
1301bc13c69eSRussell King 		if (!mapped_nents) {
1302bc13c69eSRussell King 			dev_err(jrdev, "unable to DMA map source\n");
1303bc13c69eSRussell King 			return -ENOMEM;
1304bc13c69eSRussell King 		}
1305bc13c69eSRussell King 	} else {
1306bc13c69eSRussell King 		mapped_nents = 0;
1307bc13c69eSRussell King 	}
1308bc13c69eSRussell King 
1309045e3678SYuan Kang 	sec4_sg_src_index = 2;
1310bc13c69eSRussell King 	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1311045e3678SYuan Kang 			 sizeof(struct sec4_sg_entry);
1312045e3678SYuan Kang 
1313045e3678SYuan Kang 	/* allocate space for base edesc and hw desc commands, link tables */
131430a43b44SRussell King 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
131530a43b44SRussell King 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
131630a43b44SRussell King 				  flags);
1317045e3678SYuan Kang 	if (!edesc) {
1318bc13c69eSRussell King 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1319045e3678SYuan Kang 		return -ENOMEM;
1320045e3678SYuan Kang 	}
1321045e3678SYuan Kang 
1322045e3678SYuan Kang 	desc = edesc->hw_desc;
1323045e3678SYuan Kang 
1324045e3678SYuan Kang 	edesc->src_nents = src_nents;
1325045e3678SYuan Kang 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1326045e3678SYuan Kang 
1327*944c3d4dSHoria Geantă 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1328*944c3d4dSHoria Geantă 	if (ret)
1329*944c3d4dSHoria Geantă 		goto unmap;
1330045e3678SYuan Kang 
133165cf164aSRussell King 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
133265cf164aSRussell King 				  req->nbytes);
133365cf164aSRussell King 	if (ret) {
1334ce572085SHoria Geanta 		dev_err(jrdev, "unable to map S/G table\n");
133506435f34SMarkus Elfring 		goto unmap;
1336ce572085SHoria Geanta 	}
13371da2be33SRuchika Gupta 
1338045e3678SYuan Kang 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1339045e3678SYuan Kang 						digestsize);
1340ce572085SHoria Geanta 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1341ce572085SHoria Geanta 		dev_err(jrdev, "unable to map dst\n");
134206435f34SMarkus Elfring 		goto unmap;
1343ce572085SHoria Geanta 	}
1344045e3678SYuan Kang 
1345045e3678SYuan Kang #ifdef DEBUG
1346514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1347045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1348045e3678SYuan Kang #endif
1349045e3678SYuan Kang 
1350045e3678SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1351045e3678SYuan Kang 	if (!ret) {
1352045e3678SYuan Kang 		ret = -EINPROGRESS;
1353045e3678SYuan Kang 	} else {
1354045e3678SYuan Kang 		ahash_unmap(jrdev, edesc, req, digestsize);
1355045e3678SYuan Kang 		kfree(edesc);
1356045e3678SYuan Kang 	}
1357045e3678SYuan Kang 
1358045e3678SYuan Kang 	return ret;
135906435f34SMarkus Elfring  unmap:
136006435f34SMarkus Elfring 	ahash_unmap(jrdev, edesc, req, digestsize);
136106435f34SMarkus Elfring 	kfree(edesc);
136206435f34SMarkus Elfring 	return -ENOMEM;
136306435f34SMarkus Elfring 
1364045e3678SYuan Kang }
1365045e3678SYuan Kang 
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	/*
	 * Only whole blocks are sent to the engine; the sub-blocksize tail
	 * is held back in the alternate buffer for a later update/final.
	 */
	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		/* zero-length source needs no DMA mapping */
		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables (only needed when more than one S/G entry)
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		/* stash the unhashed tail for the next request */
		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		/* job is in flight: later steps use the context-based paths */
		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* request shorter than one block: just buffer it, no job */
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
1472045e3678SYuan Kang 
static int ahash_finup_first(struct ahash_request *req)
{
	/* nothing buffered yet, so finup degenerates to a one-shot digest */
	return ahash_digest(req);
}
1477045e3678SYuan Kang 
1478045e3678SYuan Kang static int ahash_init(struct ahash_request *req)
1479045e3678SYuan Kang {
1480045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1481045e3678SYuan Kang 
1482045e3678SYuan Kang 	state->update = ahash_update_first;
1483045e3678SYuan Kang 	state->finup = ahash_finup_first;
1484045e3678SYuan Kang 	state->final = ahash_final_no_ctx;
1485045e3678SYuan Kang 
148687ec02e7SHoria Geantă 	state->ctx_dma = 0;
1487045e3678SYuan Kang 	state->current_buf = 0;
1488de0e35ecSHoria Geanta 	state->buf_dma = 0;
14896fd4b156SSteve Cornelius 	state->buflen_0 = 0;
14906fd4b156SSteve Cornelius 	state->buflen_1 = 0;
1491045e3678SYuan Kang 
1492045e3678SYuan Kang 	return 0;
1493045e3678SYuan Kang }
1494045e3678SYuan Kang 
/* crypto API .update entry point: defer to the state-selected handler */
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}
1501045e3678SYuan Kang 
/* crypto API .finup entry point: defer to the state-selected handler */
static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}
1508045e3678SYuan Kang 
/* crypto API .final entry point: defer to the state-selected handler */
static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
1515045e3678SYuan Kang 
1516045e3678SYuan Kang static int ahash_export(struct ahash_request *req, void *out)
1517045e3678SYuan Kang {
1518045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
15195ec90831SRussell King 	struct caam_export_state *export = out;
15205ec90831SRussell King 	int len;
15215ec90831SRussell King 	u8 *buf;
1522045e3678SYuan Kang 
15235ec90831SRussell King 	if (state->current_buf) {
15245ec90831SRussell King 		buf = state->buf_1;
15255ec90831SRussell King 		len = state->buflen_1;
15265ec90831SRussell King 	} else {
15275ec90831SRussell King 		buf = state->buf_0;
1528f456cd2dSFabio Estevam 		len = state->buflen_0;
15295ec90831SRussell King 	}
15305ec90831SRussell King 
15315ec90831SRussell King 	memcpy(export->buf, buf, len);
15325ec90831SRussell King 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
15335ec90831SRussell King 	export->buflen = len;
15345ec90831SRussell King 	export->update = state->update;
15355ec90831SRussell King 	export->final = state->final;
15365ec90831SRussell King 	export->finup = state->finup;
1537434b4212SRussell King 
1538045e3678SYuan Kang 	return 0;
1539045e3678SYuan Kang }
1540045e3678SYuan Kang 
1541045e3678SYuan Kang static int ahash_import(struct ahash_request *req, const void *in)
1542045e3678SYuan Kang {
1543045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
15445ec90831SRussell King 	const struct caam_export_state *export = in;
1545045e3678SYuan Kang 
15465ec90831SRussell King 	memset(state, 0, sizeof(*state));
15475ec90831SRussell King 	memcpy(state->buf_0, export->buf, export->buflen);
15485ec90831SRussell King 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
15495ec90831SRussell King 	state->buflen_0 = export->buflen;
15505ec90831SRussell King 	state->update = export->update;
15515ec90831SRussell King 	state->final = export->final;
15525ec90831SRussell King 	state->finup = export->finup;
1553434b4212SRussell King 
1554045e3678SYuan Kang 	return 0;
1555045e3678SYuan Kang }
1556045e3678SYuan Kang 
/* per-algorithm registration template (filled in driver_hash[] below) */
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];			/* unkeyed algorithm name */
	char driver_name[CRYPTO_MAX_ALG_NAME];		/* unkeyed driver name */
	char hmac_name[CRYPTO_MAX_ALG_NAME];		/* keyed (hmac) algorithm name */
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];	/* keyed (hmac) driver name */
	unsigned int blocksize;				/* algorithm block size, bytes */
	struct ahash_alg template_ahash;		/* ops copied into each alg */
	u32 alg_type;					/* OP_ALG_ALGSEL_* selector */
};
1566045e3678SYuan Kang 
/*
 * ahash descriptors: one template per supported algorithm; each is
 * registered twice by the init path, once unkeyed (.name) and once
 * keyed/HMAC (.hmac_name).  All entries share the same callback set.
 */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
1697045e3678SYuan Kang 
/* one registered algorithm instance, kept on hash_list for teardown */
struct caam_hash_alg {
	struct list_head entry;		/* membership in hash_list */
	int alg_type;			/* OP_ALG_ALGSEL_* from the template */
	struct ahash_alg ahash_alg;	/* the alg registered with crypto API */
};
1703045e3678SYuan Kang 
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	/* recover the caam_hash_alg wrapper this tfm was registered from */
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/*
	 * Map all shared descriptors as a single region covering the ctx up
	 * to sh_desc_update_dma; the individual *_dma handles below are
	 * offsets into this one mapping.  Undone in caam_hash_cra_exit().
	 */
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	/* running-digest length, indexed by the ALGSEL field of algtype */
	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	/* build the shared descriptors now that algtype/ctx_len are known */
	return ahash_set_sh_desc(ahash);
}
1764045e3678SYuan Kang 
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	/*
	 * Undo the single shared-descriptor mapping made in
	 * caam_hash_cra_init(): same base, length and attrs.
	 */
	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	/* release the job ring allocated at init */
	caam_jr_free(ctx->jrdev);
}
1775045e3678SYuan Kang 
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	/* hash_list.next is NULL if init bailed before INIT_LIST_HEAD() */
	if (!hash_list.next)
		return;

	/* unregister and free every algorithm instance registered at init */
	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
1789045e3678SYuan Kang 
1790045e3678SYuan Kang static struct caam_hash_alg *
1791cfc6f11bSRuchika Gupta caam_hash_alloc(struct caam_hash_template *template,
1792b0e09baeSYuan Kang 		bool keyed)
1793045e3678SYuan Kang {
1794045e3678SYuan Kang 	struct caam_hash_alg *t_alg;
1795045e3678SYuan Kang 	struct ahash_alg *halg;
1796045e3678SYuan Kang 	struct crypto_alg *alg;
1797045e3678SYuan Kang 
17989c4f9733SFabio Estevam 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1799045e3678SYuan Kang 	if (!t_alg) {
1800cfc6f11bSRuchika Gupta 		pr_err("failed to allocate t_alg\n");
1801045e3678SYuan Kang 		return ERR_PTR(-ENOMEM);
1802045e3678SYuan Kang 	}
1803045e3678SYuan Kang 
1804045e3678SYuan Kang 	t_alg->ahash_alg = template->template_ahash;
1805045e3678SYuan Kang 	halg = &t_alg->ahash_alg;
1806045e3678SYuan Kang 	alg = &halg->halg.base;
1807045e3678SYuan Kang 
1808b0e09baeSYuan Kang 	if (keyed) {
1809b0e09baeSYuan Kang 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1810b0e09baeSYuan Kang 			 template->hmac_name);
1811b0e09baeSYuan Kang 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1812b0e09baeSYuan Kang 			 template->hmac_driver_name);
1813b0e09baeSYuan Kang 	} else {
1814b0e09baeSYuan Kang 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1815b0e09baeSYuan Kang 			 template->name);
1816045e3678SYuan Kang 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1817045e3678SYuan Kang 			 template->driver_name);
1818a0118c8bSRussell King 		t_alg->ahash_alg.setkey = NULL;
1819b0e09baeSYuan Kang 	}
1820045e3678SYuan Kang 	alg->cra_module = THIS_MODULE;
1821045e3678SYuan Kang 	alg->cra_init = caam_hash_cra_init;
1822045e3678SYuan Kang 	alg->cra_exit = caam_hash_cra_exit;
1823045e3678SYuan Kang 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1824045e3678SYuan Kang 	alg->cra_priority = CAAM_CRA_PRIORITY;
1825045e3678SYuan Kang 	alg->cra_blocksize = template->blocksize;
1826045e3678SYuan Kang 	alg->cra_alignmask = 0;
1827045e3678SYuan Kang 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1828045e3678SYuan Kang 	alg->cra_type = &crypto_ahash_type;
1829045e3678SYuan Kang 
1830045e3678SYuan Kang 	t_alg->alg_type = template->alg_type;
1831045e3678SYuan Kang 
1832045e3678SYuan Kang 	return t_alg;
1833045e3678SYuan Kang }
1834045e3678SYuan Kang 
1835045e3678SYuan Kang static int __init caam_algapi_hash_init(void)
1836045e3678SYuan Kang {
183735af6403SRuchika Gupta 	struct device_node *dev_node;
183835af6403SRuchika Gupta 	struct platform_device *pdev;
183935af6403SRuchika Gupta 	struct device *ctrldev;
1840045e3678SYuan Kang 	int i = 0, err = 0;
1841bf83490eSVictoria Milhoan 	struct caam_drv_private *priv;
1842bf83490eSVictoria Milhoan 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1843bf83490eSVictoria Milhoan 	u32 cha_inst, cha_vid;
1844045e3678SYuan Kang 
184535af6403SRuchika Gupta 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
184635af6403SRuchika Gupta 	if (!dev_node) {
184735af6403SRuchika Gupta 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
184835af6403SRuchika Gupta 		if (!dev_node)
184935af6403SRuchika Gupta 			return -ENODEV;
185035af6403SRuchika Gupta 	}
185135af6403SRuchika Gupta 
185235af6403SRuchika Gupta 	pdev = of_find_device_by_node(dev_node);
185335af6403SRuchika Gupta 	if (!pdev) {
185435af6403SRuchika Gupta 		of_node_put(dev_node);
185535af6403SRuchika Gupta 		return -ENODEV;
185635af6403SRuchika Gupta 	}
185735af6403SRuchika Gupta 
185835af6403SRuchika Gupta 	ctrldev = &pdev->dev;
185935af6403SRuchika Gupta 	priv = dev_get_drvdata(ctrldev);
186035af6403SRuchika Gupta 	of_node_put(dev_node);
186135af6403SRuchika Gupta 
186235af6403SRuchika Gupta 	/*
186335af6403SRuchika Gupta 	 * If priv is NULL, it's probably because the caam driver wasn't
186435af6403SRuchika Gupta 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
186535af6403SRuchika Gupta 	 */
186635af6403SRuchika Gupta 	if (!priv)
186735af6403SRuchika Gupta 		return -ENODEV;
186835af6403SRuchika Gupta 
1869bf83490eSVictoria Milhoan 	/*
1870bf83490eSVictoria Milhoan 	 * Register crypto algorithms the device supports.  First, identify
1871bf83490eSVictoria Milhoan 	 * presence and attributes of MD block.
1872bf83490eSVictoria Milhoan 	 */
1873bf83490eSVictoria Milhoan 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
1874bf83490eSVictoria Milhoan 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
1875bf83490eSVictoria Milhoan 
1876bf83490eSVictoria Milhoan 	/*
1877bf83490eSVictoria Milhoan 	 * Skip registration of any hashing algorithms if MD block
1878bf83490eSVictoria Milhoan 	 * is not present.
1879bf83490eSVictoria Milhoan 	 */
1880bf83490eSVictoria Milhoan 	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
1881bf83490eSVictoria Milhoan 		return -ENODEV;
1882bf83490eSVictoria Milhoan 
1883bf83490eSVictoria Milhoan 	/* Limit digest size based on LP256 */
1884bf83490eSVictoria Milhoan 	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
1885bf83490eSVictoria Milhoan 		md_limit = SHA256_DIGEST_SIZE;
1886bf83490eSVictoria Milhoan 
1887cfc6f11bSRuchika Gupta 	INIT_LIST_HEAD(&hash_list);
1888045e3678SYuan Kang 
1889045e3678SYuan Kang 	/* register crypto algorithms the device supports */
1890045e3678SYuan Kang 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1891045e3678SYuan Kang 		struct caam_hash_alg *t_alg;
1892bf83490eSVictoria Milhoan 		struct caam_hash_template *alg = driver_hash + i;
1893bf83490eSVictoria Milhoan 
1894bf83490eSVictoria Milhoan 		/* If MD size is not supported by device, skip registration */
1895bf83490eSVictoria Milhoan 		if (alg->template_ahash.halg.digestsize > md_limit)
1896bf83490eSVictoria Milhoan 			continue;
1897045e3678SYuan Kang 
1898b0e09baeSYuan Kang 		/* register hmac version */
1899bf83490eSVictoria Milhoan 		t_alg = caam_hash_alloc(alg, true);
1900b0e09baeSYuan Kang 		if (IS_ERR(t_alg)) {
1901b0e09baeSYuan Kang 			err = PTR_ERR(t_alg);
1902bf83490eSVictoria Milhoan 			pr_warn("%s alg allocation failed\n", alg->driver_name);
1903b0e09baeSYuan Kang 			continue;
1904b0e09baeSYuan Kang 		}
1905b0e09baeSYuan Kang 
1906b0e09baeSYuan Kang 		err = crypto_register_ahash(&t_alg->ahash_alg);
1907b0e09baeSYuan Kang 		if (err) {
19086ea30f0aSRussell King 			pr_warn("%s alg registration failed: %d\n",
19096ea30f0aSRussell King 				t_alg->ahash_alg.halg.base.cra_driver_name,
19106ea30f0aSRussell King 				err);
1911b0e09baeSYuan Kang 			kfree(t_alg);
1912b0e09baeSYuan Kang 		} else
1913cfc6f11bSRuchika Gupta 			list_add_tail(&t_alg->entry, &hash_list);
1914b0e09baeSYuan Kang 
1915b0e09baeSYuan Kang 		/* register unkeyed version */
1916bf83490eSVictoria Milhoan 		t_alg = caam_hash_alloc(alg, false);
1917045e3678SYuan Kang 		if (IS_ERR(t_alg)) {
1918045e3678SYuan Kang 			err = PTR_ERR(t_alg);
1919bf83490eSVictoria Milhoan 			pr_warn("%s alg allocation failed\n", alg->driver_name);
1920045e3678SYuan Kang 			continue;
1921045e3678SYuan Kang 		}
1922045e3678SYuan Kang 
1923045e3678SYuan Kang 		err = crypto_register_ahash(&t_alg->ahash_alg);
1924045e3678SYuan Kang 		if (err) {
19256ea30f0aSRussell King 			pr_warn("%s alg registration failed: %d\n",
19266ea30f0aSRussell King 				t_alg->ahash_alg.halg.base.cra_driver_name,
19276ea30f0aSRussell King 				err);
1928045e3678SYuan Kang 			kfree(t_alg);
1929045e3678SYuan Kang 		} else
1930cfc6f11bSRuchika Gupta 			list_add_tail(&t_alg->entry, &hash_list);
1931045e3678SYuan Kang 	}
1932045e3678SYuan Kang 
1933045e3678SYuan Kang 	return err;
1934045e3678SYuan Kang }
1935045e3678SYuan Kang 
1936045e3678SYuan Kang module_init(caam_algapi_hash_init);
1937045e3678SYuan Kang module_exit(caam_algapi_hash_exit);
1938045e3678SYuan Kang 
1939045e3678SYuan Kang MODULE_LICENSE("GPL");
1940045e3678SYuan Kang MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1941045e3678SYuan Kang MODULE_AUTHOR("Freescale Semiconductor - NMG");
1942