xref: /linux/drivers/crypto/caam/caamhash.c (revision 5588d039b5ea35760ffc94a50ed3aa2027aec11d)
1045e3678SYuan Kang /*
2045e3678SYuan Kang  * caam - Freescale FSL CAAM support for ahash functions of crypto API
3045e3678SYuan Kang  *
4045e3678SYuan Kang  * Copyright 2011 Freescale Semiconductor, Inc.
5045e3678SYuan Kang  *
6045e3678SYuan Kang  * Based on caamalg.c crypto API driver.
7045e3678SYuan Kang  *
8045e3678SYuan Kang  * relationship of digest job descriptor or first job descriptor after init to
9045e3678SYuan Kang  * shared descriptors:
10045e3678SYuan Kang  *
11045e3678SYuan Kang  * ---------------                     ---------------
12045e3678SYuan Kang  * | JobDesc #1  |-------------------->|  ShareDesc  |
13045e3678SYuan Kang  * | *(packet 1) |                     |  (hashKey)  |
14045e3678SYuan Kang  * ---------------                     | (operation) |
15045e3678SYuan Kang  *                                     ---------------
16045e3678SYuan Kang  *
17045e3678SYuan Kang  * relationship of subsequent job descriptors to shared descriptors:
18045e3678SYuan Kang  *
19045e3678SYuan Kang  * ---------------                     ---------------
20045e3678SYuan Kang  * | JobDesc #2  |-------------------->|  ShareDesc  |
21045e3678SYuan Kang  * | *(packet 2) |      |------------->|  (hashKey)  |
22045e3678SYuan Kang  * ---------------      |    |-------->| (operation) |
23045e3678SYuan Kang  *       .              |    |         | (load ctx2) |
24045e3678SYuan Kang  *       .              |    |         ---------------
25045e3678SYuan Kang  * ---------------      |    |
26045e3678SYuan Kang  * | JobDesc #3  |------|    |
27045e3678SYuan Kang  * | *(packet 3) |           |
28045e3678SYuan Kang  * ---------------           |
29045e3678SYuan Kang  *       .                   |
30045e3678SYuan Kang  *       .                   |
31045e3678SYuan Kang  * ---------------           |
32045e3678SYuan Kang  * | JobDesc #4  |------------
33045e3678SYuan Kang  * | *(packet 4) |
34045e3678SYuan Kang  * ---------------
35045e3678SYuan Kang  *
36045e3678SYuan Kang  * The SharedDesc never changes for a connection unless rekeyed, but
37045e3678SYuan Kang  * each packet will likely be in a different place. So all we need
38045e3678SYuan Kang  * to know to process the packet is where the input is, where the
39045e3678SYuan Kang  * output goes, and what context we want to process with. Context is
40045e3678SYuan Kang  * in the SharedDesc, packet references in the JobDesc.
41045e3678SYuan Kang  *
42045e3678SYuan Kang  * So, a job desc looks like:
43045e3678SYuan Kang  *
44045e3678SYuan Kang  * ---------------------
45045e3678SYuan Kang  * | Header            |
46045e3678SYuan Kang  * | ShareDesc Pointer |
47045e3678SYuan Kang  * | SEQ_OUT_PTR       |
48045e3678SYuan Kang  * | (output buffer)   |
49045e3678SYuan Kang  * | (output length)   |
50045e3678SYuan Kang  * | SEQ_IN_PTR        |
51045e3678SYuan Kang  * | (input buffer)    |
52045e3678SYuan Kang  * | (input length)    |
53045e3678SYuan Kang  * ---------------------
54045e3678SYuan Kang  */
55045e3678SYuan Kang 
56045e3678SYuan Kang #include "compat.h"
57045e3678SYuan Kang 
58045e3678SYuan Kang #include "regs.h"
59045e3678SYuan Kang #include "intern.h"
60045e3678SYuan Kang #include "desc_constr.h"
61045e3678SYuan Kang #include "jr.h"
62045e3678SYuan Kang #include "error.h"
63045e3678SYuan Kang #include "sg_sw_sec4.h"
64045e3678SYuan Kang #include "key_gen.h"
65045e3678SYuan Kang 
66045e3678SYuan Kang #define CAAM_CRA_PRIORITY		3000
67045e3678SYuan Kang 
68045e3678SYuan Kang /* max hash key is max split key size */
69045e3678SYuan Kang #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
70045e3678SYuan Kang 
71045e3678SYuan Kang #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
72045e3678SYuan Kang #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
73045e3678SYuan Kang 
74045e3678SYuan Kang /* length of descriptors text */
75045e3678SYuan Kang #define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
76045e3678SYuan Kang #define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
77045e3678SYuan Kang #define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78045e3678SYuan Kang #define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79045e3678SYuan Kang #define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80045e3678SYuan Kang #define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81045e3678SYuan Kang 
82045e3678SYuan Kang #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
83045e3678SYuan Kang 					 CAAM_MAX_HASH_KEY_SIZE)
84045e3678SYuan Kang #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
85045e3678SYuan Kang 
86045e3678SYuan Kang /* caam context sizes for hashes: running digest + 8 */
87045e3678SYuan Kang #define HASH_MSG_LEN			8
88045e3678SYuan Kang #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
89045e3678SYuan Kang 
90045e3678SYuan Kang #ifdef DEBUG
91045e3678SYuan Kang /* for print_hex_dumps with line references */
92045e3678SYuan Kang #define debug(format, arg...) printk(format, arg)
93045e3678SYuan Kang #else
94045e3678SYuan Kang #define debug(format, arg...)
95045e3678SYuan Kang #endif
96045e3678SYuan Kang 
97cfc6f11bSRuchika Gupta 
98cfc6f11bSRuchika Gupta static struct list_head hash_list;
99cfc6f11bSRuchika Gupta 
/*
 * ahash per-session context
 *
 * Holds the five pre-built shared descriptors (one per ahash operation
 * class), their DMA handles, and the (split) HMAC key material.  The
 * shared-descriptor buffers are cacheline-aligned because they are DMA
 * mapped individually and must not share a cacheline with CPU-written
 * fields.
 */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	/* DMA handles for the descriptors above, mapped DMA_TO_DEVICE */
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	struct device *jrdev;		/* job ring device used for DMA/enqueue */
	u32 alg_type;			/* OP_ALG_ALGSEL_* | AAI for this hash */
	u32 alg_op;			/* algorithm/operation for split key gen */
	u8 key[CAAM_MAX_HASH_KEY_SIZE];	/* MDHA split key (HMAC only) */
	dma_addr_t key_dma;		/* DMA handle of key[] */
	int ctx_len;			/* running-context length for this hash */
	unsigned int split_key_len;	/* real split key length */
	unsigned int split_key_pad_len;	/* split key length padded to 16 */
};
121045e3678SYuan Kang 
/*
 * ahash state (per-request running state)
 *
 * Two bounce buffers (buf_0/buf_1) are double-buffered via current_buf
 * so partial blocks from one update can be carried into the next.
 * caam_ctx holds the hardware running digest plus the 8-byte message
 * length field (see MAX_CTX_LEN).
 */
struct caam_hash_state {
	dma_addr_t buf_dma;	/* DMA handle of the currently mapped buffer */
	dma_addr_t ctx_dma;	/* DMA handle of caam_ctx (0/unset when unmapped) */
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;		/* bytes valid in buf_0 */
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;		/* bytes valid in buf_1 */
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	/* per-state dispatch: set once hmac_init picks update/final/finup */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;	/* which of buf_0/buf_1 is active */
};
136045e3678SYuan Kang 
/*
 * Serialized form of caam_hash_state used by .export/.import:
 * only the active bounce buffer and the hardware context survive,
 * plus the operation dispatch pointers.
 */
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];	/* active partial-block buffer */
	u8 caam_ctx[MAX_CTX_LEN];		/* hardware running context */
	int buflen;				/* bytes valid in buf */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
1455ec90831SRussell King 
146045e3678SYuan Kang /* Common job descriptor seq in/out ptr routines */
147045e3678SYuan Kang 
/*
 * Map state->caam_ctx, and append seq_out_ptr command that points to it.
 *
 * Returns 0 on success, -ENOMEM if the DMA mapping failed.  Note that on
 * failure state->ctx_dma keeps the error cookie; ahash_unmap_ctx() only
 * unmaps a non-zero ctx_dma, so callers must not submit the descriptor
 * after an error here.
 */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	/* Hardware writes the running context back to caam_ctx */
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
164045e3678SYuan Kang 
165045e3678SYuan Kang /* Map req->result, and append seq_out_ptr command that points to it */
166045e3678SYuan Kang static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
167045e3678SYuan Kang 						u8 *result, int digestsize)
168045e3678SYuan Kang {
169045e3678SYuan Kang 	dma_addr_t dst_dma;
170045e3678SYuan Kang 
171045e3678SYuan Kang 	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
172045e3678SYuan Kang 	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
173045e3678SYuan Kang 
174045e3678SYuan Kang 	return dst_dma;
175045e3678SYuan Kang }
176045e3678SYuan Kang 
/*
 * Map current buffer in state and put it in link table.
 *
 * NOTE(review): buf_dma is not checked with dma_mapping_error() before
 * being written into the sec4 link table — on mapping failure the error
 * cookie ends up in the S/G entry.  Callers appear to rely on checking
 * the returned handle later (see try_buf_map_to_sec4_sg); confirm before
 * changing.
 */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}
189045e3678SYuan Kang 
190045e3678SYuan Kang /*
191045e3678SYuan Kang  * Only put buffer in link table if it contains data, which is possible,
192045e3678SYuan Kang  * since a buffer has previously been used, and needs to be unmapped,
193045e3678SYuan Kang  */
194045e3678SYuan Kang static inline dma_addr_t
195045e3678SYuan Kang try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
196045e3678SYuan Kang 		       u8 *buf, dma_addr_t buf_dma, int buflen,
197045e3678SYuan Kang 		       int last_buflen)
198045e3678SYuan Kang {
199045e3678SYuan Kang 	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
200045e3678SYuan Kang 		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
201045e3678SYuan Kang 	if (buflen)
202045e3678SYuan Kang 		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
203045e3678SYuan Kang 	else
204045e3678SYuan Kang 		buf_dma = 0;
205045e3678SYuan Kang 
206045e3678SYuan Kang 	return buf_dma;
207045e3678SYuan Kang }
208045e3678SYuan Kang 
/*
 * Map state->caam_ctx, and add it to link table.
 *
 * @flag selects the DMA direction (DMA_TO_DEVICE, DMA_FROM_DEVICE or
 * DMA_BIDIRECTIONAL depending on whether the context is read, written
 * or both by the job).  Returns 0 on success, -ENOMEM on mapping
 * failure; on failure state->ctx_dma holds the error cookie.
 */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
224045e3678SYuan Kang 
/* Common shared descriptor commands */

/*
 * Append the MDHA split key as immediate descriptor data, destined for
 * the class 2 (hash) engine.  The key is stored encrypted (KEY_ENC) at
 * its padded length but declared at its real split-key length.
 */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
232045e3678SYuan Kang 
233045e3678SYuan Kang /* Append key if it has been set */
234045e3678SYuan Kang static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
235045e3678SYuan Kang {
236045e3678SYuan Kang 	u32 *key_jump_cmd;
237045e3678SYuan Kang 
23861bb86bbSKim Phillips 	init_sh_desc(desc, HDR_SHARE_SERIAL);
239045e3678SYuan Kang 
240045e3678SYuan Kang 	if (ctx->split_key_len) {
241045e3678SYuan Kang 		/* Skip if already shared */
242045e3678SYuan Kang 		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
243045e3678SYuan Kang 					   JUMP_COND_SHRD);
244045e3678SYuan Kang 
245045e3678SYuan Kang 		append_key_ahash(desc, ctx);
246045e3678SYuan Kang 
247045e3678SYuan Kang 		set_jump_tgt_here(desc, key_jump_cmd);
248045e3678SYuan Kang 	}
249045e3678SYuan Kang 
250045e3678SYuan Kang 	/* Propagate errors from shared to job descriptor */
251045e3678SYuan Kang 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
252045e3678SYuan Kang }
253045e3678SYuan Kang 
/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result.
 *
 * @digestsize is the number of context bytes to store at the end: the
 * full running context for update-type descriptors, or the digest size
 * for final/digest-type descriptors.
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read: VSIL = SIL + 0 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes into the class 2 (hash) engine */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes to the seq out pointer */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
272045e3678SYuan Kang 
/*
 * For ahash update, final and finup, import context, read and write to seqout.
 *
 * Builds a complete shared descriptor: (optional) key load, restore of
 * the software-held running context into the class 2 engine, the hash
 * operation itself, then the data load/store tail.
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
294045e3678SYuan Kang 
/*
 * For ahash firsts and digest, read and write to seqout.
 *
 * Same as ahash_ctx_data_to_out() except no prior context is imported:
 * the operation starts from the algorithm's initial state (first/digest).
 */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
309045e3678SYuan Kang 
/*
 * Build and DMA-map the five shared descriptors (update, update_first,
 * final, finup, digest) for this transform.  Called at cra_init and
 * again from setkey once the split key is known.
 *
 * Returns 0 on success, -ENOMEM if any descriptor mapping fails.
 *
 * NOTE(review): on a mapping failure part-way through, descriptors
 * mapped earlier in this function are left mapped; presumably the
 * cra_exit path unmaps whatever is valid — confirm before adding
 * unwind code here (unmapping both places would double-unmap).
 */
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	/* Keyed (HMAC) transforms use the precomputed split key */
	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor: INIT, writes full context */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor: FINALIZE from imported context */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor: same shape as final */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor: one-shot INITFINAL */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
426045e3678SYuan Kang 
/*
 * Derive the MDHA split key from @key_in via the key_gen helper, storing
 * the result (padded to ctx->split_key_pad_len) in ctx->key.
 * Returns 0 on success or a negative error from gen_split_key().
 */
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			       ctx->split_key_pad_len, key_in, keylen,
			       ctx->alg_op);
}
434045e3678SYuan Kang 
435045e3678SYuan Kang /* Digest hash size if it is too large */
43666b3e887SKim Phillips static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
437045e3678SYuan Kang 			   u32 *keylen, u8 *key_out, u32 digestsize)
438045e3678SYuan Kang {
439045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
440045e3678SYuan Kang 	u32 *desc;
441045e3678SYuan Kang 	struct split_key_result result;
442045e3678SYuan Kang 	dma_addr_t src_dma, dst_dma;
443045e3678SYuan Kang 	int ret = 0;
444045e3678SYuan Kang 
4459c23b7d3SVakul Garg 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
4462af8f4a2SKim Phillips 	if (!desc) {
4472af8f4a2SKim Phillips 		dev_err(jrdev, "unable to allocate key input memory\n");
4482af8f4a2SKim Phillips 		return -ENOMEM;
4492af8f4a2SKim Phillips 	}
450045e3678SYuan Kang 
451045e3678SYuan Kang 	init_job_desc(desc, 0);
452045e3678SYuan Kang 
453045e3678SYuan Kang 	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
454045e3678SYuan Kang 				 DMA_TO_DEVICE);
455045e3678SYuan Kang 	if (dma_mapping_error(jrdev, src_dma)) {
456045e3678SYuan Kang 		dev_err(jrdev, "unable to map key input memory\n");
457045e3678SYuan Kang 		kfree(desc);
458045e3678SYuan Kang 		return -ENOMEM;
459045e3678SYuan Kang 	}
460045e3678SYuan Kang 	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
461045e3678SYuan Kang 				 DMA_FROM_DEVICE);
462045e3678SYuan Kang 	if (dma_mapping_error(jrdev, dst_dma)) {
463045e3678SYuan Kang 		dev_err(jrdev, "unable to map key output memory\n");
464045e3678SYuan Kang 		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
465045e3678SYuan Kang 		kfree(desc);
466045e3678SYuan Kang 		return -ENOMEM;
467045e3678SYuan Kang 	}
468045e3678SYuan Kang 
469045e3678SYuan Kang 	/* Job descriptor to perform unkeyed hash on key_in */
470045e3678SYuan Kang 	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
471045e3678SYuan Kang 			 OP_ALG_AS_INITFINAL);
472045e3678SYuan Kang 	append_seq_in_ptr(desc, src_dma, *keylen, 0);
473045e3678SYuan Kang 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
474045e3678SYuan Kang 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
475045e3678SYuan Kang 	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
476045e3678SYuan Kang 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
477045e3678SYuan Kang 			 LDST_SRCDST_BYTE_CONTEXT);
478045e3678SYuan Kang 
479045e3678SYuan Kang #ifdef DEBUG
480514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
481045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
482514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
483045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
484045e3678SYuan Kang #endif
485045e3678SYuan Kang 
486045e3678SYuan Kang 	result.err = 0;
487045e3678SYuan Kang 	init_completion(&result.completion);
488045e3678SYuan Kang 
489045e3678SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
490045e3678SYuan Kang 	if (!ret) {
491045e3678SYuan Kang 		/* in progress */
492045e3678SYuan Kang 		wait_for_completion_interruptible(&result.completion);
493045e3678SYuan Kang 		ret = result.err;
494045e3678SYuan Kang #ifdef DEBUG
495514df281SAlex Porosanu 		print_hex_dump(KERN_ERR,
496514df281SAlex Porosanu 			       "digested key@"__stringify(__LINE__)": ",
497045e3678SYuan Kang 			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
498045e3678SYuan Kang 			       digestsize, 1);
499045e3678SYuan Kang #endif
500045e3678SYuan Kang 	}
501045e3678SYuan Kang 	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
502045e3678SYuan Kang 	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
503045e3678SYuan Kang 
504e11aa9f1SHoria Geanta 	*keylen = digestsize;
505e11aa9f1SHoria Geanta 
506045e3678SYuan Kang 	kfree(desc);
507045e3678SYuan Kang 
508045e3678SYuan Kang 	return ret;
509045e3678SYuan Kang }
510045e3678SYuan Kang 
511045e3678SYuan Kang static int ahash_setkey(struct crypto_ahash *ahash,
512045e3678SYuan Kang 			const u8 *key, unsigned int keylen)
513045e3678SYuan Kang {
514045e3678SYuan Kang 	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
515045e3678SYuan Kang 	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
516045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
517045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
518045e3678SYuan Kang 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
519045e3678SYuan Kang 	int digestsize = crypto_ahash_digestsize(ahash);
520045e3678SYuan Kang 	int ret = 0;
521045e3678SYuan Kang 	u8 *hashed_key = NULL;
522045e3678SYuan Kang 
523045e3678SYuan Kang #ifdef DEBUG
524045e3678SYuan Kang 	printk(KERN_ERR "keylen %d\n", keylen);
525045e3678SYuan Kang #endif
526045e3678SYuan Kang 
527045e3678SYuan Kang 	if (keylen > blocksize) {
528045e3678SYuan Kang 		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
529045e3678SYuan Kang 				     GFP_DMA);
530045e3678SYuan Kang 		if (!hashed_key)
531045e3678SYuan Kang 			return -ENOMEM;
532045e3678SYuan Kang 		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
533045e3678SYuan Kang 				      digestsize);
534045e3678SYuan Kang 		if (ret)
535045e3678SYuan Kang 			goto badkey;
536045e3678SYuan Kang 		key = hashed_key;
537045e3678SYuan Kang 	}
538045e3678SYuan Kang 
539045e3678SYuan Kang 	/* Pick class 2 key length from algorithm submask */
540045e3678SYuan Kang 	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
541045e3678SYuan Kang 				      OP_ALG_ALGSEL_SHIFT] * 2;
542045e3678SYuan Kang 	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
543045e3678SYuan Kang 
544045e3678SYuan Kang #ifdef DEBUG
545045e3678SYuan Kang 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
546045e3678SYuan Kang 	       ctx->split_key_len, ctx->split_key_pad_len);
547514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
548045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
549045e3678SYuan Kang #endif
550045e3678SYuan Kang 
551045e3678SYuan Kang 	ret = gen_split_hash_key(ctx, key, keylen);
552045e3678SYuan Kang 	if (ret)
553045e3678SYuan Kang 		goto badkey;
554045e3678SYuan Kang 
555045e3678SYuan Kang 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
556045e3678SYuan Kang 				      DMA_TO_DEVICE);
557045e3678SYuan Kang 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
558045e3678SYuan Kang 		dev_err(jrdev, "unable to map key i/o memory\n");
5593d67be27SHoria Geanta 		ret = -ENOMEM;
5603d67be27SHoria Geanta 		goto map_err;
561045e3678SYuan Kang 	}
562045e3678SYuan Kang #ifdef DEBUG
563514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
564045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
565045e3678SYuan Kang 		       ctx->split_key_pad_len, 1);
566045e3678SYuan Kang #endif
567045e3678SYuan Kang 
568045e3678SYuan Kang 	ret = ahash_set_sh_desc(ahash);
569045e3678SYuan Kang 	if (ret) {
570045e3678SYuan Kang 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
571045e3678SYuan Kang 				 DMA_TO_DEVICE);
572045e3678SYuan Kang 	}
573045e3678SYuan Kang 
5743d67be27SHoria Geanta map_err:
575045e3678SYuan Kang 	kfree(hashed_key);
576045e3678SYuan Kang 	return ret;
577045e3678SYuan Kang badkey:
578045e3678SYuan Kang 	kfree(hashed_key);
579045e3678SYuan Kang 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
580045e3678SYuan Kang 	return -EINVAL;
581045e3678SYuan Kang }
582045e3678SYuan Kang 
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result (0 when unused)
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist (0 when src unmapped)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space (0 when no table)
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *	     cacheline-aligned since it is DMA'd to the device
 * @sec4_sg: h/w link table (flexible trailing array)
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
600045e3678SYuan Kang 
601045e3678SYuan Kang static inline void ahash_unmap(struct device *dev,
602045e3678SYuan Kang 			struct ahash_edesc *edesc,
603045e3678SYuan Kang 			struct ahash_request *req, int dst_len)
604045e3678SYuan Kang {
605045e3678SYuan Kang 	if (edesc->src_nents)
60613fb8fd7SLABBE Corentin 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
607045e3678SYuan Kang 	if (edesc->dst_dma)
608045e3678SYuan Kang 		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
609045e3678SYuan Kang 
610045e3678SYuan Kang 	if (edesc->sec4_sg_bytes)
611045e3678SYuan Kang 		dma_unmap_single(dev, edesc->sec4_sg_dma,
612045e3678SYuan Kang 				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
613045e3678SYuan Kang }
614045e3678SYuan Kang 
615045e3678SYuan Kang static inline void ahash_unmap_ctx(struct device *dev,
616045e3678SYuan Kang 			struct ahash_edesc *edesc,
617045e3678SYuan Kang 			struct ahash_request *req, int dst_len, u32 flag)
618045e3678SYuan Kang {
619045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
620045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
621045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
622045e3678SYuan Kang 
623045e3678SYuan Kang 	if (state->ctx_dma)
624045e3678SYuan Kang 		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
625045e3678SYuan Kang 	ahash_unmap(dev, edesc, req, dst_len);
626045e3678SYuan Kang }
627045e3678SYuan Kang 
/*
 * Job ring completion callback for digest-type jobs: unmap the source,
 * result and link table, free the extended descriptor and complete the
 * request towards the crypto API.
 */
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* hw_desc is embedded in the edesc; step back to the container */
	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	/*
	 * NOTE(review): err is the raw CAAM status word, not a -errno;
	 * confirm that completion callbacks tolerate this encoding.
	 */
	req->base.complete(&req->base, err);
}
662045e3678SYuan Kang 
/*
 * Completion callback for intermediate "update" jobs: the running
 * context was mapped DMA_BIDIRECTIONAL (read and written back by the
 * CAAM engine), so unmap it with that direction before completing.
 */
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* hw_desc is embedded in the edesc; step back to the container */
	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
697045e3678SYuan Kang 
/*
 * Completion callback for final/finup jobs: the context was an input
 * only (mapped DMA_TO_DEVICE) and the digest-sized result buffer is
 * unmapped along with it.
 */
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* hw_desc is embedded in the edesc; step back to the container */
	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
732045e3678SYuan Kang 
/*
 * Completion callback for jobs whose output is the running context
 * (mapped DMA_FROM_DEVICE, i.e. written by the CAAM engine).
 */
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* hw_desc is embedded in the edesc; step back to the container */
	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
767045e3678SYuan Kang 
768*5588d039SRussell King /*
769*5588d039SRussell King  * Allocate an enhanced descriptor, which contains the hardware descriptor
770*5588d039SRussell King  * and space for hardware scatter table containing sg_num entries.
771*5588d039SRussell King  */
772*5588d039SRussell King static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
773*5588d039SRussell King 					     int sg_num, gfp_t flags)
774*5588d039SRussell King {
775*5588d039SRussell King 	struct ahash_edesc *edesc;
776*5588d039SRussell King 	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
777*5588d039SRussell King 
778*5588d039SRussell King 	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
779*5588d039SRussell King 	if (!edesc) {
780*5588d039SRussell King 		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
781*5588d039SRussell King 		return NULL;
782*5588d039SRussell King 	}
783*5588d039SRussell King 
784*5588d039SRussell King 	return edesc;
785*5588d039SRussell King }
786*5588d039SRussell King 
/*
 * submit update job descriptor
 *
 * Buffers incoming data and, once at least one full block is available,
 * enqueues a job hashing (context || buffered bytes || req->src) and
 * writing the updated running context back.  Returns -EINPROGRESS when
 * a job was enqueued, 0 when data was only buffered, or a -errno.
 */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	/* double-buffering: "buf" holds leftover bytes from the previous
	 * update, "next_buf" collects the leftover from this one
	 */
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	/* only hash whole blocks; the remainder is carried in next_buf */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* one link table entry for the context, one for buffered data */
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		/*
		 * NOTE(review): "desc" is still uninitialized here (it is
		 * only assigned below); this relies on ctx_map_to_sec4_sg()
		 * ignoring its first argument - confirm against its
		 * definition.
		 */
		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto err;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			/* stash the unaligned tail for the next update */
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			/* no source data: mark the previous entry as final */
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		/* swap the roles of buf_0/buf_1 for the next update */
		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto err;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto err;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		/* less than one block total: just append to the buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;

 err:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
926045e3678SYuan Kang 
/*
 * Finalize a hash: enqueue a job hashing (context || buffered bytes)
 * and writing the digest to req->result.  Returns -EINPROGRESS on
 * success or a -errno on failure.
 */
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* one link table entry for the context, one for buffered data */
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, flags);
	if (!edesc)
		return -ENOMEM;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto err;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* last entry terminates the input link table */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto err;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto err;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto err;

	return -EINPROGRESS;

err:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
1008045e3678SYuan Kang 
/*
 * Final+update in one pass: enqueue a job hashing (context || buffered
 * bytes || req->src) and writing the digest to req->result.  Returns
 * -EINPROGRESS on success or a -errno on failure.
 */
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* one link table entry for the context, one for buffered data */
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto err;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	sg_to_sec4_sg_last(req->src, mapped_nents,
			   edesc->sec4_sg + sec4_sg_src_index, 0);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto err;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto err;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto err;

	return -EINPROGRESS;

err:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
1113045e3678SYuan Kang 
1114045e3678SYuan Kang static int ahash_digest(struct ahash_request *req)
1115045e3678SYuan Kang {
1116045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1117045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1118045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
1119045e3678SYuan Kang 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1120045e3678SYuan Kang 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1121045e3678SYuan Kang 	u32 *sh_desc = ctx->sh_desc_digest, *desc;
1122045e3678SYuan Kang 	dma_addr_t ptr = ctx->sh_desc_digest_dma;
1123045e3678SYuan Kang 	int digestsize = crypto_ahash_digestsize(ahash);
1124bc13c69eSRussell King 	int src_nents, mapped_nents, sec4_sg_bytes;
1125045e3678SYuan Kang 	dma_addr_t src_dma;
1126045e3678SYuan Kang 	struct ahash_edesc *edesc;
1127045e3678SYuan Kang 	int ret = 0;
1128045e3678SYuan Kang 	u32 options;
1129045e3678SYuan Kang 	int sh_len;
1130045e3678SYuan Kang 
11313d5a2db6SRussell King 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1132f9970c28SLABBE Corentin 	if (src_nents < 0) {
1133f9970c28SLABBE Corentin 		dev_err(jrdev, "Invalid number of src SG.\n");
1134f9970c28SLABBE Corentin 		return src_nents;
1135f9970c28SLABBE Corentin 	}
1136bc13c69eSRussell King 
1137bc13c69eSRussell King 	if (src_nents) {
1138bc13c69eSRussell King 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1139bc13c69eSRussell King 					  DMA_TO_DEVICE);
1140bc13c69eSRussell King 		if (!mapped_nents) {
1141bc13c69eSRussell King 			dev_err(jrdev, "unable to map source for DMA\n");
1142bc13c69eSRussell King 			return -ENOMEM;
1143bc13c69eSRussell King 		}
1144bc13c69eSRussell King 	} else {
1145bc13c69eSRussell King 		mapped_nents = 0;
1146bc13c69eSRussell King 	}
1147bc13c69eSRussell King 
1148bc13c69eSRussell King 	if (mapped_nents > 1)
1149bc13c69eSRussell King 		sec4_sg_bytes = mapped_nents * sizeof(struct sec4_sg_entry);
11503d5a2db6SRussell King 	else
11513d5a2db6SRussell King 		sec4_sg_bytes = 0;
1152045e3678SYuan Kang 
1153045e3678SYuan Kang 	/* allocate space for base edesc and hw desc commands, link tables */
1154*5588d039SRussell King 	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
1155*5588d039SRussell King 				  flags);
1156045e3678SYuan Kang 	if (!edesc) {
1157bc13c69eSRussell King 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1158045e3678SYuan Kang 		return -ENOMEM;
1159045e3678SYuan Kang 	}
1160343e44b1SRussell King 
116145e9af78SHoria Geanta 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1162045e3678SYuan Kang 	edesc->src_nents = src_nents;
1163045e3678SYuan Kang 
1164045e3678SYuan Kang 	sh_len = desc_len(sh_desc);
1165045e3678SYuan Kang 	desc = edesc->hw_desc;
1166045e3678SYuan Kang 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1167045e3678SYuan Kang 
11683d5a2db6SRussell King 	if (src_nents > 1) {
1169bc13c69eSRussell King 		sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg, 0);
11701da2be33SRuchika Gupta 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
11711da2be33SRuchika Gupta 					    sec4_sg_bytes, DMA_TO_DEVICE);
1172ce572085SHoria Geanta 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1173ce572085SHoria Geanta 			dev_err(jrdev, "unable to map S/G table\n");
117432686d34SRussell King 			ahash_unmap(jrdev, edesc, req, digestsize);
117532686d34SRussell King 			kfree(edesc);
1176ce572085SHoria Geanta 			return -ENOMEM;
1177ce572085SHoria Geanta 		}
1178045e3678SYuan Kang 		src_dma = edesc->sec4_sg_dma;
1179045e3678SYuan Kang 		options = LDST_SGF;
1180045e3678SYuan Kang 	} else {
1181045e3678SYuan Kang 		src_dma = sg_dma_address(req->src);
1182045e3678SYuan Kang 		options = 0;
1183045e3678SYuan Kang 	}
1184045e3678SYuan Kang 	append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1185045e3678SYuan Kang 
1186045e3678SYuan Kang 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1187045e3678SYuan Kang 						digestsize);
1188ce572085SHoria Geanta 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1189ce572085SHoria Geanta 		dev_err(jrdev, "unable to map dst\n");
119032686d34SRussell King 		ahash_unmap(jrdev, edesc, req, digestsize);
119132686d34SRussell King 		kfree(edesc);
1192ce572085SHoria Geanta 		return -ENOMEM;
1193ce572085SHoria Geanta 	}
1194045e3678SYuan Kang 
1195045e3678SYuan Kang #ifdef DEBUG
1196514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1197045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1198045e3678SYuan Kang #endif
1199045e3678SYuan Kang 
1200045e3678SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1201045e3678SYuan Kang 	if (!ret) {
1202045e3678SYuan Kang 		ret = -EINPROGRESS;
1203045e3678SYuan Kang 	} else {
1204045e3678SYuan Kang 		ahash_unmap(jrdev, edesc, req, digestsize);
1205045e3678SYuan Kang 		kfree(edesc);
1206045e3678SYuan Kang 	}
1207045e3678SYuan Kang 
1208045e3678SYuan Kang 	return ret;
1209045e3678SYuan Kang }
1210045e3678SYuan Kang 
/*
 * submit ahash final if it the first job descriptor
 *
 * Final step when no hardware context has been established yet: the only
 * data left to hash is what sits in the software ping-pong buffer, so the
 * whole digest can be produced with the "digest" shared descriptor in one
 * job.  Writes the digest to req->result.
 *
 * Returns -EINPROGRESS when the job was enqueued (completion via
 * ahash_done), -ENOMEM on allocation/DMA-mapping failure, or the
 * job-ring enqueue error.
 */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	/* sleeping allocations only if the caller said it may sleep/backlog */
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	/* currently active side of the ping-pong buffer and its fill level */
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	/* 0 S/G entries: input is the single contiguous buffer, no table */
	edesc = ahash_edesc_alloc(ctx, 0, flags);
	if (!edesc)
		return -ENOMEM;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	/*
	 * NOTE(review): buflen may be 0 here (e.g. final right after init);
	 * confirm dma_map_single() with a zero length is safe on all
	 * supported platforms.
	 */
	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	/* SEQ IN: the buffered residue is the entire input */
	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	/* SEQ OUT: digest goes straight to the caller's result buffer */
	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}
	/* no scatterlist source was mapped for this job */
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		/* enqueue failed: undo mappings, drop the descriptor */
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
1273045e3678SYuan Kang 
/* submit ahash update if it the first job descriptor after update */
/*
 * Accumulates data while no hardware context exists yet.  Sub-blocksize
 * residue is kept in the software ping-pong buffer; once at least one
 * full block (buffered + new bytes) is available, a job using the
 * "update_first" shared descriptor hashes it and the state machine is
 * switched to the *_ctx handlers for subsequent requests.
 *
 * Returns -EINPROGRESS on successful enqueue, 0 when everything was
 * merely buffered, or a negative errno on failure.
 */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	/* active side holds already-buffered bytes; the other side will
	 * receive the new residue before the buffers are flipped below */
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int ret = 0;
	int sh_len;

	/* residue = total bytes modulo block size; hash the rest now */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		/* only the part of req->src consumed this round
		 * (req->nbytes minus the residue held back) */
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* 1 extra entry for the software buffer ahead of the src
		 * scatterlist entries */
		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		/* S/G table: [0] = buffered bytes, [1..] = request data */
		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			/* stash the residue (bytes past what is hashed now)
			 * into the inactive buffer */
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		/* flip ping-pong buffers: next_buf becomes the active one */
		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto err;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		/* running context is written out so the *_ctx handlers can
		 * resume from it */
		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto err;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto err;

		ret = -EINPROGRESS;
		/* a context now exists: route follow-ups to the ctx variants */
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* less than a block in total: just append to the buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;

err:
	/* undo src/S-G/context mappings and free the descriptor */
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
1403045e3678SYuan Kang 
/* submit ahash finup if it the first job descriptor after update */
/*
 * One-shot finup when no hardware context exists: hashes the buffered
 * residue plus all of req->src with the "digest" shared descriptor and
 * writes the final digest to req->result.  No context is saved since
 * this completes the hash.
 *
 * Returns -EINPROGRESS on successful enqueue, or a negative errno.
 */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	/* active buffer side and both fill levels (last_buflen feeds the
	 * try_buf_map helper below) */
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int sh_len;
	int ret = 0;

	/* the whole request is consumed in this final job */
	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	/* S/G entry [0]: buffered residue (reuses an existing mapping when
	 * the helper decides it can) */
	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	/* S/G entries [1..]: the request's scatterlist */
	sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + 1, 0);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	/* total input = buffered bytes + request bytes, via the S/G table */
	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			       req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
1502045e3678SYuan Kang 
/* submit first update job descriptor after init */
/*
 * Very first update after ahash_init(): there is no buffered data and no
 * context yet.  Full blocks are hashed via the "update_first" shared
 * descriptor (single segment fed directly, multiple segments via an S/G
 * table); the sub-blocksize tail is copied into the software buffer.  On
 * a successful enqueue the state machine moves to the *_ctx handlers,
 * otherwise (all data buffered) to the *_no_ctx handlers.
 */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents, mapped_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* keep the sub-blocksize tail back; hash only whole blocks */
	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}
		/* a single mapped segment can be fed directly, no table */
		if (mapped_nents > 1)
			sec4_sg_bytes = mapped_nents *
					sizeof(struct sec4_sg_entry);
		else
			sec4_sg_bytes = 0;

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		/*
		 * NOTE(review): this branch tests src_nents > 1 while the
		 * allocation above keyed off mapped_nents > 1; if dma_map_sg()
		 * coalesced multiple entries into one mapping the two
		 * disagree — confirm intended behavior.
		 */
		if (src_nents > 1) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto err;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			/* save the tail for the next update/final */
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		/* write the running context out for the *_ctx follow-ups */
		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto err;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto err;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* everything fits in the buffer: stay context-less */
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;

err:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
1636045e3678SYuan Kang 
/*
 * First finup after init: there is no prior state to merge, so this is
 * exactly a one-shot digest of the request.
 */
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
1641045e3678SYuan Kang 
1642045e3678SYuan Kang static int ahash_init(struct ahash_request *req)
1643045e3678SYuan Kang {
1644045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1645045e3678SYuan Kang 
1646045e3678SYuan Kang 	state->update = ahash_update_first;
1647045e3678SYuan Kang 	state->finup = ahash_finup_first;
1648045e3678SYuan Kang 	state->final = ahash_final_no_ctx;
1649045e3678SYuan Kang 
1650045e3678SYuan Kang 	state->current_buf = 0;
1651de0e35ecSHoria Geanta 	state->buf_dma = 0;
16526fd4b156SSteve Cornelius 	state->buflen_0 = 0;
16536fd4b156SSteve Cornelius 	state->buflen_1 = 0;
1654045e3678SYuan Kang 
1655045e3678SYuan Kang 	return 0;
1656045e3678SYuan Kang }
1657045e3678SYuan Kang 
1658045e3678SYuan Kang static int ahash_update(struct ahash_request *req)
1659045e3678SYuan Kang {
1660045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1661045e3678SYuan Kang 
1662045e3678SYuan Kang 	return state->update(req);
1663045e3678SYuan Kang }
1664045e3678SYuan Kang 
1665045e3678SYuan Kang static int ahash_finup(struct ahash_request *req)
1666045e3678SYuan Kang {
1667045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1668045e3678SYuan Kang 
1669045e3678SYuan Kang 	return state->finup(req);
1670045e3678SYuan Kang }
1671045e3678SYuan Kang 
1672045e3678SYuan Kang static int ahash_final(struct ahash_request *req)
1673045e3678SYuan Kang {
1674045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1675045e3678SYuan Kang 
1676045e3678SYuan Kang 	return state->final(req);
1677045e3678SYuan Kang }
1678045e3678SYuan Kang 
1679045e3678SYuan Kang static int ahash_export(struct ahash_request *req, void *out)
1680045e3678SYuan Kang {
1681045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
16825ec90831SRussell King 	struct caam_export_state *export = out;
16835ec90831SRussell King 	int len;
16845ec90831SRussell King 	u8 *buf;
1685045e3678SYuan Kang 
16865ec90831SRussell King 	if (state->current_buf) {
16875ec90831SRussell King 		buf = state->buf_1;
16885ec90831SRussell King 		len = state->buflen_1;
16895ec90831SRussell King 	} else {
16905ec90831SRussell King 		buf = state->buf_0;
1691f456cd2dSFabio Estevam 		len = state->buflen_0;
16925ec90831SRussell King 	}
16935ec90831SRussell King 
16945ec90831SRussell King 	memcpy(export->buf, buf, len);
16955ec90831SRussell King 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
16965ec90831SRussell King 	export->buflen = len;
16975ec90831SRussell King 	export->update = state->update;
16985ec90831SRussell King 	export->final = state->final;
16995ec90831SRussell King 	export->finup = state->finup;
1700434b4212SRussell King 
1701045e3678SYuan Kang 	return 0;
1702045e3678SYuan Kang }
1703045e3678SYuan Kang 
/*
 * Restore state previously produced by ahash_export().  The whole state
 * is zeroed first, so the imported data always lands in buf_0/buflen_0
 * with current_buf == 0, regardless of which buffer was active at
 * export time.
 */
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
1719045e3678SYuan Kang 
/*
 * struct caam_hash_template - static description used to register one digest
 * @name:             crypto API name of the unkeyed hash (e.g. "sha1")
 * @driver_name:      driver-specific name of the unkeyed hash
 * @hmac_name:        crypto API name of the HMAC variant
 * @hmac_driver_name: driver-specific name of the HMAC variant
 * @blocksize:        digest block size in bytes
 * @template_ahash:   ahash_alg template (ops, digest size, state size)
 * @alg_type:         CAAM OP_ALG_ALGSEL_* algorithm selector
 * @alg_op:           CAAM operation selector (ALGSEL | OP_ALG_AAI_HMAC)
 */
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
1730045e3678SYuan Kang 
/* ahash descriptors */
/*
 * One template per supported digest (SHA-1/224/256/384/512 and MD5).
 * Every entry carries names for both the unkeyed hash and its HMAC
 * variant, and the CAAM algorithm selectors (alg_type/alg_op) used when
 * building the shared descriptors.  All entries share the same ahash
 * ops and export state size.
 */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		/* MD5 block size expressed in bytes (words * 4) */
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
1867045e3678SYuan Kang 
/*
 * Driver-private wrapper around a registered ahash algorithm.
 *
 * caam_hash_cra_init() recovers this wrapper from the generic
 * crypto_alg via container_of(), so ahash_alg must stay embedded
 * (not a pointer).
 */
struct caam_hash_alg {
	struct list_head entry;		/* node on the driver-global hash_list */
	int alg_type;			/* OP_ALG_ALGSEL_* value from the template */
	int alg_op;			/* ALGSEL | AAI operation bits from the template */
	struct ahash_alg ahash_alg;	/* the crypto API algorithm itself */
};
1874045e3678SYuan Kang 
1875045e3678SYuan Kang static int caam_hash_cra_init(struct crypto_tfm *tfm)
1876045e3678SYuan Kang {
1877045e3678SYuan Kang 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1878045e3678SYuan Kang 	struct crypto_alg *base = tfm->__crt_alg;
1879045e3678SYuan Kang 	struct hash_alg_common *halg =
1880045e3678SYuan Kang 		 container_of(base, struct hash_alg_common, base);
1881045e3678SYuan Kang 	struct ahash_alg *alg =
1882045e3678SYuan Kang 		 container_of(halg, struct ahash_alg, halg);
1883045e3678SYuan Kang 	struct caam_hash_alg *caam_hash =
1884045e3678SYuan Kang 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1885045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1886045e3678SYuan Kang 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1887045e3678SYuan Kang 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1888045e3678SYuan Kang 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1889045e3678SYuan Kang 					 HASH_MSG_LEN + 32,
1890045e3678SYuan Kang 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1891045e3678SYuan Kang 					 HASH_MSG_LEN + 64,
1892045e3678SYuan Kang 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1893045e3678SYuan Kang 	int ret = 0;
1894045e3678SYuan Kang 
1895045e3678SYuan Kang 	/*
1896cfc6f11bSRuchika Gupta 	 * Get a Job ring from Job Ring driver to ensure in-order
1897045e3678SYuan Kang 	 * crypto request processing per tfm
1898045e3678SYuan Kang 	 */
1899cfc6f11bSRuchika Gupta 	ctx->jrdev = caam_jr_alloc();
1900cfc6f11bSRuchika Gupta 	if (IS_ERR(ctx->jrdev)) {
1901cfc6f11bSRuchika Gupta 		pr_err("Job Ring Device allocation for transform failed\n");
1902cfc6f11bSRuchika Gupta 		return PTR_ERR(ctx->jrdev);
1903cfc6f11bSRuchika Gupta 	}
1904045e3678SYuan Kang 	/* copy descriptor header template value */
1905045e3678SYuan Kang 	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1906045e3678SYuan Kang 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1907045e3678SYuan Kang 
1908045e3678SYuan Kang 	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1909045e3678SYuan Kang 				  OP_ALG_ALGSEL_SHIFT];
1910045e3678SYuan Kang 
1911045e3678SYuan Kang 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1912045e3678SYuan Kang 				 sizeof(struct caam_hash_state));
1913045e3678SYuan Kang 
1914045e3678SYuan Kang 	ret = ahash_set_sh_desc(ahash);
1915045e3678SYuan Kang 
1916045e3678SYuan Kang 	return ret;
1917045e3678SYuan Kang }
1918045e3678SYuan Kang 
1919045e3678SYuan Kang static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1920045e3678SYuan Kang {
1921045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1922045e3678SYuan Kang 
1923045e3678SYuan Kang 	if (ctx->sh_desc_update_dma &&
1924045e3678SYuan Kang 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1925045e3678SYuan Kang 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1926045e3678SYuan Kang 				 desc_bytes(ctx->sh_desc_update),
1927045e3678SYuan Kang 				 DMA_TO_DEVICE);
1928045e3678SYuan Kang 	if (ctx->sh_desc_update_first_dma &&
1929045e3678SYuan Kang 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1930045e3678SYuan Kang 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1931045e3678SYuan Kang 				 desc_bytes(ctx->sh_desc_update_first),
1932045e3678SYuan Kang 				 DMA_TO_DEVICE);
1933045e3678SYuan Kang 	if (ctx->sh_desc_fin_dma &&
1934045e3678SYuan Kang 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1935045e3678SYuan Kang 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1936045e3678SYuan Kang 				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1937045e3678SYuan Kang 	if (ctx->sh_desc_digest_dma &&
1938045e3678SYuan Kang 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1939045e3678SYuan Kang 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1940045e3678SYuan Kang 				 desc_bytes(ctx->sh_desc_digest),
1941045e3678SYuan Kang 				 DMA_TO_DEVICE);
1942045e3678SYuan Kang 	if (ctx->sh_desc_finup_dma &&
1943045e3678SYuan Kang 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1944045e3678SYuan Kang 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1945045e3678SYuan Kang 				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1946cfc6f11bSRuchika Gupta 
1947cfc6f11bSRuchika Gupta 	caam_jr_free(ctx->jrdev);
1948045e3678SYuan Kang }
1949045e3678SYuan Kang 
1950045e3678SYuan Kang static void __exit caam_algapi_hash_exit(void)
1951045e3678SYuan Kang {
1952045e3678SYuan Kang 	struct caam_hash_alg *t_alg, *n;
1953045e3678SYuan Kang 
1954cfc6f11bSRuchika Gupta 	if (!hash_list.next)
1955045e3678SYuan Kang 		return;
1956045e3678SYuan Kang 
1957cfc6f11bSRuchika Gupta 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1958045e3678SYuan Kang 		crypto_unregister_ahash(&t_alg->ahash_alg);
1959045e3678SYuan Kang 		list_del(&t_alg->entry);
1960045e3678SYuan Kang 		kfree(t_alg);
1961045e3678SYuan Kang 	}
1962045e3678SYuan Kang }
1963045e3678SYuan Kang 
1964045e3678SYuan Kang static struct caam_hash_alg *
1965cfc6f11bSRuchika Gupta caam_hash_alloc(struct caam_hash_template *template,
1966b0e09baeSYuan Kang 		bool keyed)
1967045e3678SYuan Kang {
1968045e3678SYuan Kang 	struct caam_hash_alg *t_alg;
1969045e3678SYuan Kang 	struct ahash_alg *halg;
1970045e3678SYuan Kang 	struct crypto_alg *alg;
1971045e3678SYuan Kang 
19729c4f9733SFabio Estevam 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1973045e3678SYuan Kang 	if (!t_alg) {
1974cfc6f11bSRuchika Gupta 		pr_err("failed to allocate t_alg\n");
1975045e3678SYuan Kang 		return ERR_PTR(-ENOMEM);
1976045e3678SYuan Kang 	}
1977045e3678SYuan Kang 
1978045e3678SYuan Kang 	t_alg->ahash_alg = template->template_ahash;
1979045e3678SYuan Kang 	halg = &t_alg->ahash_alg;
1980045e3678SYuan Kang 	alg = &halg->halg.base;
1981045e3678SYuan Kang 
1982b0e09baeSYuan Kang 	if (keyed) {
1983b0e09baeSYuan Kang 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1984b0e09baeSYuan Kang 			 template->hmac_name);
1985b0e09baeSYuan Kang 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1986b0e09baeSYuan Kang 			 template->hmac_driver_name);
1987b0e09baeSYuan Kang 	} else {
1988b0e09baeSYuan Kang 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1989b0e09baeSYuan Kang 			 template->name);
1990045e3678SYuan Kang 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1991045e3678SYuan Kang 			 template->driver_name);
1992a0118c8bSRussell King 		t_alg->ahash_alg.setkey = NULL;
1993b0e09baeSYuan Kang 	}
1994045e3678SYuan Kang 	alg->cra_module = THIS_MODULE;
1995045e3678SYuan Kang 	alg->cra_init = caam_hash_cra_init;
1996045e3678SYuan Kang 	alg->cra_exit = caam_hash_cra_exit;
1997045e3678SYuan Kang 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1998045e3678SYuan Kang 	alg->cra_priority = CAAM_CRA_PRIORITY;
1999045e3678SYuan Kang 	alg->cra_blocksize = template->blocksize;
2000045e3678SYuan Kang 	alg->cra_alignmask = 0;
2001045e3678SYuan Kang 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
2002045e3678SYuan Kang 	alg->cra_type = &crypto_ahash_type;
2003045e3678SYuan Kang 
2004045e3678SYuan Kang 	t_alg->alg_type = template->alg_type;
2005045e3678SYuan Kang 	t_alg->alg_op = template->alg_op;
2006045e3678SYuan Kang 
2007045e3678SYuan Kang 	return t_alg;
2008045e3678SYuan Kang }
2009045e3678SYuan Kang 
/*
 * Module init: locate the CAAM controller via the device tree, verify
 * an MD (message digest) block is instantiated, then register the
 * hmac and unkeyed ahash variants of every template the hardware's
 * digest-size limit allows.
 *
 * Registration failures are non-fatal per algorithm (the loop
 * continues), but note that 'err' holds the LAST failure, so the
 * function can return an error even when some algorithms registered
 * successfully.
 */
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	/* the controller node exists under two compatible spellings */
	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	/*
	 * NOTE(review): of_find_device_by_node() takes a reference on the
	 * device that is never dropped on any path below — verify whether
	 * a put_device(ctrldev) is needed here.
	 */
	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			/* allocation failure skips the unkeyed variant too */
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
2110045e3678SYuan Kang 
2111045e3678SYuan Kang module_init(caam_algapi_hash_init);
2112045e3678SYuan Kang module_exit(caam_algapi_hash_exit);
2113045e3678SYuan Kang 
2114045e3678SYuan Kang MODULE_LICENSE("GPL");
2115045e3678SYuan Kang MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
2116045e3678SYuan Kang MODULE_AUTHOR("Freescale Semiconductor - NMG");
2117