xref: /linux/drivers/crypto/caam/caamhash.c (revision d239b10d4ceb986d998779a4ed81824368aca831)
1618b5dc4SHoria Geantă // SPDX-License-Identifier: GPL-2.0+
2045e3678SYuan Kang /*
3045e3678SYuan Kang  * caam - Freescale FSL CAAM support for ahash functions of crypto API
4045e3678SYuan Kang  *
5045e3678SYuan Kang  * Copyright 2011 Freescale Semiconductor, Inc.
6*d239b10dSHoria Geantă  * Copyright 2018 NXP
7045e3678SYuan Kang  *
8045e3678SYuan Kang  * Based on caamalg.c crypto API driver.
9045e3678SYuan Kang  *
10045e3678SYuan Kang  * relationship of digest job descriptor or first job descriptor after init to
11045e3678SYuan Kang  * shared descriptors:
12045e3678SYuan Kang  *
13045e3678SYuan Kang  * ---------------                     ---------------
14045e3678SYuan Kang  * | JobDesc #1  |-------------------->|  ShareDesc  |
15045e3678SYuan Kang  * | *(packet 1) |                     |  (hashKey)  |
16045e3678SYuan Kang  * ---------------                     | (operation) |
17045e3678SYuan Kang  *                                     ---------------
18045e3678SYuan Kang  *
19045e3678SYuan Kang  * relationship of subsequent job descriptors to shared descriptors:
20045e3678SYuan Kang  *
21045e3678SYuan Kang  * ---------------                     ---------------
22045e3678SYuan Kang  * | JobDesc #2  |-------------------->|  ShareDesc  |
23045e3678SYuan Kang  * | *(packet 2) |      |------------->|  (hashKey)  |
24045e3678SYuan Kang  * ---------------      |    |-------->| (operation) |
25045e3678SYuan Kang  *       .              |    |         | (load ctx2) |
26045e3678SYuan Kang  *       .              |    |         ---------------
27045e3678SYuan Kang  * ---------------      |    |
28045e3678SYuan Kang  * | JobDesc #3  |------|    |
29045e3678SYuan Kang  * | *(packet 3) |           |
30045e3678SYuan Kang  * ---------------           |
31045e3678SYuan Kang  *       .                   |
32045e3678SYuan Kang  *       .                   |
33045e3678SYuan Kang  * ---------------           |
34045e3678SYuan Kang  * | JobDesc #4  |------------
35045e3678SYuan Kang  * | *(packet 4) |
36045e3678SYuan Kang  * ---------------
37045e3678SYuan Kang  *
38045e3678SYuan Kang  * The SharedDesc never changes for a connection unless rekeyed, but
39045e3678SYuan Kang  * each packet will likely be in a different place. So all we need
40045e3678SYuan Kang  * to know to process the packet is where the input is, where the
41045e3678SYuan Kang  * output goes, and what context we want to process with. Context is
42045e3678SYuan Kang  * in the SharedDesc, packet references in the JobDesc.
43045e3678SYuan Kang  *
44045e3678SYuan Kang  * So, a job desc looks like:
45045e3678SYuan Kang  *
46045e3678SYuan Kang  * ---------------------
47045e3678SYuan Kang  * | Header            |
48045e3678SYuan Kang  * | ShareDesc Pointer |
49045e3678SYuan Kang  * | SEQ_OUT_PTR       |
50045e3678SYuan Kang  * | (output buffer)   |
51045e3678SYuan Kang  * | (output length)   |
52045e3678SYuan Kang  * | SEQ_IN_PTR        |
53045e3678SYuan Kang  * | (input buffer)    |
54045e3678SYuan Kang  * | (input length)    |
55045e3678SYuan Kang  * ---------------------
56045e3678SYuan Kang  */
57045e3678SYuan Kang 
58045e3678SYuan Kang #include "compat.h"
59045e3678SYuan Kang 
60045e3678SYuan Kang #include "regs.h"
61045e3678SYuan Kang #include "intern.h"
62045e3678SYuan Kang #include "desc_constr.h"
63045e3678SYuan Kang #include "jr.h"
64045e3678SYuan Kang #include "error.h"
65045e3678SYuan Kang #include "sg_sw_sec4.h"
66045e3678SYuan Kang #include "key_gen.h"
670efa7579SHoria Geantă #include "caamhash_desc.h"
68045e3678SYuan Kang 
69045e3678SYuan Kang #define CAAM_CRA_PRIORITY		3000
70045e3678SYuan Kang 
71045e3678SYuan Kang /* max hash key is max split key size */
72045e3678SYuan Kang #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
73045e3678SYuan Kang 
74045e3678SYuan Kang #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
75045e3678SYuan Kang #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
76045e3678SYuan Kang 
77045e3678SYuan Kang #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
78045e3678SYuan Kang 					 CAAM_MAX_HASH_KEY_SIZE)
79045e3678SYuan Kang #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
80045e3678SYuan Kang 
81045e3678SYuan Kang /* caam context sizes for hashes: running digest + 8 */
82045e3678SYuan Kang #define HASH_MSG_LEN			8
83045e3678SYuan Kang #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
84045e3678SYuan Kang 
85045e3678SYuan Kang #ifdef DEBUG
86045e3678SYuan Kang /* for print_hex_dumps with line references */
87045e3678SYuan Kang #define debug(format, arg...) printk(format, arg)
88045e3678SYuan Kang #else
89045e3678SYuan Kang #define debug(format, arg...)
90045e3678SYuan Kang #endif
91045e3678SYuan Kang 
92cfc6f11bSRuchika Gupta 
/* NOTE(review): global list head — presumably the registry of hash algs this driver registers; populated elsewhere in the file, confirm against hash_list users */
static struct list_head hash_list;
94cfc6f11bSRuchika Gupta 
/* ahash per-session context */
struct caam_hash_ctx {
	/*
	 * Shared descriptors for the four ahash operations. Each array is
	 * cacheline-aligned so it can be DMA-synced to the device
	 * independently of its neighbours (see ahash_set_sh_desc()).
	 */
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	/* DMA addresses of the shared descriptors above, in the same order */
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	/* DMA direction used when syncing the shared descriptors */
	enum dma_data_direction dir;
	/* Job ring device servicing this session */
	struct device *jrdev;
	/* (Split) key material referenced by the shared descriptors */
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	/* Size of the running context (digest state + message length) */
	int ctx_len;
	/* Authentication algorithm details (algtype, key length, etc.) */
	struct alginfo adata;
};
111045e3678SYuan Kang 
/* ahash state */
struct caam_hash_state {
	/* DMA address of the currently mapped buf_0/buf_1 (0 if unmapped) */
	dma_addr_t buf_dma;
	/* DMA address of caam_ctx (0 if unmapped) */
	dma_addr_t ctx_dma;
	/*
	 * Double buffer for data not yet submitted to the engine;
	 * current_buf selects the active half (see current_buf()/alt_buf()).
	 */
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	/* Running hash context read/written by the device */
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	/* Per-request operation hooks selected at init time */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	/* 0 => buf_0 is active, nonzero => buf_1 is active */
	int current_buf;
};
126045e3678SYuan Kang 
/* State exported/imported via the crypto API's export()/import() hooks */
struct caam_export_state {
	/* Copy of the active pending-data buffer */
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	/* Copy of the running hash context */
	u8 caam_ctx[MAX_CTX_LEN];
	/* Number of valid bytes in buf */
	int buflen;
	/* Operation hooks to restore on import */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
1355ec90831SRussell King 
1360355d23dSHoria Geantă static inline void switch_buf(struct caam_hash_state *state)
1370355d23dSHoria Geantă {
1380355d23dSHoria Geantă 	state->current_buf ^= 1;
1390355d23dSHoria Geantă }
1400355d23dSHoria Geantă 
1410355d23dSHoria Geantă static inline u8 *current_buf(struct caam_hash_state *state)
1420355d23dSHoria Geantă {
1430355d23dSHoria Geantă 	return state->current_buf ? state->buf_1 : state->buf_0;
1440355d23dSHoria Geantă }
1450355d23dSHoria Geantă 
1460355d23dSHoria Geantă static inline u8 *alt_buf(struct caam_hash_state *state)
1470355d23dSHoria Geantă {
1480355d23dSHoria Geantă 	return state->current_buf ? state->buf_0 : state->buf_1;
1490355d23dSHoria Geantă }
1500355d23dSHoria Geantă 
1510355d23dSHoria Geantă static inline int *current_buflen(struct caam_hash_state *state)
1520355d23dSHoria Geantă {
1530355d23dSHoria Geantă 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
1540355d23dSHoria Geantă }
1550355d23dSHoria Geantă 
1560355d23dSHoria Geantă static inline int *alt_buflen(struct caam_hash_state *state)
1570355d23dSHoria Geantă {
1580355d23dSHoria Geantă 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
1590355d23dSHoria Geantă }
1600355d23dSHoria Geantă 
161045e3678SYuan Kang /* Common job descriptor seq in/out ptr routines */
162045e3678SYuan Kang 
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	/* Map the running context so the device can write the updated state */
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		/* Clear so a later ahash_unmap_ctx() won't unmap a bad handle */
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	/* Make the context the job's SEQ OUT destination */
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
180045e3678SYuan Kang 
181045e3678SYuan Kang /* Map req->result, and append seq_out_ptr command that points to it */
182045e3678SYuan Kang static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
183045e3678SYuan Kang 						u8 *result, int digestsize)
184045e3678SYuan Kang {
185045e3678SYuan Kang 	dma_addr_t dst_dma;
186045e3678SYuan Kang 
187045e3678SYuan Kang 	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
188045e3678SYuan Kang 	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
189045e3678SYuan Kang 
190045e3678SYuan Kang 	return dst_dma;
191045e3678SYuan Kang }
192045e3678SYuan Kang 
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	/* Nothing buffered: leave the link table entry untouched */
	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		/* Clear so ahash_unmap() won't try to unmap a bad handle */
		state->buf_dma = 0;
		return -ENOMEM;
	}

	/* Record the mapped buffer as one h/w scatter/gather entry */
	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}
215045e3678SYuan Kang 
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	/* @flag supplies the DMA direction (TO_DEVICE or BIDIRECTIONAL) */
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		/* Clear so ahash_unmap_ctx() won't unmap a bad handle */
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	/* Record the mapped context as one h/w scatter/gather entry */
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
232045e3678SYuan Kang 
/*
 * (Re)build the four shared descriptors — update, update_first, final and
 * digest — from the current ctx->adata, and sync each one to the device.
 * Always returns 0.
 */
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	/* Descriptors reference the key stored in the session context */
	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
294045e3678SYuan Kang 
/*
 * Digest a key that is too long: run an unkeyed hash of key_in on the
 * device, writing digestsize bytes to key_out and shrinking *keylen to
 * digestsize. Synchronous — waits for job-ring completion.
 * Returns 0 on success or a negative errno.
 */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	/* Room for the hand-built job descriptor: 8 commands + 2 pointers */
	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	/* Store the class 2 context (the digest) to the output sequence */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	/* The caller's key is now the digestsize-byte digest */
	*keylen = digestsize;

	kfree(desc);

	return ret;
}
370045e3678SYuan Kang 
/*
 * crypto_ahash setkey hook: condense over-long keys to digest size, then
 * either store the raw key for DKP (era >= 6) or derive a split key on the
 * device, and finally rebuild the shared descriptors.
 * Returns 0 on success or -EINVAL/-ENOMEM on failure.
 */
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	/* HMAC convention: keys longer than the block size are first hashed */
	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		/* Split key must fit the context's key buffer */
		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
426045e3678SYuan Kang 
427045e3678SYuan Kang /*
428045e3678SYuan Kang  * ahash_edesc - s/w-extended ahash descriptor
429045e3678SYuan Kang  * @dst_dma: physical mapped address of req->result
430045e3678SYuan Kang  * @sec4_sg_dma: physical mapped address of h/w link table
431045e3678SYuan Kang  * @src_nents: number of segments in input scatterlist
432045e3678SYuan Kang  * @sec4_sg_bytes: length of dma mapped sec4_sg space
433045e3678SYuan Kang  * @hw_desc: the h/w job descriptor followed by any referenced link tables
434343e44b1SRussell King  * @sec4_sg: h/w link table
435045e3678SYuan Kang  */
436045e3678SYuan Kang struct ahash_edesc {
437045e3678SYuan Kang 	dma_addr_t dst_dma;
438045e3678SYuan Kang 	dma_addr_t sec4_sg_dma;
439045e3678SYuan Kang 	int src_nents;
440045e3678SYuan Kang 	int sec4_sg_bytes;
441d7b24ed4SRussell King 	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
442343e44b1SRussell King 	struct sec4_sg_entry sec4_sg[0];
443045e3678SYuan Kang };
444045e3678SYuan Kang 
/*
 * Undo the DMA mappings of a completed job: input scatterlist, result
 * buffer (dst_len bytes), h/w link table and the pending-data buffer.
 */
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	/* Unmap the pending-data buffer mapped by buf_map_to_sec4_sg() */
	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}
466045e3678SYuan Kang 
/*
 * Like ahash_unmap(), but additionally unmap the running context using the
 * DMA direction (@flag) it was originally mapped with.
 */
static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		/* Mark unmapped so a second unmap is a no-op */
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}
481045e3678SYuan Kang 
/*
 * Job-ring completion callback for jobs whose only output is req->result
 * (no context to unmap). Unmaps, frees the extended descriptor and
 * completes the request.
 */
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* Recover the s/w-extended descriptor embedding this h/w descriptor */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
515045e3678SYuan Kang 
/*
 * Completion callback for jobs that both read and write the context
 * (mapped DMA_BIDIRECTIONAL). Also flips the double buffer so the next
 * submission uses the alternate half.
 */
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* Recover the s/w-extended descriptor embedding this h/w descriptor */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
550045e3678SYuan Kang 
/*
 * Completion callback for jobs that consumed the context (mapped
 * DMA_TO_DEVICE) and produced a digest-sized result in req->result.
 */
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* Recover the s/w-extended descriptor embedding this h/w descriptor */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
584045e3678SYuan Kang 
/*
 * Completion callback for jobs that wrote a fresh context (mapped
 * DMA_FROM_DEVICE). Also flips the double buffer so the next submission
 * uses the alternate half.
 */
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* Recover the s/w-extended descriptor embedding this h/w descriptor */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
619045e3678SYuan Kang 
6205588d039SRussell King /*
6215588d039SRussell King  * Allocate an enhanced descriptor, which contains the hardware descriptor
6225588d039SRussell King  * and space for hardware scatter table containing sg_num entries.
6235588d039SRussell King  */
6245588d039SRussell King static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
62530a43b44SRussell King 					     int sg_num, u32 *sh_desc,
62630a43b44SRussell King 					     dma_addr_t sh_desc_dma,
62730a43b44SRussell King 					     gfp_t flags)
6285588d039SRussell King {
6295588d039SRussell King 	struct ahash_edesc *edesc;
6305588d039SRussell King 	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
6315588d039SRussell King 
6325588d039SRussell King 	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
6335588d039SRussell King 	if (!edesc) {
6345588d039SRussell King 		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
6355588d039SRussell King 		return NULL;
6365588d039SRussell King 	}
6375588d039SRussell King 
63830a43b44SRussell King 	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
63930a43b44SRussell King 			     HDR_SHARE_DEFER | HDR_REVERSE);
64030a43b44SRussell King 
6415588d039SRussell King 	return edesc;
6425588d039SRussell King }
6435588d039SRussell King 
64465cf164aSRussell King static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
64565cf164aSRussell King 			       struct ahash_edesc *edesc,
64665cf164aSRussell King 			       struct ahash_request *req, int nents,
64765cf164aSRussell King 			       unsigned int first_sg,
64865cf164aSRussell King 			       unsigned int first_bytes, size_t to_hash)
64965cf164aSRussell King {
65065cf164aSRussell King 	dma_addr_t src_dma;
65165cf164aSRussell King 	u32 options;
65265cf164aSRussell King 
65365cf164aSRussell King 	if (nents > 1 || first_sg) {
65465cf164aSRussell King 		struct sec4_sg_entry *sg = edesc->sec4_sg;
65565cf164aSRussell King 		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
65665cf164aSRussell King 
65765cf164aSRussell King 		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
65865cf164aSRussell King 
65965cf164aSRussell King 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
66065cf164aSRussell King 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
66165cf164aSRussell King 			dev_err(ctx->jrdev, "unable to map S/G table\n");
66265cf164aSRussell King 			return -ENOMEM;
66365cf164aSRussell King 		}
66465cf164aSRussell King 
66565cf164aSRussell King 		edesc->sec4_sg_bytes = sgsize;
66665cf164aSRussell King 		edesc->sec4_sg_dma = src_dma;
66765cf164aSRussell King 		options = LDST_SGF;
66865cf164aSRussell King 	} else {
66965cf164aSRussell King 		src_dma = sg_dma_address(req->src);
67065cf164aSRussell King 		options = 0;
67165cf164aSRussell King 	}
67265cf164aSRussell King 
67365cf164aSRussell King 	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
67465cf164aSRussell King 			  options);
67565cf164aSRussell King 
67665cf164aSRussell King 	return 0;
67765cf164aSRussell King }
67865cf164aSRussell King 
/*
 * submit update job descriptor
 *
 * Hash as many whole blocks as are available from the carry buffer plus
 * req->src, loading and storing the running context through the shared
 * descriptor.  Any sub-blocksize remainder is copied into the alternate
 * buffer for the next call.  Returns -EINPROGRESS once the job is
 * enqueued, 0 when everything was merely buffered, or a negative errno.
 */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	/* Remainder below one block is carried over; the rest gets hashed */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* One entry for the context, one more if data is buffered */
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		/* Context is both read and written back by the update job */
		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			/* Stash the trailing remainder for the next update */
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			/* No src data: terminate the table at the buf entry */
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		/* Updated running context is written back in place */
		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		/* Less than one block total: just accumulate in the buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
809045e3678SYuan Kang 
/*
 * Finalize a hash whose running context is already in state->caam_ctx:
 * feed the context plus any buffered bytes through the finalize shared
 * descriptor and have the digest DMA'd into req->result.  Returns
 * -EINPROGRESS on successful enqueue or a negative errno.
 */
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* One S/G entry for the context, one more if bytes are buffered */
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	/* Context is input only here - the job produces a digest, not ctx */
	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
884045e3678SYuan Kang 
/*
 * Finalize a hash over the running context, any buffered bytes and the
 * remaining data in req->src, in one job.  The digest lands in
 * req->result.  Returns -EINPROGRESS on successful enqueue or a
 * negative errno.
 */
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* One S/G entry for the context, one more if bytes are buffered */
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	/* Context is input only - the job produces a digest */
	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	/* Chain req->src after the context/buffer entries in the table */
	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
971045e3678SYuan Kang 
/*
 * One-shot digest: hash all of req->src with the digest shared
 * descriptor, no prior context involved, and DMA the result into
 * req->result.  Returns -EINPROGRESS on successful enqueue or a
 * negative errno.
 */
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	/* No carry buffer in play for a one-shot digest */
	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	/* a single mapped segment needs no S/G table entries at all */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
1050045e3678SYuan Kang 
1051045e3678SYuan Kang /* submit ahash final if it the first job descriptor */
1052045e3678SYuan Kang static int ahash_final_no_ctx(struct ahash_request *req)
1053045e3678SYuan Kang {
1054045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1055045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1056045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1057045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
1058019d62dbSHoria Geantă 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1059019d62dbSHoria Geantă 		       GFP_KERNEL : GFP_ATOMIC;
10600355d23dSHoria Geantă 	u8 *buf = current_buf(state);
10610355d23dSHoria Geantă 	int buflen = *current_buflen(state);
106230a43b44SRussell King 	u32 *desc;
1063045e3678SYuan Kang 	int digestsize = crypto_ahash_digestsize(ahash);
1064045e3678SYuan Kang 	struct ahash_edesc *edesc;
10659e6df0fdSMarkus Elfring 	int ret;
1066045e3678SYuan Kang 
1067045e3678SYuan Kang 	/* allocate space for base edesc and hw desc commands, link tables */
106830a43b44SRussell King 	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
106930a43b44SRussell King 				  ctx->sh_desc_digest_dma, flags);
10705588d039SRussell King 	if (!edesc)
1071045e3678SYuan Kang 		return -ENOMEM;
1072045e3678SYuan Kang 
1073045e3678SYuan Kang 	desc = edesc->hw_desc;
1074045e3678SYuan Kang 
1075045e3678SYuan Kang 	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1076ce572085SHoria Geanta 	if (dma_mapping_error(jrdev, state->buf_dma)) {
1077ce572085SHoria Geanta 		dev_err(jrdev, "unable to map src\n");
107806435f34SMarkus Elfring 		goto unmap;
1079ce572085SHoria Geanta 	}
1080045e3678SYuan Kang 
1081045e3678SYuan Kang 	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1082045e3678SYuan Kang 
1083045e3678SYuan Kang 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1084045e3678SYuan Kang 						digestsize);
1085ce572085SHoria Geanta 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1086ce572085SHoria Geanta 		dev_err(jrdev, "unable to map dst\n");
108706435f34SMarkus Elfring 		goto unmap;
1088ce572085SHoria Geanta 	}
1089045e3678SYuan Kang 
1090045e3678SYuan Kang #ifdef DEBUG
1091514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1092045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1093045e3678SYuan Kang #endif
1094045e3678SYuan Kang 
1095045e3678SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1096045e3678SYuan Kang 	if (!ret) {
1097045e3678SYuan Kang 		ret = -EINPROGRESS;
1098045e3678SYuan Kang 	} else {
1099045e3678SYuan Kang 		ahash_unmap(jrdev, edesc, req, digestsize);
1100045e3678SYuan Kang 		kfree(edesc);
1101045e3678SYuan Kang 	}
1102045e3678SYuan Kang 
1103045e3678SYuan Kang 	return ret;
110406435f34SMarkus Elfring  unmap:
110506435f34SMarkus Elfring 	ahash_unmap(jrdev, edesc, req, digestsize);
110606435f34SMarkus Elfring 	kfree(edesc);
110706435f34SMarkus Elfring 	return -ENOMEM;
110806435f34SMarkus Elfring 
1109045e3678SYuan Kang }
1110045e3678SYuan Kang 
/* submit ahash update if it the first job descriptor after update */
/*
 * First real hashing step of a request stream: there is no prior context
 * to load, so use the "update_first" shared descriptor, which stores the
 * new running context.  After a successful submit, later calls are routed
 * to the *_ctx variants.  Sub-blocksize remainders are carried in the
 * state buffers.  Returns -EINPROGRESS, 0 (buffered only) or -errno.
 */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	/* Remainder below one block is carried over; the rest gets hashed */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* First table entry is the carry buffer, then the src SGs */
		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		/* Stash the trailing remainder for the next update */
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		/* Output of this job is the new running context */
		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		/* A context now exists: subsequent ops must load/store it */
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* Less than one block total: just accumulate in the buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
1235045e3678SYuan Kang 
1236045e3678SYuan Kang /* submit ahash finup if it the first job descriptor after update */
1237045e3678SYuan Kang static int ahash_finup_no_ctx(struct ahash_request *req)
1238045e3678SYuan Kang {
1239045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1240045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1241045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1242045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
1243019d62dbSHoria Geantă 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1244019d62dbSHoria Geantă 		       GFP_KERNEL : GFP_ATOMIC;
12450355d23dSHoria Geantă 	int buflen = *current_buflen(state);
124630a43b44SRussell King 	u32 *desc;
1247bc13c69eSRussell King 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1248045e3678SYuan Kang 	int digestsize = crypto_ahash_digestsize(ahash);
1249045e3678SYuan Kang 	struct ahash_edesc *edesc;
12509e6df0fdSMarkus Elfring 	int ret;
1251045e3678SYuan Kang 
125213fb8fd7SLABBE Corentin 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1253f9970c28SLABBE Corentin 	if (src_nents < 0) {
1254f9970c28SLABBE Corentin 		dev_err(jrdev, "Invalid number of src SG.\n");
1255f9970c28SLABBE Corentin 		return src_nents;
1256f9970c28SLABBE Corentin 	}
1257bc13c69eSRussell King 
1258bc13c69eSRussell King 	if (src_nents) {
1259bc13c69eSRussell King 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1260bc13c69eSRussell King 					  DMA_TO_DEVICE);
1261bc13c69eSRussell King 		if (!mapped_nents) {
1262bc13c69eSRussell King 			dev_err(jrdev, "unable to DMA map source\n");
1263bc13c69eSRussell King 			return -ENOMEM;
1264bc13c69eSRussell King 		}
1265bc13c69eSRussell King 	} else {
1266bc13c69eSRussell King 		mapped_nents = 0;
1267bc13c69eSRussell King 	}
1268bc13c69eSRussell King 
1269045e3678SYuan Kang 	sec4_sg_src_index = 2;
1270bc13c69eSRussell King 	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1271045e3678SYuan Kang 			 sizeof(struct sec4_sg_entry);
1272045e3678SYuan Kang 
1273045e3678SYuan Kang 	/* allocate space for base edesc and hw desc commands, link tables */
127430a43b44SRussell King 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
127530a43b44SRussell King 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
127630a43b44SRussell King 				  flags);
1277045e3678SYuan Kang 	if (!edesc) {
1278bc13c69eSRussell King 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1279045e3678SYuan Kang 		return -ENOMEM;
1280045e3678SYuan Kang 	}
1281045e3678SYuan Kang 
1282045e3678SYuan Kang 	desc = edesc->hw_desc;
1283045e3678SYuan Kang 
1284045e3678SYuan Kang 	edesc->src_nents = src_nents;
1285045e3678SYuan Kang 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1286045e3678SYuan Kang 
1287944c3d4dSHoria Geantă 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1288944c3d4dSHoria Geantă 	if (ret)
1289944c3d4dSHoria Geantă 		goto unmap;
1290045e3678SYuan Kang 
129165cf164aSRussell King 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
129265cf164aSRussell King 				  req->nbytes);
129365cf164aSRussell King 	if (ret) {
1294ce572085SHoria Geanta 		dev_err(jrdev, "unable to map S/G table\n");
129506435f34SMarkus Elfring 		goto unmap;
1296ce572085SHoria Geanta 	}
12971da2be33SRuchika Gupta 
1298045e3678SYuan Kang 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1299045e3678SYuan Kang 						digestsize);
1300ce572085SHoria Geanta 	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
1301ce572085SHoria Geanta 		dev_err(jrdev, "unable to map dst\n");
130206435f34SMarkus Elfring 		goto unmap;
1303ce572085SHoria Geanta 	}
1304045e3678SYuan Kang 
1305045e3678SYuan Kang #ifdef DEBUG
1306514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1307045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1308045e3678SYuan Kang #endif
1309045e3678SYuan Kang 
1310045e3678SYuan Kang 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1311045e3678SYuan Kang 	if (!ret) {
1312045e3678SYuan Kang 		ret = -EINPROGRESS;
1313045e3678SYuan Kang 	} else {
1314045e3678SYuan Kang 		ahash_unmap(jrdev, edesc, req, digestsize);
1315045e3678SYuan Kang 		kfree(edesc);
1316045e3678SYuan Kang 	}
1317045e3678SYuan Kang 
1318045e3678SYuan Kang 	return ret;
131906435f34SMarkus Elfring  unmap:
132006435f34SMarkus Elfring 	ahash_unmap(jrdev, edesc, req, digestsize);
132106435f34SMarkus Elfring 	kfree(edesc);
132206435f34SMarkus Elfring 	return -ENOMEM;
132306435f34SMarkus Elfring 
1324045e3678SYuan Kang }
1325045e3678SYuan Kang 
1326045e3678SYuan Kang /* submit first update job descriptor after init */
1327045e3678SYuan Kang static int ahash_update_first(struct ahash_request *req)
1328045e3678SYuan Kang {
1329045e3678SYuan Kang 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1330045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1331045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1332045e3678SYuan Kang 	struct device *jrdev = ctx->jrdev;
1333019d62dbSHoria Geantă 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1334019d62dbSHoria Geantă 		       GFP_KERNEL : GFP_ATOMIC;
1335944c3d4dSHoria Geantă 	u8 *next_buf = alt_buf(state);
1336944c3d4dSHoria Geantă 	int *next_buflen = alt_buflen(state);
1337045e3678SYuan Kang 	int to_hash;
133830a43b44SRussell King 	u32 *desc;
133965cf164aSRussell King 	int src_nents, mapped_nents;
1340045e3678SYuan Kang 	struct ahash_edesc *edesc;
1341045e3678SYuan Kang 	int ret = 0;
1342045e3678SYuan Kang 
1343045e3678SYuan Kang 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1344045e3678SYuan Kang 				      1);
1345045e3678SYuan Kang 	to_hash = req->nbytes - *next_buflen;
1346045e3678SYuan Kang 
1347045e3678SYuan Kang 	if (to_hash) {
13483d5a2db6SRussell King 		src_nents = sg_nents_for_len(req->src,
13493d5a2db6SRussell King 					     req->nbytes - *next_buflen);
1350f9970c28SLABBE Corentin 		if (src_nents < 0) {
1351f9970c28SLABBE Corentin 			dev_err(jrdev, "Invalid number of src SG.\n");
1352f9970c28SLABBE Corentin 			return src_nents;
1353f9970c28SLABBE Corentin 		}
1354bc13c69eSRussell King 
1355bc13c69eSRussell King 		if (src_nents) {
1356bc13c69eSRussell King 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1357bc13c69eSRussell King 						  DMA_TO_DEVICE);
1358bc13c69eSRussell King 			if (!mapped_nents) {
1359bc13c69eSRussell King 				dev_err(jrdev, "unable to map source for DMA\n");
1360bc13c69eSRussell King 				return -ENOMEM;
1361bc13c69eSRussell King 			}
1362bc13c69eSRussell King 		} else {
1363bc13c69eSRussell King 			mapped_nents = 0;
1364bc13c69eSRussell King 		}
1365045e3678SYuan Kang 
1366045e3678SYuan Kang 		/*
1367045e3678SYuan Kang 		 * allocate space for base edesc and hw desc commands,
1368045e3678SYuan Kang 		 * link tables
1369045e3678SYuan Kang 		 */
13705588d039SRussell King 		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
137130a43b44SRussell King 					  mapped_nents : 0,
137230a43b44SRussell King 					  ctx->sh_desc_update_first,
137330a43b44SRussell King 					  ctx->sh_desc_update_first_dma,
137430a43b44SRussell King 					  flags);
1375045e3678SYuan Kang 		if (!edesc) {
1376bc13c69eSRussell King 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1377045e3678SYuan Kang 			return -ENOMEM;
1378045e3678SYuan Kang 		}
1379045e3678SYuan Kang 
1380045e3678SYuan Kang 		edesc->src_nents = src_nents;
1381045e3678SYuan Kang 
138265cf164aSRussell King 		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
138365cf164aSRussell King 					  to_hash);
138465cf164aSRussell King 		if (ret)
138558b0e5d0SMarkus Elfring 			goto unmap_ctx;
1386045e3678SYuan Kang 
1387045e3678SYuan Kang 		if (*next_buflen)
1388307fd543SCristian Stoica 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
1389307fd543SCristian Stoica 						 *next_buflen, 0);
1390045e3678SYuan Kang 
1391045e3678SYuan Kang 		desc = edesc->hw_desc;
1392045e3678SYuan Kang 
1393ce572085SHoria Geanta 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1394ce572085SHoria Geanta 		if (ret)
139558b0e5d0SMarkus Elfring 			goto unmap_ctx;
1396045e3678SYuan Kang 
1397045e3678SYuan Kang #ifdef DEBUG
1398514df281SAlex Porosanu 		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1399045e3678SYuan Kang 			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1400045e3678SYuan Kang 			       desc_bytes(desc), 1);
1401045e3678SYuan Kang #endif
1402045e3678SYuan Kang 
140332686d34SRussell King 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
140432686d34SRussell King 		if (ret)
140558b0e5d0SMarkus Elfring 			goto unmap_ctx;
140632686d34SRussell King 
1407045e3678SYuan Kang 		ret = -EINPROGRESS;
1408045e3678SYuan Kang 		state->update = ahash_update_ctx;
1409045e3678SYuan Kang 		state->finup = ahash_finup_ctx;
1410045e3678SYuan Kang 		state->final = ahash_final_ctx;
1411045e3678SYuan Kang 	} else if (*next_buflen) {
1412045e3678SYuan Kang 		state->update = ahash_update_no_ctx;
1413045e3678SYuan Kang 		state->finup = ahash_finup_no_ctx;
1414045e3678SYuan Kang 		state->final = ahash_final_no_ctx;
1415307fd543SCristian Stoica 		scatterwalk_map_and_copy(next_buf, req->src, 0,
1416307fd543SCristian Stoica 					 req->nbytes, 0);
1417944c3d4dSHoria Geantă 		switch_buf(state);
1418045e3678SYuan Kang 	}
1419045e3678SYuan Kang #ifdef DEBUG
1420514df281SAlex Porosanu 	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1421045e3678SYuan Kang 		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1422045e3678SYuan Kang 		       *next_buflen, 1);
1423045e3678SYuan Kang #endif
1424045e3678SYuan Kang 
1425045e3678SYuan Kang 	return ret;
142658b0e5d0SMarkus Elfring  unmap_ctx:
142732686d34SRussell King 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
142832686d34SRussell King 	kfree(edesc);
142932686d34SRussell King 	return ret;
1430045e3678SYuan Kang }
1431045e3678SYuan Kang 
1432045e3678SYuan Kang static int ahash_finup_first(struct ahash_request *req)
1433045e3678SYuan Kang {
1434045e3678SYuan Kang 	return ahash_digest(req);
1435045e3678SYuan Kang }
1436045e3678SYuan Kang 
1437045e3678SYuan Kang static int ahash_init(struct ahash_request *req)
1438045e3678SYuan Kang {
1439045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1440045e3678SYuan Kang 
1441045e3678SYuan Kang 	state->update = ahash_update_first;
1442045e3678SYuan Kang 	state->finup = ahash_finup_first;
1443045e3678SYuan Kang 	state->final = ahash_final_no_ctx;
1444045e3678SYuan Kang 
144587ec02e7SHoria Geantă 	state->ctx_dma = 0;
1446045e3678SYuan Kang 	state->current_buf = 0;
1447de0e35ecSHoria Geanta 	state->buf_dma = 0;
14486fd4b156SSteve Cornelius 	state->buflen_0 = 0;
14496fd4b156SSteve Cornelius 	state->buflen_1 = 0;
1450045e3678SYuan Kang 
1451045e3678SYuan Kang 	return 0;
1452045e3678SYuan Kang }
1453045e3678SYuan Kang 
1454045e3678SYuan Kang static int ahash_update(struct ahash_request *req)
1455045e3678SYuan Kang {
1456045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1457045e3678SYuan Kang 
1458045e3678SYuan Kang 	return state->update(req);
1459045e3678SYuan Kang }
1460045e3678SYuan Kang 
1461045e3678SYuan Kang static int ahash_finup(struct ahash_request *req)
1462045e3678SYuan Kang {
1463045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1464045e3678SYuan Kang 
1465045e3678SYuan Kang 	return state->finup(req);
1466045e3678SYuan Kang }
1467045e3678SYuan Kang 
1468045e3678SYuan Kang static int ahash_final(struct ahash_request *req)
1469045e3678SYuan Kang {
1470045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
1471045e3678SYuan Kang 
1472045e3678SYuan Kang 	return state->final(req);
1473045e3678SYuan Kang }
1474045e3678SYuan Kang 
1475045e3678SYuan Kang static int ahash_export(struct ahash_request *req, void *out)
1476045e3678SYuan Kang {
1477045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
14785ec90831SRussell King 	struct caam_export_state *export = out;
14795ec90831SRussell King 	int len;
14805ec90831SRussell King 	u8 *buf;
1481045e3678SYuan Kang 
14825ec90831SRussell King 	if (state->current_buf) {
14835ec90831SRussell King 		buf = state->buf_1;
14845ec90831SRussell King 		len = state->buflen_1;
14855ec90831SRussell King 	} else {
14865ec90831SRussell King 		buf = state->buf_0;
1487f456cd2dSFabio Estevam 		len = state->buflen_0;
14885ec90831SRussell King 	}
14895ec90831SRussell King 
14905ec90831SRussell King 	memcpy(export->buf, buf, len);
14915ec90831SRussell King 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
14925ec90831SRussell King 	export->buflen = len;
14935ec90831SRussell King 	export->update = state->update;
14945ec90831SRussell King 	export->final = state->final;
14955ec90831SRussell King 	export->finup = state->finup;
1496434b4212SRussell King 
1497045e3678SYuan Kang 	return 0;
1498045e3678SYuan Kang }
1499045e3678SYuan Kang 
1500045e3678SYuan Kang static int ahash_import(struct ahash_request *req, const void *in)
1501045e3678SYuan Kang {
1502045e3678SYuan Kang 	struct caam_hash_state *state = ahash_request_ctx(req);
15035ec90831SRussell King 	const struct caam_export_state *export = in;
1504045e3678SYuan Kang 
15055ec90831SRussell King 	memset(state, 0, sizeof(*state));
15065ec90831SRussell King 	memcpy(state->buf_0, export->buf, export->buflen);
15075ec90831SRussell King 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
15085ec90831SRussell King 	state->buflen_0 = export->buflen;
15095ec90831SRussell King 	state->update = export->update;
15105ec90831SRussell King 	state->final = export->final;
15115ec90831SRussell King 	state->finup = export->finup;
1512434b4212SRussell King 
1513045e3678SYuan Kang 	return 0;
1514045e3678SYuan Kang }
1515045e3678SYuan Kang 
1516045e3678SYuan Kang struct caam_hash_template {
1517045e3678SYuan Kang 	char name[CRYPTO_MAX_ALG_NAME];
1518045e3678SYuan Kang 	char driver_name[CRYPTO_MAX_ALG_NAME];
1519b0e09baeSYuan Kang 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1520b0e09baeSYuan Kang 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1521045e3678SYuan Kang 	unsigned int blocksize;
1522045e3678SYuan Kang 	struct ahash_alg template_ahash;
1523045e3678SYuan Kang 	u32 alg_type;
1524045e3678SYuan Kang };
1525045e3678SYuan Kang 
1526045e3678SYuan Kang /* ahash descriptors */
1527045e3678SYuan Kang static struct caam_hash_template driver_hash[] = {
1528045e3678SYuan Kang 	{
1529b0e09baeSYuan Kang 		.name = "sha1",
1530b0e09baeSYuan Kang 		.driver_name = "sha1-caam",
1531b0e09baeSYuan Kang 		.hmac_name = "hmac(sha1)",
1532b0e09baeSYuan Kang 		.hmac_driver_name = "hmac-sha1-caam",
1533045e3678SYuan Kang 		.blocksize = SHA1_BLOCK_SIZE,
1534045e3678SYuan Kang 		.template_ahash = {
1535045e3678SYuan Kang 			.init = ahash_init,
1536045e3678SYuan Kang 			.update = ahash_update,
1537045e3678SYuan Kang 			.final = ahash_final,
1538045e3678SYuan Kang 			.finup = ahash_finup,
1539045e3678SYuan Kang 			.digest = ahash_digest,
1540045e3678SYuan Kang 			.export = ahash_export,
1541045e3678SYuan Kang 			.import = ahash_import,
1542045e3678SYuan Kang 			.setkey = ahash_setkey,
1543045e3678SYuan Kang 			.halg = {
1544045e3678SYuan Kang 				.digestsize = SHA1_DIGEST_SIZE,
15455ec90831SRussell King 				.statesize = sizeof(struct caam_export_state),
1546045e3678SYuan Kang 			},
1547045e3678SYuan Kang 		},
1548045e3678SYuan Kang 		.alg_type = OP_ALG_ALGSEL_SHA1,
1549045e3678SYuan Kang 	}, {
1550b0e09baeSYuan Kang 		.name = "sha224",
1551b0e09baeSYuan Kang 		.driver_name = "sha224-caam",
1552b0e09baeSYuan Kang 		.hmac_name = "hmac(sha224)",
1553b0e09baeSYuan Kang 		.hmac_driver_name = "hmac-sha224-caam",
1554045e3678SYuan Kang 		.blocksize = SHA224_BLOCK_SIZE,
1555045e3678SYuan Kang 		.template_ahash = {
1556045e3678SYuan Kang 			.init = ahash_init,
1557045e3678SYuan Kang 			.update = ahash_update,
1558045e3678SYuan Kang 			.final = ahash_final,
1559045e3678SYuan Kang 			.finup = ahash_finup,
1560045e3678SYuan Kang 			.digest = ahash_digest,
1561045e3678SYuan Kang 			.export = ahash_export,
1562045e3678SYuan Kang 			.import = ahash_import,
1563045e3678SYuan Kang 			.setkey = ahash_setkey,
1564045e3678SYuan Kang 			.halg = {
1565045e3678SYuan Kang 				.digestsize = SHA224_DIGEST_SIZE,
15665ec90831SRussell King 				.statesize = sizeof(struct caam_export_state),
1567045e3678SYuan Kang 			},
1568045e3678SYuan Kang 		},
1569045e3678SYuan Kang 		.alg_type = OP_ALG_ALGSEL_SHA224,
1570045e3678SYuan Kang 	}, {
1571b0e09baeSYuan Kang 		.name = "sha256",
1572b0e09baeSYuan Kang 		.driver_name = "sha256-caam",
1573b0e09baeSYuan Kang 		.hmac_name = "hmac(sha256)",
1574b0e09baeSYuan Kang 		.hmac_driver_name = "hmac-sha256-caam",
1575045e3678SYuan Kang 		.blocksize = SHA256_BLOCK_SIZE,
1576045e3678SYuan Kang 		.template_ahash = {
1577045e3678SYuan Kang 			.init = ahash_init,
1578045e3678SYuan Kang 			.update = ahash_update,
1579045e3678SYuan Kang 			.final = ahash_final,
1580045e3678SYuan Kang 			.finup = ahash_finup,
1581045e3678SYuan Kang 			.digest = ahash_digest,
1582045e3678SYuan Kang 			.export = ahash_export,
1583045e3678SYuan Kang 			.import = ahash_import,
1584045e3678SYuan Kang 			.setkey = ahash_setkey,
1585045e3678SYuan Kang 			.halg = {
1586045e3678SYuan Kang 				.digestsize = SHA256_DIGEST_SIZE,
15875ec90831SRussell King 				.statesize = sizeof(struct caam_export_state),
1588045e3678SYuan Kang 			},
1589045e3678SYuan Kang 		},
1590045e3678SYuan Kang 		.alg_type = OP_ALG_ALGSEL_SHA256,
1591045e3678SYuan Kang 	}, {
1592b0e09baeSYuan Kang 		.name = "sha384",
1593b0e09baeSYuan Kang 		.driver_name = "sha384-caam",
1594b0e09baeSYuan Kang 		.hmac_name = "hmac(sha384)",
1595b0e09baeSYuan Kang 		.hmac_driver_name = "hmac-sha384-caam",
1596045e3678SYuan Kang 		.blocksize = SHA384_BLOCK_SIZE,
1597045e3678SYuan Kang 		.template_ahash = {
1598045e3678SYuan Kang 			.init = ahash_init,
1599045e3678SYuan Kang 			.update = ahash_update,
1600045e3678SYuan Kang 			.final = ahash_final,
1601045e3678SYuan Kang 			.finup = ahash_finup,
1602045e3678SYuan Kang 			.digest = ahash_digest,
1603045e3678SYuan Kang 			.export = ahash_export,
1604045e3678SYuan Kang 			.import = ahash_import,
1605045e3678SYuan Kang 			.setkey = ahash_setkey,
1606045e3678SYuan Kang 			.halg = {
1607045e3678SYuan Kang 				.digestsize = SHA384_DIGEST_SIZE,
16085ec90831SRussell King 				.statesize = sizeof(struct caam_export_state),
1609045e3678SYuan Kang 			},
1610045e3678SYuan Kang 		},
1611045e3678SYuan Kang 		.alg_type = OP_ALG_ALGSEL_SHA384,
1612045e3678SYuan Kang 	}, {
1613b0e09baeSYuan Kang 		.name = "sha512",
1614b0e09baeSYuan Kang 		.driver_name = "sha512-caam",
1615b0e09baeSYuan Kang 		.hmac_name = "hmac(sha512)",
1616b0e09baeSYuan Kang 		.hmac_driver_name = "hmac-sha512-caam",
1617045e3678SYuan Kang 		.blocksize = SHA512_BLOCK_SIZE,
1618045e3678SYuan Kang 		.template_ahash = {
1619045e3678SYuan Kang 			.init = ahash_init,
1620045e3678SYuan Kang 			.update = ahash_update,
1621045e3678SYuan Kang 			.final = ahash_final,
1622045e3678SYuan Kang 			.finup = ahash_finup,
1623045e3678SYuan Kang 			.digest = ahash_digest,
1624045e3678SYuan Kang 			.export = ahash_export,
1625045e3678SYuan Kang 			.import = ahash_import,
1626045e3678SYuan Kang 			.setkey = ahash_setkey,
1627045e3678SYuan Kang 			.halg = {
1628045e3678SYuan Kang 				.digestsize = SHA512_DIGEST_SIZE,
16295ec90831SRussell King 				.statesize = sizeof(struct caam_export_state),
1630045e3678SYuan Kang 			},
1631045e3678SYuan Kang 		},
1632045e3678SYuan Kang 		.alg_type = OP_ALG_ALGSEL_SHA512,
1633045e3678SYuan Kang 	}, {
1634b0e09baeSYuan Kang 		.name = "md5",
1635b0e09baeSYuan Kang 		.driver_name = "md5-caam",
1636b0e09baeSYuan Kang 		.hmac_name = "hmac(md5)",
1637b0e09baeSYuan Kang 		.hmac_driver_name = "hmac-md5-caam",
1638045e3678SYuan Kang 		.blocksize = MD5_BLOCK_WORDS * 4,
1639045e3678SYuan Kang 		.template_ahash = {
1640045e3678SYuan Kang 			.init = ahash_init,
1641045e3678SYuan Kang 			.update = ahash_update,
1642045e3678SYuan Kang 			.final = ahash_final,
1643045e3678SYuan Kang 			.finup = ahash_finup,
1644045e3678SYuan Kang 			.digest = ahash_digest,
1645045e3678SYuan Kang 			.export = ahash_export,
1646045e3678SYuan Kang 			.import = ahash_import,
1647045e3678SYuan Kang 			.setkey = ahash_setkey,
1648045e3678SYuan Kang 			.halg = {
1649045e3678SYuan Kang 				.digestsize = MD5_DIGEST_SIZE,
16505ec90831SRussell King 				.statesize = sizeof(struct caam_export_state),
1651045e3678SYuan Kang 			},
1652045e3678SYuan Kang 		},
1653045e3678SYuan Kang 		.alg_type = OP_ALG_ALGSEL_MD5,
1654045e3678SYuan Kang 	},
1655045e3678SYuan Kang };
1656045e3678SYuan Kang 
1657045e3678SYuan Kang struct caam_hash_alg {
1658045e3678SYuan Kang 	struct list_head entry;
1659045e3678SYuan Kang 	int alg_type;
1660045e3678SYuan Kang 	struct ahash_alg ahash_alg;
1661045e3678SYuan Kang };
1662045e3678SYuan Kang 
1663045e3678SYuan Kang static int caam_hash_cra_init(struct crypto_tfm *tfm)
1664045e3678SYuan Kang {
1665045e3678SYuan Kang 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1666045e3678SYuan Kang 	struct crypto_alg *base = tfm->__crt_alg;
1667045e3678SYuan Kang 	struct hash_alg_common *halg =
1668045e3678SYuan Kang 		 container_of(base, struct hash_alg_common, base);
1669045e3678SYuan Kang 	struct ahash_alg *alg =
1670045e3678SYuan Kang 		 container_of(halg, struct ahash_alg, halg);
1671045e3678SYuan Kang 	struct caam_hash_alg *caam_hash =
1672045e3678SYuan Kang 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1673045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1674045e3678SYuan Kang 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1675045e3678SYuan Kang 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1676045e3678SYuan Kang 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1677045e3678SYuan Kang 					 HASH_MSG_LEN + 32,
1678045e3678SYuan Kang 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1679045e3678SYuan Kang 					 HASH_MSG_LEN + 64,
1680045e3678SYuan Kang 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1681bbf22344SHoria Geantă 	dma_addr_t dma_addr;
16827e0880b9SHoria Geantă 	struct caam_drv_private *priv;
1683045e3678SYuan Kang 
1684045e3678SYuan Kang 	/*
1685cfc6f11bSRuchika Gupta 	 * Get a Job ring from Job Ring driver to ensure in-order
1686045e3678SYuan Kang 	 * crypto request processing per tfm
1687045e3678SYuan Kang 	 */
1688cfc6f11bSRuchika Gupta 	ctx->jrdev = caam_jr_alloc();
1689cfc6f11bSRuchika Gupta 	if (IS_ERR(ctx->jrdev)) {
1690cfc6f11bSRuchika Gupta 		pr_err("Job Ring Device allocation for transform failed\n");
1691cfc6f11bSRuchika Gupta 		return PTR_ERR(ctx->jrdev);
1692cfc6f11bSRuchika Gupta 	}
1693bbf22344SHoria Geantă 
16947e0880b9SHoria Geantă 	priv = dev_get_drvdata(ctx->jrdev->parent);
16957e0880b9SHoria Geantă 	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
16967e0880b9SHoria Geantă 
1697bbf22344SHoria Geantă 	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1698bbf22344SHoria Geantă 					offsetof(struct caam_hash_ctx,
1699bbf22344SHoria Geantă 						 sh_desc_update_dma),
17007e0880b9SHoria Geantă 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1701bbf22344SHoria Geantă 	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1702bbf22344SHoria Geantă 		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1703bbf22344SHoria Geantă 		caam_jr_free(ctx->jrdev);
1704bbf22344SHoria Geantă 		return -ENOMEM;
1705bbf22344SHoria Geantă 	}
1706bbf22344SHoria Geantă 
1707bbf22344SHoria Geantă 	ctx->sh_desc_update_dma = dma_addr;
1708bbf22344SHoria Geantă 	ctx->sh_desc_update_first_dma = dma_addr +
1709bbf22344SHoria Geantă 					offsetof(struct caam_hash_ctx,
1710bbf22344SHoria Geantă 						 sh_desc_update_first);
1711bbf22344SHoria Geantă 	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1712bbf22344SHoria Geantă 						   sh_desc_fin);
1713bbf22344SHoria Geantă 	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1714bbf22344SHoria Geantă 						      sh_desc_digest);
1715bbf22344SHoria Geantă 
1716045e3678SYuan Kang 	/* copy descriptor header template value */
1717db57656bSHoria Geantă 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1718045e3678SYuan Kang 
1719488ebc3aSHoria Geantă 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
1720488ebc3aSHoria Geantă 				   OP_ALG_ALGSEL_SUBMASK) >>
1721045e3678SYuan Kang 				  OP_ALG_ALGSEL_SHIFT];
1722045e3678SYuan Kang 
1723045e3678SYuan Kang 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1724045e3678SYuan Kang 				 sizeof(struct caam_hash_state));
1725e6cc5b8dSMarkus Elfring 	return ahash_set_sh_desc(ahash);
1726045e3678SYuan Kang }
1727045e3678SYuan Kang 
1728045e3678SYuan Kang static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1729045e3678SYuan Kang {
1730045e3678SYuan Kang 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1731045e3678SYuan Kang 
1732bbf22344SHoria Geantă 	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1733bbf22344SHoria Geantă 			       offsetof(struct caam_hash_ctx,
1734bbf22344SHoria Geantă 					sh_desc_update_dma),
17357e0880b9SHoria Geantă 			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1736cfc6f11bSRuchika Gupta 	caam_jr_free(ctx->jrdev);
1737045e3678SYuan Kang }
1738045e3678SYuan Kang 
1739045e3678SYuan Kang static void __exit caam_algapi_hash_exit(void)
1740045e3678SYuan Kang {
1741045e3678SYuan Kang 	struct caam_hash_alg *t_alg, *n;
1742045e3678SYuan Kang 
1743cfc6f11bSRuchika Gupta 	if (!hash_list.next)
1744045e3678SYuan Kang 		return;
1745045e3678SYuan Kang 
1746cfc6f11bSRuchika Gupta 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1747045e3678SYuan Kang 		crypto_unregister_ahash(&t_alg->ahash_alg);
1748045e3678SYuan Kang 		list_del(&t_alg->entry);
1749045e3678SYuan Kang 		kfree(t_alg);
1750045e3678SYuan Kang 	}
1751045e3678SYuan Kang }
1752045e3678SYuan Kang 
1753045e3678SYuan Kang static struct caam_hash_alg *
1754cfc6f11bSRuchika Gupta caam_hash_alloc(struct caam_hash_template *template,
1755b0e09baeSYuan Kang 		bool keyed)
1756045e3678SYuan Kang {
1757045e3678SYuan Kang 	struct caam_hash_alg *t_alg;
1758045e3678SYuan Kang 	struct ahash_alg *halg;
1759045e3678SYuan Kang 	struct crypto_alg *alg;
1760045e3678SYuan Kang 
17619c4f9733SFabio Estevam 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1762045e3678SYuan Kang 	if (!t_alg) {
1763cfc6f11bSRuchika Gupta 		pr_err("failed to allocate t_alg\n");
1764045e3678SYuan Kang 		return ERR_PTR(-ENOMEM);
1765045e3678SYuan Kang 	}
1766045e3678SYuan Kang 
1767045e3678SYuan Kang 	t_alg->ahash_alg = template->template_ahash;
1768045e3678SYuan Kang 	halg = &t_alg->ahash_alg;
1769045e3678SYuan Kang 	alg = &halg->halg.base;
1770045e3678SYuan Kang 
1771b0e09baeSYuan Kang 	if (keyed) {
1772b0e09baeSYuan Kang 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1773b0e09baeSYuan Kang 			 template->hmac_name);
1774b0e09baeSYuan Kang 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1775b0e09baeSYuan Kang 			 template->hmac_driver_name);
1776b0e09baeSYuan Kang 	} else {
1777b0e09baeSYuan Kang 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1778b0e09baeSYuan Kang 			 template->name);
1779045e3678SYuan Kang 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1780045e3678SYuan Kang 			 template->driver_name);
1781a0118c8bSRussell King 		t_alg->ahash_alg.setkey = NULL;
1782b0e09baeSYuan Kang 	}
1783045e3678SYuan Kang 	alg->cra_module = THIS_MODULE;
1784045e3678SYuan Kang 	alg->cra_init = caam_hash_cra_init;
1785045e3678SYuan Kang 	alg->cra_exit = caam_hash_cra_exit;
1786045e3678SYuan Kang 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1787045e3678SYuan Kang 	alg->cra_priority = CAAM_CRA_PRIORITY;
1788045e3678SYuan Kang 	alg->cra_blocksize = template->blocksize;
1789045e3678SYuan Kang 	alg->cra_alignmask = 0;
17906a38f622SEric Biggers 	alg->cra_flags = CRYPTO_ALG_ASYNC;
1791045e3678SYuan Kang 
1792045e3678SYuan Kang 	t_alg->alg_type = template->alg_type;
1793045e3678SYuan Kang 
1794045e3678SYuan Kang 	return t_alg;
1795045e3678SYuan Kang }
1796045e3678SYuan Kang 
1797045e3678SYuan Kang static int __init caam_algapi_hash_init(void)
1798045e3678SYuan Kang {
179935af6403SRuchika Gupta 	struct device_node *dev_node;
180035af6403SRuchika Gupta 	struct platform_device *pdev;
180135af6403SRuchika Gupta 	struct device *ctrldev;
1802045e3678SYuan Kang 	int i = 0, err = 0;
1803bf83490eSVictoria Milhoan 	struct caam_drv_private *priv;
1804bf83490eSVictoria Milhoan 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1805*d239b10dSHoria Geantă 	u32 md_inst, md_vid;
1806045e3678SYuan Kang 
180735af6403SRuchika Gupta 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
180835af6403SRuchika Gupta 	if (!dev_node) {
180935af6403SRuchika Gupta 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
181035af6403SRuchika Gupta 		if (!dev_node)
181135af6403SRuchika Gupta 			return -ENODEV;
181235af6403SRuchika Gupta 	}
181335af6403SRuchika Gupta 
181435af6403SRuchika Gupta 	pdev = of_find_device_by_node(dev_node);
181535af6403SRuchika Gupta 	if (!pdev) {
181635af6403SRuchika Gupta 		of_node_put(dev_node);
181735af6403SRuchika Gupta 		return -ENODEV;
181835af6403SRuchika Gupta 	}
181935af6403SRuchika Gupta 
182035af6403SRuchika Gupta 	ctrldev = &pdev->dev;
182135af6403SRuchika Gupta 	priv = dev_get_drvdata(ctrldev);
182235af6403SRuchika Gupta 	of_node_put(dev_node);
182335af6403SRuchika Gupta 
182435af6403SRuchika Gupta 	/*
182535af6403SRuchika Gupta 	 * If priv is NULL, it's probably because the caam driver wasn't
182635af6403SRuchika Gupta 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
182735af6403SRuchika Gupta 	 */
182835af6403SRuchika Gupta 	if (!priv)
182935af6403SRuchika Gupta 		return -ENODEV;
183035af6403SRuchika Gupta 
1831bf83490eSVictoria Milhoan 	/*
1832bf83490eSVictoria Milhoan 	 * Register crypto algorithms the device supports.  First, identify
1833bf83490eSVictoria Milhoan 	 * presence and attributes of MD block.
1834bf83490eSVictoria Milhoan 	 */
1835*d239b10dSHoria Geantă 	if (priv->era < 10) {
1836*d239b10dSHoria Geantă 		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1837*d239b10dSHoria Geantă 			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1838*d239b10dSHoria Geantă 		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1839*d239b10dSHoria Geantă 			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1840*d239b10dSHoria Geantă 	} else {
1841*d239b10dSHoria Geantă 		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1842*d239b10dSHoria Geantă 
1843*d239b10dSHoria Geantă 		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1844*d239b10dSHoria Geantă 		md_inst = mdha & CHA_VER_NUM_MASK;
1845*d239b10dSHoria Geantă 	}
1846bf83490eSVictoria Milhoan 
1847bf83490eSVictoria Milhoan 	/*
1848bf83490eSVictoria Milhoan 	 * Skip registration of any hashing algorithms if MD block
1849bf83490eSVictoria Milhoan 	 * is not present.
1850bf83490eSVictoria Milhoan 	 */
1851*d239b10dSHoria Geantă 	if (!md_inst)
1852bf83490eSVictoria Milhoan 		return -ENODEV;
1853bf83490eSVictoria Milhoan 
1854bf83490eSVictoria Milhoan 	/* Limit digest size based on LP256 */
1855*d239b10dSHoria Geantă 	if (md_vid == CHA_VER_VID_MD_LP256)
1856bf83490eSVictoria Milhoan 		md_limit = SHA256_DIGEST_SIZE;
1857bf83490eSVictoria Milhoan 
1858cfc6f11bSRuchika Gupta 	INIT_LIST_HEAD(&hash_list);
1859045e3678SYuan Kang 
1860045e3678SYuan Kang 	/* register crypto algorithms the device supports */
1861045e3678SYuan Kang 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1862045e3678SYuan Kang 		struct caam_hash_alg *t_alg;
1863bf83490eSVictoria Milhoan 		struct caam_hash_template *alg = driver_hash + i;
1864bf83490eSVictoria Milhoan 
1865bf83490eSVictoria Milhoan 		/* If MD size is not supported by device, skip registration */
1866bf83490eSVictoria Milhoan 		if (alg->template_ahash.halg.digestsize > md_limit)
1867bf83490eSVictoria Milhoan 			continue;
1868045e3678SYuan Kang 
1869b0e09baeSYuan Kang 		/* register hmac version */
1870bf83490eSVictoria Milhoan 		t_alg = caam_hash_alloc(alg, true);
1871b0e09baeSYuan Kang 		if (IS_ERR(t_alg)) {
1872b0e09baeSYuan Kang 			err = PTR_ERR(t_alg);
1873bf83490eSVictoria Milhoan 			pr_warn("%s alg allocation failed\n", alg->driver_name);
1874b0e09baeSYuan Kang 			continue;
1875b0e09baeSYuan Kang 		}
1876b0e09baeSYuan Kang 
1877b0e09baeSYuan Kang 		err = crypto_register_ahash(&t_alg->ahash_alg);
1878b0e09baeSYuan Kang 		if (err) {
18796ea30f0aSRussell King 			pr_warn("%s alg registration failed: %d\n",
18806ea30f0aSRussell King 				t_alg->ahash_alg.halg.base.cra_driver_name,
18816ea30f0aSRussell King 				err);
1882b0e09baeSYuan Kang 			kfree(t_alg);
1883b0e09baeSYuan Kang 		} else
1884cfc6f11bSRuchika Gupta 			list_add_tail(&t_alg->entry, &hash_list);
1885b0e09baeSYuan Kang 
1886b0e09baeSYuan Kang 		/* register unkeyed version */
1887bf83490eSVictoria Milhoan 		t_alg = caam_hash_alloc(alg, false);
1888045e3678SYuan Kang 		if (IS_ERR(t_alg)) {
1889045e3678SYuan Kang 			err = PTR_ERR(t_alg);
1890bf83490eSVictoria Milhoan 			pr_warn("%s alg allocation failed\n", alg->driver_name);
1891045e3678SYuan Kang 			continue;
1892045e3678SYuan Kang 		}
1893045e3678SYuan Kang 
1894045e3678SYuan Kang 		err = crypto_register_ahash(&t_alg->ahash_alg);
1895045e3678SYuan Kang 		if (err) {
18966ea30f0aSRussell King 			pr_warn("%s alg registration failed: %d\n",
18976ea30f0aSRussell King 				t_alg->ahash_alg.halg.base.cra_driver_name,
18986ea30f0aSRussell King 				err);
1899045e3678SYuan Kang 			kfree(t_alg);
1900045e3678SYuan Kang 		} else
1901cfc6f11bSRuchika Gupta 			list_add_tail(&t_alg->entry, &hash_list);
1902045e3678SYuan Kang 	}
1903045e3678SYuan Kang 
1904045e3678SYuan Kang 	return err;
1905045e3678SYuan Kang }
1906045e3678SYuan Kang 
1907045e3678SYuan Kang module_init(caam_algapi_hash_init);
1908045e3678SYuan Kang module_exit(caam_algapi_hash_exit);
1909045e3678SYuan Kang 
1910045e3678SYuan Kang MODULE_LICENSE("GPL");
1911045e3678SYuan Kang MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1912045e3678SYuan Kang MODULE_AUTHOR("Freescale Semiconductor - NMG");
1913