/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

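/*
 * Illustrative only, not part of this driver: a kernel client reaches the
 * descriptors built below through the generic ahash API. A minimal sketch
 * (sgl, digest, done_cb and priv are hypothetical caller-side names):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, priv);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * crypto_ahash_digest() typically returns -EINPROGRESS here and done_cb()
 * fires once the job ring completes the request.
 */
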
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

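/*
 * Worked example from the definitions above: the SHA-256 running context
 * is 8 + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes; SHA-512 needs
 * 8 + 64 = 72 bytes, which is what MAX_CTX_LEN provides.
 */
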
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	/* buf_0/buf_1 hold partial-block data and are used ping-pong style */
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;	/* selects the active buf_0/buf_1 pair */
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/*
 * Only put the buffer in the link table if it contains data. Either way,
 * a previously used buffer may still be DMA mapped and must be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/*
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read from seqin and write to seqout
 * For ahash update_first and digest (import_ctx = false)
 *     read from seqin and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
				  ctx->adata.keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

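/*
 * For reference, a sketch of the shared descriptor ahash_gen_sh_desc()
 * builds for the keyed finalize case (inferred from the append_* calls
 * above, not an actual descriptor dump):
 *
 *	HDR (share serial)
 *	JUMP (skip split key load if descriptor already shared)
 *	KEY (split key, immediate)
 *	SEQ LOAD (restore running context)	- import_ctx only
 *	OPERATION (class 2, alg, HMAC precomp, finalize)
 *	MATH (VARSEQINLEN = SEQINLEN)
 *	SEQ FIFO LOAD (message, VLF, LAST2)
 *	SEQ STORE (digest out of class 2 context)
 */
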
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

/* Digest the key when it is longer than the algorithm block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

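/*
 * Note: digesting over-long keys first is standard HMAC practice
 * (RFC 2104). E.g. a 100-byte key used with hmac(sha256), whose block
 * size is 64 bytes, is reduced to a 32-byte digest before the split key
 * is derived in ahash_setkey() below.
 */
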
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		goto bad_free_key;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto error_free_key;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
				 DMA_TO_DEVICE);
	}

 error_free_key:
	kfree(hashed_key);
	return ret;
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

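/*
 * A minimal keyed-use sketch (illustrative; tfm and req set up as in the
 * digest example near the top of this file):
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_ahash_digest(req);
 *
 * crypto_ahash_setkey() lands in ahash_setkey() above via the .setkey
 * hook registered for the hmac(*) algorithms below.
 */
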
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

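/*
 * Summary of the state machine wired up above (inferred from the
 * assignments in ahash_init(), ahash_update_first() and the *_no_ctx
 * variants):
 *
 *	after init:		update_first / finup_first / final_no_ctx
 *	first update hashed:	update_ctx / finup_ctx / final_ctx
 *	first update buffered:	update_no_ctx / finup_no_ctx / final_no_ctx
 */
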
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

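/*
 * Illustrative export/import round trip through the generic API (req2 is
 * a hypothetical second request on the same tfm):
 *
 *	u8 state[sizeof(struct caam_export_state)];
 *
 *	err = crypto_ahash_export(req, state);
 *	...
 *	err = crypto_ahash_import(req2, state);
 *	err = crypto_ahash_final(req2);
 *
 * Callers should size the buffer via crypto_ahash_statesize(tfm), which
 * reports the .statesize registered below.
 */
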
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * The truncated SHA-224/SHA-384 variants carry the full SHA-256/
	 * SHA-512 running state, hence the parent digest sizes below.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };

1736 	/*
1737 	 * Get a Job ring from Job Ring driver to ensure in-order
1738 	 * crypto request processing per tfm
1739 	 */
1740 	ctx->jrdev = caam_jr_alloc();
1741 	if (IS_ERR(ctx->jrdev)) {
1742 		pr_err("Job Ring Device allocation for transform failed\n");
1743 		return PTR_ERR(ctx->jrdev);
1744 	}
1745 	/* copy descriptor header template value */
1746 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1747 
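	/*
	 * The ALGSEL subfield numbers MD5..SHA-512 consecutively from 0,
	 * so it can be used to index runninglen[] directly.
	 */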
1748 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
1749 				   OP_ALG_ALGSEL_SUBMASK) >>
1750 				  OP_ALG_ALGSEL_SHIFT];
1751 
1752 	crypto_ahash_set_reqsize(ahash, sizeof(struct caam_hash_state));
1754 	return ahash_set_sh_desc(ahash);
1755 }
1756 
1757 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1758 {
1759 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1760 
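	/*
	 * Unmap whichever shared descriptors were DMA-mapped; the checks
	 * keep this safe even if descriptor setup failed partway through.
	 */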
1761 	if (ctx->sh_desc_update_dma &&
1762 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1763 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1764 				 desc_bytes(ctx->sh_desc_update),
1765 				 DMA_TO_DEVICE);
1766 	if (ctx->sh_desc_update_first_dma &&
1767 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1768 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1769 				 desc_bytes(ctx->sh_desc_update_first),
1770 				 DMA_TO_DEVICE);
1771 	if (ctx->sh_desc_fin_dma &&
1772 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1773 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1774 				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1775 	if (ctx->sh_desc_digest_dma &&
1776 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1777 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1778 				 desc_bytes(ctx->sh_desc_digest),
1779 				 DMA_TO_DEVICE);
1780 
1781 	caam_jr_free(ctx->jrdev);
1782 }
1783 
1784 static void __exit caam_algapi_hash_exit(void)
1785 {
1786 	struct caam_hash_alg *t_alg, *n;
1787 
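	/* hash_list is set up in caam_algapi_hash_init(); bail if it never was */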
1788 	if (!hash_list.next)
1789 		return;
1790 
1791 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1792 		crypto_unregister_ahash(&t_alg->ahash_alg);
1793 		list_del(&t_alg->entry);
1794 		kfree(t_alg);
1795 	}
1796 }
1797 
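/*
 * Instantiate one ahash algorithm from a template: with keyed set, the
 * hmac names are used and setkey is kept; otherwise the plain hash
 * names are used and setkey is dropped so the algorithm registers as
 * an unkeyed hash.
 */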
1798 static struct caam_hash_alg *
1799 caam_hash_alloc(struct caam_hash_template *template,
1800 		bool keyed)
1801 {
1802 	struct caam_hash_alg *t_alg;
1803 	struct ahash_alg *halg;
1804 	struct crypto_alg *alg;
1805 
1806 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1807 	if (!t_alg)	/* kzalloc() already warns on failure */
1809 		return ERR_PTR(-ENOMEM);
1811 
1812 	t_alg->ahash_alg = template->template_ahash;
1813 	halg = &t_alg->ahash_alg;
1814 	alg = &halg->halg.base;
1815 
1816 	if (keyed) {
1817 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1818 			 template->hmac_name);
1819 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1820 			 template->hmac_driver_name);
1821 	} else {
1822 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1823 			 template->name);
1824 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1825 			 template->driver_name);
1826 		t_alg->ahash_alg.setkey = NULL;
1827 	}
1828 	alg->cra_module = THIS_MODULE;
1829 	alg->cra_init = caam_hash_cra_init;
1830 	alg->cra_exit = caam_hash_cra_exit;
1831 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1832 	alg->cra_priority = CAAM_CRA_PRIORITY;
1833 	alg->cra_blocksize = template->blocksize;
1834 	alg->cra_alignmask = 0;
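	/* completions are delivered asynchronously from the job ring */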
1835 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1836 	alg->cra_type = &crypto_ahash_type;
1837 
1838 	t_alg->alg_type = template->alg_type;
1839 
1840 	return t_alg;
1841 }
1842 
1843 static int __init caam_algapi_hash_init(void)
1844 {
1845 	struct device_node *dev_node;
1846 	struct platform_device *pdev;
1847 	struct device *ctrldev;
1848 	int i = 0, err = 0;
1849 	struct caam_drv_private *priv;
1850 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1851 	u32 cha_inst, cha_vid;
1852 
1853 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1854 	if (!dev_node) {
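		/* some device trees use the alternate "fsl,sec4.0" spelling */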
1855 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1856 		if (!dev_node)
1857 			return -ENODEV;
1858 	}
1859 
1860 	pdev = of_find_device_by_node(dev_node);
1861 	if (!pdev) {
1862 		of_node_put(dev_node);
1863 		return -ENODEV;
1864 	}
1865 
1866 	ctrldev = &pdev->dev;
1867 	priv = dev_get_drvdata(ctrldev);
1868 	of_node_put(dev_node);
1869 
1870 	/*
1871 	 * If priv is NULL, it's probably because the caam driver wasn't
1872 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1873 	 */
1874 	if (!priv) {
1875 		put_device(ctrldev);	/* drop the of_find_device_by_node() ref */
		return -ENODEV;
	}
1876 
1877 	/*
1878 	 * Register crypto algorithms the device supports.  First, identify
1879 	 * presence and attributes of MD block.
1880 	 */
1881 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
1882 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
1883 
1884 	/*
1885 	 * Skip registration of any hashing algorithms if MD block
1886 	 * is not present.
1887 	 */
1888 	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT)) {
1889 		put_device(ctrldev);
		return -ENODEV;
	}
1890 
1891 	/* LP256 (low-power) MDHA only goes up to SHA-256, so cap the digest size */
1892 	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
1893 		md_limit = SHA256_DIGEST_SIZE;
1894 
1895 	INIT_LIST_HEAD(&hash_list);
1896 
1897 	/*
	 * Register the algorithms the device supports, two variants each:
	 * the keyed hmac version and the unkeyed hash.  Individual
	 * failures are logged and skipped, so err reflects only the last
	 * registration attempt.
	 */
1898 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1899 		struct caam_hash_alg *t_alg;
1900 		struct caam_hash_template *alg = driver_hash + i;
1901 
1902 		/* If MD size is not supported by device, skip registration */
1903 		if (alg->template_ahash.halg.digestsize > md_limit)
1904 			continue;
1905 
1906 		/* register hmac version */
1907 		t_alg = caam_hash_alloc(alg, true);
1908 		if (IS_ERR(t_alg)) {
1909 			err = PTR_ERR(t_alg);
1910 			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
1911 			continue;
1912 		}
1913 
1914 		err = crypto_register_ahash(&t_alg->ahash_alg);
1915 		if (err) {
1916 			pr_warn("%s alg registration failed: %d\n",
1917 				t_alg->ahash_alg.halg.base.cra_driver_name,
1918 				err);
1919 			kfree(t_alg);
1920 		} else {
1921 			list_add_tail(&t_alg->entry, &hash_list);
		}
1922 
1923 		/* register unkeyed version */
1924 		t_alg = caam_hash_alloc(alg, false);
1925 		if (IS_ERR(t_alg)) {
1926 			err = PTR_ERR(t_alg);
1927 			pr_warn("%s alg allocation failed\n", alg->driver_name);
1928 			continue;
1929 		}
1930 
1931 		err = crypto_register_ahash(&t_alg->ahash_alg);
1932 		if (err) {
1933 			pr_warn("%s alg registration failed: %d\n",
1934 				t_alg->ahash_alg.halg.base.cra_driver_name,
1935 				err);
1936 			kfree(t_alg);
1937 		} else {
1938 			list_add_tail(&t_alg->entry, &hash_list);
		}
1939 	}
1940 
1941 	put_device(ctrldev);	/* balances of_find_device_by_node() above */
	return err;
1942 }
1943 
1944 module_init(caam_algapi_hash_init);
1945 module_exit(caam_algapi_hash_exit);
1946 
1947 MODULE_LICENSE("GPL");
1948 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1949 MODULE_AUTHOR("Freescale Semiconductor - NMG");
1950