xref: /linux/drivers/crypto/caam/caamhash.c (revision 7d07de2c18abd95f72efb28f78a4825e0fc1aa6a)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * caam - Freescale FSL CAAM support for ahash functions of crypto API
4  *
5  * Copyright 2011 Freescale Semiconductor, Inc.
6  * Copyright 2018-2019 NXP
7  *
8  * Based on caamalg.c crypto API driver.
9  *
10  * relationship of digest job descriptor or first job descriptor after init to
11  * shared descriptors:
12  *
13  * ---------------                     ---------------
14  * | JobDesc #1  |-------------------->|  ShareDesc  |
15  * | *(packet 1) |                     |  (hashKey)  |
16  * ---------------                     | (operation) |
17  *                                     ---------------
18  *
19  * relationship of subsequent job descriptors to shared descriptors:
20  *
21  * ---------------                     ---------------
22  * | JobDesc #2  |-------------------->|  ShareDesc  |
23  * | *(packet 2) |      |------------->|  (hashKey)  |
24  * ---------------      |    |-------->| (operation) |
25  *       .              |    |         | (load ctx2) |
26  *       .              |    |         ---------------
27  * ---------------      |    |
28  * | JobDesc #3  |------|    |
29  * | *(packet 3) |           |
30  * ---------------           |
31  *       .                   |
32  *       .                   |
33  * ---------------           |
34  * | JobDesc #4  |------------
35  * | *(packet 4) |
36  * ---------------
37  *
38  * The SharedDesc never changes for a connection unless rekeyed, but
39  * each packet will likely be in a different place. So all we need
40  * to know to process the packet is where the input is, where the
41  * output goes, and what context we want to process with. Context is
42  * in the SharedDesc, packet references in the JobDesc.
43  *
44  * So, a job desc looks like:
45  *
46  * ---------------------
47  * | Header            |
48  * | ShareDesc Pointer |
49  * | SEQ_OUT_PTR       |
50  * | (output buffer)   |
51  * | (output length)   |
52  * | SEQ_IN_PTR        |
53  * | (input buffer)    |
54  * | (input length)    |
55  * ---------------------
56  */
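/*
 * Illustrative sketch (not part of this driver): a kernel user reaches these
 * algorithms through the generic ahash API, e.g. by name "sha256" or
 * "hmac(sha256)". "data"/"len" below stand for the caller's buffer; error
 * handling and freeing are omitted:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 * Each such request is turned into one (digest) or several (update/final)
 * of the job descriptors shown above.
 */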
57 
58 #include "compat.h"
59 
60 #include "regs.h"
61 #include "intern.h"
62 #include "desc_constr.h"
63 #include "jr.h"
64 #include "error.h"
65 #include "sg_sw_sec4.h"
66 #include "key_gen.h"
67 #include "caamhash_desc.h"
68 
69 #define CAAM_CRA_PRIORITY		3000
70 
71 /* max hash key is max split key size */
72 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
73 
74 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
75 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
76 
77 #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
78 					 CAAM_MAX_HASH_KEY_SIZE)
79 #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
80 
81 /* caam context sizes for hashes: running digest + 8 */
82 #define HASH_MSG_LEN			8
83 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
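/*
 * For SHA-256, for instance, ctx_len = 8 + 32 = 40 bytes: the 32-byte
 * running digest plus an 8-byte running message length (cf. runninglen[]
 * in caam_hash_cra_init()).
 */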
84 
85 static struct list_head hash_list;
86 
87 /* ahash per-session context */
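/*
 * Note: each shared descriptor and the key sit on their own cache line on
 * purpose; they are DMA-mapped as a single region in caam_hash_cra_init()
 * but synced to the device individually, so per-buffer cache maintenance
 * should not spill into a neighbouring buffer.
 */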
88 struct caam_hash_ctx {
89 	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
90 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
91 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
92 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
93 	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
94 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
95 	dma_addr_t sh_desc_update_first_dma;
96 	dma_addr_t sh_desc_fin_dma;
97 	dma_addr_t sh_desc_digest_dma;
98 	enum dma_data_direction dir;
99 	enum dma_data_direction key_dir;
100 	struct device *jrdev;
101 	int ctx_len;
102 	struct alginfo adata;
103 };
104 
105 /* ahash state */
106 struct caam_hash_state {
107 	dma_addr_t buf_dma;
108 	dma_addr_t ctx_dma;
109 	int ctx_dma_len;
110 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
111 	int buflen;
112 	int next_buflen;
113 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
114 	int (*update)(struct ahash_request *req);
115 	int (*final)(struct ahash_request *req);
116 	int (*finup)(struct ahash_request *req);
117 };
118 
119 struct caam_export_state {
120 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
121 	u8 caam_ctx[MAX_CTX_LEN];
122 	int buflen;
123 	int (*update)(struct ahash_request *req);
124 	int (*final)(struct ahash_request *req);
125 	int (*finup)(struct ahash_request *req);
126 };
127 
128 static inline bool is_cmac_aes(u32 algtype)
129 {
130 	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
131 	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
132 }
133 /* Common job descriptor seq in/out ptr routines */
134 
135 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
136 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
137 				      struct caam_hash_state *state,
138 				      int ctx_len)
139 {
140 	state->ctx_dma_len = ctx_len;
141 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
142 					ctx_len, DMA_FROM_DEVICE);
143 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
144 		dev_err(jrdev, "unable to map ctx\n");
145 		state->ctx_dma = 0;
146 		return -ENOMEM;
147 	}
148 
149 	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
150 
151 	return 0;
152 }
153 
154 /* Map current buffer in state (if length > 0) and put it in link table */
155 static inline int buf_map_to_sec4_sg(struct device *jrdev,
156 				     struct sec4_sg_entry *sec4_sg,
157 				     struct caam_hash_state *state)
158 {
159 	int buflen = state->buflen;
160 
161 	if (!buflen)
162 		return 0;
163 
164 	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
165 					DMA_TO_DEVICE);
166 	if (dma_mapping_error(jrdev, state->buf_dma)) {
167 		dev_err(jrdev, "unable to map buf\n");
168 		state->buf_dma = 0;
169 		return -ENOMEM;
170 	}
171 
172 	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
173 
174 	return 0;
175 }
176 
177 /* Map state->caam_ctx, and add it to link table */
178 static inline int ctx_map_to_sec4_sg(struct device *jrdev,
179 				     struct caam_hash_state *state, int ctx_len,
180 				     struct sec4_sg_entry *sec4_sg, u32 flag)
181 {
182 	state->ctx_dma_len = ctx_len;
183 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
184 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
185 		dev_err(jrdev, "unable to map ctx\n");
186 		state->ctx_dma = 0;
187 		return -ENOMEM;
188 	}
189 
190 	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
191 
192 	return 0;
193 }
194 
195 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
196 {
197 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
198 	int digestsize = crypto_ahash_digestsize(ahash);
199 	struct device *jrdev = ctx->jrdev;
200 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
201 	u32 *desc;
202 
203 	ctx->adata.key_virt = ctx->key;
204 
205 	/* ahash_update shared descriptor */
206 	desc = ctx->sh_desc_update;
207 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
208 			  ctx->ctx_len, true, ctrlpriv->era);
209 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
210 				   desc_bytes(desc), ctx->dir);
211 
212 	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
213 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
214 			     1);
215 
216 	/* ahash_update_first shared descriptor */
217 	desc = ctx->sh_desc_update_first;
218 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
219 			  ctx->ctx_len, false, ctrlpriv->era);
220 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
221 				   desc_bytes(desc), ctx->dir);
222 	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
223 			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
224 			     desc_bytes(desc), 1);
225 
226 	/* ahash_final shared descriptor */
227 	desc = ctx->sh_desc_fin;
228 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
229 			  ctx->ctx_len, true, ctrlpriv->era);
230 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
231 				   desc_bytes(desc), ctx->dir);
232 
233 	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
234 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
235 			     desc_bytes(desc), 1);
236 
237 	/* ahash_digest shared descriptor */
238 	desc = ctx->sh_desc_digest;
239 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
240 			  ctx->ctx_len, false, ctrlpriv->era);
241 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
242 				   desc_bytes(desc), ctx->dir);
243 
244 	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
245 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
246 			     desc_bytes(desc), 1);
247 
248 	return 0;
249 }
250 
251 static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
252 {
253 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
254 	int digestsize = crypto_ahash_digestsize(ahash);
255 	struct device *jrdev = ctx->jrdev;
256 	u32 *desc;
257 
258 	/* shared descriptor for ahash_update */
259 	desc = ctx->sh_desc_update;
260 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
261 			    ctx->ctx_len, ctx->ctx_len);
262 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
263 				   desc_bytes(desc), ctx->dir);
264 	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
265 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
266 			     1);
267 
268 	/* shared descriptor for ahash_{final,finup} */
269 	desc = ctx->sh_desc_fin;
270 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
271 			    digestsize, ctx->ctx_len);
272 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
273 				   desc_bytes(desc), ctx->dir);
274 	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
275 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
276 			     1);
277 
278 	/* key is immediate data for INIT and INITFINAL states */
279 	ctx->adata.key_virt = ctx->key;
280 
281 	/* shared descriptor for first invocation of ahash_update */
282 	desc = ctx->sh_desc_update_first;
283 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
284 			    ctx->ctx_len);
285 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
286 				   desc_bytes(desc), ctx->dir);
287 	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
288 			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
289 			     desc_bytes(desc), 1);
290 
291 	/* shared descriptor for ahash_digest */
292 	desc = ctx->sh_desc_digest;
293 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
294 			    digestsize, ctx->ctx_len);
295 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
296 				   desc_bytes(desc), ctx->dir);
297 	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
298 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
299 			     1);
300 	return 0;
301 }
302 
303 static int acmac_set_sh_desc(struct crypto_ahash *ahash)
304 {
305 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
306 	int digestsize = crypto_ahash_digestsize(ahash);
307 	struct device *jrdev = ctx->jrdev;
308 	u32 *desc;
309 
310 	/* shared descriptor for ahash_update */
311 	desc = ctx->sh_desc_update;
312 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
313 			    ctx->ctx_len, ctx->ctx_len);
314 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
315 				   desc_bytes(desc), ctx->dir);
316 	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
317 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
318 			     desc_bytes(desc), 1);
319 
320 	/* shared descriptor for ahash_{final,finup} */
321 	desc = ctx->sh_desc_fin;
322 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
323 			    digestsize, ctx->ctx_len);
324 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
325 				   desc_bytes(desc), ctx->dir);
326 	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
327 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
328 			     desc_bytes(desc), 1);
329 
330 	/* shared descriptor for first invocation of ahash_update */
331 	desc = ctx->sh_desc_update_first;
332 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
333 			    ctx->ctx_len);
334 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
335 				   desc_bytes(desc), ctx->dir);
336 	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
337 			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
338 			     desc_bytes(desc), 1);
339 
340 	/* shared descriptor for ahash_digest */
341 	desc = ctx->sh_desc_digest;
342 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
343 			    digestsize, ctx->ctx_len);
344 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
345 				   desc_bytes(desc), ctx->dir);
346 	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
347 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
348 			     desc_bytes(desc), 1);
349 
350 	return 0;
351 }
352 
353 /* Digest the key if it is too long, reducing it to digestsize bytes */
354 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
355 			   u32 digestsize)
356 {
357 	struct device *jrdev = ctx->jrdev;
358 	u32 *desc;
359 	struct split_key_result result;
360 	dma_addr_t key_dma;
361 	int ret;
362 
363 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
364 	if (!desc) {
365 		dev_err(jrdev, "unable to allocate key input memory\n");
366 		return -ENOMEM;
367 	}
368 
369 	init_job_desc(desc, 0);
370 
371 	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
372 	if (dma_mapping_error(jrdev, key_dma)) {
373 		dev_err(jrdev, "unable to map key memory\n");
374 		kfree(desc);
375 		return -ENOMEM;
376 	}
377 
378 	/* Job descriptor to perform unkeyed hash on key_in */
379 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
380 			 OP_ALG_AS_INITFINAL);
381 	append_seq_in_ptr(desc, key_dma, *keylen, 0);
382 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
383 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
384 	append_seq_out_ptr(desc, key_dma, digestsize, 0);
385 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
386 			 LDST_SRCDST_BYTE_CONTEXT);
387 
388 	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
389 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
390 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
391 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
392 			     1);
393 
394 	result.err = 0;
395 	init_completion(&result.completion);
396 
397 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
398 	if (!ret) {
399 		/* in progress */
400 		wait_for_completion(&result.completion);
401 		ret = result.err;
402 
403 		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
404 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
405 				     digestsize, 1);
406 	}
407 	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
408 
409 	*keylen = digestsize;
410 
411 	kfree(desc);
412 
413 	return ret;
414 }
415 
416 static int ahash_setkey(struct crypto_ahash *ahash,
417 			const u8 *key, unsigned int keylen)
418 {
419 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
420 	struct device *jrdev = ctx->jrdev;
421 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
422 	int digestsize = crypto_ahash_digestsize(ahash);
423 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
424 	int ret;
425 	u8 *hashed_key = NULL;
426 
427 	dev_dbg(jrdev, "keylen %d\n", keylen);
428 
429 	if (keylen > blocksize) {
430 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
431 		if (!hashed_key)
432 			return -ENOMEM;
433 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
434 		if (ret)
435 			goto bad_free_key;
436 		key = hashed_key;
437 	}
438 
439 	/*
440 	 * If DKP is supported, use it in the shared descriptor to generate
441 	 * the split key.
442 	 */
443 	if (ctrlpriv->era >= 6) {
444 		ctx->adata.key_inline = true;
445 		ctx->adata.keylen = keylen;
446 		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
447 						      OP_ALG_ALGSEL_MASK);
448 
449 		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
450 			goto bad_free_key;
451 
452 		memcpy(ctx->key, key, keylen);
453 
454 		/*
455 		 * In case |user key| > |derived key|, using DKP<imm,imm>
456 		 * would result in invalid opcodes (last bytes of user key) in
457 		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
458 		 * virtual and dma key addresses are needed.
459 		 */
460 		if (keylen > ctx->adata.keylen_pad)
461 			dma_sync_single_for_device(ctx->jrdev,
462 						   ctx->adata.key_dma,
463 						   ctx->adata.keylen_pad,
464 						   DMA_TO_DEVICE);
465 	} else {
466 		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
467 				    keylen, CAAM_MAX_HASH_KEY_SIZE);
468 		if (ret)
469 			goto bad_free_key;
470 	}
471 
472 	kfree(hashed_key);
473 	return ahash_set_sh_desc(ahash);
474  bad_free_key:
475 	kfree(hashed_key);
476 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
477 	return -EINVAL;
478 }
479 
480 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
481 			unsigned int keylen)
482 {
483 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
484 	struct device *jrdev = ctx->jrdev;
485 
486 	if (keylen != AES_KEYSIZE_128) {
487 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
488 		return -EINVAL;
489 	}
490 
491 	memcpy(ctx->key, key, keylen);
492 	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
493 				   DMA_TO_DEVICE);
494 	ctx->adata.keylen = keylen;
495 
496 	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
497 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
498 
499 	return axcbc_set_sh_desc(ahash);
500 }
501 
502 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
503 			unsigned int keylen)
504 {
505 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
506 	int err;
507 
508 	err = aes_check_keylen(keylen);
509 	if (err) {
510 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
511 		return err;
512 	}
513 
514 	/* key is immediate data for all cmac shared descriptors */
515 	ctx->adata.key_virt = key;
516 	ctx->adata.keylen = keylen;
517 
518 	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
519 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
520 
521 	return acmac_set_sh_desc(ahash);
522 }
523 
524 /*
525  * ahash_edesc - s/w-extended ahash descriptor
526  * @sec4_sg_dma: physical mapped address of h/w link table
527  * @src_nents: number of segments in input scatterlist
528  * @sec4_sg_bytes: length of dma mapped sec4_sg space
529  * @hw_desc: the h/w job descriptor followed by any referenced link tables
530  * @sec4_sg: h/w link table
531  */
532 struct ahash_edesc {
533 	dma_addr_t sec4_sg_dma;
534 	int src_nents;
535 	int sec4_sg_bytes;
536 	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
537 	struct sec4_sg_entry sec4_sg[];
538 };
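/*
 * The link table entries live in the same allocation as the job descriptor:
 * ahash_edesc_alloc() kzalloc()s sizeof(*edesc) plus sg_num *
 * sizeof(struct sec4_sg_entry), so sec4_sg[] simply indexes that trailing
 * space, which is DMA-mapped separately when a link table is actually used.
 */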
539 
540 static inline void ahash_unmap(struct device *dev,
541 			struct ahash_edesc *edesc,
542 			struct ahash_request *req, int dst_len)
543 {
544 	struct caam_hash_state *state = ahash_request_ctx(req);
545 
546 	if (edesc->src_nents)
547 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
548 
549 	if (edesc->sec4_sg_bytes)
550 		dma_unmap_single(dev, edesc->sec4_sg_dma,
551 				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
552 
553 	if (state->buf_dma) {
554 		dma_unmap_single(dev, state->buf_dma, state->buflen,
555 				 DMA_TO_DEVICE);
556 		state->buf_dma = 0;
557 	}
558 }
559 
560 static inline void ahash_unmap_ctx(struct device *dev,
561 			struct ahash_edesc *edesc,
562 			struct ahash_request *req, int dst_len, u32 flag)
563 {
564 	struct caam_hash_state *state = ahash_request_ctx(req);
565 
566 	if (state->ctx_dma) {
567 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
568 		state->ctx_dma = 0;
569 	}
570 	ahash_unmap(dev, edesc, req, dst_len);
571 }
572 
573 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
574 		       void *context)
575 {
576 	struct ahash_request *req = context;
577 	struct ahash_edesc *edesc;
578 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
579 	int digestsize = crypto_ahash_digestsize(ahash);
580 	struct caam_hash_state *state = ahash_request_ctx(req);
581 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
582 	int ecode = 0;
583 
584 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
585 
586 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
587 	if (err)
588 		ecode = caam_jr_strstatus(jrdev, err);
589 
590 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
591 	memcpy(req->result, state->caam_ctx, digestsize);
592 	kfree(edesc);
593 
594 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
595 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
596 			     ctx->ctx_len, 1);
597 
598 	req->base.complete(&req->base, ecode);
599 }
600 
601 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
602 			    void *context)
603 {
604 	struct ahash_request *req = context;
605 	struct ahash_edesc *edesc;
606 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
607 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
608 	struct caam_hash_state *state = ahash_request_ctx(req);
609 	int digestsize = crypto_ahash_digestsize(ahash);
610 	int ecode = 0;
611 
612 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
613 
614 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
615 	if (err)
616 		ecode = caam_jr_strstatus(jrdev, err);
617 
618 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
619 	kfree(edesc);
620 
621 	scatterwalk_map_and_copy(state->buf, req->src,
622 				 req->nbytes - state->next_buflen,
623 				 state->next_buflen, 0);
624 	state->buflen = state->next_buflen;
625 
626 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
627 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
628 			     state->buflen, 1);
629 
630 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
631 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
632 			     ctx->ctx_len, 1);
633 	if (req->result)
634 		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
635 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
636 				     digestsize, 1);
637 
638 	req->base.complete(&req->base, ecode);
639 }
640 
641 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
642 			       void *context)
643 {
644 	struct ahash_request *req = context;
645 	struct ahash_edesc *edesc;
646 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
647 	int digestsize = crypto_ahash_digestsize(ahash);
648 	struct caam_hash_state *state = ahash_request_ctx(req);
649 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
650 	int ecode = 0;
651 
652 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
653 
654 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
655 	if (err)
656 		ecode = caam_jr_strstatus(jrdev, err);
657 
658 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
659 	memcpy(req->result, state->caam_ctx, digestsize);
660 	kfree(edesc);
661 
662 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
663 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
664 			     ctx->ctx_len, 1);
665 
666 	req->base.complete(&req->base, ecode);
667 }
668 
669 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
670 			       void *context)
671 {
672 	struct ahash_request *req = context;
673 	struct ahash_edesc *edesc;
674 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
675 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
676 	struct caam_hash_state *state = ahash_request_ctx(req);
677 	int digestsize = crypto_ahash_digestsize(ahash);
678 	int ecode = 0;
679 
680 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
681 
682 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
683 	if (err)
684 		ecode = caam_jr_strstatus(jrdev, err);
685 
686 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
687 	kfree(edesc);
688 
689 	scatterwalk_map_and_copy(state->buf, req->src,
690 				 req->nbytes - state->next_buflen,
691 				 state->next_buflen, 0);
692 	state->buflen = state->next_buflen;
693 
694 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
695 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
696 			     state->buflen, 1);
697 
698 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
699 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
700 			     ctx->ctx_len, 1);
701 	if (req->result)
702 		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
703 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
704 				     digestsize, 1);
705 
706 	req->base.complete(&req->base, ecode);
707 }
708 
709 /*
710  * Allocate an extended descriptor, which contains the hardware job descriptor
711  * and space for a hardware link table containing sg_num entries.
712  */
713 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
714 					     int sg_num, u32 *sh_desc,
715 					     dma_addr_t sh_desc_dma,
716 					     gfp_t flags)
717 {
718 	struct ahash_edesc *edesc;
719 	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
720 
721 	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
722 	if (!edesc) {
723 		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
724 		return NULL;
725 	}
726 
727 	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
728 			     HDR_SHARE_DEFER | HDR_REVERSE);
729 
730 	return edesc;
731 }
732 
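/*
 * Append the SEQ IN PTR for the request source: a single mapped segment with
 * no preceding entries is referenced directly, otherwise a sec4 link table
 * is built starting at first_sg and LDST_SGF tells CAAM to walk it.
 */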
733 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
734 			       struct ahash_edesc *edesc,
735 			       struct ahash_request *req, int nents,
736 			       unsigned int first_sg,
737 			       unsigned int first_bytes, size_t to_hash)
738 {
739 	dma_addr_t src_dma;
740 	u32 options;
741 
742 	if (nents > 1 || first_sg) {
743 		struct sec4_sg_entry *sg = edesc->sec4_sg;
744 		unsigned int sgsize = sizeof(*sg) *
745 				      pad_sg_nents(first_sg + nents);
746 
747 		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
748 
749 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
750 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
751 			dev_err(ctx->jrdev, "unable to map S/G table\n");
752 			return -ENOMEM;
753 		}
754 
755 		edesc->sec4_sg_bytes = sgsize;
756 		edesc->sec4_sg_dma = src_dma;
757 		options = LDST_SGF;
758 	} else {
759 		src_dma = sg_dma_address(req->src);
760 		options = 0;
761 	}
762 
763 	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
764 			  options);
765 
766 	return 0;
767 }
768 
769 /* submit update job descriptor */
770 static int ahash_update_ctx(struct ahash_request *req)
771 {
772 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
773 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
774 	struct caam_hash_state *state = ahash_request_ctx(req);
775 	struct device *jrdev = ctx->jrdev;
776 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
777 		       GFP_KERNEL : GFP_ATOMIC;
778 	u8 *buf = state->buf;
779 	int *buflen = &state->buflen;
780 	int *next_buflen = &state->next_buflen;
781 	int blocksize = crypto_ahash_blocksize(ahash);
782 	int in_len = *buflen + req->nbytes, to_hash;
783 	u32 *desc;
784 	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
785 	struct ahash_edesc *edesc;
786 	int ret = 0;
787 
788 	*next_buflen = in_len & (blocksize - 1);
789 	to_hash = in_len - *next_buflen;
790 
791 	/*
792 	 * For XCBC and CMAC, if to_hash is multiple of block size,
793 	 * keep last block in internal buffer
794 	 */
795 	if ((is_xcbc_aes(ctx->adata.algtype) ||
796 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
797 	     (*next_buflen == 0)) {
798 		*next_buflen = blocksize;
799 		to_hash -= blocksize;
800 	}
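	/*
	 * Holding a full block back means final/finup always has data to
	 * hash; XCBC/CMAC apply their special last-block (subkey) processing
	 * in the FINALIZE job, so that block must not be consumed by an
	 * UPDATE job.
	 */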
801 
802 	if (to_hash) {
803 		int pad_nents;
804 		int src_len = req->nbytes - *next_buflen;
805 
806 		src_nents = sg_nents_for_len(req->src, src_len);
807 		if (src_nents < 0) {
808 			dev_err(jrdev, "Invalid number of src SG.\n");
809 			return src_nents;
810 		}
811 
812 		if (src_nents) {
813 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
814 						  DMA_TO_DEVICE);
815 			if (!mapped_nents) {
816 				dev_err(jrdev, "unable to DMA map source\n");
817 				return -ENOMEM;
818 			}
819 		} else {
820 			mapped_nents = 0;
821 		}
822 
823 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
824 		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
825 		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
826 
827 		/*
828 		 * allocate space for base edesc and hw desc commands,
829 		 * link tables
830 		 */
831 		edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
832 					  ctx->sh_desc_update_dma, flags);
833 		if (!edesc) {
834 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
835 			return -ENOMEM;
836 		}
837 
838 		edesc->src_nents = src_nents;
839 		edesc->sec4_sg_bytes = sec4_sg_bytes;
840 
841 		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
842 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
843 		if (ret)
844 			goto unmap_ctx;
845 
846 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
847 		if (ret)
848 			goto unmap_ctx;
849 
850 		if (mapped_nents)
851 			sg_to_sec4_sg_last(req->src, src_len,
852 					   edesc->sec4_sg + sec4_sg_src_index,
853 					   0);
854 		else
855 			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
856 					    1);
857 
858 		desc = edesc->hw_desc;
859 
860 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
861 						     sec4_sg_bytes,
862 						     DMA_TO_DEVICE);
863 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
864 			dev_err(jrdev, "unable to map S/G table\n");
865 			ret = -ENOMEM;
866 			goto unmap_ctx;
867 		}
868 
869 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
870 				       to_hash, LDST_SGF);
871 
872 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
873 
874 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
875 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
876 				     desc_bytes(desc), 1);
877 
878 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
879 		if (ret)
880 			goto unmap_ctx;
881 
882 		ret = -EINPROGRESS;
883 	} else if (*next_buflen) {
884 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
885 					 req->nbytes, 0);
886 		*buflen = *next_buflen;
887 
888 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
889 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
890 				     *buflen, 1);
891 	}
892 
893 	return ret;
894 unmap_ctx:
895 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
896 	kfree(edesc);
897 	return ret;
898 }
899 
900 static int ahash_final_ctx(struct ahash_request *req)
901 {
902 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
903 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
904 	struct caam_hash_state *state = ahash_request_ctx(req);
905 	struct device *jrdev = ctx->jrdev;
906 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
907 		       GFP_KERNEL : GFP_ATOMIC;
908 	int buflen = state->buflen;
909 	u32 *desc;
910 	int sec4_sg_bytes;
911 	int digestsize = crypto_ahash_digestsize(ahash);
912 	struct ahash_edesc *edesc;
913 	int ret;
914 
915 	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
916 			sizeof(struct sec4_sg_entry);
917 
918 	/* allocate space for base edesc and hw desc commands, link tables */
919 	edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
920 				  ctx->sh_desc_fin_dma, flags);
921 	if (!edesc)
922 		return -ENOMEM;
923 
924 	desc = edesc->hw_desc;
925 
926 	edesc->sec4_sg_bytes = sec4_sg_bytes;
927 
928 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
929 				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
930 	if (ret)
931 		goto unmap_ctx;
932 
933 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
934 	if (ret)
935 		goto unmap_ctx;
936 
937 	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
938 
939 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
940 					    sec4_sg_bytes, DMA_TO_DEVICE);
941 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
942 		dev_err(jrdev, "unable to map S/G table\n");
943 		ret = -ENOMEM;
944 		goto unmap_ctx;
945 	}
946 
947 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
948 			  LDST_SGF);
949 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
950 
951 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
952 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
953 			     1);
954 
955 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
956 	if (ret)
957 		goto unmap_ctx;
958 
959 	return -EINPROGRESS;
960  unmap_ctx:
961 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
962 	kfree(edesc);
963 	return ret;
964 }
965 
966 static int ahash_finup_ctx(struct ahash_request *req)
967 {
968 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
969 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
970 	struct caam_hash_state *state = ahash_request_ctx(req);
971 	struct device *jrdev = ctx->jrdev;
972 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
973 		       GFP_KERNEL : GFP_ATOMIC;
974 	int buflen = state->buflen;
975 	u32 *desc;
976 	int sec4_sg_src_index;
977 	int src_nents, mapped_nents;
978 	int digestsize = crypto_ahash_digestsize(ahash);
979 	struct ahash_edesc *edesc;
980 	int ret;
981 
982 	src_nents = sg_nents_for_len(req->src, req->nbytes);
983 	if (src_nents < 0) {
984 		dev_err(jrdev, "Invalid number of src SG.\n");
985 		return src_nents;
986 	}
987 
988 	if (src_nents) {
989 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
990 					  DMA_TO_DEVICE);
991 		if (!mapped_nents) {
992 			dev_err(jrdev, "unable to DMA map source\n");
993 			return -ENOMEM;
994 		}
995 	} else {
996 		mapped_nents = 0;
997 	}
998 
999 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1000 
1001 	/* allocate space for base edesc and hw desc commands, link tables */
1002 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
1003 				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
1004 				  flags);
1005 	if (!edesc) {
1006 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1007 		return -ENOMEM;
1008 	}
1009 
1010 	desc = edesc->hw_desc;
1011 
1012 	edesc->src_nents = src_nents;
1013 
1014 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
1015 				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
1016 	if (ret)
1017 		goto unmap_ctx;
1018 
1019 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
1020 	if (ret)
1021 		goto unmap_ctx;
1022 
1023 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
1024 				  sec4_sg_src_index, ctx->ctx_len + buflen,
1025 				  req->nbytes);
1026 	if (ret)
1027 		goto unmap_ctx;
1028 
1029 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
1030 
1031 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1032 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1033 			     1);
1034 
1035 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1036 	if (ret)
1037 		goto unmap_ctx;
1038 
1039 	return -EINPROGRESS;
1040  unmap_ctx:
1041 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1042 	kfree(edesc);
1043 	return ret;
1044 }
1045 
1046 static int ahash_digest(struct ahash_request *req)
1047 {
1048 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1049 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1050 	struct caam_hash_state *state = ahash_request_ctx(req);
1051 	struct device *jrdev = ctx->jrdev;
1052 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1053 		       GFP_KERNEL : GFP_ATOMIC;
1054 	u32 *desc;
1055 	int digestsize = crypto_ahash_digestsize(ahash);
1056 	int src_nents, mapped_nents;
1057 	struct ahash_edesc *edesc;
1058 	int ret;
1059 
1060 	state->buf_dma = 0;
1061 
1062 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1063 	if (src_nents < 0) {
1064 		dev_err(jrdev, "Invalid number of src SG.\n");
1065 		return src_nents;
1066 	}
1067 
1068 	if (src_nents) {
1069 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1070 					  DMA_TO_DEVICE);
1071 		if (!mapped_nents) {
1072 			dev_err(jrdev, "unable to map source for DMA\n");
1073 			return -ENOMEM;
1074 		}
1075 	} else {
1076 		mapped_nents = 0;
1077 	}
1078 
1079 	/* allocate space for base edesc and hw desc commands, link tables */
1080 	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
1081 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
1082 				  flags);
1083 	if (!edesc) {
1084 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1085 		return -ENOMEM;
1086 	}
1087 
1088 	edesc->src_nents = src_nents;
1089 
1090 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1091 				  req->nbytes);
1092 	if (ret) {
1093 		ahash_unmap(jrdev, edesc, req, digestsize);
1094 		kfree(edesc);
1095 		return ret;
1096 	}
1097 
1098 	desc = edesc->hw_desc;
1099 
1100 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1101 	if (ret) {
1102 		ahash_unmap(jrdev, edesc, req, digestsize);
1103 		kfree(edesc);
1104 		return -ENOMEM;
1105 	}
1106 
1107 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1108 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1109 			     1);
1110 
1111 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1112 	if (!ret) {
1113 		ret = -EINPROGRESS;
1114 	} else {
1115 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1116 		kfree(edesc);
1117 	}
1118 
1119 	return ret;
1120 }
1121 
1122 /* submit ahash final if it is the first job descriptor */
1123 static int ahash_final_no_ctx(struct ahash_request *req)
1124 {
1125 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1126 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1127 	struct caam_hash_state *state = ahash_request_ctx(req);
1128 	struct device *jrdev = ctx->jrdev;
1129 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1130 		       GFP_KERNEL : GFP_ATOMIC;
1131 	u8 *buf = state->buf;
1132 	int buflen = state->buflen;
1133 	u32 *desc;
1134 	int digestsize = crypto_ahash_digestsize(ahash);
1135 	struct ahash_edesc *edesc;
1136 	int ret;
1137 
1138 	/* allocate space for base edesc and hw desc commands, link tables */
1139 	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
1140 				  ctx->sh_desc_digest_dma, flags);
1141 	if (!edesc)
1142 		return -ENOMEM;
1143 
1144 	desc = edesc->hw_desc;
1145 
1146 	if (buflen) {
1147 		state->buf_dma = dma_map_single(jrdev, buf, buflen,
1148 						DMA_TO_DEVICE);
1149 		if (dma_mapping_error(jrdev, state->buf_dma)) {
1150 			dev_err(jrdev, "unable to map src\n");
1151 			goto unmap;
1152 		}
1153 
1154 		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1155 	}
1156 
1157 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1158 	if (ret)
1159 		goto unmap;
1160 
1161 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1162 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1163 			     1);
1164 
1165 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1166 	if (!ret) {
1167 		ret = -EINPROGRESS;
1168 	} else {
1169 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1170 		kfree(edesc);
1171 	}
1172 
1173 	return ret;
1174  unmap:
1175 	ahash_unmap(jrdev, edesc, req, digestsize);
1176 	kfree(edesc);
1177 	return -ENOMEM;
1178 
1179 }
1180 
1181 /* submit ahash update if it is the first job descriptor after init */
1182 static int ahash_update_no_ctx(struct ahash_request *req)
1183 {
1184 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1185 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1186 	struct caam_hash_state *state = ahash_request_ctx(req);
1187 	struct device *jrdev = ctx->jrdev;
1188 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1189 		       GFP_KERNEL : GFP_ATOMIC;
1190 	u8 *buf = state->buf;
1191 	int *buflen = &state->buflen;
1192 	int *next_buflen = &state->next_buflen;
1193 	int blocksize = crypto_ahash_blocksize(ahash);
1194 	int in_len = *buflen + req->nbytes, to_hash;
1195 	int sec4_sg_bytes, src_nents, mapped_nents;
1196 	struct ahash_edesc *edesc;
1197 	u32 *desc;
1198 	int ret = 0;
1199 
1200 	*next_buflen = in_len & (blocksize - 1);
1201 	to_hash = in_len - *next_buflen;
1202 
1203 	/*
1204 	 * For XCBC and CMAC, if to_hash is multiple of block size,
1205 	 * keep last block in internal buffer
1206 	 */
1207 	if ((is_xcbc_aes(ctx->adata.algtype) ||
1208 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1209 	     (*next_buflen == 0)) {
1210 		*next_buflen = blocksize;
1211 		to_hash -= blocksize;
1212 	}
1213 
1214 	if (to_hash) {
1215 		int pad_nents;
1216 		int src_len = req->nbytes - *next_buflen;
1217 
1218 		src_nents = sg_nents_for_len(req->src, src_len);
1219 		if (src_nents < 0) {
1220 			dev_err(jrdev, "Invalid number of src SG.\n");
1221 			return src_nents;
1222 		}
1223 
1224 		if (src_nents) {
1225 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1226 						  DMA_TO_DEVICE);
1227 			if (!mapped_nents) {
1228 				dev_err(jrdev, "unable to DMA map source\n");
1229 				return -ENOMEM;
1230 			}
1231 		} else {
1232 			mapped_nents = 0;
1233 		}
1234 
1235 		pad_nents = pad_sg_nents(1 + mapped_nents);
1236 		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
1237 
1238 		/*
1239 		 * allocate space for base edesc and hw desc commands,
1240 		 * link tables
1241 		 */
1242 		edesc = ahash_edesc_alloc(ctx, pad_nents,
1243 					  ctx->sh_desc_update_first,
1244 					  ctx->sh_desc_update_first_dma,
1245 					  flags);
1246 		if (!edesc) {
1247 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1248 			return -ENOMEM;
1249 		}
1250 
1251 		edesc->src_nents = src_nents;
1252 		edesc->sec4_sg_bytes = sec4_sg_bytes;
1253 
1254 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1255 		if (ret)
1256 			goto unmap_ctx;
1257 
1258 		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1259 
1260 		desc = edesc->hw_desc;
1261 
1262 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1263 						    sec4_sg_bytes,
1264 						    DMA_TO_DEVICE);
1265 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1266 			dev_err(jrdev, "unable to map S/G table\n");
1267 			ret = -ENOMEM;
1268 			goto unmap_ctx;
1269 		}
1270 
1271 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1272 
1273 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1274 		if (ret)
1275 			goto unmap_ctx;
1276 
1277 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1278 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
1279 				     desc_bytes(desc), 1);
1280 
1281 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1282 		if (ret)
1283 			goto unmap_ctx;
1284 
1285 		ret = -EINPROGRESS;
1286 		state->update = ahash_update_ctx;
1287 		state->finup = ahash_finup_ctx;
1288 		state->final = ahash_final_ctx;
1289 	} else if (*next_buflen) {
1290 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1291 					 req->nbytes, 0);
1292 		*buflen = *next_buflen;
1293 
1294 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1295 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
1296 				     *buflen, 1);
1297 	}
1298 
1299 	return ret;
1300  unmap_ctx:
1301 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1302 	kfree(edesc);
1303 	return ret;
1304 }
1305 
1306 /* submit ahash finup if it is the first job descriptor after init */
1307 static int ahash_finup_no_ctx(struct ahash_request *req)
1308 {
1309 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1310 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1311 	struct caam_hash_state *state = ahash_request_ctx(req);
1312 	struct device *jrdev = ctx->jrdev;
1313 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1314 		       GFP_KERNEL : GFP_ATOMIC;
1315 	int buflen = state->buflen;
1316 	u32 *desc;
1317 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1318 	int digestsize = crypto_ahash_digestsize(ahash);
1319 	struct ahash_edesc *edesc;
1320 	int ret;
1321 
1322 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1323 	if (src_nents < 0) {
1324 		dev_err(jrdev, "Invalid number of src SG.\n");
1325 		return src_nents;
1326 	}
1327 
1328 	if (src_nents) {
1329 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1330 					  DMA_TO_DEVICE);
1331 		if (!mapped_nents) {
1332 			dev_err(jrdev, "unable to DMA map source\n");
1333 			return -ENOMEM;
1334 		}
1335 	} else {
1336 		mapped_nents = 0;
1337 	}
1338 
1339 	sec4_sg_src_index = 2;
1340 	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1341 			 sizeof(struct sec4_sg_entry);
1342 
1343 	/* allocate space for base edesc and hw desc commands, link tables */
1344 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
1345 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
1346 				  flags);
1347 	if (!edesc) {
1348 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1349 		return -ENOMEM;
1350 	}
1351 
1352 	desc = edesc->hw_desc;
1353 
1354 	edesc->src_nents = src_nents;
1355 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1356 
1357 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1358 	if (ret)
1359 		goto unmap;
1360 
1361 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1362 				  req->nbytes);
1363 	if (ret) {
1364 		dev_err(jrdev, "unable to map S/G table\n");
1365 		goto unmap;
1366 	}
1367 
1368 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1369 	if (ret)
1370 		goto unmap;
1371 
1372 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1373 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1374 			     1);
1375 
1376 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1377 	if (!ret) {
1378 		ret = -EINPROGRESS;
1379 	} else {
1380 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1381 		kfree(edesc);
1382 	}
1383 
1384 	return ret;
1385  unmap:
1386 	ahash_unmap(jrdev, edesc, req, digestsize);
1387 	kfree(edesc);
1388 	return -ENOMEM;
1389 
1390 }
1391 
1392 /* submit first update job descriptor after init */
1393 static int ahash_update_first(struct ahash_request *req)
1394 {
1395 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1396 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1397 	struct caam_hash_state *state = ahash_request_ctx(req);
1398 	struct device *jrdev = ctx->jrdev;
1399 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1400 		       GFP_KERNEL : GFP_ATOMIC;
1401 	u8 *buf = state->buf;
1402 	int *buflen = &state->buflen;
1403 	int *next_buflen = &state->next_buflen;
1404 	int to_hash;
1405 	int blocksize = crypto_ahash_blocksize(ahash);
1406 	u32 *desc;
1407 	int src_nents, mapped_nents;
1408 	struct ahash_edesc *edesc;
1409 	int ret = 0;
1410 
1411 	*next_buflen = req->nbytes & (blocksize - 1);
1412 	to_hash = req->nbytes - *next_buflen;
1413 
1414 	/*
1415 	 * For XCBC and CMAC, if to_hash is multiple of block size,
1416 	 * keep last block in internal buffer
1417 	 */
1418 	if ((is_xcbc_aes(ctx->adata.algtype) ||
1419 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1420 	     (*next_buflen == 0)) {
1421 		*next_buflen = blocksize;
1422 		to_hash -= blocksize;
1423 	}
1424 
1425 	if (to_hash) {
1426 		src_nents = sg_nents_for_len(req->src,
1427 					     req->nbytes - *next_buflen);
1428 		if (src_nents < 0) {
1429 			dev_err(jrdev, "Invalid number of src SG.\n");
1430 			return src_nents;
1431 		}
1432 
1433 		if (src_nents) {
1434 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1435 						  DMA_TO_DEVICE);
1436 			if (!mapped_nents) {
1437 				dev_err(jrdev, "unable to map source for DMA\n");
1438 				return -ENOMEM;
1439 			}
1440 		} else {
1441 			mapped_nents = 0;
1442 		}
1443 
1444 		/*
1445 		 * allocate space for base edesc and hw desc commands,
1446 		 * link tables
1447 		 */
1448 		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
1449 					  mapped_nents : 0,
1450 					  ctx->sh_desc_update_first,
1451 					  ctx->sh_desc_update_first_dma,
1452 					  flags);
1453 		if (!edesc) {
1454 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1455 			return -ENOMEM;
1456 		}
1457 
1458 		edesc->src_nents = src_nents;
1459 
1460 		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1461 					  to_hash);
1462 		if (ret)
1463 			goto unmap_ctx;
1464 
1465 		desc = edesc->hw_desc;
1466 
1467 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1468 		if (ret)
1469 			goto unmap_ctx;
1470 
1471 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1472 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
1473 				     desc_bytes(desc), 1);
1474 
1475 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1476 		if (ret)
1477 			goto unmap_ctx;
1478 
1479 		ret = -EINPROGRESS;
1480 		state->update = ahash_update_ctx;
1481 		state->finup = ahash_finup_ctx;
1482 		state->final = ahash_final_ctx;
1483 	} else if (*next_buflen) {
1484 		state->update = ahash_update_no_ctx;
1485 		state->finup = ahash_finup_no_ctx;
1486 		state->final = ahash_final_no_ctx;
1487 		scatterwalk_map_and_copy(buf, req->src, 0,
1488 					 req->nbytes, 0);
1489 		*buflen = *next_buflen;
1490 
1491 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1492 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
1493 				     *buflen, 1);
1494 	}
1495 
1496 	return ret;
1497  unmap_ctx:
1498 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1499 	kfree(edesc);
1500 	return ret;
1501 }
1502 
1503 static int ahash_finup_first(struct ahash_request *req)
1504 {
1505 	return ahash_digest(req);
1506 }
1507 
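/*
 * state->update/final/finup form a small state machine: ahash_init() below
 * starts with the *_first handlers (and ahash_final_no_ctx); once a job
 * descriptor has produced a running context in caam_ctx the handlers are
 * switched to the *_ctx variants, while purely buffered updates go through
 * the *_no_ctx variants first (see ahash_update_first() and
 * ahash_update_no_ctx()).
 */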
1508 static int ahash_init(struct ahash_request *req)
1509 {
1510 	struct caam_hash_state *state = ahash_request_ctx(req);
1511 
1512 	state->update = ahash_update_first;
1513 	state->finup = ahash_finup_first;
1514 	state->final = ahash_final_no_ctx;
1515 
1516 	state->ctx_dma = 0;
1517 	state->ctx_dma_len = 0;
1518 	state->buf_dma = 0;
1519 	state->buflen = 0;
1520 	state->next_buflen = 0;
1521 
1522 	return 0;
1523 }
1524 
1525 static int ahash_update(struct ahash_request *req)
1526 {
1527 	struct caam_hash_state *state = ahash_request_ctx(req);
1528 
1529 	return state->update(req);
1530 }
1531 
1532 static int ahash_finup(struct ahash_request *req)
1533 {
1534 	struct caam_hash_state *state = ahash_request_ctx(req);
1535 
1536 	return state->finup(req);
1537 }
1538 
1539 static int ahash_final(struct ahash_request *req)
1540 {
1541 	struct caam_hash_state *state = ahash_request_ctx(req);
1542 
1543 	return state->final(req);
1544 }
1545 
1546 static int ahash_export(struct ahash_request *req, void *out)
1547 {
1548 	struct caam_hash_state *state = ahash_request_ctx(req);
1549 	struct caam_export_state *export = out;
1550 	u8 *buf = state->buf;
1551 	int len = state->buflen;
1552 
1553 	memcpy(export->buf, buf, len);
1554 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1555 	export->buflen = len;
1556 	export->update = state->update;
1557 	export->final = state->final;
1558 	export->finup = state->finup;
1559 
1560 	return 0;
1561 }
1562 
1563 static int ahash_import(struct ahash_request *req, const void *in)
1564 {
1565 	struct caam_hash_state *state = ahash_request_ctx(req);
1566 	const struct caam_export_state *export = in;
1567 
1568 	memset(state, 0, sizeof(*state));
1569 	memcpy(state->buf, export->buf, export->buflen);
1570 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1571 	state->buflen = export->buflen;
1572 	state->update = export->update;
1573 	state->final = export->final;
1574 	state->finup = export->finup;
1575 
1576 	return 0;
1577 }
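/*
 * Illustrative sketch (not part of this driver): export/import let a caller
 * snapshot a partially hashed request and resume it later, possibly on a
 * different request object ("req2" is assumed here). Generic code sizes the
 * blob with crypto_ahash_statesize(); for this driver that is
 * sizeof(struct caam_export_state):
 *
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req2, state);
 */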
1578 
1579 struct caam_hash_template {
1580 	char name[CRYPTO_MAX_ALG_NAME];
1581 	char driver_name[CRYPTO_MAX_ALG_NAME];
1582 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1583 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1584 	unsigned int blocksize;
1585 	struct ahash_alg template_ahash;
1586 	u32 alg_type;
1587 };
1588 
1589 /* ahash descriptors */
1590 static struct caam_hash_template driver_hash[] = {
1591 	{
1592 		.name = "sha1",
1593 		.driver_name = "sha1-caam",
1594 		.hmac_name = "hmac(sha1)",
1595 		.hmac_driver_name = "hmac-sha1-caam",
1596 		.blocksize = SHA1_BLOCK_SIZE,
1597 		.template_ahash = {
1598 			.init = ahash_init,
1599 			.update = ahash_update,
1600 			.final = ahash_final,
1601 			.finup = ahash_finup,
1602 			.digest = ahash_digest,
1603 			.export = ahash_export,
1604 			.import = ahash_import,
1605 			.setkey = ahash_setkey,
1606 			.halg = {
1607 				.digestsize = SHA1_DIGEST_SIZE,
1608 				.statesize = sizeof(struct caam_export_state),
1609 			},
1610 		},
1611 		.alg_type = OP_ALG_ALGSEL_SHA1,
1612 	}, {
1613 		.name = "sha224",
1614 		.driver_name = "sha224-caam",
1615 		.hmac_name = "hmac(sha224)",
1616 		.hmac_driver_name = "hmac-sha224-caam",
1617 		.blocksize = SHA224_BLOCK_SIZE,
1618 		.template_ahash = {
1619 			.init = ahash_init,
1620 			.update = ahash_update,
1621 			.final = ahash_final,
1622 			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};
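
/*
 * Algorithms from the table above are reached through the generic crypto
 * API.  A minimal, hypothetical user of the hmac(sha256) offload could look
 * roughly like this (error handling omitted; "done", "sgl", "result", "key"
 * and the lengths are placeholders):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, done, NULL);
 *	ahash_request_set_crypt(req, sgl, result, nbytes);
 *	crypto_ahash_digest(req);
 *
 * crypto_ahash_digest() typically returns -EINPROGRESS here and the request
 * completes asynchronously through the callback.  With CAAM_CRA_PRIORITY the
 * driver implementation is normally preferred over the generic software one
 * for the same algorithm name.
 */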

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests, indexed by the ALGSEL field:
	 * MD5, SHA1, 224, 256, 384, 512.  SHA-224 and SHA-384 are truncated
	 * variants, so their running (internal) state is the full 32-/64-byte
	 * SHA-256/SHA-512 state; HASH_MSG_LEN adds the 8-byte running
	 * message length kept alongside it.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a job ring from the job ring driver to ensure in-order
	 * processing of crypto requests for this tfm.
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

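	/*
	 * The AES-based MACs below run on the class 1 (AES) engine and use
	 * fixed context sizes; the MDHA hashes derive theirs from the
	 * running-digest table above.
	 */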
	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

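	/*
	 * Map the key buffer up front so the shared descriptors can refer to
	 * it by DMA address; the actual key material is copied in later by
	 * the setkey() callback.
	 */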
	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

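	/*
	 * All four shared descriptors are mapped as a single contiguous
	 * region; the per-descriptor DMA handles below are offsets into it.
	 */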
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms the shared descriptors are created
	 * later, in the setkey() callback.
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

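/*
 * Undo the DMA mappings and the job ring allocation done in
 * caam_hash_cra_init().
 */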
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

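	/*
	 * hash_list is initialized only if caam_algapi_hash_init() got as
	 * far as finding an MD block; a NULL ->next means there is nothing
	 * to unregister.
	 */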
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

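/*
 * Allocate a caam_hash_alg and populate it from a driver_hash template.
 * "keyed" selects the hmac_* names and keeps the setkey hook; the unkeyed
 * variant clears it.  The caller owns (and eventually frees) the result.
 */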
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Low-power MDHA (LP256) only supports digests up to SHA-256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
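
		/*
		 * xcbc(aes) and cmac(aes) only provide keyed templates
		 * (no .name), so skip the unkeyed registration for the
		 * AES-based entries.
		 */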
		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	return err;
}