xref: /linux/drivers/crypto/caam/caamhash.c (revision 17cfcb68af3bc7d5e8ae08779b1853310a2949f3)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * caam - Freescale FSL CAAM support for ahash functions of crypto API
4  *
5  * Copyright 2011 Freescale Semiconductor, Inc.
6  * Copyright 2018-2019 NXP
7  *
8  * Based on caamalg.c crypto API driver.
9  *
10  * relationship of digest job descriptor or first job descriptor after init to
11  * shared descriptors:
12  *
13  * ---------------                     ---------------
14  * | JobDesc #1  |-------------------->|  ShareDesc  |
15  * | *(packet 1) |                     |  (hashKey)  |
16  * ---------------                     | (operation) |
17  *                                     ---------------
18  *
19  * relationship of subsequent job descriptors to shared descriptors:
20  *
21  * ---------------                     ---------------
22  * | JobDesc #2  |-------------------->|  ShareDesc  |
23  * | *(packet 2) |      |------------->|  (hashKey)  |
24  * ---------------      |    |-------->| (operation) |
25  *       .              |    |         | (load ctx2) |
26  *       .              |    |         ---------------
27  * ---------------      |    |
28  * | JobDesc #3  |------|    |
29  * | *(packet 3) |           |
30  * ---------------           |
31  *       .                   |
32  *       .                   |
33  * ---------------           |
34  * | JobDesc #4  |------------
35  * | *(packet 4) |
36  * ---------------
37  *
38  * The SharedDesc never changes for a connection unless rekeyed, but
39  * each packet will likely be in a different place. So all we need
40  * to know to process the packet is where the input is, where the
41  * output goes, and what context we want to process with. Context is
42  * in the SharedDesc, while the packet references are in the JobDesc.
43  *
44  * So, a job desc looks like:
45  *
46  * ---------------------
47  * | Header            |
48  * | ShareDesc Pointer |
49  * | SEQ_OUT_PTR       |
50  * | (output buffer)   |
51  * | (output length)   |
52  * | SEQ_IN_PTR        |
53  * | (input buffer)    |
54  * | (input length)    |
55  * ---------------------
56  */
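/*
 * Illustrative only (not part of this driver): a minimal sketch of how an
 * in-kernel user might exercise one of the hashes registered below through
 * the generic ahash API. "sha256" resolves to this driver only when its
 * priority wins; data/len/digest are placeholder buffers and error handling
 * is omitted:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */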
57 
58 #include "compat.h"
59 
60 #include "regs.h"
61 #include "intern.h"
62 #include "desc_constr.h"
63 #include "jr.h"
64 #include "error.h"
65 #include "sg_sw_sec4.h"
66 #include "key_gen.h"
67 #include "caamhash_desc.h"
68 
69 #define CAAM_CRA_PRIORITY		3000
70 
71 /* max hash key is max split key size */
72 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
73 
74 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
75 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
76 
77 #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
78 					 CAAM_MAX_HASH_KEY_SIZE)
79 #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
80 
81 /* caam context sizes for hashes: running digest + 8 bytes of message length */
82 #define HASH_MSG_LEN			8
83 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
84 
85 static struct list_head hash_list;
86 
87 /* ahash per-session context */
88 struct caam_hash_ctx {
89 	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
90 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
91 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
92 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
93 	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
94 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
95 	dma_addr_t sh_desc_update_first_dma;
96 	dma_addr_t sh_desc_fin_dma;
97 	dma_addr_t sh_desc_digest_dma;
98 	enum dma_data_direction dir;
99 	enum dma_data_direction key_dir;
100 	struct device *jrdev;
101 	int ctx_len;
102 	struct alginfo adata;
103 };
104 
105 /* ahash state */
106 struct caam_hash_state {
107 	dma_addr_t buf_dma;
108 	dma_addr_t ctx_dma;
109 	int ctx_dma_len;
110 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
111 	int buflen_0;
112 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
113 	int buflen_1;
114 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
115 	int (*update)(struct ahash_request *req);
116 	int (*final)(struct ahash_request *req);
117 	int (*finup)(struct ahash_request *req);
118 	int current_buf;
119 };
120 
121 struct caam_export_state {
122 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
123 	u8 caam_ctx[MAX_CTX_LEN];
124 	int buflen;
125 	int (*update)(struct ahash_request *req);
126 	int (*final)(struct ahash_request *req);
127 	int (*finup)(struct ahash_request *req);
128 };
129 
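/*
 * The request state keeps two bounce buffers (buf_0/buf_1) for bytes that are
 * not yet block-aligned: current_buf()/current_buflen() select the buffer fed
 * into the next job, alt_buf()/alt_buflen() the one collecting the leftover
 * tail for the job after that, and switch_buf() flips the roles once the
 * buffered data has been consumed.
 */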
130 static inline void switch_buf(struct caam_hash_state *state)
131 {
132 	state->current_buf ^= 1;
133 }
134 
135 static inline u8 *current_buf(struct caam_hash_state *state)
136 {
137 	return state->current_buf ? state->buf_1 : state->buf_0;
138 }
139 
140 static inline u8 *alt_buf(struct caam_hash_state *state)
141 {
142 	return state->current_buf ? state->buf_0 : state->buf_1;
143 }
144 
145 static inline int *current_buflen(struct caam_hash_state *state)
146 {
147 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
148 }
149 
150 static inline int *alt_buflen(struct caam_hash_state *state)
151 {
152 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
153 }
154 
155 static inline bool is_cmac_aes(u32 algtype)
156 {
157 	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
158 	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
159 }
160 /* Common job descriptor seq in/out ptr routines */
161 
162 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
163 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
164 				      struct caam_hash_state *state,
165 				      int ctx_len)
166 {
167 	state->ctx_dma_len = ctx_len;
168 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
169 					ctx_len, DMA_FROM_DEVICE);
170 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
171 		dev_err(jrdev, "unable to map ctx\n");
172 		state->ctx_dma = 0;
173 		return -ENOMEM;
174 	}
175 
176 	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
177 
178 	return 0;
179 }
180 
181 /* Map current buffer in state (if length > 0) and put it in link table */
182 static inline int buf_map_to_sec4_sg(struct device *jrdev,
183 				     struct sec4_sg_entry *sec4_sg,
184 				     struct caam_hash_state *state)
185 {
186 	int buflen = *current_buflen(state);
187 
188 	if (!buflen)
189 		return 0;
190 
191 	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
192 					DMA_TO_DEVICE);
193 	if (dma_mapping_error(jrdev, state->buf_dma)) {
194 		dev_err(jrdev, "unable to map buf\n");
195 		state->buf_dma = 0;
196 		return -ENOMEM;
197 	}
198 
199 	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
200 
201 	return 0;
202 }
203 
204 /* Map state->caam_ctx, and add it to link table */
205 static inline int ctx_map_to_sec4_sg(struct device *jrdev,
206 				     struct caam_hash_state *state, int ctx_len,
207 				     struct sec4_sg_entry *sec4_sg, u32 flag)
208 {
209 	state->ctx_dma_len = ctx_len;
210 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
211 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
212 		dev_err(jrdev, "unable to map ctx\n");
213 		state->ctx_dma = 0;
214 		return -ENOMEM;
215 	}
216 
217 	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
218 
219 	return 0;
220 }
221 
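/*
 * (Re)build the four shared descriptors used by the keyed/unkeyed hashes -
 * update, update_first (init), final/finup and digest - from the current
 * (split) key, and push them to their device-visible copies.
 */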
222 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
223 {
224 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
225 	int digestsize = crypto_ahash_digestsize(ahash);
226 	struct device *jrdev = ctx->jrdev;
227 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
228 	u32 *desc;
229 
230 	ctx->adata.key_virt = ctx->key;
231 
232 	/* ahash_update shared descriptor */
233 	desc = ctx->sh_desc_update;
234 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
235 			  ctx->ctx_len, true, ctrlpriv->era);
236 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
237 				   desc_bytes(desc), ctx->dir);
238 
239 	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
240 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
241 			     1);
242 
243 	/* ahash_update_first shared descriptor */
244 	desc = ctx->sh_desc_update_first;
245 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
246 			  ctx->ctx_len, false, ctrlpriv->era);
247 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
248 				   desc_bytes(desc), ctx->dir);
249 	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
250 			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
251 			     desc_bytes(desc), 1);
252 
253 	/* ahash_final shared descriptor */
254 	desc = ctx->sh_desc_fin;
255 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
256 			  ctx->ctx_len, true, ctrlpriv->era);
257 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
258 				   desc_bytes(desc), ctx->dir);
259 
260 	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
261 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
262 			     desc_bytes(desc), 1);
263 
264 	/* ahash_digest shared descriptor */
265 	desc = ctx->sh_desc_digest;
266 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
267 			  ctx->ctx_len, false, ctrlpriv->era);
268 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
269 				   desc_bytes(desc), ctx->dir);
270 
271 	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
272 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
273 			     desc_bytes(desc), 1);
274 
275 	return 0;
276 }
277 
278 static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
279 {
280 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
281 	int digestsize = crypto_ahash_digestsize(ahash);
282 	struct device *jrdev = ctx->jrdev;
283 	u32 *desc;
284 
285 	/* shared descriptor for ahash_update */
286 	desc = ctx->sh_desc_update;
287 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
288 			    ctx->ctx_len, ctx->ctx_len);
289 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
290 				   desc_bytes(desc), ctx->dir);
291 	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
292 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
293 			     1);
294 
295 	/* shared descriptor for ahash_{final,finup} */
296 	desc = ctx->sh_desc_fin;
297 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
298 			    digestsize, ctx->ctx_len);
299 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
300 				   desc_bytes(desc), ctx->dir);
301 	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
302 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
303 			     1);
304 
305 	/* key is immediate data for INIT and INITFINAL states */
306 	ctx->adata.key_virt = ctx->key;
307 
308 	/* shared descriptor for first invocation of ahash_update */
309 	desc = ctx->sh_desc_update_first;
310 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
311 			    ctx->ctx_len);
312 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
313 				   desc_bytes(desc), ctx->dir);
314 	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
315 			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
316 			     desc_bytes(desc), 1);
317 
318 	/* shared descriptor for ahash_digest */
319 	desc = ctx->sh_desc_digest;
320 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
321 			    digestsize, ctx->ctx_len);
322 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
323 				   desc_bytes(desc), ctx->dir);
324 	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
325 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
326 			     1);
327 	return 0;
328 }
329 
330 static int acmac_set_sh_desc(struct crypto_ahash *ahash)
331 {
332 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
333 	int digestsize = crypto_ahash_digestsize(ahash);
334 	struct device *jrdev = ctx->jrdev;
335 	u32 *desc;
336 
337 	/* shared descriptor for ahash_update */
338 	desc = ctx->sh_desc_update;
339 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
340 			    ctx->ctx_len, ctx->ctx_len);
341 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
342 				   desc_bytes(desc), ctx->dir);
343 	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
344 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
345 			     desc_bytes(desc), 1);
346 
347 	/* shared descriptor for ahash_{final,finup} */
348 	desc = ctx->sh_desc_fin;
349 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
350 			    digestsize, ctx->ctx_len);
351 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
352 				   desc_bytes(desc), ctx->dir);
353 	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
354 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
355 			     desc_bytes(desc), 1);
356 
357 	/* shared descriptor for first invocation of ahash_update */
358 	desc = ctx->sh_desc_update_first;
359 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
360 			    ctx->ctx_len);
361 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
362 				   desc_bytes(desc), ctx->dir);
363 	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
364 			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
365 			     desc_bytes(desc), 1);
366 
367 	/* shared descriptor for ahash_digest */
368 	desc = ctx->sh_desc_digest;
369 	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
370 			    digestsize, ctx->ctx_len);
371 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
372 				   desc_bytes(desc), ctx->dir);
373 	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
374 			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
375 			     desc_bytes(desc), 1);
376 
377 	return 0;
378 }
379 
380 /* Digest the key if it is longer than the algorithm's block size */
381 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
382 			   u32 digestsize)
383 {
384 	struct device *jrdev = ctx->jrdev;
385 	u32 *desc;
386 	struct split_key_result result;
387 	dma_addr_t key_dma;
388 	int ret;
389 
390 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
391 	if (!desc) {
392 		dev_err(jrdev, "unable to allocate key input memory\n");
393 		return -ENOMEM;
394 	}
395 
396 	init_job_desc(desc, 0);
397 
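	/*
	 * The key buffer doubles as the job's output: the digest is written
	 * back in place, hence the bidirectional mapping.
	 */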
398 	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
399 	if (dma_mapping_error(jrdev, key_dma)) {
400 		dev_err(jrdev, "unable to map key memory\n");
401 		kfree(desc);
402 		return -ENOMEM;
403 	}
404 
405 	/* Job descriptor to perform unkeyed hash on key_in */
406 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
407 			 OP_ALG_AS_INITFINAL);
408 	append_seq_in_ptr(desc, key_dma, *keylen, 0);
409 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
410 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
411 	append_seq_out_ptr(desc, key_dma, digestsize, 0);
412 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
413 			 LDST_SRCDST_BYTE_CONTEXT);
414 
415 	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
416 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
417 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
418 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
419 			     1);
420 
421 	result.err = 0;
422 	init_completion(&result.completion);
423 
424 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
425 	if (!ret) {
426 		/* in progress */
427 		wait_for_completion(&result.completion);
428 		ret = result.err;
429 
430 		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
431 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
432 				     digestsize, 1);
433 	}
434 	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
435 
436 	*keylen = digestsize;
437 
438 	kfree(desc);
439 
440 	return ret;
441 }
442 
443 static int ahash_setkey(struct crypto_ahash *ahash,
444 			const u8 *key, unsigned int keylen)
445 {
446 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
447 	struct device *jrdev = ctx->jrdev;
448 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
449 	int digestsize = crypto_ahash_digestsize(ahash);
450 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
451 	int ret;
452 	u8 *hashed_key = NULL;
453 
454 	dev_dbg(jrdev, "keylen %d\n", keylen);
455 
456 	if (keylen > blocksize) {
457 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
458 		if (!hashed_key)
459 			return -ENOMEM;
460 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
461 		if (ret)
462 			goto bad_free_key;
463 		key = hashed_key;
464 	}
465 
466 	/*
467 	 * If DKP is supported, use it in the shared descriptor to generate
468 	 * the split key.
469 	 */
470 	if (ctrlpriv->era >= 6) {
471 		ctx->adata.key_inline = true;
472 		ctx->adata.keylen = keylen;
473 		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
474 						      OP_ALG_ALGSEL_MASK);
475 
476 		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
477 			goto bad_free_key;
478 
479 		memcpy(ctx->key, key, keylen);
480 
481 		/*
482 		 * In case |user key| > |derived key|, using DKP<imm,imm>
483 		 * would result in invalid opcodes (last bytes of user key) in
484 		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
485 		 * virtual and dma key addresses are needed.
486 		 */
487 		if (keylen > ctx->adata.keylen_pad)
488 			dma_sync_single_for_device(ctx->jrdev,
489 						   ctx->adata.key_dma,
490 						   ctx->adata.keylen_pad,
491 						   DMA_TO_DEVICE);
492 	} else {
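		/*
		 * No DKP on this hardware: derive the HMAC split key now by
		 * running a key-generation job on the job ring.
		 */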
493 		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
494 				    keylen, CAAM_MAX_HASH_KEY_SIZE);
495 		if (ret)
496 			goto bad_free_key;
497 	}
498 
499 	kfree(hashed_key);
500 	return ahash_set_sh_desc(ahash);
501  bad_free_key:
502 	kfree(hashed_key);
503 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
504 	return -EINVAL;
505 }
506 
507 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
508 			unsigned int keylen)
509 {
510 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
511 	struct device *jrdev = ctx->jrdev;
512 
513 	if (keylen != AES_KEYSIZE_128) {
514 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
515 		return -EINVAL;
516 	}
517 
518 	memcpy(ctx->key, key, keylen);
519 	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
520 				   DMA_TO_DEVICE);
521 	ctx->adata.keylen = keylen;
522 
523 	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
524 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
525 
526 	return axcbc_set_sh_desc(ahash);
527 }
528 
529 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
530 			unsigned int keylen)
531 {
532 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
533 	int err;
534 
535 	err = aes_check_keylen(keylen);
536 	if (err) {
537 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
538 		return err;
539 	}
540 
541 	/* key is immediate data for all cmac shared descriptors */
542 	ctx->adata.key_virt = key;
543 	ctx->adata.keylen = keylen;
544 
545 	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
546 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
547 
548 	return acmac_set_sh_desc(ahash);
549 }
550 
551 /**
552  * struct ahash_edesc - s/w-extended ahash descriptor
553  * @sec4_sg_dma: physical mapped address of h/w link table
554  * @src_nents: number of segments in input scatterlist
555  * @sec4_sg_bytes: length of dma mapped sec4_sg space
556  * @hw_desc: the h/w job descriptor followed by any referenced link tables
557  * @sec4_sg: h/w link table
558  */
559 struct ahash_edesc {
560 	dma_addr_t sec4_sg_dma;
561 	int src_nents;
562 	int sec4_sg_bytes;
563 	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
564 	struct sec4_sg_entry sec4_sg[];
565 };
566 
567 static inline void ahash_unmap(struct device *dev,
568 			struct ahash_edesc *edesc,
569 			struct ahash_request *req, int dst_len)
570 {
571 	struct caam_hash_state *state = ahash_request_ctx(req);
572 
573 	if (edesc->src_nents)
574 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
575 
576 	if (edesc->sec4_sg_bytes)
577 		dma_unmap_single(dev, edesc->sec4_sg_dma,
578 				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
579 
580 	if (state->buf_dma) {
581 		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
582 				 DMA_TO_DEVICE);
583 		state->buf_dma = 0;
584 	}
585 }
586 
587 static inline void ahash_unmap_ctx(struct device *dev,
588 			struct ahash_edesc *edesc,
589 			struct ahash_request *req, int dst_len, u32 flag)
590 {
591 	struct caam_hash_state *state = ahash_request_ctx(req);
592 
593 	if (state->ctx_dma) {
594 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
595 		state->ctx_dma = 0;
596 	}
597 	ahash_unmap(dev, edesc, req, dst_len);
598 }
599 
600 static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
601 		       void *context)
602 {
603 	struct ahash_request *req = context;
604 	struct ahash_edesc *edesc;
605 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
606 	int digestsize = crypto_ahash_digestsize(ahash);
607 	struct caam_hash_state *state = ahash_request_ctx(req);
608 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
609 	int ecode = 0;
610 
611 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
612 
613 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
614 	if (err)
615 		ecode = caam_jr_strstatus(jrdev, err);
616 
617 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
618 	memcpy(req->result, state->caam_ctx, digestsize);
619 	kfree(edesc);
620 
621 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
622 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
623 			     ctx->ctx_len, 1);
624 
625 	req->base.complete(&req->base, ecode);
626 }
627 
628 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
629 			    void *context)
630 {
631 	struct ahash_request *req = context;
632 	struct ahash_edesc *edesc;
633 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
634 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
635 	struct caam_hash_state *state = ahash_request_ctx(req);
636 	int digestsize = crypto_ahash_digestsize(ahash);
637 	int ecode = 0;
638 
639 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
640 
641 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
642 	if (err)
643 		ecode = caam_jr_strstatus(jrdev, err);
644 
645 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
646 	switch_buf(state);
647 	kfree(edesc);
648 
649 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
650 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
651 			     ctx->ctx_len, 1);
652 	if (req->result)
653 		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
654 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
655 				     digestsize, 1);
656 
657 	req->base.complete(&req->base, ecode);
658 }
659 
660 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
661 			       void *context)
662 {
663 	struct ahash_request *req = context;
664 	struct ahash_edesc *edesc;
665 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
666 	int digestsize = crypto_ahash_digestsize(ahash);
667 	struct caam_hash_state *state = ahash_request_ctx(req);
668 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
669 	int ecode = 0;
670 
671 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
672 
673 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
674 	if (err)
675 		ecode = caam_jr_strstatus(jrdev, err);
676 
677 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
678 	memcpy(req->result, state->caam_ctx, digestsize);
679 	kfree(edesc);
680 
681 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
682 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
683 			     ctx->ctx_len, 1);
684 
685 	req->base.complete(&req->base, ecode);
686 }
687 
688 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
689 			       void *context)
690 {
691 	struct ahash_request *req = context;
692 	struct ahash_edesc *edesc;
693 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
694 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
695 	struct caam_hash_state *state = ahash_request_ctx(req);
696 	int digestsize = crypto_ahash_digestsize(ahash);
697 	int ecode = 0;
698 
699 	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
700 
701 	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
702 	if (err)
703 		ecode = caam_jr_strstatus(jrdev, err);
704 
705 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
706 	switch_buf(state);
707 	kfree(edesc);
708 
709 	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
710 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
711 			     ctx->ctx_len, 1);
712 	if (req->result)
713 		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
714 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
715 				     digestsize, 1);
716 
717 	req->base.complete(&req->base, ecode);
718 }
719 
720 /*
721  * Allocate an extended descriptor, which contains the hardware job descriptor
722  * and space for a hardware scatter/gather table of sg_num entries.
723  */
724 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
725 					     int sg_num, u32 *sh_desc,
726 					     dma_addr_t sh_desc_dma,
727 					     gfp_t flags)
728 {
729 	struct ahash_edesc *edesc;
730 	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
731 
732 	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
733 	if (!edesc) {
734 		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
735 		return NULL;
736 	}
737 
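	/*
	 * Chain the job descriptor to the shared descriptor; REVERSE runs the
	 * seq in/out commands appended below before the shared descriptor,
	 * and DEFER postpones fetching the shared descriptor until needed.
	 */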
738 	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
739 			     HDR_SHARE_DEFER | HDR_REVERSE);
740 
741 	return edesc;
742 }
743 
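/*
 * Hook the request source into the job descriptor's SEQ IN PTR: a single
 * mapped segment is referenced directly; otherwise (or when first_sg extra
 * entries precede it) a sec4 S/G table is built and DMA mapped.
 */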
744 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
745 			       struct ahash_edesc *edesc,
746 			       struct ahash_request *req, int nents,
747 			       unsigned int first_sg,
748 			       unsigned int first_bytes, size_t to_hash)
749 {
750 	dma_addr_t src_dma;
751 	u32 options;
752 
753 	if (nents > 1 || first_sg) {
754 		struct sec4_sg_entry *sg = edesc->sec4_sg;
755 		unsigned int sgsize = sizeof(*sg) *
756 				      pad_sg_nents(first_sg + nents);
757 
758 		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
759 
760 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
761 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
762 			dev_err(ctx->jrdev, "unable to map S/G table\n");
763 			return -ENOMEM;
764 		}
765 
766 		edesc->sec4_sg_bytes = sgsize;
767 		edesc->sec4_sg_dma = src_dma;
768 		options = LDST_SGF;
769 	} else {
770 		src_dma = sg_dma_address(req->src);
771 		options = 0;
772 	}
773 
774 	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
775 			  options);
776 
777 	return 0;
778 }
779 
780 /* submit update job descriptor */
781 static int ahash_update_ctx(struct ahash_request *req)
782 {
783 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
784 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
785 	struct caam_hash_state *state = ahash_request_ctx(req);
786 	struct device *jrdev = ctx->jrdev;
787 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
788 		       GFP_KERNEL : GFP_ATOMIC;
789 	u8 *buf = current_buf(state);
790 	int *buflen = current_buflen(state);
791 	u8 *next_buf = alt_buf(state);
792 	int blocksize = crypto_ahash_blocksize(ahash);
793 	int *next_buflen = alt_buflen(state), last_buflen;
794 	int in_len = *buflen + req->nbytes, to_hash;
795 	u32 *desc;
796 	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
797 	struct ahash_edesc *edesc;
798 	int ret = 0;
799 
800 	last_buflen = *next_buflen;
801 	*next_buflen = in_len & (blocksize - 1);
802 	to_hash = in_len - *next_buflen;
803 
804 	/*
805 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
806 	 * keep the last block in the internal buffer for the final operation
807 	 */
808 	if ((is_xcbc_aes(ctx->adata.algtype) ||
809 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
810 	     (*next_buflen == 0)) {
811 		*next_buflen = blocksize;
812 		to_hash -= blocksize;
813 	}
814 
815 	if (to_hash) {
816 		int pad_nents;
817 		int src_len = req->nbytes - *next_buflen;
818 
819 		src_nents = sg_nents_for_len(req->src, src_len);
820 		if (src_nents < 0) {
821 			dev_err(jrdev, "Invalid number of src SG.\n");
822 			return src_nents;
823 		}
824 
825 		if (src_nents) {
826 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
827 						  DMA_TO_DEVICE);
828 			if (!mapped_nents) {
829 				dev_err(jrdev, "unable to DMA map source\n");
830 				return -ENOMEM;
831 			}
832 		} else {
833 			mapped_nents = 0;
834 		}
835 
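		/*
		 * S/G table layout: [0] running context, [1] buffered bytes
		 * (only if any), then the mapped source segments; the entry
		 * count is padded as the hardware requires.
		 */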
836 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
837 		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
838 		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
839 
840 		/*
841 		 * allocate space for base edesc and hw desc commands,
842 		 * link tables
843 		 */
844 		edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
845 					  ctx->sh_desc_update_dma, flags);
846 		if (!edesc) {
847 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
848 			return -ENOMEM;
849 		}
850 
851 		edesc->src_nents = src_nents;
852 		edesc->sec4_sg_bytes = sec4_sg_bytes;
853 
854 		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
855 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
856 		if (ret)
857 			goto unmap_ctx;
858 
859 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
860 		if (ret)
861 			goto unmap_ctx;
862 
863 		if (mapped_nents)
864 			sg_to_sec4_sg_last(req->src, src_len,
865 					   edesc->sec4_sg + sec4_sg_src_index,
866 					   0);
867 		else
868 			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
869 					    1);
870 
871 		if (*next_buflen)
872 			scatterwalk_map_and_copy(next_buf, req->src,
873 						 to_hash - *buflen,
874 						 *next_buflen, 0);
875 		desc = edesc->hw_desc;
876 
877 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
878 						     sec4_sg_bytes,
879 						     DMA_TO_DEVICE);
880 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
881 			dev_err(jrdev, "unable to map S/G table\n");
882 			ret = -ENOMEM;
883 			goto unmap_ctx;
884 		}
885 
886 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
887 				       to_hash, LDST_SGF);
888 
889 		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
890 
891 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
892 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
893 				     desc_bytes(desc), 1);
894 
895 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
896 		if (ret)
897 			goto unmap_ctx;
898 
899 		ret = -EINPROGRESS;
900 	} else if (*next_buflen) {
901 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
902 					 req->nbytes, 0);
903 		*buflen = *next_buflen;
904 		*next_buflen = last_buflen;
905 	}
906 
907 	print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
908 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
909 	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
910 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
911 			     *next_buflen, 1);
912 
913 	return ret;
914 unmap_ctx:
915 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
916 	kfree(edesc);
917 	return ret;
918 }
919 
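/* submit final job descriptor when a running context already exists */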
920 static int ahash_final_ctx(struct ahash_request *req)
921 {
922 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
923 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
924 	struct caam_hash_state *state = ahash_request_ctx(req);
925 	struct device *jrdev = ctx->jrdev;
926 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
927 		       GFP_KERNEL : GFP_ATOMIC;
928 	int buflen = *current_buflen(state);
929 	u32 *desc;
930 	int sec4_sg_bytes;
931 	int digestsize = crypto_ahash_digestsize(ahash);
932 	struct ahash_edesc *edesc;
933 	int ret;
934 
935 	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
936 			sizeof(struct sec4_sg_entry);
937 
938 	/* allocate space for base edesc and hw desc commands, link tables */
939 	edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
940 				  ctx->sh_desc_fin_dma, flags);
941 	if (!edesc)
942 		return -ENOMEM;
943 
944 	desc = edesc->hw_desc;
945 
946 	edesc->sec4_sg_bytes = sec4_sg_bytes;
947 
948 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
949 				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
950 	if (ret)
951 		goto unmap_ctx;
952 
953 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
954 	if (ret)
955 		goto unmap_ctx;
956 
957 	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
958 
959 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
960 					    sec4_sg_bytes, DMA_TO_DEVICE);
961 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
962 		dev_err(jrdev, "unable to map S/G table\n");
963 		ret = -ENOMEM;
964 		goto unmap_ctx;
965 	}
966 
967 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
968 			  LDST_SGF);
969 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
970 
971 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
972 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
973 			     1);
974 
975 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
976 	if (ret)
977 		goto unmap_ctx;
978 
979 	return -EINPROGRESS;
980  unmap_ctx:
981 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
982 	kfree(edesc);
983 	return ret;
984 }
985 
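/* submit finup job descriptor when a running context already exists */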
986 static int ahash_finup_ctx(struct ahash_request *req)
987 {
988 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
989 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
990 	struct caam_hash_state *state = ahash_request_ctx(req);
991 	struct device *jrdev = ctx->jrdev;
992 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
993 		       GFP_KERNEL : GFP_ATOMIC;
994 	int buflen = *current_buflen(state);
995 	u32 *desc;
996 	int sec4_sg_src_index;
997 	int src_nents, mapped_nents;
998 	int digestsize = crypto_ahash_digestsize(ahash);
999 	struct ahash_edesc *edesc;
1000 	int ret;
1001 
1002 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1003 	if (src_nents < 0) {
1004 		dev_err(jrdev, "Invalid number of src SG.\n");
1005 		return src_nents;
1006 	}
1007 
1008 	if (src_nents) {
1009 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1010 					  DMA_TO_DEVICE);
1011 		if (!mapped_nents) {
1012 			dev_err(jrdev, "unable to DMA map source\n");
1013 			return -ENOMEM;
1014 		}
1015 	} else {
1016 		mapped_nents = 0;
1017 	}
1018 
1019 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
1020 
1021 	/* allocate space for base edesc and hw desc commands, link tables */
1022 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
1023 				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
1024 				  flags);
1025 	if (!edesc) {
1026 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1027 		return -ENOMEM;
1028 	}
1029 
1030 	desc = edesc->hw_desc;
1031 
1032 	edesc->src_nents = src_nents;
1033 
1034 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
1035 				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
1036 	if (ret)
1037 		goto unmap_ctx;
1038 
1039 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
1040 	if (ret)
1041 		goto unmap_ctx;
1042 
1043 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
1044 				  sec4_sg_src_index, ctx->ctx_len + buflen,
1045 				  req->nbytes);
1046 	if (ret)
1047 		goto unmap_ctx;
1048 
1049 	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
1050 
1051 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1052 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1053 			     1);
1054 
1055 	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1056 	if (ret)
1057 		goto unmap_ctx;
1058 
1059 	return -EINPROGRESS;
1060  unmap_ctx:
1061 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
1062 	kfree(edesc);
1063 	return ret;
1064 }
1065 
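/*
 * submit a one-shot digest job: the whole request is hashed with the
 * INITFINAL shared descriptor, no running context is kept
 */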
1066 static int ahash_digest(struct ahash_request *req)
1067 {
1068 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1069 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1070 	struct caam_hash_state *state = ahash_request_ctx(req);
1071 	struct device *jrdev = ctx->jrdev;
1072 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1073 		       GFP_KERNEL : GFP_ATOMIC;
1074 	u32 *desc;
1075 	int digestsize = crypto_ahash_digestsize(ahash);
1076 	int src_nents, mapped_nents;
1077 	struct ahash_edesc *edesc;
1078 	int ret;
1079 
1080 	state->buf_dma = 0;
1081 
1082 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1083 	if (src_nents < 0) {
1084 		dev_err(jrdev, "Invalid number of src SG.\n");
1085 		return src_nents;
1086 	}
1087 
1088 	if (src_nents) {
1089 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1090 					  DMA_TO_DEVICE);
1091 		if (!mapped_nents) {
1092 			dev_err(jrdev, "unable to map source for DMA\n");
1093 			return -ENOMEM;
1094 		}
1095 	} else {
1096 		mapped_nents = 0;
1097 	}
1098 
1099 	/* allocate space for base edesc and hw desc commands, link tables */
1100 	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
1101 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
1102 				  flags);
1103 	if (!edesc) {
1104 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1105 		return -ENOMEM;
1106 	}
1107 
1108 	edesc->src_nents = src_nents;
1109 
1110 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1111 				  req->nbytes);
1112 	if (ret) {
1113 		ahash_unmap(jrdev, edesc, req, digestsize);
1114 		kfree(edesc);
1115 		return ret;
1116 	}
1117 
1118 	desc = edesc->hw_desc;
1119 
1120 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1121 	if (ret) {
1122 		ahash_unmap(jrdev, edesc, req, digestsize);
1123 		kfree(edesc);
1124 		return -ENOMEM;
1125 	}
1126 
1127 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1128 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1129 			     1);
1130 
1131 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1132 	if (!ret) {
1133 		ret = -EINPROGRESS;
1134 	} else {
1135 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1136 		kfree(edesc);
1137 	}
1138 
1139 	return ret;
1140 }
1141 
1142 /* submit ahash final if it is the first job descriptor */
1143 static int ahash_final_no_ctx(struct ahash_request *req)
1144 {
1145 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1146 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1147 	struct caam_hash_state *state = ahash_request_ctx(req);
1148 	struct device *jrdev = ctx->jrdev;
1149 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1150 		       GFP_KERNEL : GFP_ATOMIC;
1151 	u8 *buf = current_buf(state);
1152 	int buflen = *current_buflen(state);
1153 	u32 *desc;
1154 	int digestsize = crypto_ahash_digestsize(ahash);
1155 	struct ahash_edesc *edesc;
1156 	int ret;
1157 
1158 	/* allocate space for base edesc and hw desc commands, link tables */
1159 	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
1160 				  ctx->sh_desc_digest_dma, flags);
1161 	if (!edesc)
1162 		return -ENOMEM;
1163 
1164 	desc = edesc->hw_desc;
1165 
1166 	if (buflen) {
1167 		state->buf_dma = dma_map_single(jrdev, buf, buflen,
1168 						DMA_TO_DEVICE);
1169 		if (dma_mapping_error(jrdev, state->buf_dma)) {
1170 			dev_err(jrdev, "unable to map src\n");
1171 			goto unmap;
1172 		}
1173 
1174 		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1175 	}
1176 
1177 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1178 	if (ret)
1179 		goto unmap;
1180 
1181 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1182 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1183 			     1);
1184 
1185 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1186 	if (!ret) {
1187 		ret = -EINPROGRESS;
1188 	} else {
1189 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1190 		kfree(edesc);
1191 	}
1192 
1193 	return ret;
1194  unmap:
1195 	ahash_unmap(jrdev, edesc, req, digestsize);
1196 	kfree(edesc);
1197 	return -ENOMEM;
1198 
1199 }
1200 
1201 /* submit ahash update if it is the first job descriptor after update */
1202 static int ahash_update_no_ctx(struct ahash_request *req)
1203 {
1204 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1205 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1206 	struct caam_hash_state *state = ahash_request_ctx(req);
1207 	struct device *jrdev = ctx->jrdev;
1208 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1209 		       GFP_KERNEL : GFP_ATOMIC;
1210 	u8 *buf = current_buf(state);
1211 	int *buflen = current_buflen(state);
1212 	int blocksize = crypto_ahash_blocksize(ahash);
1213 	u8 *next_buf = alt_buf(state);
1214 	int *next_buflen = alt_buflen(state);
1215 	int in_len = *buflen + req->nbytes, to_hash;
1216 	int sec4_sg_bytes, src_nents, mapped_nents;
1217 	struct ahash_edesc *edesc;
1218 	u32 *desc;
1219 	int ret = 0;
1220 
1221 	*next_buflen = in_len & (blocksize - 1);
1222 	to_hash = in_len - *next_buflen;
1223 
1224 	/*
1225 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
1226 	 * keep the last block in the internal buffer for the final operation
1227 	 */
1228 	if ((is_xcbc_aes(ctx->adata.algtype) ||
1229 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1230 	     (*next_buflen == 0)) {
1231 		*next_buflen = blocksize;
1232 		to_hash -= blocksize;
1233 	}
1234 
1235 	if (to_hash) {
1236 		int pad_nents;
1237 		int src_len = req->nbytes - *next_buflen;
1238 
1239 		src_nents = sg_nents_for_len(req->src, src_len);
1240 		if (src_nents < 0) {
1241 			dev_err(jrdev, "Invalid number of src SG.\n");
1242 			return src_nents;
1243 		}
1244 
1245 		if (src_nents) {
1246 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1247 						  DMA_TO_DEVICE);
1248 			if (!mapped_nents) {
1249 				dev_err(jrdev, "unable to DMA map source\n");
1250 				return -ENOMEM;
1251 			}
1252 		} else {
1253 			mapped_nents = 0;
1254 		}
1255 
1256 		pad_nents = pad_sg_nents(1 + mapped_nents);
1257 		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
1258 
1259 		/*
1260 		 * allocate space for base edesc and hw desc commands,
1261 		 * link tables
1262 		 */
1263 		edesc = ahash_edesc_alloc(ctx, pad_nents,
1264 					  ctx->sh_desc_update_first,
1265 					  ctx->sh_desc_update_first_dma,
1266 					  flags);
1267 		if (!edesc) {
1268 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1269 			return -ENOMEM;
1270 		}
1271 
1272 		edesc->src_nents = src_nents;
1273 		edesc->sec4_sg_bytes = sec4_sg_bytes;
1274 
1275 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1276 		if (ret)
1277 			goto unmap_ctx;
1278 
1279 		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
1280 
1281 		if (*next_buflen) {
1282 			scatterwalk_map_and_copy(next_buf, req->src,
1283 						 to_hash - *buflen,
1284 						 *next_buflen, 0);
1285 		}
1286 
1287 		desc = edesc->hw_desc;
1288 
1289 		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1290 						    sec4_sg_bytes,
1291 						    DMA_TO_DEVICE);
1292 		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1293 			dev_err(jrdev, "unable to map S/G table\n");
1294 			ret = -ENOMEM;
1295 			goto unmap_ctx;
1296 		}
1297 
1298 		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1299 
1300 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1301 		if (ret)
1302 			goto unmap_ctx;
1303 
1304 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1305 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
1306 				     desc_bytes(desc), 1);
1307 
1308 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1309 		if (ret)
1310 			goto unmap_ctx;
1311 
1312 		ret = -EINPROGRESS;
1313 		state->update = ahash_update_ctx;
1314 		state->finup = ahash_finup_ctx;
1315 		state->final = ahash_final_ctx;
1316 	} else if (*next_buflen) {
1317 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
1318 					 req->nbytes, 0);
1319 		*buflen = *next_buflen;
1320 		*next_buflen = 0;
1321 	}
1322 
1323 	print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
1324 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1325 	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
1326 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
1327 			     1);
1328 
1329 	return ret;
1330  unmap_ctx:
1331 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1332 	kfree(edesc);
1333 	return ret;
1334 }
1335 
1336 /* submit ahash finup if it is the first job descriptor after update */
1337 static int ahash_finup_no_ctx(struct ahash_request *req)
1338 {
1339 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1340 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1341 	struct caam_hash_state *state = ahash_request_ctx(req);
1342 	struct device *jrdev = ctx->jrdev;
1343 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1344 		       GFP_KERNEL : GFP_ATOMIC;
1345 	int buflen = *current_buflen(state);
1346 	u32 *desc;
1347 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
1348 	int digestsize = crypto_ahash_digestsize(ahash);
1349 	struct ahash_edesc *edesc;
1350 	int ret;
1351 
1352 	src_nents = sg_nents_for_len(req->src, req->nbytes);
1353 	if (src_nents < 0) {
1354 		dev_err(jrdev, "Invalid number of src SG.\n");
1355 		return src_nents;
1356 	}
1357 
1358 	if (src_nents) {
1359 		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1360 					  DMA_TO_DEVICE);
1361 		if (!mapped_nents) {
1362 			dev_err(jrdev, "unable to DMA map source\n");
1363 			return -ENOMEM;
1364 		}
1365 	} else {
1366 		mapped_nents = 0;
1367 	}
1368 
1369 	sec4_sg_src_index = 2;
1370 	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
1371 			 sizeof(struct sec4_sg_entry);
1372 
1373 	/* allocate space for base edesc and hw desc commands, link tables */
1374 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
1375 				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
1376 				  flags);
1377 	if (!edesc) {
1378 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1379 		return -ENOMEM;
1380 	}
1381 
1382 	desc = edesc->hw_desc;
1383 
1384 	edesc->src_nents = src_nents;
1385 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1386 
1387 	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
1388 	if (ret)
1389 		goto unmap;
1390 
1391 	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
1392 				  req->nbytes);
1393 	if (ret) {
1394 		dev_err(jrdev, "unable to map S/G table\n");
1395 		goto unmap;
1396 	}
1397 
1398 	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
1399 	if (ret)
1400 		goto unmap;
1401 
1402 	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1403 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1404 			     1);
1405 
1406 	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1407 	if (!ret) {
1408 		ret = -EINPROGRESS;
1409 	} else {
1410 		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1411 		kfree(edesc);
1412 	}
1413 
1414 	return ret;
1415  unmap:
1416 	ahash_unmap(jrdev, edesc, req, digestsize);
1417 	kfree(edesc);
1418 	return -ENOMEM;
1419 
1420 }
1421 
1422 /* submit first update job descriptor after init */
1423 static int ahash_update_first(struct ahash_request *req)
1424 {
1425 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1426 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1427 	struct caam_hash_state *state = ahash_request_ctx(req);
1428 	struct device *jrdev = ctx->jrdev;
1429 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1430 		       GFP_KERNEL : GFP_ATOMIC;
1431 	u8 *next_buf = alt_buf(state);
1432 	int *next_buflen = alt_buflen(state);
1433 	int to_hash;
1434 	int blocksize = crypto_ahash_blocksize(ahash);
1435 	u32 *desc;
1436 	int src_nents, mapped_nents;
1437 	struct ahash_edesc *edesc;
1438 	int ret = 0;
1439 
1440 	*next_buflen = req->nbytes & (blocksize - 1);
1441 	to_hash = req->nbytes - *next_buflen;
1442 
1443 	/*
1444 	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
1445 	 * keep the last block in the internal buffer for the final operation
1446 	 */
1447 	if ((is_xcbc_aes(ctx->adata.algtype) ||
1448 	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1449 	     (*next_buflen == 0)) {
1450 		*next_buflen = blocksize;
1451 		to_hash -= blocksize;
1452 	}
1453 
1454 	if (to_hash) {
1455 		src_nents = sg_nents_for_len(req->src,
1456 					     req->nbytes - *next_buflen);
1457 		if (src_nents < 0) {
1458 			dev_err(jrdev, "Invalid number of src SG.\n");
1459 			return src_nents;
1460 		}
1461 
1462 		if (src_nents) {
1463 			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1464 						  DMA_TO_DEVICE);
1465 			if (!mapped_nents) {
1466 				dev_err(jrdev, "unable to map source for DMA\n");
1467 				return -ENOMEM;
1468 			}
1469 		} else {
1470 			mapped_nents = 0;
1471 		}
1472 
1473 		/*
1474 		 * allocate space for base edesc and hw desc commands,
1475 		 * link tables
1476 		 */
1477 		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
1478 					  mapped_nents : 0,
1479 					  ctx->sh_desc_update_first,
1480 					  ctx->sh_desc_update_first_dma,
1481 					  flags);
1482 		if (!edesc) {
1483 			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1484 			return -ENOMEM;
1485 		}
1486 
1487 		edesc->src_nents = src_nents;
1488 
1489 		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1490 					  to_hash);
1491 		if (ret)
1492 			goto unmap_ctx;
1493 
1494 		if (*next_buflen)
1495 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
1496 						 *next_buflen, 0);
1497 
1498 		desc = edesc->hw_desc;
1499 
1500 		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1501 		if (ret)
1502 			goto unmap_ctx;
1503 
1504 		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1505 				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
1506 				     desc_bytes(desc), 1);
1507 
1508 		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1509 		if (ret)
1510 			goto unmap_ctx;
1511 
1512 		ret = -EINPROGRESS;
1513 		state->update = ahash_update_ctx;
1514 		state->finup = ahash_finup_ctx;
1515 		state->final = ahash_final_ctx;
1516 	} else if (*next_buflen) {
1517 		state->update = ahash_update_no_ctx;
1518 		state->finup = ahash_finup_no_ctx;
1519 		state->final = ahash_final_no_ctx;
1520 		scatterwalk_map_and_copy(next_buf, req->src, 0,
1521 					 req->nbytes, 0);
1522 		switch_buf(state);
1523 	}
1524 
1525 	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
1526 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
1527 			     1);
1528 
1529 	return ret;
1530  unmap_ctx:
1531 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1532 	kfree(edesc);
1533 	return ret;
1534 }
1535 
1536 static int ahash_finup_first(struct ahash_request *req)
1537 {
1538 	return ahash_digest(req);
1539 }
1540 
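/*
 * init points every request at the "first"/"no_ctx" handlers; once a job has
 * produced a running context in hardware, update/finup/final are switched
 * over to the *_ctx variants.
 */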
1541 static int ahash_init(struct ahash_request *req)
1542 {
1543 	struct caam_hash_state *state = ahash_request_ctx(req);
1544 
1545 	state->update = ahash_update_first;
1546 	state->finup = ahash_finup_first;
1547 	state->final = ahash_final_no_ctx;
1548 
1549 	state->ctx_dma = 0;
1550 	state->ctx_dma_len = 0;
1551 	state->current_buf = 0;
1552 	state->buf_dma = 0;
1553 	state->buflen_0 = 0;
1554 	state->buflen_1 = 0;
1555 
1556 	return 0;
1557 }
1558 
1559 static int ahash_update(struct ahash_request *req)
1560 {
1561 	struct caam_hash_state *state = ahash_request_ctx(req);
1562 
1563 	return state->update(req);
1564 }
1565 
1566 static int ahash_finup(struct ahash_request *req)
1567 {
1568 	struct caam_hash_state *state = ahash_request_ctx(req);
1569 
1570 	return state->finup(req);
1571 }
1572 
1573 static int ahash_final(struct ahash_request *req)
1574 {
1575 	struct caam_hash_state *state = ahash_request_ctx(req);
1576 
1577 	return state->final(req);
1578 }
1579 
1580 static int ahash_export(struct ahash_request *req, void *out)
1581 {
1582 	struct caam_hash_state *state = ahash_request_ctx(req);
1583 	struct caam_export_state *export = out;
1584 	int len;
1585 	u8 *buf;
1586 
1587 	if (state->current_buf) {
1588 		buf = state->buf_1;
1589 		len = state->buflen_1;
1590 	} else {
1591 		buf = state->buf_0;
1592 		len = state->buflen_0;
1593 	}
1594 
1595 	memcpy(export->buf, buf, len);
1596 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1597 	export->buflen = len;
1598 	export->update = state->update;
1599 	export->final = state->final;
1600 	export->finup = state->finup;
1601 
1602 	return 0;
1603 }
1604 
1605 static int ahash_import(struct ahash_request *req, const void *in)
1606 {
1607 	struct caam_hash_state *state = ahash_request_ctx(req);
1608 	const struct caam_export_state *export = in;
1609 
1610 	memset(state, 0, sizeof(*state));
1611 	memcpy(state->buf_0, export->buf, export->buflen);
1612 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1613 	state->buflen_0 = export->buflen;
1614 	state->update = export->update;
1615 	state->final = export->final;
1616 	state->finup = export->finup;
1617 
1618 	return 0;
1619 }
1620 
1621 struct caam_hash_template {
1622 	char name[CRYPTO_MAX_ALG_NAME];
1623 	char driver_name[CRYPTO_MAX_ALG_NAME];
1624 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1625 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1626 	unsigned int blocksize;
1627 	struct ahash_alg template_ahash;
1628 	u32 alg_type;
1629 };
1630 
1631 /* ahash algorithm templates */
1632 static struct caam_hash_template driver_hash[] = {
1633 	{
1634 		.name = "sha1",
1635 		.driver_name = "sha1-caam",
1636 		.hmac_name = "hmac(sha1)",
1637 		.hmac_driver_name = "hmac-sha1-caam",
1638 		.blocksize = SHA1_BLOCK_SIZE,
1639 		.template_ahash = {
1640 			.init = ahash_init,
1641 			.update = ahash_update,
1642 			.final = ahash_final,
1643 			.finup = ahash_finup,
1644 			.digest = ahash_digest,
1645 			.export = ahash_export,
1646 			.import = ahash_import,
1647 			.setkey = ahash_setkey,
1648 			.halg = {
1649 				.digestsize = SHA1_DIGEST_SIZE,
1650 				.statesize = sizeof(struct caam_export_state),
1651 			},
1652 		},
1653 		.alg_type = OP_ALG_ALGSEL_SHA1,
1654 	}, {
1655 		.name = "sha224",
1656 		.driver_name = "sha224-caam",
1657 		.hmac_name = "hmac(sha224)",
1658 		.hmac_driver_name = "hmac-sha224-caam",
1659 		.blocksize = SHA224_BLOCK_SIZE,
1660 		.template_ahash = {
1661 			.init = ahash_init,
1662 			.update = ahash_update,
1663 			.final = ahash_final,
1664 			.finup = ahash_finup,
1665 			.digest = ahash_digest,
1666 			.export = ahash_export,
1667 			.import = ahash_import,
1668 			.setkey = ahash_setkey,
1669 			.halg = {
1670 				.digestsize = SHA224_DIGEST_SIZE,
1671 				.statesize = sizeof(struct caam_export_state),
1672 			},
1673 		},
1674 		.alg_type = OP_ALG_ALGSEL_SHA224,
1675 	}, {
1676 		.name = "sha256",
1677 		.driver_name = "sha256-caam",
1678 		.hmac_name = "hmac(sha256)",
1679 		.hmac_driver_name = "hmac-sha256-caam",
1680 		.blocksize = SHA256_BLOCK_SIZE,
1681 		.template_ahash = {
1682 			.init = ahash_init,
1683 			.update = ahash_update,
1684 			.final = ahash_final,
1685 			.finup = ahash_finup,
1686 			.digest = ahash_digest,
1687 			.export = ahash_export,
1688 			.import = ahash_import,
1689 			.setkey = ahash_setkey,
1690 			.halg = {
1691 				.digestsize = SHA256_DIGEST_SIZE,
1692 				.statesize = sizeof(struct caam_export_state),
1693 			},
1694 		},
1695 		.alg_type = OP_ALG_ALGSEL_SHA256,
1696 	}, {
1697 		.name = "sha384",
1698 		.driver_name = "sha384-caam",
1699 		.hmac_name = "hmac(sha384)",
1700 		.hmac_driver_name = "hmac-sha384-caam",
1701 		.blocksize = SHA384_BLOCK_SIZE,
1702 		.template_ahash = {
1703 			.init = ahash_init,
1704 			.update = ahash_update,
1705 			.final = ahash_final,
1706 			.finup = ahash_finup,
1707 			.digest = ahash_digest,
1708 			.export = ahash_export,
1709 			.import = ahash_import,
1710 			.setkey = ahash_setkey,
1711 			.halg = {
1712 				.digestsize = SHA384_DIGEST_SIZE,
1713 				.statesize = sizeof(struct caam_export_state),
1714 			},
1715 		},
1716 		.alg_type = OP_ALG_ALGSEL_SHA384,
1717 	}, {
1718 		.name = "sha512",
1719 		.driver_name = "sha512-caam",
1720 		.hmac_name = "hmac(sha512)",
1721 		.hmac_driver_name = "hmac-sha512-caam",
1722 		.blocksize = SHA512_BLOCK_SIZE,
1723 		.template_ahash = {
1724 			.init = ahash_init,
1725 			.update = ahash_update,
1726 			.final = ahash_final,
1727 			.finup = ahash_finup,
1728 			.digest = ahash_digest,
1729 			.export = ahash_export,
1730 			.import = ahash_import,
1731 			.setkey = ahash_setkey,
1732 			.halg = {
1733 				.digestsize = SHA512_DIGEST_SIZE,
1734 				.statesize = sizeof(struct caam_export_state),
1735 			},
1736 		},
1737 		.alg_type = OP_ALG_ALGSEL_SHA512,
1738 	}, {
1739 		.name = "md5",
1740 		.driver_name = "md5-caam",
1741 		.hmac_name = "hmac(md5)",
1742 		.hmac_driver_name = "hmac-md5-caam",
1743 		.blocksize = MD5_BLOCK_WORDS * 4,
1744 		.template_ahash = {
1745 			.init = ahash_init,
1746 			.update = ahash_update,
1747 			.final = ahash_final,
1748 			.finup = ahash_finup,
1749 			.digest = ahash_digest,
1750 			.export = ahash_export,
1751 			.import = ahash_import,
1752 			.setkey = ahash_setkey,
1753 			.halg = {
1754 				.digestsize = MD5_DIGEST_SIZE,
1755 				.statesize = sizeof(struct caam_export_state),
1756 			},
1757 		},
1758 		.alg_type = OP_ALG_ALGSEL_MD5,
1759 	}, {
1760 		.hmac_name = "xcbc(aes)",
1761 		.hmac_driver_name = "xcbc-aes-caam",
1762 		.blocksize = AES_BLOCK_SIZE,
1763 		.template_ahash = {
1764 			.init = ahash_init,
1765 			.update = ahash_update,
1766 			.final = ahash_final,
1767 			.finup = ahash_finup,
1768 			.digest = ahash_digest,
1769 			.export = ahash_export,
1770 			.import = ahash_import,
1771 			.setkey = axcbc_setkey,
1772 			.halg = {
1773 				.digestsize = AES_BLOCK_SIZE,
1774 				.statesize = sizeof(struct caam_export_state),
1775 			},
1776 		},
1777 		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1778 	}, {
1779 		.hmac_name = "cmac(aes)",
1780 		.hmac_driver_name = "cmac-aes-caam",
1781 		.blocksize = AES_BLOCK_SIZE,
1782 		.template_ahash = {
1783 			.init = ahash_init,
1784 			.update = ahash_update,
1785 			.final = ahash_final,
1786 			.finup = ahash_finup,
1787 			.digest = ahash_digest,
1788 			.export = ahash_export,
1789 			.import = ahash_import,
1790 			.setkey = acmac_setkey,
1791 			.halg = {
1792 				.digestsize = AES_BLOCK_SIZE,
1793 				.statesize = sizeof(struct caam_export_state),
1794 			},
1795 		},
1796 		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1797 	},
1798 };
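/*
 * Each template above is registered in keyed form under hmac_name /
 * hmac_driver_name and, for the MDHA hashes, also in unkeyed form under
 * name / driver_name; the AES-based MAC entries leave the unkeyed names
 * empty since only keyed forms exist.  Illustrative only (not part of
 * the driver): a user reaches these through the generic ahash API, e.g.
 * crypto_alloc_ahash("hmac(sha256)", 0, 0), with the -caam variant
 * preferred over software implementations by its higher cra_priority.
 */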
1799 
1800 struct caam_hash_alg {
1801 	struct list_head entry;
1802 	int alg_type;
1803 	struct ahash_alg ahash_alg;
1804 };
1805 
1806 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1807 {
1808 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1809 	struct crypto_alg *base = tfm->__crt_alg;
1810 	struct hash_alg_common *halg =
1811 		 container_of(base, struct hash_alg_common, base);
1812 	struct ahash_alg *alg =
1813 		 container_of(halg, struct ahash_alg, halg);
1814 	struct caam_hash_alg *caam_hash =
1815 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1816 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1817 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1818 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1819 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1820 					 HASH_MSG_LEN + 32,
1821 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1822 					 HASH_MSG_LEN + 64,
1823 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
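	/*
	 * runninglen[] is indexed by the low bits of the ALGSEL field
	 * (OP_ALG_ALGSEL_SUBMASK), which number MD5, SHA1, SHA224, SHA256,
	 * SHA384 and SHA512 consecutively.  SHA-224 and SHA-384 carry the
	 * full 32-/64-byte internal state of their wider siblings, hence
	 * the hardcoded 32 and 64 above.
	 */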
1824 	dma_addr_t dma_addr;
1825 	struct caam_drv_private *priv;
1826 
1827 	/*
1828 	 * Get a job ring from the Job Ring driver to ensure in-order
1829 	 * crypto request processing for this tfm
1830 	 */
1831 	ctx->jrdev = caam_jr_alloc();
1832 	if (IS_ERR(ctx->jrdev)) {
1833 		pr_err("Job Ring Device allocation for transform failed\n");
1834 		return PTR_ERR(ctx->jrdev);
1835 	}
1836 
1837 	priv = dev_get_drvdata(ctx->jrdev->parent);
1838 
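	/*
	 * XCBC and CMAC run on the AES engine (class 1 algorithm type);
	 * everything else runs on the MDHA engine (class 2).  ctx_len is the
	 * amount of running context saved and restored between jobs: for the
	 * MDHA hashes it is the running digest plus an 8-byte length field
	 * (see runninglen[] above); for the AES MACs it presumably also
	 * covers derived subkey material.
	 */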
1839 	if (is_xcbc_aes(caam_hash->alg_type)) {
1840 		ctx->dir = DMA_TO_DEVICE;
1841 		ctx->key_dir = DMA_BIDIRECTIONAL;
1842 		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1843 		ctx->ctx_len = 48;
1844 	} else if (is_cmac_aes(caam_hash->alg_type)) {
1845 		ctx->dir = DMA_TO_DEVICE;
1846 		ctx->key_dir = DMA_NONE;
1847 		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1848 		ctx->ctx_len = 32;
1849 	} else {
1850 		if (priv->era >= 6) {
1851 			ctx->dir = DMA_BIDIRECTIONAL;
1852 			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1853 		} else {
1854 			ctx->dir = DMA_TO_DEVICE;
1855 			ctx->key_dir = DMA_NONE;
1856 		}
1857 		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1858 		ctx->ctx_len = runninglen[(ctx->adata.algtype &
1859 					   OP_ALG_ALGSEL_SUBMASK) >>
1860 					  OP_ALG_ALGSEL_SHIFT];
1861 	}
1862 
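	/*
	 * Algorithms that keep key material in ctx->key need a long-lived
	 * DMA mapping so the shared descriptors can reference it.  The
	 * mapping is bidirectional for XCBC, presumably so the device can
	 * write derived key material back into the buffer.
	 * DMA_ATTR_SKIP_CPU_SYNC is used on the assumption that the buffer
	 * is synced explicitly wherever its contents change (e.g. in the
	 * setkey() callbacks).
	 */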
1863 	if (ctx->key_dir != DMA_NONE) {
1864 		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1865 							  ARRAY_SIZE(ctx->key),
1866 							  ctx->key_dir,
1867 							  DMA_ATTR_SKIP_CPU_SYNC);
1868 		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1869 			dev_err(ctx->jrdev, "unable to map key\n");
1870 			caam_jr_free(ctx->jrdev);
1871 			return -ENOMEM;
1872 		}
1873 	}
1874 
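	/*
	 * The four shared descriptors sit back to back at the start of
	 * struct caam_hash_ctx, so offsetof(..., key) is their combined size
	 * and a single mapping covers them all; the per-descriptor DMA
	 * handles below are offsets into that one mapping.
	 */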
1875 	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1876 					offsetof(struct caam_hash_ctx, key),
1877 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1878 	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1879 		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1880 
1881 		if (ctx->key_dir != DMA_NONE)
1882 			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1883 					       ARRAY_SIZE(ctx->key),
1884 					       ctx->key_dir,
1885 					       DMA_ATTR_SKIP_CPU_SYNC);
1886 
1887 		caam_jr_free(ctx->jrdev);
1888 		return -ENOMEM;
1889 	}
1890 
1891 	ctx->sh_desc_update_dma = dma_addr;
1892 	ctx->sh_desc_update_first_dma = dma_addr +
1893 					offsetof(struct caam_hash_ctx,
1894 						 sh_desc_update_first);
1895 	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1896 						   sh_desc_fin);
1897 	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1898 						      sh_desc_digest);
1899 
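	/* reserve room in each ahash request for the per-request state */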
1900 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1901 				 sizeof(struct caam_hash_state));
1902 
1903 	/*
1904 	 * For keyed hash algorithms, the shared descriptors are
1905 	 * created later, in the setkey() callback
1906 	 */
1907 	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1908 }
1909 
1910 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1911 {
1912 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1913 
1914 	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1915 			       offsetof(struct caam_hash_ctx, key),
1916 			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1917 	if (ctx->key_dir != DMA_NONE)
1918 		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1919 				       ARRAY_SIZE(ctx->key), ctx->key_dir,
1920 				       DMA_ATTR_SKIP_CPU_SYNC);
1921 	caam_jr_free(ctx->jrdev);
1922 }
1923 
1924 void caam_algapi_hash_exit(void)
1925 {
1926 	struct caam_hash_alg *t_alg, *n;
1927 
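	/* hash_list is only initialized by caam_algapi_hash_init() */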
1928 	if (!hash_list.next)
1929 		return;
1930 
1931 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1932 		crypto_unregister_ahash(&t_alg->ahash_alg);
1933 		list_del(&t_alg->entry);
1934 		kfree(t_alg);
1935 	}
1936 }
1937 
1938 static struct caam_hash_alg *
1939 caam_hash_alloc(struct caam_hash_template *template,
1940 		bool keyed)
1941 {
1942 	struct caam_hash_alg *t_alg;
1943 	struct ahash_alg *halg;
1944 	struct crypto_alg *alg;
1945 
1946 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1947 	if (!t_alg) {
1948 		pr_err("failed to allocate t_alg\n");
1949 		return ERR_PTR(-ENOMEM);
1950 	}
1951 
1952 	t_alg->ahash_alg = template->template_ahash;
1953 	halg = &t_alg->ahash_alg;
1954 	alg = &halg->halg.base;
1955 
1956 	if (keyed) {
1957 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1958 			 template->hmac_name);
1959 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1960 			 template->hmac_driver_name);
1961 	} else {
1962 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1963 			 template->name);
1964 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1965 			 template->driver_name);
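		/* drop the ->setkey() inherited from the template */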
1966 		t_alg->ahash_alg.setkey = NULL;
1967 	}
1968 	alg->cra_module = THIS_MODULE;
1969 	alg->cra_init = caam_hash_cra_init;
1970 	alg->cra_exit = caam_hash_cra_exit;
1971 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1972 	alg->cra_priority = CAAM_CRA_PRIORITY;
1973 	alg->cra_blocksize = template->blocksize;
1974 	alg->cra_alignmask = 0;
1975 	alg->cra_flags = CRYPTO_ALG_ASYNC;
1976 
1977 	t_alg->alg_type = template->alg_type;
1978 
1979 	return t_alg;
1980 }
1981 
1982 int caam_algapi_hash_init(struct device *ctrldev)
1983 {
1984 	int i = 0, err = 0;
1985 	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1986 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1987 	u32 md_inst, md_vid;
1988 
1989 	/*
1990 	 * Register the crypto algorithms the device supports.  First, identify
1991 	 * the presence and attributes of the MD block.
1992 	 */
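	/*
	 * Era < 10 parts report MD presence and version through the CHA ID
	 * and CHA instantiation registers; Era 10+ parts expose a dedicated
	 * MDHA version register instead.
	 */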
1993 	if (priv->era < 10) {
1994 		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1995 			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1996 		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1997 			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1998 	} else {
1999 		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2000 
2001 		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2002 		md_inst = mdha & CHA_VER_NUM_MASK;
2003 	}
2004 
2005 	/*
2006 	 * Skip registration of any hashing algorithms if MD block
2007 	 * is not present.
2008 	 */
2009 	if (!md_inst)
2010 		return 0;
2011 
2012 	/* The LP256 MD block only supports digest sizes up to SHA-256 */
2013 	if (md_vid == CHA_VER_VID_MD_LP256)
2014 		md_limit = SHA256_DIGEST_SIZE;
2015 
2016 	INIT_LIST_HEAD(&hash_list);
2017 
2018 	/* register the keyed and, where applicable, unkeyed variants */
2019 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
2020 		struct caam_hash_alg *t_alg;
2021 		struct caam_hash_template *alg = driver_hash + i;
2022 
2023 		/* Skip if the digest size exceeds the MD block's limit */
2024 		if (is_mdha(alg->alg_type) &&
2025 		    alg->template_ahash.halg.digestsize > md_limit)
2026 			continue;
2027 
2028 		/* register hmac version */
2029 		t_alg = caam_hash_alloc(alg, true);
2030 		if (IS_ERR(t_alg)) {
2031 			err = PTR_ERR(t_alg);
2032 			pr_warn("%s alg allocation failed\n",
2033 				alg->hmac_driver_name);
2034 			continue;
2035 		}
2036 
2037 		err = crypto_register_ahash(&t_alg->ahash_alg);
2038 		if (err) {
2039 			pr_warn("%s alg registration failed: %d\n",
2040 				t_alg->ahash_alg.halg.base.cra_driver_name,
2041 				err);
2042 			kfree(t_alg);
2043 		} else
2044 			list_add_tail(&t_alg->entry, &hash_list);
2045 
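		/* the AES-based MACs (xcbc, cmac) exist only in keyed form */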
2046 		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2047 			continue;
2048 
2049 		/* register unkeyed version */
2050 		t_alg = caam_hash_alloc(alg, false);
2051 		if (IS_ERR(t_alg)) {
2052 			err = PTR_ERR(t_alg);
2053 			pr_warn("%s alg allocation failed\n", alg->driver_name);
2054 			continue;
2055 		}
2056 
2057 		err = crypto_register_ahash(&t_alg->ahash_alg);
2058 		if (err) {
2059 			pr_warn("%s alg registration failed: %d\n",
2060 				t_alg->ahash_alg.halg.base.cra_driver_name,
2061 				err);
2062 			kfree(t_alg);
2063 		} else
2064 			list_add_tail(&t_alg->entry, &hash_list);
2065 	}
2066 
2067 	return err;
2068 }
2069