/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
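
/*
 * For illustration only, a job descriptor of this shape is assembled by the
 * update path below roughly as follows (a sketch of the calls made in
 * ahash_update_ctx(), not standalone code):
 *
 *	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + to_hash,
 *			  LDST_SGF);
 *	append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
 */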

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
/* caam context sizes for hashes: running digest + 8 bytes of message length */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
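/*
 * Partial blocks are collected in two bounce buffers, buf_0 and buf_1;
 * current_buf selects the one being filled. The update paths below swap
 * the buffers on each submitted job, so the previous remainder can stay
 * DMA-mapped while new data is gathered.
 */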
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg)
{
	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it actually contains data;
 * either way, a previously mapped buffer must be unmapped first
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash first update and digest, read data and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
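
/*
 * Note: the two helpers above differ only in whether the running hash is
 * restored first. ahash_ctx_data_to_out() seq-loads state->caam_ctx into
 * the class 2 context before the OPERATION command (update/final/finup),
 * while ahash_data_to_out() starts from scratch (init and digest).
 */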

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			       ctx->split_key_pad_len, key_in, keylen,
			       ctx->alg_op);
}

/* Digest the key if it is too large (longer than the algorithm block size) */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
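	/*
	 * Worked example: for hmac(sha256) the submask selects
	 * mdpadlen[3] = 32, so split_key_len = 2 * 32 = 64 bytes
	 * (IPAD and OPAD halves), and ALIGN(64, 16) leaves
	 * split_key_pad_len at 64 as well.
	 */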

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
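
/*
 * The edesc is allocated as a single block: the struct itself, then
 * DESC_JOB_IO_LEN bytes reserved for the hw_desc[] job descriptor, then
 * the sec4_sg link table; this is why the callers below derive
 * edesc->sec4_sg with pointer arithmetic off the edesc base.
 */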

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;
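	/*
	 * e.g. with a 64-byte block size and 100 bytes pending,
	 * 100 & 63 = 36 bytes stay buffered and to_hash = 64
	 */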

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						     sec4_sg_bytes,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					   DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes);
	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int sh_len;
	int ret = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			       req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
		dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}
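
/*
 * Note that export/import above serialize the whole tfm context and the
 * request state back to back, so the export blob occupies
 * sizeof(struct caam_hash_ctx) + sizeof(struct caam_hash_state) bytes.
 */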
1577 
1578 struct caam_hash_template {
1579 	char name[CRYPTO_MAX_ALG_NAME];
1580 	char driver_name[CRYPTO_MAX_ALG_NAME];
1581 	char hmac_name[CRYPTO_MAX_ALG_NAME];
1582 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1583 	unsigned int blocksize;
1584 	struct ahash_alg template_ahash;
1585 	u32 alg_type;
1586 	u32 alg_op;
1587 };
1588 
1589 /* ahash descriptors */
1590 static struct caam_hash_template driver_hash[] = {
1591 	{
1592 		.name = "sha1",
1593 		.driver_name = "sha1-caam",
1594 		.hmac_name = "hmac(sha1)",
1595 		.hmac_driver_name = "hmac-sha1-caam",
1596 		.blocksize = SHA1_BLOCK_SIZE,
1597 		.template_ahash = {
1598 			.init = ahash_init,
1599 			.update = ahash_update,
1600 			.final = ahash_final,
1601 			.finup = ahash_finup,
1602 			.digest = ahash_digest,
1603 			.export = ahash_export,
1604 			.import = ahash_import,
1605 			.setkey = ahash_setkey,
1606 			.halg = {
1607 				.digestsize = SHA1_DIGEST_SIZE,
1608 				},
1609 			},
1610 		.alg_type = OP_ALG_ALGSEL_SHA1,
1611 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1612 	}, {
1613 		.name = "sha224",
1614 		.driver_name = "sha224-caam",
1615 		.hmac_name = "hmac(sha224)",
1616 		.hmac_driver_name = "hmac-sha224-caam",
1617 		.blocksize = SHA224_BLOCK_SIZE,
1618 		.template_ahash = {
1619 			.init = ahash_init,
1620 			.update = ahash_update,
1621 			.final = ahash_final,
1622 			.finup = ahash_finup,
1623 			.digest = ahash_digest,
1624 			.export = ahash_export,
1625 			.import = ahash_import,
1626 			.setkey = ahash_setkey,
1627 			.halg = {
1628 				.digestsize = SHA224_DIGEST_SIZE,
1629 				},
1630 			},
1631 		.alg_type = OP_ALG_ALGSEL_SHA224,
1632 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1633 	}, {
1634 		.name = "sha256",
1635 		.driver_name = "sha256-caam",
1636 		.hmac_name = "hmac(sha256)",
1637 		.hmac_driver_name = "hmac-sha256-caam",
1638 		.blocksize = SHA256_BLOCK_SIZE,
1639 		.template_ahash = {
1640 			.init = ahash_init,
1641 			.update = ahash_update,
1642 			.final = ahash_final,
1643 			.finup = ahash_finup,
1644 			.digest = ahash_digest,
1645 			.export = ahash_export,
1646 			.import = ahash_import,
1647 			.setkey = ahash_setkey,
1648 			.halg = {
1649 				.digestsize = SHA256_DIGEST_SIZE,
1650 				},
1651 			},
1652 		.alg_type = OP_ALG_ALGSEL_SHA256,
1653 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1654 	}, {
1655 		.name = "sha384",
1656 		.driver_name = "sha384-caam",
1657 		.hmac_name = "hmac(sha384)",
1658 		.hmac_driver_name = "hmac-sha384-caam",
1659 		.blocksize = SHA384_BLOCK_SIZE,
1660 		.template_ahash = {
1661 			.init = ahash_init,
1662 			.update = ahash_update,
1663 			.final = ahash_final,
1664 			.finup = ahash_finup,
1665 			.digest = ahash_digest,
1666 			.export = ahash_export,
1667 			.import = ahash_import,
1668 			.setkey = ahash_setkey,
1669 			.halg = {
1670 				.digestsize = SHA384_DIGEST_SIZE,
1671 				},
1672 			},
1673 		.alg_type = OP_ALG_ALGSEL_SHA384,
1674 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1675 	}, {
1676 		.name = "sha512",
1677 		.driver_name = "sha512-caam",
1678 		.hmac_name = "hmac(sha512)",
1679 		.hmac_driver_name = "hmac-sha512-caam",
1680 		.blocksize = SHA512_BLOCK_SIZE,
1681 		.template_ahash = {
1682 			.init = ahash_init,
1683 			.update = ahash_update,
1684 			.final = ahash_final,
1685 			.finup = ahash_finup,
1686 			.digest = ahash_digest,
1687 			.export = ahash_export,
1688 			.import = ahash_import,
1689 			.setkey = ahash_setkey,
1690 			.halg = {
1691 				.digestsize = SHA512_DIGEST_SIZE,
1692 				},
1693 			},
1694 		.alg_type = OP_ALG_ALGSEL_SHA512,
1695 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1696 	}, {
1697 		.name = "md5",
1698 		.driver_name = "md5-caam",
1699 		.hmac_name = "hmac(md5)",
1700 		.hmac_driver_name = "hmac-md5-caam",
1701 		.blocksize = MD5_BLOCK_WORDS * 4,
1702 		.template_ahash = {
1703 			.init = ahash_init,
1704 			.update = ahash_update,
1705 			.final = ahash_final,
1706 			.finup = ahash_finup,
1707 			.digest = ahash_digest,
1708 			.export = ahash_export,
1709 			.import = ahash_import,
1710 			.setkey = ahash_setkey,
1711 			.halg = {
1712 				.digestsize = MD5_DIGEST_SIZE,
1713 				},
1714 			},
1715 		.alg_type = OP_ALG_ALGSEL_MD5,
1716 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1717 	},
1718 };
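/*
 * Each template above yields up to two registrations in
 * caam_algapi_hash_init(): a keyed hmac variant and an unkeyed plain hash.
 * Users bind to the hardware implementation through the regular crypto
 * API; a minimal sketch, assuming the MD block supports the algorithm:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	// CAAM_CRA_PRIORITY (3000) lets "hmac-sha256-caam" win over
 *	// generic software implementations.
 */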
1719 
1720 struct caam_hash_alg {
1721 	struct list_head entry;
1722 	int alg_type;
1723 	int alg_op;
1724 	struct ahash_alg ahash_alg;
1725 };
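/*
 * One caam_hash_alg is allocated per registered instance; entry links it
 * on hash_list so caam_algapi_hash_exit() can unregister and free it.
 */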
1726 
1727 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1728 {
1729 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1730 	struct crypto_alg *base = tfm->__crt_alg;
1731 	struct hash_alg_common *halg =
1732 		 container_of(base, struct hash_alg_common, base);
1733 	struct ahash_alg *alg =
1734 		 container_of(halg, struct ahash_alg, halg);
1735 	struct caam_hash_alg *caam_hash =
1736 		 container_of(alg, struct caam_hash_alg, ahash_alg);
1737 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * MDHA running digest sizes for MD5, SHA-1, SHA-224, SHA-256,
	 * SHA-384, SHA-512. The truncated variants (SHA-224, SHA-384)
	 * carry the full-width SHA-256/SHA-512 internal state.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1746 
1747 	/*
1748 	 * Get a Job ring from Job Ring driver to ensure in-order
1749 	 * crypto request processing per tfm
1750 	 */
1751 	ctx->jrdev = caam_jr_alloc();
1752 	if (IS_ERR(ctx->jrdev)) {
1753 		pr_err("Job Ring Device allocation for transform failed\n");
1754 		return PTR_ERR(ctx->jrdev);
1755 	}
1756 	/* copy descriptor header template value */
1757 	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1758 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1759 
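	/*
	 * ALGSEL values for MD5/SHA-1/224/256/384/512 are consecutive, so
	 * the low nibble extracted below indexes runninglen[] directly
	 * (0 = MD5 ... 5 = SHA-512).
	 */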
1760 	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1761 				  OP_ALG_ALGSEL_SHIFT];
1762 
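	/*
	 * Per-request state (struct caam_hash_state) lives in the request
	 * context, so several requests can be in flight on one transform.
	 */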
	crypto_ahash_set_reqsize(ahash, sizeof(struct caam_hash_state));
1765 
	return ahash_set_sh_desc(ahash);
1769 }
1770 
1771 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1772 {
1773 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
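	/*
	 * Tear down the DMA mappings of the shared descriptors created by
	 * ahash_set_sh_desc(); the checks guard partially initialized
	 * contexts.
	 */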
1774 
1775 	if (ctx->sh_desc_update_dma &&
1776 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1777 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1778 				 desc_bytes(ctx->sh_desc_update),
1779 				 DMA_TO_DEVICE);
1780 	if (ctx->sh_desc_update_first_dma &&
1781 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1782 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1783 				 desc_bytes(ctx->sh_desc_update_first),
1784 				 DMA_TO_DEVICE);
1785 	if (ctx->sh_desc_fin_dma &&
1786 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1787 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1788 				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1789 	if (ctx->sh_desc_digest_dma &&
1790 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1791 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1792 				 desc_bytes(ctx->sh_desc_digest),
1793 				 DMA_TO_DEVICE);
1794 	if (ctx->sh_desc_finup_dma &&
1795 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1796 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1797 				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
1798 
1799 	caam_jr_free(ctx->jrdev);
1800 }
1801 
1802 static void __exit caam_algapi_hash_exit(void)
1803 {
1804 	struct caam_hash_alg *t_alg, *n;
1805 
	/* hash_list is only initialized once probing in init succeeds */
	if (!hash_list.next)
		return;
1808 
1809 	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1810 		crypto_unregister_ahash(&t_alg->ahash_alg);
1811 		list_del(&t_alg->entry);
1812 		kfree(t_alg);
1813 	}
1814 }
1815 
1816 static struct caam_hash_alg *
1817 caam_hash_alloc(struct caam_hash_template *template,
1818 		bool keyed)
1819 {
1820 	struct caam_hash_alg *t_alg;
1821 	struct ahash_alg *halg;
1822 	struct crypto_alg *alg;
1823 
1824 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1825 	if (!t_alg) {
1826 		pr_err("failed to allocate t_alg\n");
1827 		return ERR_PTR(-ENOMEM);
1828 	}
1829 
1830 	t_alg->ahash_alg = template->template_ahash;
1831 	halg = &t_alg->ahash_alg;
1832 	alg = &halg->halg.base;
1833 
1834 	if (keyed) {
1835 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1836 			 template->hmac_name);
1837 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1838 			 template->hmac_driver_name);
1839 	} else {
1840 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1841 			 template->name);
1842 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1843 			 template->driver_name);
1844 	}
1845 	alg->cra_module = THIS_MODULE;
1846 	alg->cra_init = caam_hash_cra_init;
1847 	alg->cra_exit = caam_hash_cra_exit;
1848 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1849 	alg->cra_priority = CAAM_CRA_PRIORITY;
1850 	alg->cra_blocksize = template->blocksize;
1851 	alg->cra_alignmask = 0;
1852 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1853 	alg->cra_type = &crypto_ahash_type;
1854 
1855 	t_alg->alg_type = template->alg_type;
1856 	t_alg->alg_op = template->alg_op;
1857 
1858 	return t_alg;
1859 }
1860 
1861 static int __init caam_algapi_hash_init(void)
1862 {
1863 	struct device_node *dev_node;
1864 	struct platform_device *pdev;
1865 	struct device *ctrldev;
1866 	int i = 0, err = 0;
1867 	struct caam_drv_private *priv;
1868 	unsigned int md_limit = SHA512_DIGEST_SIZE;
1869 	u32 cha_inst, cha_vid;
1870 
1871 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1872 	if (!dev_node) {
1873 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1874 		if (!dev_node)
1875 			return -ENODEV;
1876 	}
1877 
1878 	pdev = of_find_device_by_node(dev_node);
1879 	if (!pdev) {
1880 		of_node_put(dev_node);
1881 		return -ENODEV;
1882 	}
1883 
1884 	ctrldev = &pdev->dev;
1885 	priv = dev_get_drvdata(ctrldev);
1886 	of_node_put(dev_node);
1887 
1888 	/*
1889 	 * If priv is NULL, it's probably because the caam driver wasn't
1890 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1891 	 */
1892 	if (!priv)
1893 		return -ENODEV;
1894 
1895 	/*
1896 	 * Register crypto algorithms the device supports.  First, identify
1897 	 * presence and attributes of MD block.
1898 	 */
1899 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
1900 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
1901 
	/*
	 * Skip registration of any hashing algorithms if the MD block is
	 * not present (the masked field holds the number of instantiated
	 * MDHA units).
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;
1908 
	/* LP256 (low-power) MDHA variants only support digests up to SHA-256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;
1912 
1913 	INIT_LIST_HEAD(&hash_list);
1914 
1915 	/* register crypto algorithms the device supports */
1916 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1917 		struct caam_hash_alg *t_alg;
1918 		struct caam_hash_template *alg = driver_hash + i;
1919 
1920 		/* If MD size is not supported by device, skip registration */
1921 		if (alg->template_ahash.halg.digestsize > md_limit)
1922 			continue;
1923 
1924 		/* register hmac version */
1925 		t_alg = caam_hash_alloc(alg, true);
1926 		if (IS_ERR(t_alg)) {
1927 			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
1929 			continue;
1930 		}
1931 
1932 		err = crypto_register_ahash(&t_alg->ahash_alg);
1933 		if (err) {
1934 			pr_warn("%s alg registration failed\n",
1935 				t_alg->ahash_alg.halg.base.cra_driver_name);
1936 			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
1939 
1940 		/* register unkeyed version */
1941 		t_alg = caam_hash_alloc(alg, false);
1942 		if (IS_ERR(t_alg)) {
1943 			err = PTR_ERR(t_alg);
1944 			pr_warn("%s alg allocation failed\n", alg->driver_name);
1945 			continue;
1946 		}
1947 
1948 		err = crypto_register_ahash(&t_alg->ahash_alg);
1949 		if (err) {
1950 			pr_warn("%s alg registration failed\n",
1951 				t_alg->ahash_alg.halg.base.cra_driver_name);
1952 			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
1955 	}
1956 
1957 	return err;
1958 }
1959 
1960 module_init(caam_algapi_hash_init);
1961 module_exit(caam_algapi_hash_exit);
1962 
1963 MODULE_LICENSE("GPL");
1964 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1965 MODULE_AUTHOR("Freescale Semiconductor - NMG");
1966