xref: /linux/drivers/crypto/caam/caamalg.c (revision 1abd4986f4445b0280a07bc46aefa3d0d30258f9)
1 /*
2  * caam - Freescale FSL CAAM support for crypto API
3  *
4  * Copyright 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Based on talitos crypto API driver.
7  *
8  * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9  *
10  * ---------------                     ---------------
11  * | JobDesc #1  |-------------------->|  ShareDesc  |
12  * | *(packet 1) |                     |   (PDB)     |
13  * ---------------      |------------->|  (hashKey)  |
14  *       .              |              | (cipherKey) |
15  *       .              |    |-------->| (operation) |
16  * ---------------      |    |         ---------------
17  * | JobDesc #2  |------|    |
18  * | *(packet 2) |           |
19  * ---------------           |
20  *       .                   |
21  *       .                   |
22  * ---------------           |
23  * | JobDesc #3  |------------
24  * | *(packet 3) |
25  * ---------------
26  *
27  * The SharedDesc never changes for a connection unless rekeyed, but
28  * each packet will likely be in a different place. So all we need
29  * to know to process the packet is where the input is, where the
30  * output goes, and what context we want to process with. Context is
31  * in the SharedDesc, packet references in the JobDesc.
32  *
33  * So, a job desc looks like:
34  *
35  * ---------------------
36  * | Header            |
37  * | ShareDesc Pointer |
38  * | SEQ_OUT_PTR       |
39  * | (output buffer)   |
40  * | (output length)   |
41  * | SEQ_IN_PTR        |
42  * | (input buffer)    |
43  * | (input length)    |
44  * ---------------------
45  */
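
/*
 * Sizing sketch (illustrative, assuming 4-byte command and pointer words):
 * the job descriptor above costs one header word, one ShareDesc pointer
 * word, and three words each for SEQ_OUT_PTR and SEQ_IN_PTR (command +
 * address + length), i.e. eight words of fixed per-packet I/O overhead.
 */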
46 
47 #include "compat.h"
48 
49 #include "regs.h"
50 #include "intern.h"
51 #include "desc_constr.h"
52 #include "jr.h"
53 #include "error.h"
54 #include "sg_sw_sec4.h"
55 #include "key_gen.h"
56 
57 /*
58  * crypto alg
59  */
60 #define CAAM_CRA_PRIORITY		3000
61 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62 #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
63 					 SHA512_DIGEST_SIZE * 2)
64 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
65 #define CAAM_MAX_IV_LENGTH		16
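
/*
 * Worked example (using the standard crypto API constants,
 * AES_MAX_KEY_SIZE == 32 and SHA512_DIGEST_SIZE == 64):
 * CAAM_MAX_KEY_SIZE = 32 + 64 * 2 = 160 bytes.  For the IV,
 * AES_BLOCK_SIZE (16) > DES3_EDE_BLOCK_SIZE (8), hence the literal 16.
 */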
66 
67 /* length of descriptors text */
68 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
69 #define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
70 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
71 #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
72 
73 #define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
74 #define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
75 					 20 * CAAM_CMD_SZ)
76 #define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
77 					 15 * CAAM_CMD_SZ)
78 
79 #define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
80 					 CAAM_MAX_KEY_SIZE)
81 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
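
/*
 * Worked example (assuming CAAM_CMD_SZ == 4 and the 160-byte
 * CAAM_MAX_KEY_SIZE computed above):
 *   DESC_AEAD_GIVENC_LEN = (4 + 16 + 7) * 4 = 108 bytes
 *   DESC_MAX_USED_BYTES  = 108 + 160       = 268 bytes
 *   DESC_MAX_USED_LEN    = 268 / 4         = 67 command words
 */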
82 
83 #ifdef DEBUG
84 /* for print_hex_dumps with line references */
85 #define debug(format, arg...) printk(format, arg)
86 #else
87 #define debug(format, arg...)
88 #endif
89 static struct list_head alg_list;
90 
91 /* Set DK bit in class 1 operation if shared */
92 static inline void append_dec_op1(u32 *desc, u32 type)
93 {
94 	u32 *jump_cmd, *uncond_jump_cmd;
95 
96 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
97 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
98 			 OP_ALG_DECRYPT);
99 	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
100 	set_jump_tgt_here(desc, jump_cmd);
101 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
102 			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
103 	set_jump_tgt_here(desc, uncond_jump_cmd);
104 }
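
/*
 * The sequence emitted above behaves like the following pseudo-flow:
 *
 *         if (shared) goto dk;         <- jump_cmd
 *         OPERATION decrypt
 *         goto done;                   <- uncond_jump_cmd
 *   dk:   OPERATION decrypt | AAI_DK
 *   done:
 */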
105 
106 /*
107  * Wait for completion of class 1 key loading before allowing
108  * error propagation
109  */
110 static inline void append_dec_shr_done(u32 *desc)
111 {
112 	u32 *jump_cmd;
113 
114 	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
115 	set_jump_tgt_here(desc, jump_cmd);
116 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
117 }
118 
119 /*
120  * For aead functions, read payload and write payload,
121  * both of which are specified in req->src and req->dst
122  */
123 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
124 {
125 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
126 			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
127 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
128 }
129 
130 /*
131  * For aead encrypt and decrypt, read iv for both classes
132  */
133 static inline void aead_append_ld_iv(u32 *desc, int ivsize)
134 {
135 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
136 		   LDST_CLASS_1_CCB | ivsize);
137 	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
138 }
139 
140 /*
141  * For ablkcipher encrypt and decrypt, read from req->src and
142  * write to req->dst
143  */
144 static inline void ablkcipher_append_src_dst(u32 *desc)
145 {
146 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
147 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
148 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
149 			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
150 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
151 }
152 
153 /*
154  * Set if all data, including src (with assoc and iv) or dst (with iv
155  * only), is contiguous
156  */
157 #define GIV_SRC_CONTIG		1
158 #define GIV_DST_CONTIG		(1 << 1)
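
/*
 * Example usage (mirrors aead_giv_edesc_alloc() below): start from
 * contig = GIV_SRC_CONTIG | GIV_DST_CONTIG and clear bits as gaps are
 * found, e.g. contig &= ~GIV_SRC_CONTIG once assoc, iv and src turn out
 * not to be back to back in DMA space.
 */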
159 
160 /*
161  * per-session context
162  */
163 struct caam_ctx {
164 	struct device *jrdev;
165 	u32 sh_desc_enc[DESC_MAX_USED_LEN];
166 	u32 sh_desc_dec[DESC_MAX_USED_LEN];
167 	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
168 	dma_addr_t sh_desc_enc_dma;
169 	dma_addr_t sh_desc_dec_dma;
170 	dma_addr_t sh_desc_givenc_dma;
171 	u32 class1_alg_type;
172 	u32 class2_alg_type;
173 	u32 alg_op;
174 	u8 key[CAAM_MAX_KEY_SIZE];
175 	dma_addr_t key_dma;
176 	unsigned int enckeylen;
177 	unsigned int split_key_len;
178 	unsigned int split_key_pad_len;
179 	unsigned int authsize;
180 };
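
/*
 * Layout of ctx->key as used throughout this file (see append_key_aead()
 * and aead_setkey() below):
 *
 *   +-------------------------------+-------------------+
 *   | MDHA split key                | encryption key    |
 *   | (split_key_pad_len bytes,     | (enckeylen bytes) |
 *   |  split_key_len of them valid) |                   |
 *   +-------------------------------+-------------------+
 *   ^ key_dma                       ^ key_dma + split_key_pad_len
 */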
181 
182 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
183 			    int keys_fit_inline)
184 {
185 	if (keys_fit_inline) {
186 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
187 				  ctx->split_key_len, CLASS_2 |
188 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
189 		append_key_as_imm(desc, (void *)ctx->key +
190 				  ctx->split_key_pad_len, ctx->enckeylen,
191 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
192 	} else {
193 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
194 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
195 		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
196 			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
197 	}
198 }
199 
200 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
201 				  int keys_fit_inline)
202 {
203 	u32 *key_jump_cmd;
204 
205 	init_sh_desc(desc, HDR_SHARE_SERIAL);
206 
207 	/* Skip if already shared */
208 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
209 				   JUMP_COND_SHRD);
210 
211 	append_key_aead(desc, ctx, keys_fit_inline);
212 
213 	set_jump_tgt_here(desc, key_jump_cmd);
214 
215 	/* Propagate errors from shared to job descriptor */
216 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
217 }
218 
219 static int aead_set_sh_desc(struct crypto_aead *aead)
220 {
221 	struct aead_tfm *tfm = &aead->base.crt_aead;
222 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
223 	struct device *jrdev = ctx->jrdev;
224 	bool keys_fit_inline = false;
225 	u32 *key_jump_cmd, *jump_cmd;
226 	u32 geniv, moveiv;
227 	u32 *desc;
228 
229 	if (!ctx->enckeylen || !ctx->authsize)
230 		return 0;
231 
232 	/*
233 	 * Job Descriptor and Shared Descriptors
234 	 * must all fit into the 64-word Descriptor h/w Buffer
235 	 */
236 	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
237 	    ctx->split_key_pad_len + ctx->enckeylen <=
238 	    CAAM_DESC_BYTES_MAX)
239 		keys_fit_inline = true;
240 
241 	/* aead_encrypt shared descriptor */
242 	desc = ctx->sh_desc_enc;
243 
244 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
245 
246 	/* Class 2 operation */
247 	append_operation(desc, ctx->class2_alg_type |
248 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
249 
250 	/* cryptlen = seqoutlen - authsize */
251 	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
252 
253 	/* assoclen + cryptlen = seqinlen - ivsize */
254 	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
255 
256 	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
257 	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
258 
259 	/* read assoc before reading payload */
260 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
261 			     KEY_VLF);
262 	aead_append_ld_iv(desc, tfm->ivsize);
263 
264 	/* Class 1 operation */
265 	append_operation(desc, ctx->class1_alg_type |
266 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
267 
268 	/* Read and write cryptlen bytes */
269 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
270 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
271 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
272 
273 	/* Write ICV */
274 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
275 			 LDST_SRCDST_BYTE_CONTEXT);
276 
277 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
278 					      desc_bytes(desc),
279 					      DMA_TO_DEVICE);
280 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
281 		dev_err(jrdev, "unable to map shared descriptor\n");
282 		return -ENOMEM;
283 	}
284 #ifdef DEBUG
285 	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
286 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
287 		       desc_bytes(desc), 1);
288 #endif
289 
290 	/*
291 	 * Job Descriptor and Shared Descriptors
292 	 * must all fit into the 64-word Descriptor h/w Buffer
293 	 */
294 	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
295 	    ctx->split_key_pad_len + ctx->enckeylen <=
296 	    CAAM_DESC_BYTES_MAX)
297 		keys_fit_inline = true;
298 
299 	desc = ctx->sh_desc_dec;
300 
301 	/* aead_decrypt shared descriptor */
302 	init_sh_desc(desc, HDR_SHARE_SERIAL);
303 
304 	/* Skip if already shared */
305 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
306 				   JUMP_COND_SHRD);
307 
308 	append_key_aead(desc, ctx, keys_fit_inline);
309 
310 	/* Only propagate error immediately if shared */
311 	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
312 	set_jump_tgt_here(desc, key_jump_cmd);
313 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
314 	set_jump_tgt_here(desc, jump_cmd);
315 
316 	/* Class 2 operation */
317 	append_operation(desc, ctx->class2_alg_type |
318 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
319 
320 	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
321 	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
322 				ctx->authsize + tfm->ivsize);
323 	/* assoclen = (assoclen + cryptlen) - cryptlen, cryptlen = seqoutlen */
324 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
325 	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
326 
327 	/* read assoc before reading payload */
328 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
329 			     KEY_VLF);
330 
331 	aead_append_ld_iv(desc, tfm->ivsize);
332 
333 	append_dec_op1(desc, ctx->class1_alg_type);
334 
335 	/* Read and write cryptlen bytes */
336 	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
337 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
338 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
339 
340 	/* Load ICV */
341 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
342 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
343 	append_dec_shr_done(desc);
344 
345 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
346 					      desc_bytes(desc),
347 					      DMA_TO_DEVICE);
348 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
349 		dev_err(jrdev, "unable to map shared descriptor\n");
350 		return -ENOMEM;
351 	}
352 #ifdef DEBUG
353 	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
354 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
355 		       desc_bytes(desc), 1);
356 #endif
357 
358 	/*
359 	 * Job Descriptor and Shared Descriptors
360 	 * must all fit into the 64-word Descriptor h/w Buffer
361 	 */
362 	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
363 	    ctx->split_key_pad_len + ctx->enckeylen <=
364 	    CAAM_DESC_BYTES_MAX)
365 		keys_fit_inline = true;
366 
367 	/* aead_givencrypt shared descriptor */
368 	desc = ctx->sh_desc_givenc;
369 
370 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
371 
372 	/* Generate IV */
373 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
374 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
375 		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
376 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
377 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
378 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
379 	append_move(desc, MOVE_SRC_INFIFO |
380 		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
381 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
382 
383 	/* Move generated IV from class 1 context to output FIFO */
384 	append_move(desc, MOVE_SRC_CLASS1CTX |
385 		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
386 
387 	/* Return to encryption */
388 	append_operation(desc, ctx->class2_alg_type |
389 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
390 
391 	/* ivsize + cryptlen = seqoutlen - authsize */
392 	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
393 
394 	/* assoclen = seqinlen - (ivsize + cryptlen) */
395 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
396 
397 	/* read assoc before reading payload */
398 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
399 			     KEY_VLF);
400 
401 	/* Copy iv from class 1 ctx to class 2 fifo */
402 	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
403 		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
404 	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
405 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
406 	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
407 			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
408 
409 	/* Class 1 operation */
410 	append_operation(desc, ctx->class1_alg_type |
411 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
412 
413 	/* Will write ivsize + cryptlen */
414 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
415 
416 	/* No need to reload iv */
417 	append_seq_fifo_load(desc, tfm->ivsize,
418 			     FIFOLD_CLASS_SKIP);
419 
420 	/* Will read cryptlen */
421 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
422 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
423 
424 	/* Write ICV */
425 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
426 			 LDST_SRCDST_BYTE_CONTEXT);
427 
428 	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
429 						 desc_bytes(desc),
430 						 DMA_TO_DEVICE);
431 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
432 		dev_err(jrdev, "unable to map shared descriptor\n");
433 		return -ENOMEM;
434 	}
435 #ifdef DEBUG
436 	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
437 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
438 		       desc_bytes(desc), 1);
439 #endif
440 
441 	return 0;
442 }
443 
444 static int aead_setauthsize(struct crypto_aead *authenc,
445 				    unsigned int authsize)
446 {
447 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
448 
449 	ctx->authsize = authsize;
450 	aead_set_sh_desc(authenc);
451 
452 	return 0;
453 }
454 
455 static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
456 			      u32 authkeylen)
457 {
458 	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
459 			       ctx->split_key_pad_len, key_in, authkeylen,
460 			       ctx->alg_op);
461 }
462 
463 static int aead_setkey(struct crypto_aead *aead,
464 			       const u8 *key, unsigned int keylen)
465 {
466 	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
467 	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
468 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
469 	struct device *jrdev = ctx->jrdev;
470 	struct rtattr *rta = (void *)key;
471 	struct crypto_authenc_key_param *param;
472 	unsigned int authkeylen;
473 	unsigned int enckeylen;
474 	int ret = 0;
475 
476 	param = RTA_DATA(rta);
477 	enckeylen = be32_to_cpu(param->enckeylen);
478 
479 	key += RTA_ALIGN(rta->rta_len);
480 	keylen -= RTA_ALIGN(rta->rta_len);
481 
482 	if (keylen < enckeylen)
483 		goto badkey;
484 
485 	authkeylen = keylen - enckeylen;
486 
487 	if (keylen > CAAM_MAX_KEY_SIZE)
488 		goto badkey;
489 
490 	/* Pick class 2 key length from algorithm submask */
491 	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
492 				      OP_ALG_ALGSEL_SHIFT] * 2;
493 	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
494 
495 #ifdef DEBUG
496 	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
497 	       keylen, enckeylen, authkeylen);
498 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
499 	       ctx->split_key_len, ctx->split_key_pad_len);
500 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
501 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
502 #endif
503 
504 	ret = gen_split_aead_key(ctx, key, authkeylen);
505 	if (ret) {
506 		goto badkey;
507 	}
508 
509 	/* append encryption key after the auth split key */
510 	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
511 
512 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
513 				       enckeylen, DMA_TO_DEVICE);
514 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
515 		dev_err(jrdev, "unable to map key i/o memory\n");
516 		return -ENOMEM;
517 	}
518 #ifdef DEBUG
519 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
520 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
521 		       ctx->split_key_pad_len + enckeylen, 1);
522 #endif
523 
524 	ctx->enckeylen = enckeylen;
525 
526 	ret = aead_set_sh_desc(aead);
527 	if (ret) {
528 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
529 				 enckeylen, DMA_TO_DEVICE);
530 	}
531 
532 	return ret;
533 badkey:
534 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
535 	return -EINVAL;
536 }
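
/*
 * Worked example for aead_setkey() above (hypothetical session):
 * authenc(hmac(sha1),cbc(aes)) with a 20-byte HMAC key and a 16-byte
 * AES key gives
 *   split_key_len     = mdpadlen[sha1] * 2 = 20 * 2 = 40
 *   split_key_pad_len = ALIGN(40, 16)      = 48
 * so 48 + 16 = 64 bytes of ctx->key get DMA-mapped: the padded split
 * key followed by the AES key.
 */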
537 
538 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
539 			     const u8 *key, unsigned int keylen)
540 {
541 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
542 	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
543 	struct device *jrdev = ctx->jrdev;
544 	int ret = 0;
545 	u32 *key_jump_cmd, *jump_cmd;
546 	u32 *desc;
547 
548 #ifdef DEBUG
549 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
550 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
551 #endif
552 
553 	memcpy(ctx->key, key, keylen);
554 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
555 				      DMA_TO_DEVICE);
556 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
557 		dev_err(jrdev, "unable to map key i/o memory\n");
558 		return -ENOMEM;
559 	}
560 	ctx->enckeylen = keylen;
561 
562 	/* ablkcipher_encrypt shared descriptor */
563 	desc = ctx->sh_desc_enc;
564 	init_sh_desc(desc, HDR_SHARE_SERIAL);
565 	/* Skip if already shared */
566 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
567 				   JUMP_COND_SHRD);
568 
569 	/* Load class1 key only */
570 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
571 			  ctx->enckeylen, CLASS_1 |
572 			  KEY_DEST_CLASS_REG);
573 
574 	set_jump_tgt_here(desc, key_jump_cmd);
575 
576 	/* Propagate errors from shared to job descriptor */
577 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
578 
579 	/* Load iv */
580 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
581 		   LDST_CLASS_1_CCB | tfm->ivsize);
582 
583 	/* Load operation */
584 	append_operation(desc, ctx->class1_alg_type |
585 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
586 
587 	/* Perform operation */
588 	ablkcipher_append_src_dst(desc);
589 
590 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
591 					      desc_bytes(desc),
592 					      DMA_TO_DEVICE);
593 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
594 		dev_err(jrdev, "unable to map shared descriptor\n");
595 		return -ENOMEM;
596 	}
597 #ifdef DEBUG
598 	print_hex_dump(KERN_ERR,
599 		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
600 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
601 		       desc_bytes(desc), 1);
602 #endif
603 	/* ablkcipher_decrypt shared descriptor */
604 	desc = ctx->sh_desc_dec;
605 
606 	init_sh_desc(desc, HDR_SHARE_SERIAL);
607 	/* Skip if already shared */
608 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
609 				   JUMP_COND_SHRD);
610 
611 	/* Load class1 key only */
612 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
613 			  ctx->enckeylen, CLASS_1 |
614 			  KEY_DEST_CLASS_REG);
615 
616 	/* Only propagate error immediately if shared */
617 	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
618 	set_jump_tgt_here(desc, key_jump_cmd);
619 	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
620 	set_jump_tgt_here(desc, jump_cmd);
621 
622 	/* load IV */
623 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
624 		   LDST_CLASS_1_CCB | tfm->ivsize);
625 
626 	/* Choose operation */
627 	append_dec_op1(desc, ctx->class1_alg_type);
628 
629 	/* Perform operation */
630 	ablkcipher_append_src_dst(desc);
631 
632 	/* Wait for key to load before allowing propagating error */
633 	append_dec_shr_done(desc);
634 
635 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
636 					      desc_bytes(desc),
637 					      DMA_TO_DEVICE);
638 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
639 		dev_err(jrdev, "unable to map shared descriptor\n");
640 		return -ENOMEM;
641 	}
642 
643 #ifdef DEBUG
644 	print_hex_dump(KERN_ERR,
645 		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
646 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
647 		       desc_bytes(desc), 1);
648 #endif
649 
650 	return ret;
651 }
652 
653 /*
654  * aead_edesc - s/w-extended aead descriptor
655  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
656  * @assoc_chained: if associated data is chained
657  * @src_nents: number of segments in input scatterlist
658  * @src_chained: if source is chained
659  * @dst_nents: number of segments in output scatterlist
660  * @dst_chained: if destination is chained
661  * @iv_dma: dma address of iv for checking continuity and link table
662  * @sec4_sg: pointer to h/w link table
663  * @sec4_sg_bytes: length of dma mapped sec4_sg space
664  * @sec4_sg_dma: bus physical mapped address of h/w link table
665  * @hw_desc: the h/w job descriptor followed by any referenced link tables
666  */
667 struct aead_edesc {
668 	int assoc_nents;
669 	bool assoc_chained;
670 	int src_nents;
671 	bool src_chained;
672 	int dst_nents;
673 	bool dst_chained;
674 	dma_addr_t iv_dma;
675 	int sec4_sg_bytes;
676 	dma_addr_t sec4_sg_dma;
677 	struct sec4_sg_entry *sec4_sg;
678 	u32 hw_desc[0];
679 };
680 
681 /*
682  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
683  * @src_nents: number of segments in input scatterlist
684  * @src_chained: if source is chained
685  * @dst_nents: number of segments in output scatterlist
686  * @dst_chained: if destination is chained
687  * @iv_dma: dma address of iv for checking continuity and link table
688  * @sec4_sg: pointer to h/w link table
689  * @sec4_sg_bytes: length of dma mapped sec4_sg space
690  * @sec4_sg_dma: bus physical mapped address of h/w link table
691  * @hw_desc: the h/w job descriptor followed by any referenced link tables
692  */
693 struct ablkcipher_edesc {
694 	int src_nents;
695 	bool src_chained;
696 	int dst_nents;
697 	bool dst_chained;
698 	dma_addr_t iv_dma;
699 	int sec4_sg_bytes;
700 	dma_addr_t sec4_sg_dma;
701 	struct sec4_sg_entry *sec4_sg;
702 	u32 hw_desc[0];
703 };
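
/*
 * Both extended descriptors are carved out of a single allocation
 * (see the *_edesc_alloc() helpers below):
 *
 *   +------------------+------------------+----------------------+
 *   | struct *_edesc   | hw_desc[]        | sec4_sg link table   |
 *   | (bookkeeping)    | (desc_bytes)     | (sec4_sg_bytes)      |
 *   +------------------+------------------+----------------------+
 */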
704 
705 static void caam_unmap(struct device *dev, struct scatterlist *src,
706 		       struct scatterlist *dst, int src_nents,
707 		       bool src_chained, int dst_nents, bool dst_chained,
708 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
709 		       int sec4_sg_bytes)
710 {
711 	if (dst != src) {
712 		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
713 				     src_chained);
714 		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
715 				     dst_chained);
716 	} else {
717 		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
718 				     DMA_BIDIRECTIONAL, src_chained);
719 	}
720 
721 	if (iv_dma)
722 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
723 	if (sec4_sg_bytes)
724 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
725 				 DMA_TO_DEVICE);
726 }
727 
728 static void aead_unmap(struct device *dev,
729 		       struct aead_edesc *edesc,
730 		       struct aead_request *req)
731 {
732 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
733 	int ivsize = crypto_aead_ivsize(aead);
734 
735 	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
736 			     DMA_TO_DEVICE, edesc->assoc_chained);
737 
738 	caam_unmap(dev, req->src, req->dst,
739 		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
740 		   edesc->dst_chained, edesc->iv_dma, ivsize,
741 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
742 }
743 
744 static void ablkcipher_unmap(struct device *dev,
745 			     struct ablkcipher_edesc *edesc,
746 			     struct ablkcipher_request *req)
747 {
748 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
749 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
750 
751 	caam_unmap(dev, req->src, req->dst,
752 		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
753 		   edesc->dst_chained, edesc->iv_dma, ivsize,
754 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
755 }
756 
757 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
758 				   void *context)
759 {
760 	struct aead_request *req = context;
761 	struct aead_edesc *edesc;
762 #ifdef DEBUG
763 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
764 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
765 	int ivsize = crypto_aead_ivsize(aead);
766 
767 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
768 #endif
769 
770 	edesc = (struct aead_edesc *)((char *)desc -
771 		 offsetof(struct aead_edesc, hw_desc));
772 
773 	if (err) {
774 		char tmp[CAAM_ERROR_STR_MAX];
775 
776 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
777 	}
778 
779 	aead_unmap(jrdev, edesc, req);
780 
781 #ifdef DEBUG
782 	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
783 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
784 		       req->assoclen, 1);
785 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
786 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
787 		       edesc->src_nents ? 100 : ivsize, 1);
788 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
789 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
790 		       edesc->src_nents ? 100 : req->cryptlen +
791 		       ctx->authsize + 4, 1);
792 #endif
793 
794 	kfree(edesc);
795 
796 	aead_request_complete(req, err);
797 }
798 
799 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
800 				   void *context)
801 {
802 	struct aead_request *req = context;
803 	struct aead_edesc *edesc;
804 #ifdef DEBUG
805 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
806 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
807 	int ivsize = crypto_aead_ivsize(aead);
808 
809 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
810 #endif
811 
812 	edesc = (struct aead_edesc *)((char *)desc -
813 		 offsetof(struct aead_edesc, hw_desc));
814 
815 #ifdef DEBUG
816 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
817 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
818 		       ivsize, 1);
819 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
820 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
821 		       req->cryptlen, 1);
822 #endif
823 
824 	if (err) {
825 		char tmp[CAAM_ERROR_STR_MAX];
826 
827 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
828 	}
829 
830 	aead_unmap(jrdev, edesc, req);
831 
832 	/*
833 	 * verify hw auth check passed else return -EBADMSG
834 	 */
835 	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
836 		err = -EBADMSG;
837 
838 #ifdef DEBUG
839 	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
840 		       DUMP_PREFIX_ADDRESS, 16, 4,
841 		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
842 		       sizeof(struct iphdr) + req->assoclen +
843 		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
844 		       ctx->authsize + 36, 1);
845 	if (!err && edesc->sec4_sg_bytes) {
846 		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
847 		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
848 			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
849 			sg->length + ctx->authsize + 16, 1);
850 	}
851 #endif
852 
853 	kfree(edesc);
854 
855 	aead_request_complete(req, err);
856 }
857 
858 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
859 				   void *context)
860 {
861 	struct ablkcipher_request *req = context;
862 	struct ablkcipher_edesc *edesc;
863 #ifdef DEBUG
864 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
865 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
866 
867 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
868 #endif
869 
870 	edesc = (struct ablkcipher_edesc *)((char *)desc -
871 		 offsetof(struct ablkcipher_edesc, hw_desc));
872 
873 	if (err) {
874 		char tmp[CAAM_ERROR_STR_MAX];
875 
876 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
877 	}
878 
879 #ifdef DEBUG
880 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
881 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
882 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
883 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
884 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
885 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
886 #endif
887 
888 	ablkcipher_unmap(jrdev, edesc, req);
889 	kfree(edesc);
890 
891 	ablkcipher_request_complete(req, err);
892 }
893 
894 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
895 				    void *context)
896 {
897 	struct ablkcipher_request *req = context;
898 	struct ablkcipher_edesc *edesc;
899 #ifdef DEBUG
900 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
901 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
902 
903 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
904 #endif
905 
906 	edesc = (struct ablkcipher_edesc *)((char *)desc -
907 		 offsetof(struct ablkcipher_edesc, hw_desc));
908 	if (err) {
909 		char tmp[CAAM_ERROR_STR_MAX];
910 
911 		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
912 	}
913 
914 #ifdef DEBUG
915 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
916 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
917 		       ivsize, 1);
918 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
919 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
920 		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
921 #endif
922 
923 	ablkcipher_unmap(jrdev, edesc, req);
924 	kfree(edesc);
925 
926 	ablkcipher_request_complete(req, err);
927 }
928 
929 /*
930  * Fill in aead job descriptor
931  */
932 static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
933 			  struct aead_edesc *edesc,
934 			  struct aead_request *req,
935 			  bool all_contig, bool encrypt)
936 {
937 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
938 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
939 	int ivsize = crypto_aead_ivsize(aead);
940 	int authsize = ctx->authsize;
941 	u32 *desc = edesc->hw_desc;
942 	u32 out_options = 0, in_options;
943 	dma_addr_t dst_dma, src_dma;
944 	int len, sec4_sg_index = 0;
945 
946 #ifdef DEBUG
947 	debug("assoclen %d cryptlen %d authsize %d\n",
948 	      req->assoclen, req->cryptlen, authsize);
949 	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
950 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
951 		       req->assoclen, 1);
952 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
953 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
954 		       edesc->src_nents ? 100 : ivsize, 1);
955 	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
956 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
957 			edesc->src_nents ? 100 : req->cryptlen, 1);
958 	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
959 		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
960 		       desc_bytes(sh_desc), 1);
961 #endif
962 
963 	len = desc_len(sh_desc);
964 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
965 
966 	if (all_contig) {
967 		src_dma = sg_dma_address(req->assoc);
968 		in_options = 0;
969 	} else {
970 		src_dma = edesc->sec4_sg_dma;
971 		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
972 				 (edesc->src_nents ? : 1);
973 		in_options = LDST_SGF;
974 	}
975 	if (encrypt)
976 		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
977 				  req->cryptlen - authsize, in_options);
978 	else
979 		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
980 				  req->cryptlen, in_options);
981 
982 	if (likely(req->src == req->dst)) {
983 		if (all_contig) {
984 			dst_dma = sg_dma_address(req->src);
985 		} else {
986 			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
987 				  ((edesc->assoc_nents ? : 1) + 1);
988 			out_options = LDST_SGF;
989 		}
990 	} else {
991 		if (!edesc->dst_nents) {
992 			dst_dma = sg_dma_address(req->dst);
993 		} else {
994 			dst_dma = edesc->sec4_sg_dma +
995 				  sec4_sg_index *
996 				  sizeof(struct sec4_sg_entry);
997 			out_options = LDST_SGF;
998 		}
999 	}
1000 	if (encrypt)
1001 		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
1002 	else
1003 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1004 				   out_options);
1005 }
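
/*
 * Length convention used above: aead_encrypt() and aead_givencrypt()
 * bump req->cryptlen by ctx->authsize before building the job, and the
 * aead API already includes the ICV in cryptlen for decrypt; so the
 * encrypt path reads cryptlen - authsize payload bytes and writes
 * cryptlen bytes, while decrypt reads cryptlen and writes
 * cryptlen - authsize.
 */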
1006 
1007 /*
1008  * Fill in aead givencrypt job descriptor
1009  */
1010 static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1011 			      struct aead_edesc *edesc,
1012 			      struct aead_request *req,
1013 			      int contig)
1014 {
1015 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1016 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1017 	int ivsize = crypto_aead_ivsize(aead);
1018 	int authsize = ctx->authsize;
1019 	u32 *desc = edesc->hw_desc;
1020 	u32 out_options = 0, in_options;
1021 	dma_addr_t dst_dma, src_dma;
1022 	int len, sec4_sg_index = 0;
1023 
1024 #ifdef DEBUG
1025 	debug("assoclen %d cryptlen %d authsize %d\n",
1026 	      req->assoclen, req->cryptlen, authsize);
1027 	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
1028 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1029 		       req->assoclen, 1);
1030 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1031 		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1032 	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
1033 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1034 			edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1035 	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
1036 		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1037 		       desc_bytes(sh_desc), 1);
1038 #endif
1039 
1040 	len = desc_len(sh_desc);
1041 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1042 
1043 	if (contig & GIV_SRC_CONTIG) {
1044 		src_dma = sg_dma_address(req->assoc);
1045 		in_options = 0;
1046 	} else {
1047 		src_dma = edesc->sec4_sg_dma;
1048 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1049 		in_options = LDST_SGF;
1050 	}
1051 	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
1052 			  req->cryptlen - authsize, in_options);
1053 
1054 	if (contig & GIV_DST_CONTIG) {
1055 		dst_dma = edesc->iv_dma;
1056 	} else {
1057 		if (likely(req->src == req->dst)) {
1058 			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1059 				  edesc->assoc_nents;
1060 			out_options = LDST_SGF;
1061 		} else {
1062 			dst_dma = edesc->sec4_sg_dma +
1063 				  sec4_sg_index *
1064 				  sizeof(struct sec4_sg_entry);
1065 			out_options = LDST_SGF;
1066 		}
1067 	}
1068 
1069 	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
1070 }
1071 
1072 /*
1073  * Fill in ablkcipher job descriptor
1074  */
1075 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1076 				struct ablkcipher_edesc *edesc,
1077 				struct ablkcipher_request *req,
1078 				bool iv_contig)
1079 {
1080 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1081 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1082 	u32 *desc = edesc->hw_desc;
1083 	u32 out_options = 0, in_options;
1084 	dma_addr_t dst_dma, src_dma;
1085 	int len, sec4_sg_index = 0;
1086 
1087 #ifdef DEBUG
1088 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1089 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1090 		       ivsize, 1);
1091 	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
1092 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1093 		       edesc->src_nents ? 100 : req->nbytes, 1);
1094 #endif
1095 
1096 	len = desc_len(sh_desc);
1097 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1098 
1099 	if (iv_contig) {
1100 		src_dma = edesc->iv_dma;
1101 		in_options = 0;
1102 	} else {
1103 		src_dma = edesc->sec4_sg_dma;
1104 		sec4_sg_index += 1 + edesc->src_nents;	/* iv entry + src */
1105 		in_options = LDST_SGF;
1106 	}
1107 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1108 
1109 	if (likely(req->src == req->dst)) {
1110 		if (!edesc->src_nents && iv_contig) {
1111 			dst_dma = sg_dma_address(req->src);
1112 		} else {
1113 			dst_dma = edesc->sec4_sg_dma +
1114 				sizeof(struct sec4_sg_entry);
1115 			out_options = LDST_SGF;
1116 		}
1117 	} else {
1118 		if (!edesc->dst_nents) {
1119 			dst_dma = sg_dma_address(req->dst);
1120 		} else {
1121 			dst_dma = edesc->sec4_sg_dma +
1122 				sec4_sg_index * sizeof(struct sec4_sg_entry);
1123 			out_options = LDST_SGF;
1124 		}
1125 	}
1126 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1127 }
1128 
1129 /*
1130  * allocate and map the aead extended descriptor
1131  */
1132 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1133 					   int desc_bytes, bool *all_contig_ptr)
1134 {
1135 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1136 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1137 	struct device *jrdev = ctx->jrdev;
1138 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1139 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1140 	int assoc_nents, src_nents, dst_nents = 0;
1141 	struct aead_edesc *edesc;
1142 	dma_addr_t iv_dma = 0;
1143 	int sgc;
1144 	bool all_contig = true;
1145 	bool assoc_chained = false, src_chained = false, dst_chained = false;
1146 	int ivsize = crypto_aead_ivsize(aead);
1147 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1148 
1149 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1150 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1151 
1152 	if (unlikely(req->dst != req->src))
1153 		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1154 
1155 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1156 				 DMA_TO_DEVICE, assoc_chained);
1157 	if (likely(req->src == req->dst)) {
1158 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1159 					 DMA_BIDIRECTIONAL, src_chained);
1160 	} else {
1161 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1162 					 DMA_TO_DEVICE, src_chained);
1163 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1164 					 DMA_FROM_DEVICE, dst_chained);
1165 	}
1166 
1167 	/* Check if data are contiguous */
1168 	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1169 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1170 	    iv_dma || src_nents || iv_dma + ivsize !=
1171 	    sg_dma_address(req->src)) {
1172 		all_contig = false;
1173 		assoc_nents = assoc_nents ? : 1;
1174 		src_nents = src_nents ? : 1;
1175 		sec4_sg_len = assoc_nents + 1 + src_nents;
1176 	}
1177 	sec4_sg_len += dst_nents;
1178 
1179 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1180 
1181 	/* allocate space for base edesc and hw desc commands, link tables */
1182 	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1183 			sec4_sg_bytes, GFP_DMA | flags);
1184 	if (!edesc) {
1185 		dev_err(jrdev, "could not allocate extended descriptor\n");
1186 		return ERR_PTR(-ENOMEM);
1187 	}
1188 
1189 	edesc->assoc_nents = assoc_nents;
1190 	edesc->assoc_chained = assoc_chained;
1191 	edesc->src_nents = src_nents;
1192 	edesc->src_chained = src_chained;
1193 	edesc->dst_nents = dst_nents;
1194 	edesc->dst_chained = dst_chained;
1195 	edesc->iv_dma = iv_dma;
1196 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1197 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1198 			 desc_bytes;
1199 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1200 					    sec4_sg_bytes, DMA_TO_DEVICE);
1201 	*all_contig_ptr = all_contig;
1202 
1203 	sec4_sg_index = 0;
1204 	if (!all_contig) {
1205 		sg_to_sec4_sg(req->assoc,
1206 			      (assoc_nents ? : 1),
1207 			      edesc->sec4_sg +
1208 			      sec4_sg_index, 0);
1209 		sec4_sg_index += assoc_nents ? : 1;
1210 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1211 				   iv_dma, ivsize, 0);
1212 		sec4_sg_index += 1;
1213 		sg_to_sec4_sg_last(req->src,
1214 				   (src_nents ? : 1),
1215 				   edesc->sec4_sg +
1216 				   sec4_sg_index, 0);
1217 		sec4_sg_index += src_nents ? : 1;
1218 	}
1219 	if (dst_nents) {
1220 		sg_to_sec4_sg_last(req->dst, dst_nents,
1221 				   edesc->sec4_sg + sec4_sg_index, 0);
1222 	}
1223 
1224 	return edesc;
1225 }
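
/*
 * Resulting sec4_sg layout when !all_contig (hypothetical split with
 * two assoc, one src and two dst segments):
 *
 *   [assoc seg 0][assoc seg 1][iv][src seg 0 (last)]
 *   [dst seg 0][dst seg 1 (last)]         <- present only if dst != src
 */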
1226 
1227 static int aead_encrypt(struct aead_request *req)
1228 {
1229 	struct aead_edesc *edesc;
1230 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1231 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1232 	struct device *jrdev = ctx->jrdev;
1233 	bool all_contig;
1234 	u32 *desc;
1235 	int ret = 0;
1236 
1237 	req->cryptlen += ctx->authsize;
1238 
1239 	/* allocate extended descriptor */
1240 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1241 				 CAAM_CMD_SZ, &all_contig);
1242 	if (IS_ERR(edesc))
1243 		return PTR_ERR(edesc);
1244 
1245 	/* Create and submit job descriptor */
1246 	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1247 		      all_contig, true);
1248 #ifdef DEBUG
1249 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1250 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1251 		       desc_bytes(edesc->hw_desc), 1);
1252 #endif
1253 
1254 	desc = edesc->hw_desc;
1255 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1256 	if (!ret) {
1257 		ret = -EINPROGRESS;
1258 	} else {
1259 		aead_unmap(jrdev, edesc, req);
1260 		kfree(edesc);
1261 	}
1262 
1263 	return ret;
1264 }
1265 
1266 static int aead_decrypt(struct aead_request *req)
1267 {
1268 	struct aead_edesc *edesc;
1269 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1270 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1271 	struct device *jrdev = ctx->jrdev;
1272 	bool all_contig;
1273 	u32 *desc;
1274 	int ret = 0;
1275 
1276 	/* allocate extended descriptor */
1277 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1278 				 CAAM_CMD_SZ, &all_contig);
1279 	if (IS_ERR(edesc))
1280 		return PTR_ERR(edesc);
1281 
1282 #ifdef DEBUG
1283 	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
1284 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1285 		       req->cryptlen, 1);
1286 #endif
1287 
1288 	/* Create and submit job descriptor */
1289 	init_aead_job(ctx->sh_desc_dec,
1290 		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1291 #ifdef DEBUG
1292 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1293 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1294 		       desc_bytes(edesc->hw_desc), 1);
1295 #endif
1296 
1297 	desc = edesc->hw_desc;
1298 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1299 	if (!ret) {
1300 		ret = -EINPROGRESS;
1301 	} else {
1302 		aead_unmap(jrdev, edesc, req);
1303 		kfree(edesc);
1304 	}
1305 
1306 	return ret;
1307 }
1308 
1309 /*
1310  * allocate and map the aead extended descriptor for aead givencrypt
1311  */
1312 static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1313 					       *greq, int desc_bytes,
1314 					       u32 *contig_ptr)
1315 {
1316 	struct aead_request *req = &greq->areq;
1317 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1318 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1319 	struct device *jrdev = ctx->jrdev;
1320 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1321 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1322 	int assoc_nents, src_nents, dst_nents = 0;
1323 	struct aead_edesc *edesc;
1324 	dma_addr_t iv_dma = 0;
1325 	int sgc;
1326 	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1327 	int ivsize = crypto_aead_ivsize(aead);
1328 	bool assoc_chained = false, src_chained = false, dst_chained = false;
1329 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1330 
1331 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1332 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1333 
1334 	if (unlikely(req->dst != req->src))
1335 		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1336 
1337 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1338 				 DMA_TO_DEVICE, assoc_chained);
1339 	if (likely(req->src == req->dst)) {
1340 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1341 					 DMA_BIDIRECTIONAL, src_chained);
1342 	} else {
1343 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1344 					 DMA_TO_DEVICE, src_chained);
1345 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1346 					 DMA_FROM_DEVICE, dst_chained);
1347 	}
1348 
1349 	/* Check if data are contiguous */
1350 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1351 	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1352 	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1353 		contig &= ~GIV_SRC_CONTIG;
1354 	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1355 		contig &= ~GIV_DST_CONTIG;
1356 	if (unlikely(req->src != req->dst)) {
1357 		dst_nents = dst_nents ? : 1;
1358 		sec4_sg_len += 1;
1359 	}
1360 	if (!(contig & GIV_SRC_CONTIG)) {
1361 		assoc_nents = assoc_nents ? : 1;
1362 		src_nents = src_nents ? : 1;
1363 		sec4_sg_len += assoc_nents + 1 + src_nents;
1364 		if (likely(req->src == req->dst))
1365 			contig &= ~GIV_DST_CONTIG;
1366 	}
1367 	sec4_sg_len += dst_nents;
1368 
1369 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1370 
1371 	/* allocate space for base edesc and hw desc commands, link tables */
1372 	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1373 			sec4_sg_bytes, GFP_DMA | flags);
1374 	if (!edesc) {
1375 		dev_err(jrdev, "could not allocate extended descriptor\n");
1376 		return ERR_PTR(-ENOMEM);
1377 	}
1378 
1379 	edesc->assoc_nents = assoc_nents;
1380 	edesc->assoc_chained = assoc_chained;
1381 	edesc->src_nents = src_nents;
1382 	edesc->src_chained = src_chained;
1383 	edesc->dst_nents = dst_nents;
1384 	edesc->dst_chained = dst_chained;
1385 	edesc->iv_dma = iv_dma;
1386 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1387 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1388 			 desc_bytes;
1389 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1390 					    sec4_sg_bytes, DMA_TO_DEVICE);
1391 	*contig_ptr = contig;
1392 
1393 	sec4_sg_index = 0;
1394 	if (!(contig & GIV_SRC_CONTIG)) {
1395 		sg_to_sec4_sg(req->assoc, assoc_nents,
1396 			      edesc->sec4_sg +
1397 			      sec4_sg_index, 0);
1398 		sec4_sg_index += assoc_nents;
1399 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1400 				   iv_dma, ivsize, 0);
1401 		sec4_sg_index += 1;
1402 		sg_to_sec4_sg_last(req->src, src_nents,
1403 				   edesc->sec4_sg +
1404 				   sec4_sg_index, 0);
1405 		sec4_sg_index += src_nents;
1406 	}
1407 	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1408 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1409 				   iv_dma, ivsize, 0);
1410 		sec4_sg_index += 1;
1411 		sg_to_sec4_sg_last(req->dst, dst_nents,
1412 				   edesc->sec4_sg + sec4_sg_index, 0);
1413 	}
1414 
1415 	return edesc;
1416 }
1417 
1418 static int aead_givencrypt(struct aead_givcrypt_request *areq)
1419 {
1420 	struct aead_request *req = &areq->areq;
1421 	struct aead_edesc *edesc;
1422 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1423 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1424 	struct device *jrdev = ctx->jrdev;
1425 	u32 contig;
1426 	u32 *desc;
1427 	int ret = 0;
1428 
1429 	req->cryptlen += ctx->authsize;
1430 
1431 	/* allocate extended descriptor */
1432 	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1433 				     CAAM_CMD_SZ, &contig);
1434 
1435 	if (IS_ERR(edesc))
1436 		return PTR_ERR(edesc);
1437 
1438 #ifdef DEBUG
1439 	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
1440 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1441 		       req->cryptlen, 1);
1442 #endif
1443 
1444 	/* Create and submit job descriptor */
1445 	init_aead_giv_job(ctx->sh_desc_givenc,
1446 			  ctx->sh_desc_givenc_dma, edesc, req, contig);
1447 #ifdef DEBUG
1448 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1449 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1450 		       desc_bytes(edesc->hw_desc), 1);
1451 #endif
1452 
1453 	desc = edesc->hw_desc;
1454 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1455 	if (!ret) {
1456 		ret = -EINPROGRESS;
1457 	} else {
1458 		aead_unmap(jrdev, edesc, req);
1459 		kfree(edesc);
1460 	}
1461 
1462 	return ret;
1463 }
1464 
1465 /*
1466  * allocate and map the ablkcipher extended descriptor for ablkcipher
1467  */
1468 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1469 						       *req, int desc_bytes,
1470 						       bool *iv_contig_out)
1471 {
1472 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1473 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1474 	struct device *jrdev = ctx->jrdev;
1475 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1476 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1477 		       GFP_KERNEL : GFP_ATOMIC;
1478 	int src_nents, dst_nents = 0, sec4_sg_bytes;
1479 	struct ablkcipher_edesc *edesc;
1480 	dma_addr_t iv_dma = 0;
1481 	bool iv_contig = false;
1482 	int sgc;
1483 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1484 	bool src_chained = false, dst_chained = false;
1485 	int sec4_sg_index;
1486 
1487 	src_nents = sg_count(req->src, req->nbytes, &src_chained);
1488 
1489 	if (req->dst != req->src)
1490 		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1491 
1492 	if (likely(req->src == req->dst)) {
1493 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1494 					 DMA_BIDIRECTIONAL, src_chained);
1495 	} else {
1496 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1497 					 DMA_TO_DEVICE, src_chained);
1498 		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1499 					 DMA_FROM_DEVICE, dst_chained);
1500 	}
1501 
1502 	/*
1503 	 * Check if iv can be contiguous with source and destination.
1504 	 * If so, include it. If not, create scatterlist.
1505 	 */
1506 	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1507 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1508 		iv_contig = true;
1509 	else
1510 		src_nents = src_nents ? : 1;
1511 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1512 			sizeof(struct sec4_sg_entry);
1513 
1514 	/* allocate space for base edesc and hw desc commands, link tables */
1515 	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1516 			sec4_sg_bytes, GFP_DMA | flags);
1517 	if (!edesc) {
1518 		dev_err(jrdev, "could not allocate extended descriptor\n");
1519 		return ERR_PTR(-ENOMEM);
1520 	}
1521 
1522 	edesc->src_nents = src_nents;
1523 	edesc->src_chained = src_chained;
1524 	edesc->dst_nents = dst_nents;
1525 	edesc->dst_chained = dst_chained;
1526 	edesc->sec4_sg_bytes = sec4_sg_bytes;
1527 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1528 			 desc_bytes;
1529 
1530 	sec4_sg_index = 0;
1531 	if (!iv_contig) {
1532 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1533 		sg_to_sec4_sg_last(req->src, src_nents,
1534 				   edesc->sec4_sg + 1, 0);
1535 		sec4_sg_index += 1 + src_nents;
1536 	}
1537 
1538 	if (dst_nents) {
1539 		sg_to_sec4_sg_last(req->dst, dst_nents,
1540 			edesc->sec4_sg + sec4_sg_index, 0);
1541 	}
1542 
1543 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1544 					    sec4_sg_bytes, DMA_TO_DEVICE);
1545 	edesc->iv_dma = iv_dma;
1546 
1547 #ifdef DEBUG
1548 	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
1549 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1550 		       sec4_sg_bytes, 1);
1551 #endif
1552 
1553 	*iv_contig_out = iv_contig;
1554 	return edesc;
1555 }
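
/*
 * Resulting sec4_sg layout when the IV is not contiguous with src:
 *
 *   [iv][src seg 0] ... [src seg N-1 (last)][dst segs (last), if any]
 */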
1556 
1557 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1558 {
1559 	struct ablkcipher_edesc *edesc;
1560 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1561 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1562 	struct device *jrdev = ctx->jrdev;
1563 	bool iv_contig;
1564 	u32 *desc;
1565 	int ret = 0;
1566 
1567 	/* allocate extended descriptor */
1568 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1569 				       CAAM_CMD_SZ, &iv_contig);
1570 	if (IS_ERR(edesc))
1571 		return PTR_ERR(edesc);
1572 
1573 	/* Create and submit job descriptor */
1574 	init_ablkcipher_job(ctx->sh_desc_enc,
1575 		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1576 #ifdef DEBUG
1577 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1578 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1579 		       desc_bytes(edesc->hw_desc), 1);
1580 #endif
1581 	desc = edesc->hw_desc;
1582 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1583 
1584 	if (!ret) {
1585 		ret = -EINPROGRESS;
1586 	} else {
1587 		ablkcipher_unmap(jrdev, edesc, req);
1588 		kfree(edesc);
1589 	}
1590 
1591 	return ret;
1592 }
1593 
1594 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1595 {
1596 	struct ablkcipher_edesc *edesc;
1597 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1598 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1599 	struct device *jrdev = ctx->jrdev;
1600 	bool iv_contig;
1601 	u32 *desc;
1602 	int ret = 0;
1603 
1604 	/* allocate extended descriptor */
1605 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1606 				       CAAM_CMD_SZ, &iv_contig);
1607 	if (IS_ERR(edesc))
1608 		return PTR_ERR(edesc);
1609 
1610 	/* Create and submit job descriptor */
1611 	init_ablkcipher_job(ctx->sh_desc_dec,
1612 		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1613 	desc = edesc->hw_desc;
1614 #ifdef DEBUG
1615 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1616 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1617 		       desc_bytes(edesc->hw_desc), 1);
1618 #endif
1619 
1620 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1621 	if (!ret) {
1622 		ret = -EINPROGRESS;
1623 	} else {
1624 		ablkcipher_unmap(jrdev, edesc, req);
1625 		kfree(edesc);
1626 	}
1627 
1628 	return ret;
1629 }
1630 
1631 #define template_aead		template_u.aead
1632 #define template_ablkcipher	template_u.ablkcipher
1633 struct caam_alg_template {
1634 	char name[CRYPTO_MAX_ALG_NAME];
1635 	char driver_name[CRYPTO_MAX_ALG_NAME];
1636 	unsigned int blocksize;
1637 	u32 type;
1638 	union {
1639 		struct ablkcipher_alg ablkcipher;
1640 		struct aead_alg aead;
1641 		struct blkcipher_alg blkcipher;
1642 		struct cipher_alg cipher;
1643 		struct compress_alg compress;
1644 		struct rng_alg rng;
1645 	} template_u;
1646 	u32 class1_alg_type;
1647 	u32 class2_alg_type;
1648 	u32 alg_op;
1649 };
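
/*
 * The template_u union mirrors the cra_u union in struct crypto_alg;
 * only the aead and ablkcipher members are used in this file, and
 * caam_alg_alloc() below copies the selected one into the crypto_alg
 * that gets registered.
 */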
1650 
1651 static struct caam_alg_template driver_algs[] = {
1652 	/* single-pass ipsec_esp descriptor */
1653 	{
1654 		.name = "authenc(hmac(md5),cbc(aes))",
1655 		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
1656 		.blocksize = AES_BLOCK_SIZE,
1657 		.type = CRYPTO_ALG_TYPE_AEAD,
1658 		.template_aead = {
1659 			.setkey = aead_setkey,
1660 			.setauthsize = aead_setauthsize,
1661 			.encrypt = aead_encrypt,
1662 			.decrypt = aead_decrypt,
1663 			.givencrypt = aead_givencrypt,
1664 			.geniv = "<built-in>",
1665 			.ivsize = AES_BLOCK_SIZE,
1666 			.maxauthsize = MD5_DIGEST_SIZE,
1667 			},
1668 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1669 		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1670 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1671 	},
1672 	{
1673 		.name = "authenc(hmac(sha1),cbc(aes))",
1674 		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1675 		.blocksize = AES_BLOCK_SIZE,
1676 		.type = CRYPTO_ALG_TYPE_AEAD,
1677 		.template_aead = {
1678 			.setkey = aead_setkey,
1679 			.setauthsize = aead_setauthsize,
1680 			.encrypt = aead_encrypt,
1681 			.decrypt = aead_decrypt,
1682 			.givencrypt = aead_givencrypt,
1683 			.geniv = "<built-in>",
1684 			.ivsize = AES_BLOCK_SIZE,
1685 			.maxauthsize = SHA1_DIGEST_SIZE,
1686 			},
1687 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1688 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1689 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1690 	},
1691 	{
1692 		.name = "authenc(hmac(sha224),cbc(aes))",
1693 		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1694 		.blocksize = AES_BLOCK_SIZE,
1695 		.type = CRYPTO_ALG_TYPE_AEAD,
1696 		.template_aead = {
1697 			.setkey = aead_setkey,
1698 			.setauthsize = aead_setauthsize,
1699 			.encrypt = aead_encrypt,
1700 			.decrypt = aead_decrypt,
1701 			.givencrypt = aead_givencrypt,
1702 			.geniv = "<built-in>",
1703 			.ivsize = AES_BLOCK_SIZE,
1704 			.maxauthsize = SHA224_DIGEST_SIZE,
1705 			},
1706 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1707 		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1708 				   OP_ALG_AAI_HMAC_PRECOMP,
1709 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1710 	},
1711 	{
1712 		.name = "authenc(hmac(sha256),cbc(aes))",
1713 		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1714 		.blocksize = AES_BLOCK_SIZE,
1715 		.type = CRYPTO_ALG_TYPE_AEAD,
1716 		.template_aead = {
1717 			.setkey = aead_setkey,
1718 			.setauthsize = aead_setauthsize,
1719 			.encrypt = aead_encrypt,
1720 			.decrypt = aead_decrypt,
1721 			.givencrypt = aead_givencrypt,
1722 			.geniv = "<built-in>",
1723 			.ivsize = AES_BLOCK_SIZE,
1724 			.maxauthsize = SHA256_DIGEST_SIZE,
1725 			},
1726 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1727 		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1728 				   OP_ALG_AAI_HMAC_PRECOMP,
1729 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1730 	},
1731 	{
1732 		.name = "authenc(hmac(sha384),cbc(aes))",
1733 		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1734 		.blocksize = AES_BLOCK_SIZE,
1735 		.type = CRYPTO_ALG_TYPE_AEAD,
1736 		.template_aead = {
1737 			.setkey = aead_setkey,
1738 			.setauthsize = aead_setauthsize,
1739 			.encrypt = aead_encrypt,
1740 			.decrypt = aead_decrypt,
1741 			.givencrypt = aead_givencrypt,
1742 			.geniv = "<built-in>",
1743 			.ivsize = AES_BLOCK_SIZE,
1744 			.maxauthsize = SHA384_DIGEST_SIZE,
1745 			},
1746 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1747 		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1748 				   OP_ALG_AAI_HMAC_PRECOMP,
1749 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1750 	},
1751 
1752 	{
1753 		.name = "authenc(hmac(sha512),cbc(aes))",
1754 		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1755 		.blocksize = AES_BLOCK_SIZE,
1756 		.type = CRYPTO_ALG_TYPE_AEAD,
1757 		.template_aead = {
1758 			.setkey = aead_setkey,
1759 			.setauthsize = aead_setauthsize,
1760 			.encrypt = aead_encrypt,
1761 			.decrypt = aead_decrypt,
1762 			.givencrypt = aead_givencrypt,
1763 			.geniv = "<built-in>",
1764 			.ivsize = AES_BLOCK_SIZE,
1765 			.maxauthsize = SHA512_DIGEST_SIZE,
1766 			},
1767 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1768 		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1769 				   OP_ALG_AAI_HMAC_PRECOMP,
1770 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1771 	},
1772 	{
1773 		.name = "authenc(hmac(md5),cbc(des3_ede))",
1774 		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
1775 		.blocksize = DES3_EDE_BLOCK_SIZE,
1776 		.type = CRYPTO_ALG_TYPE_AEAD,
1777 		.template_aead = {
1778 			.setkey = aead_setkey,
1779 			.setauthsize = aead_setauthsize,
1780 			.encrypt = aead_encrypt,
1781 			.decrypt = aead_decrypt,
1782 			.givencrypt = aead_givencrypt,
1783 			.geniv = "<built-in>",
1784 			.ivsize = DES3_EDE_BLOCK_SIZE,
1785 			.maxauthsize = MD5_DIGEST_SIZE,
1786 			},
1787 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1788 		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1789 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1790 	},
1791 	{
1792 		.name = "authenc(hmac(sha1),cbc(des3_ede))",
1793 		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1794 		.blocksize = DES3_EDE_BLOCK_SIZE,
1795 		.type = CRYPTO_ALG_TYPE_AEAD,
1796 		.template_aead = {
1797 			.setkey = aead_setkey,
1798 			.setauthsize = aead_setauthsize,
1799 			.encrypt = aead_encrypt,
1800 			.decrypt = aead_decrypt,
1801 			.givencrypt = aead_givencrypt,
1802 			.geniv = "<built-in>",
1803 			.ivsize = DES3_EDE_BLOCK_SIZE,
1804 			.maxauthsize = SHA1_DIGEST_SIZE,
1805 			},
1806 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1807 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1808 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1809 	},
1810 	{
1811 		.name = "authenc(hmac(sha224),cbc(des3_ede))",
1812 		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1813 		.blocksize = DES3_EDE_BLOCK_SIZE,
1814 		.type = CRYPTO_ALG_TYPE_AEAD,
1815 		.template_aead = {
1816 			.setkey = aead_setkey,
1817 			.setauthsize = aead_setauthsize,
1818 			.encrypt = aead_encrypt,
1819 			.decrypt = aead_decrypt,
1820 			.givencrypt = aead_givencrypt,
1821 			.geniv = "<built-in>",
1822 			.ivsize = DES3_EDE_BLOCK_SIZE,
1823 			.maxauthsize = SHA224_DIGEST_SIZE,
1824 			},
1825 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1826 		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1827 				   OP_ALG_AAI_HMAC_PRECOMP,
1828 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1829 	},
1830 	{
1831 		.name = "authenc(hmac(sha256),cbc(des3_ede))",
1832 		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1833 		.blocksize = DES3_EDE_BLOCK_SIZE,
1834 		.type = CRYPTO_ALG_TYPE_AEAD,
1835 		.template_aead = {
1836 			.setkey = aead_setkey,
1837 			.setauthsize = aead_setauthsize,
1838 			.encrypt = aead_encrypt,
1839 			.decrypt = aead_decrypt,
1840 			.givencrypt = aead_givencrypt,
1841 			.geniv = "<built-in>",
1842 			.ivsize = DES3_EDE_BLOCK_SIZE,
1843 			.maxauthsize = SHA256_DIGEST_SIZE,
1844 			},
1845 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1846 		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1847 				   OP_ALG_AAI_HMAC_PRECOMP,
1848 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1849 	},
1850 	{
1851 		.name = "authenc(hmac(sha384),cbc(des3_ede))",
1852 		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
1853 		.blocksize = DES3_EDE_BLOCK_SIZE,
1854 		.type = CRYPTO_ALG_TYPE_AEAD,
1855 		.template_aead = {
1856 			.setkey = aead_setkey,
1857 			.setauthsize = aead_setauthsize,
1858 			.encrypt = aead_encrypt,
1859 			.decrypt = aead_decrypt,
1860 			.givencrypt = aead_givencrypt,
1861 			.geniv = "<built-in>",
1862 			.ivsize = DES3_EDE_BLOCK_SIZE,
1863 			.maxauthsize = SHA384_DIGEST_SIZE,
1864 			},
1865 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1866 		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1867 				   OP_ALG_AAI_HMAC_PRECOMP,
1868 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1869 	},
1870 	{
1871 		.name = "authenc(hmac(sha512),cbc(des3_ede))",
1872 		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1873 		.blocksize = DES3_EDE_BLOCK_SIZE,
1874 		.type = CRYPTO_ALG_TYPE_AEAD,
1875 		.template_aead = {
1876 			.setkey = aead_setkey,
1877 			.setauthsize = aead_setauthsize,
1878 			.encrypt = aead_encrypt,
1879 			.decrypt = aead_decrypt,
1880 			.givencrypt = aead_givencrypt,
1881 			.geniv = "<built-in>",
1882 			.ivsize = DES3_EDE_BLOCK_SIZE,
1883 			.maxauthsize = SHA512_DIGEST_SIZE,
1884 			},
1885 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1886 		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1887 				   OP_ALG_AAI_HMAC_PRECOMP,
1888 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1889 	},
1890 	{
1891 		.name = "authenc(hmac(md5),cbc(des))",
1892 		.driver_name = "authenc-hmac-md5-cbc-des-caam",
1893 		.blocksize = DES_BLOCK_SIZE,
1894 		.type = CRYPTO_ALG_TYPE_AEAD,
1895 		.template_aead = {
1896 			.setkey = aead_setkey,
1897 			.setauthsize = aead_setauthsize,
1898 			.encrypt = aead_encrypt,
1899 			.decrypt = aead_decrypt,
1900 			.givencrypt = aead_givencrypt,
1901 			.geniv = "<built-in>",
1902 			.ivsize = DES_BLOCK_SIZE,
1903 			.maxauthsize = MD5_DIGEST_SIZE,
1904 			},
1905 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1906 		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1907 		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1908 	},
1909 	{
1910 		.name = "authenc(hmac(sha1),cbc(des))",
1911 		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
1912 		.blocksize = DES_BLOCK_SIZE,
1913 		.type = CRYPTO_ALG_TYPE_AEAD,
1914 		.template_aead = {
1915 			.setkey = aead_setkey,
1916 			.setauthsize = aead_setauthsize,
1917 			.encrypt = aead_encrypt,
1918 			.decrypt = aead_decrypt,
1919 			.givencrypt = aead_givencrypt,
1920 			.geniv = "<built-in>",
1921 			.ivsize = DES_BLOCK_SIZE,
1922 			.maxauthsize = SHA1_DIGEST_SIZE,
1923 			},
1924 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1925 		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1926 		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1927 	},
1928 	{
1929 		.name = "authenc(hmac(sha224),cbc(des))",
1930 		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
1931 		.blocksize = DES_BLOCK_SIZE,
1932 		.type = CRYPTO_ALG_TYPE_AEAD,
1933 		.template_aead = {
1934 			.setkey = aead_setkey,
1935 			.setauthsize = aead_setauthsize,
1936 			.encrypt = aead_encrypt,
1937 			.decrypt = aead_decrypt,
1938 			.givencrypt = aead_givencrypt,
1939 			.geniv = "<built-in>",
1940 			.ivsize = DES_BLOCK_SIZE,
1941 			.maxauthsize = SHA224_DIGEST_SIZE,
1942 			},
1943 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1944 		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1945 				   OP_ALG_AAI_HMAC_PRECOMP,
1946 		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1947 	},
1948 	{
1949 		.name = "authenc(hmac(sha256),cbc(des))",
1950 		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
1951 		.blocksize = DES_BLOCK_SIZE,
1952 		.type = CRYPTO_ALG_TYPE_AEAD,
1953 		.template_aead = {
1954 			.setkey = aead_setkey,
1955 			.setauthsize = aead_setauthsize,
1956 			.encrypt = aead_encrypt,
1957 			.decrypt = aead_decrypt,
1958 			.givencrypt = aead_givencrypt,
1959 			.geniv = "<built-in>",
1960 			.ivsize = DES_BLOCK_SIZE,
1961 			.maxauthsize = SHA256_DIGEST_SIZE,
1962 			},
1963 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1964 		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1965 				   OP_ALG_AAI_HMAC_PRECOMP,
1966 		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1967 	},
1968 	{
1969 		.name = "authenc(hmac(sha384),cbc(des))",
1970 		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
1971 		.blocksize = DES_BLOCK_SIZE,
1972 		.type = CRYPTO_ALG_TYPE_AEAD,
1973 		.template_aead = {
1974 			.setkey = aead_setkey,
1975 			.setauthsize = aead_setauthsize,
1976 			.encrypt = aead_encrypt,
1977 			.decrypt = aead_decrypt,
1978 			.givencrypt = aead_givencrypt,
1979 			.geniv = "<built-in>",
1980 			.ivsize = DES_BLOCK_SIZE,
1981 			.maxauthsize = SHA384_DIGEST_SIZE,
1982 			},
1983 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1984 		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1985 				   OP_ALG_AAI_HMAC_PRECOMP,
1986 		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1987 	},
1988 	{
1989 		.name = "authenc(hmac(sha512),cbc(des))",
1990 		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
1991 		.blocksize = DES_BLOCK_SIZE,
1992 		.type = CRYPTO_ALG_TYPE_AEAD,
1993 		.template_aead = {
1994 			.setkey = aead_setkey,
1995 			.setauthsize = aead_setauthsize,
1996 			.encrypt = aead_encrypt,
1997 			.decrypt = aead_decrypt,
1998 			.givencrypt = aead_givencrypt,
1999 			.geniv = "<built-in>",
2000 			.ivsize = DES_BLOCK_SIZE,
2001 			.maxauthsize = SHA512_DIGEST_SIZE,
2002 			},
2003 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2004 		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2005 				   OP_ALG_AAI_HMAC_PRECOMP,
2006 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2007 	},
2008 	/* ablkcipher descriptor */
2009 	{
2010 		.name = "cbc(aes)",
2011 		.driver_name = "cbc-aes-caam",
2012 		.blocksize = AES_BLOCK_SIZE,
2013 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2014 		.template_ablkcipher = {
2015 			.setkey = ablkcipher_setkey,
2016 			.encrypt = ablkcipher_encrypt,
2017 			.decrypt = ablkcipher_decrypt,
2018 			.geniv = "eseqiv",
2019 			.min_keysize = AES_MIN_KEY_SIZE,
2020 			.max_keysize = AES_MAX_KEY_SIZE,
2021 			.ivsize = AES_BLOCK_SIZE,
2022 			},
2023 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2024 	},
2025 	{
2026 		.name = "cbc(des3_ede)",
2027 		.driver_name = "cbc-3des-caam",
2028 		.blocksize = DES3_EDE_BLOCK_SIZE,
2029 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2030 		.template_ablkcipher = {
2031 			.setkey = ablkcipher_setkey,
2032 			.encrypt = ablkcipher_encrypt,
2033 			.decrypt = ablkcipher_decrypt,
2034 			.geniv = "eseqiv",
2035 			.min_keysize = DES3_EDE_KEY_SIZE,
2036 			.max_keysize = DES3_EDE_KEY_SIZE,
2037 			.ivsize = DES3_EDE_BLOCK_SIZE,
2038 			},
2039 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2040 	},
2041 	{
2042 		.name = "cbc(des)",
2043 		.driver_name = "cbc-des-caam",
2044 		.blocksize = DES_BLOCK_SIZE,
2045 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2046 		.template_ablkcipher = {
2047 			.setkey = ablkcipher_setkey,
2048 			.encrypt = ablkcipher_encrypt,
2049 			.decrypt = ablkcipher_decrypt,
2050 			.geniv = "eseqiv",
2051 			.min_keysize = DES_KEY_SIZE,
2052 			.max_keysize = DES_KEY_SIZE,
2053 			.ivsize = DES_BLOCK_SIZE,
2054 			},
2055 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2056 	}
2057 };
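
/*
 * Each entry above pairs the generic cra_name that callers request with
 * a cra_driver_name identifying this implementation; CAAM_CRA_PRIORITY
 * lets the hardware version outrank the software authenc/cbc templates.
 * A hypothetical sketch of instantiating one of the AEAD entries
 * (demo_* is illustrative; error handling trimmed):
 */
#if 0
static int demo_aead_alloc(void)
{
	struct crypto_aead *tfm;

	/* generic name; priority selects authenc-hmac-sha1-cbc-aes-caam */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the ICV may be truncated, but never beyond .maxauthsize (20 here) */
	crypto_aead_setauthsize(tfm, 12);

	/* ... setkey with an authenc-encoded key blob, build requests ... */

	crypto_free_aead(tfm);
	return 0;
}
#endif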
2058 
2059 struct caam_crypto_alg {
2060 	struct list_head entry;
2061 	int class1_alg_type;
2062 	int class2_alg_type;
2063 	int alg_op;
2064 	struct crypto_alg crypto_alg;
2065 };
2066 
2067 static int caam_cra_init(struct crypto_tfm *tfm)
2068 {
2069 	struct crypto_alg *alg = tfm->__crt_alg;
2070 	struct caam_crypto_alg *caam_alg =
2071 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
2072 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2073 
2074 	ctx->jrdev = caam_jr_alloc();
2075 	if (IS_ERR(ctx->jrdev)) {
2076 		pr_err("Job Ring Device allocation for transform failed\n");
2077 		return PTR_ERR(ctx->jrdev);
2078 	}
2079 
2080 	/* copy descriptor header template value */
2081 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2082 	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2083 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2084 
2085 	return 0;
2086 }
2087 
2088 static void caam_cra_exit(struct crypto_tfm *tfm)
2089 {
2090 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2091 
2092 	if (ctx->sh_desc_enc_dma &&
2093 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2094 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2095 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2096 	if (ctx->sh_desc_dec_dma &&
2097 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2098 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2099 				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2100 	if (ctx->sh_desc_givenc_dma &&
2101 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2102 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2103 				 desc_bytes(ctx->sh_desc_givenc),
2104 				 DMA_TO_DEVICE);
2105 
2106 	caam_jr_free(ctx->jrdev);
2107 }
2108 
2109 static void __exit caam_algapi_exit(void)
2110 {
2112 	struct caam_crypto_alg *t_alg, *n;
2113 
2114 	if (!alg_list.next)
2115 		return;
2116 
2117 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2118 		crypto_unregister_alg(&t_alg->crypto_alg);
2119 		list_del(&t_alg->entry);
2120 		kfree(t_alg);
2121 	}
2122 }
2123 
2124 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2125 					      *template)
2126 {
2127 	struct caam_crypto_alg *t_alg;
2128 	struct crypto_alg *alg;
2129 
2130 	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2131 	if (!t_alg) {
2132 		pr_err("failed to allocate t_alg\n");
2133 		return ERR_PTR(-ENOMEM);
2134 	}
2135 
2136 	alg = &t_alg->crypto_alg;
2137 
2138 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2139 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2140 		 template->driver_name);
2141 	alg->cra_module = THIS_MODULE;
2142 	alg->cra_init = caam_cra_init;
2143 	alg->cra_exit = caam_cra_exit;
2144 	alg->cra_priority = CAAM_CRA_PRIORITY;
2145 	alg->cra_blocksize = template->blocksize;
2146 	alg->cra_alignmask = 0;
2147 	alg->cra_ctxsize = sizeof(struct caam_ctx);
2148 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2149 			 template->type;
2150 	switch (template->type) {
2151 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2152 		alg->cra_type = &crypto_ablkcipher_type;
2153 		alg->cra_ablkcipher = template->template_ablkcipher;
2154 		break;
2155 	case CRYPTO_ALG_TYPE_AEAD:
2156 		alg->cra_type = &crypto_aead_type;
2157 		alg->cra_aead = template->template_aead;
2158 		break;
2159 	}
2160 
2161 	t_alg->class1_alg_type = template->class1_alg_type;
2162 	t_alg->class2_alg_type = template->class2_alg_type;
2163 	t_alg->alg_op = template->alg_op;
2164 
2165 	return t_alg;
2166 }
2167 
2168 static int __init caam_algapi_init(void)
2169 {
2170 	int i = 0, err = 0;
2171 
2172 	INIT_LIST_HEAD(&alg_list);
2173 
2174 	/* register crypto algorithms the device supports */
2175 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2176 		/* TODO: check if h/w supports alg */
2177 		struct caam_crypto_alg *t_alg;
2178 
2179 		t_alg = caam_alg_alloc(&driver_algs[i]);
2180 		if (IS_ERR(t_alg)) {
2181 			err = PTR_ERR(t_alg);
2182 			pr_warn("%s alg allocation failed\n",
2183 				driver_algs[i].driver_name);
2184 			continue;
2185 		}
2186 
2187 		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &alg_list);
		}
2194 	}
2195 	if (!list_empty(&alg_list))
2196 		pr_info("caam algorithms registered in /proc/crypto\n");
2197 
2198 	return err;
2199 }
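
/*
 * Successful registration makes the algorithms visible in /proc/crypto;
 * an entry for the cbc(aes) template above would look roughly like this
 * (illustrative only, exact fields depend on the running kernel):
 *
 *	name         : cbc(aes)
 *	driver       : cbc-aes-caam
 *	priority     : 3000
 *	refcnt       : 1
 *	selftest     : passed
 *	type         : ablkcipher
 *	async        : yes
 *	blocksize    : 16
 *	min keysize  : 16
 *	max keysize  : 32
 *	ivsize       : 16
 *	geniv        : eseqiv
 */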
2200 
2201 module_init(caam_algapi_init);
2202 module_exit(caam_algapi_exit);
2203 
2204 MODULE_LICENSE("GPL");
2205 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2206 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");