xref: /linux/drivers/crypto/caam/caamalg.c (revision 98838d95075a5295f3478ceba18bcccf472e30f4)
1 /*
2  * caam - Freescale FSL CAAM support for crypto API
3  *
4  * Copyright 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Based on talitos crypto API driver.
7  *
8  * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9  *
10  * ---------------                     ---------------
11  * | JobDesc #1  |-------------------->|  ShareDesc  |
12  * | *(packet 1) |                     |   (PDB)     |
13  * ---------------      |------------->|  (hashKey)  |
14  *       .              |              | (cipherKey) |
15  *       .              |    |-------->| (operation) |
16  * ---------------      |    |         ---------------
17  * | JobDesc #2  |------|    |
18  * | *(packet 2) |           |
19  * ---------------           |
20  *       .                   |
21  *       .                   |
22  * ---------------           |
23  * | JobDesc #3  |------------
24  * | *(packet 3) |
25  * ---------------
26  *
27  * The SharedDesc never changes for a connection unless rekeyed, but
28  * each packet will likely be in a different place. So all we need
29  * to know to process the packet is where the input is, where the
30  * output goes, and what context we want to process with. Context is
31  * in the SharedDesc, packet references in the JobDesc.
32  *
33  * So, a job desc looks like:
34  *
35  * ---------------------
36  * | Header            |
37  * | ShareDesc Pointer |
38  * | SEQ_OUT_PTR       |
39  * | (output buffer)   |
40  * | (output length)   |
41  * | SEQ_IN_PTR        |
42  * | (input buffer)    |
43  * | (input length)    |
44  * ---------------------
45  */
46 
47 #include "compat.h"
48 
49 #include "regs.h"
50 #include "intern.h"
51 #include "desc_constr.h"
52 #include "jr.h"
53 #include "error.h"
54 #include "sg_sw_sec4.h"
55 #include "key_gen.h"
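/*
 * Illustrative sketch (hypothetical, for exposition only): the job
 * descriptor layout drawn in the header comment above, spelled out as
 * a struct.  The field names and the use of dma_addr_t for the pointer
 * words are assumptions; real job descriptors are built word-by-word
 * with the desc_constr.h helpers used throughout this file.
 */
struct example_job_desc {
	u32 header;		/* job descriptor header */
	dma_addr_t sh_desc;	/* ShareDesc pointer */
	u32 seq_out_ptr;	/* SEQ_OUT_PTR command word */
	dma_addr_t out_buf;	/* output buffer */
	u32 out_len;		/* output length */
	u32 seq_in_ptr;		/* SEQ_IN_PTR command word */
	dma_addr_t in_buf;	/* input buffer */
	u32 in_len;		/* input length */
};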
56 
57 /*
58  * crypto alg
59  */
60 #define CAAM_CRA_PRIORITY		3000
61 /* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
62 #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
63 					 CTR_RFC3686_NONCE_SIZE + \
64 					 SHA512_DIGEST_SIZE * 2)
65 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
66 #define CAAM_MAX_IV_LENGTH		16
67 
68 #define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
69 #define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
70 					 CAAM_CMD_SZ * 4)
71 #define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
72 					 CAAM_CMD_SZ * 5)
73 
74 /* length of descriptors text */
75 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
76 #define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
77 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
78 #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
79 
80 /* Note: Nonce is counted in enckeylen */
81 #define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
82 
83 #define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
84 #define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
85 #define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
86 
87 #define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
88 #define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
89 #define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
90 
91 #define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
92 #define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
93 #define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
94 
95 #define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
96 #define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
97 #define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
98 
99 #define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
100 #define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
101 					 20 * CAAM_CMD_SZ)
102 #define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
103 					 15 * CAAM_CMD_SZ)
104 
105 #define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
106 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
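/*
 * A minimal sketch (hypothetical helper, not used by the driver) of the
 * "keys fit inline" test that every *_set_sh_desc() routine below
 * repeats: descriptor text, job I/O commands and inlined key material
 * must together fit the 64-word descriptor buffer.
 */
static inline bool example_keys_fit_inline(unsigned int text_len,
					   unsigned int job_io_len,
					   unsigned int key_bytes)
{
	return text_len + job_io_len + key_bytes <= CAAM_DESC_BYTES_MAX;
}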
107 
108 #ifdef DEBUG
109 /* for print_hex_dumps with line references */
110 #define debug(format, arg...) printk(format, arg)
111 #else
112 #define debug(format, arg...)
113 #endif
114 
115 #ifdef DEBUG
116 #include <linux/highmem.h>
117 
118 static void dbg_dump_sg(const char *level, const char *prefix_str,
119 			int prefix_type, int rowsize, int groupsize,
120 			struct scatterlist *sg, size_t tlen, bool ascii,
121 			bool may_sleep)
122 {
123 	struct scatterlist *it;
124 	void *it_page;
125 	size_t len;
126 	void *buf;
127 
128 	for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
129 		/*
130 		 * make sure the scatterlist's page
131 		 * has a valid virtual memory mapping
132 		 */
133 		it_page = kmap_atomic(sg_page(it));
134 		if (unlikely(!it_page)) {
135 			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
136 			return;
137 		}
138 
139 		buf = it_page + it->offset;
140 		len = min_t(size_t, tlen, it->length);
141 		print_hex_dump(level, prefix_str, prefix_type, rowsize,
142 			       groupsize, buf, len, ascii);
143 		tlen -= len;
144 
145 		kunmap_atomic(it_page);
146 	}
147 }
148 #endif
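/*
 * Hypothetical call of the helper above (names assumed: req would be a
 * struct aead_request *; ascii = true, may_sleep = false since the
 * helper only uses kmap_atomic()):
 *
 *	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
 *		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
 *		    req->assoclen + req->cryptlen, true, false);
 */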
149 
150 static struct list_head alg_list;
151 
152 struct caam_alg_entry {
153 	int class1_alg_type;
154 	int class2_alg_type;
155 	int alg_op;
156 	bool rfc3686;
157 	bool geniv;
158 };
159 
160 struct caam_aead_alg {
161 	struct aead_alg aead;
162 	struct caam_alg_entry caam;
163 	bool registered;
164 };
165 
166 /* Set DK bit in class 1 operation if shared */
167 static inline void append_dec_op1(u32 *desc, u32 type)
168 {
169 	u32 *jump_cmd, *uncond_jump_cmd;
170 
171 	/* DK bit is valid only for AES */
172 	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
173 		append_operation(desc, type | OP_ALG_AS_INITFINAL |
174 				 OP_ALG_DECRYPT);
175 		return;
176 	}
177 
178 	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
179 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
180 			 OP_ALG_DECRYPT);
181 	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
182 	set_jump_tgt_here(desc, jump_cmd);
183 	append_operation(desc, type | OP_ALG_AS_INITFINAL |
184 			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
185 	set_jump_tgt_here(desc, uncond_jump_cmd);
186 }
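/*
 * Control flow of append_dec_op1() above, sketched:
 *
 *	JUMP if SHRD -----------------------------+
 *	OPERATION: AES decrypt (fresh key)        |
 *	JUMP always -------------------------+    |
 *	OPERATION: AES decrypt | AAI_DK  <---|----+
 *	(fall through)  <--------------------+
 *
 * A descriptor entered with the key already shared takes the DK
 * (Decrypt Key) variant; otherwise the plain decrypt operation runs.
 */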
187 
188 /*
189  * For aead functions, read payload and write payload,
190  * both of which are specified in req->src and req->dst
191  */
192 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
193 {
194 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
195 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
196 			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
197 }
198 
199 /*
200  * For ablkcipher encrypt and decrypt, read from req->src and
201  * write to req->dst
202  */
203 static inline void ablkcipher_append_src_dst(u32 *desc)
204 {
205 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
206 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
207 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
208 			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
209 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
210 }
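/*
 * Note on the two MATH ADDs in ablkcipher_append_src_dst() above:
 * REG0 serves as a zero here (as in the math comments elsewhere in
 * this file), so "SEQINLEN + REG0" is effectively a copy.  Both
 * variable sequence lengths end up equal to the remaining input
 * length, and the VLF FIFO LOAD/STORE pair then moves exactly the
 * whole payload from req->src to req->dst.
 */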
211 
212 /*
213  * per-session context
214  */
215 struct caam_ctx {
216 	struct device *jrdev;
217 	u32 sh_desc_enc[DESC_MAX_USED_LEN];
218 	u32 sh_desc_dec[DESC_MAX_USED_LEN];
219 	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
220 	dma_addr_t sh_desc_enc_dma;
221 	dma_addr_t sh_desc_dec_dma;
222 	dma_addr_t sh_desc_givenc_dma;
223 	u32 class1_alg_type;
224 	u32 class2_alg_type;
225 	u32 alg_op;
226 	u8 key[CAAM_MAX_KEY_SIZE];
227 	dma_addr_t key_dma;
228 	unsigned int enckeylen;
229 	unsigned int split_key_len;
230 	unsigned int split_key_pad_len;
231 	unsigned int authsize;
232 };
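/*
 * Layout of ctx->key as assembled by the setkey routines below (the
 * RFC3686 nonce is present only for the CTR/RFC3686 variants; note
 * that enckeylen then counts both the key and the nonce, as the
 * append_key_aead() comment states):
 *
 *	+-----------------------+----------------+---------------+
 *	| MDHA split key (+pad) | encryption key | RFC3686 nonce |
 *	+-----------------------+----------------+---------------+
 *	 <- split_key_pad_len ->
 */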
233 
234 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
235 			    int keys_fit_inline, bool is_rfc3686)
236 {
237 	u32 *nonce;
238 	unsigned int enckeylen = ctx->enckeylen;
239 
240 	/*
241 	 * RFC3686 specific:
242 	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
243 	 *	| enckeylen = encryption key size + nonce size
244 	 */
245 	if (is_rfc3686)
246 		enckeylen -= CTR_RFC3686_NONCE_SIZE;
247 
248 	if (keys_fit_inline) {
249 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
250 				  ctx->split_key_len, CLASS_2 |
251 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
252 		append_key_as_imm(desc, (void *)ctx->key +
253 				  ctx->split_key_pad_len, enckeylen,
254 				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
255 	} else {
256 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
257 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
258 		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
259 			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
260 	}
261 
262 	/* Load Counter into CONTEXT1 reg */
263 	if (is_rfc3686) {
264 		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
265 			       enckeylen);
266 		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
267 				   LDST_CLASS_IND_CCB |
268 				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
269 		append_move(desc,
270 			    MOVE_SRC_OUTFIFO |
271 			    MOVE_DEST_CLASS1CTX |
272 			    (16 << MOVE_OFFSET_SHIFT) |
273 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
274 	}
275 }
276 
277 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
278 				  int keys_fit_inline, bool is_rfc3686)
279 {
280 	u32 *key_jump_cmd;
281 
282 	/* Note: Context registers are saved. */
283 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
284 
285 	/* Skip if already shared */
286 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
287 				   JUMP_COND_SHRD);
288 
289 	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
290 
291 	set_jump_tgt_here(desc, key_jump_cmd);
292 }
293 
294 static int aead_null_set_sh_desc(struct crypto_aead *aead)
295 {
296 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
297 	struct device *jrdev = ctx->jrdev;
298 	bool keys_fit_inline = false;
299 	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
300 	u32 *desc;
301 
302 	/*
303 	 * Job Descriptor and Shared Descriptors
304 	 * must all fit into the 64-word Descriptor h/w Buffer
305 	 */
306 	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
307 	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
308 		keys_fit_inline = true;
309 
310 	/* aead_encrypt shared descriptor */
311 	desc = ctx->sh_desc_enc;
312 
313 	init_sh_desc(desc, HDR_SHARE_SERIAL);
314 
315 	/* Skip if already shared */
316 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
317 				   JUMP_COND_SHRD);
318 	if (keys_fit_inline)
319 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
320 				  ctx->split_key_len, CLASS_2 |
321 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
322 	else
323 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
324 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
325 	set_jump_tgt_here(desc, key_jump_cmd);
326 
327 	/* assoclen + cryptlen = seqinlen */
328 	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
329 
330 	/* Prepare to read and write cryptlen + assoclen bytes */
331 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
332 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
333 
334 	/*
335 	 * MOVE_LEN opcode is not available in all SEC HW revisions,
336 	 * thus need to do some magic, i.e. self-patch the descriptor
337 	 * buffer.
338 	 */
339 	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
340 				    MOVE_DEST_MATH3 |
341 				    (0x6 << MOVE_LEN_SHIFT));
342 	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
343 				     MOVE_DEST_DESCBUF |
344 				     MOVE_WAITCOMP |
345 				     (0x8 << MOVE_LEN_SHIFT));
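	/*
	 * The pair of MOVEs above implements that self-patching: part of
	 * the descriptor text is copied into MATH3 (read_move_cmd) and
	 * later written back over the descriptor (write_move_cmd); the
	 * set_move_tgt_here() calls further down fill in the offsets
	 * once the patched location is known.
	 */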
346 
347 	/* Class 2 operation */
348 	append_operation(desc, ctx->class2_alg_type |
349 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
350 
351 	/* Read and write cryptlen bytes */
352 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
353 
354 	set_move_tgt_here(desc, read_move_cmd);
355 	set_move_tgt_here(desc, write_move_cmd);
356 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
357 	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
358 		    MOVE_AUX_LS);
359 
360 	/* Write ICV */
361 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
362 			 LDST_SRCDST_BYTE_CONTEXT);
363 
364 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
365 					      desc_bytes(desc),
366 					      DMA_TO_DEVICE);
367 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
368 		dev_err(jrdev, "unable to map shared descriptor\n");
369 		return -ENOMEM;
370 	}
371 #ifdef DEBUG
372 	print_hex_dump(KERN_ERR,
373 		       "aead null enc shdesc@"__stringify(__LINE__)": ",
374 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
375 		       desc_bytes(desc), 1);
376 #endif
377 
378 	/*
379 	 * Job Descriptor and Shared Descriptors
380 	 * must all fit into the 64-word Descriptor h/w Buffer
381 	 */
382 	keys_fit_inline = false;
383 	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
384 	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
385 		keys_fit_inline = true;
386 
387 	desc = ctx->sh_desc_dec;
388 
389 	/* aead_decrypt shared descriptor */
390 	init_sh_desc(desc, HDR_SHARE_SERIAL);
391 
392 	/* Skip if already shared */
393 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
394 				   JUMP_COND_SHRD);
395 	if (keys_fit_inline)
396 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
397 				  ctx->split_key_len, CLASS_2 |
398 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
399 	else
400 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
401 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
402 	set_jump_tgt_here(desc, key_jump_cmd);
403 
404 	/* Class 2 operation */
405 	append_operation(desc, ctx->class2_alg_type |
406 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
407 
408 	/* assoclen + cryptlen = seqoutlen */
409 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
410 
411 	/* Prepare to read and write cryptlen + assoclen bytes */
412 	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
413 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
414 
415 	/*
416 	 * MOVE_LEN opcode is not available in all SEC HW revisions,
417 	 * thus need to do some magic, i.e. self-patch the descriptor
418 	 * buffer.
419 	 */
420 	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
421 				    MOVE_DEST_MATH2 |
422 				    (0x6 << MOVE_LEN_SHIFT));
423 	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
424 				     MOVE_DEST_DESCBUF |
425 				     MOVE_WAITCOMP |
426 				     (0x8 << MOVE_LEN_SHIFT));
427 
428 	/* Read and write cryptlen bytes */
429 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
430 
431 	/*
432 	 * Insert a NOP here, since we need at least 4 instructions between
433 	 * code patching the descriptor buffer and the location being patched.
434 	 */
435 	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
436 	set_jump_tgt_here(desc, jump_cmd);
437 
438 	set_move_tgt_here(desc, read_move_cmd);
439 	set_move_tgt_here(desc, write_move_cmd);
440 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
441 	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
442 		    MOVE_AUX_LS);
443 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
444 
445 	/* Load ICV */
446 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
447 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
448 
449 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
450 					      desc_bytes(desc),
451 					      DMA_TO_DEVICE);
452 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
453 		dev_err(jrdev, "unable to map shared descriptor\n");
454 		return -ENOMEM;
455 	}
456 #ifdef DEBUG
457 	print_hex_dump(KERN_ERR,
458 		       "aead null dec shdesc@"__stringify(__LINE__)": ",
459 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
460 		       desc_bytes(desc), 1);
461 #endif
462 
463 	return 0;
464 }
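/*
 * A minimal sketch (hypothetical helper, not used by the driver) of the
 * map-and-check sequence that each *_set_sh_desc() routine in this file
 * repeats after building a shared descriptor:
 */
static inline dma_addr_t example_map_shdesc(struct device *jrdev, u32 *desc)
{
	dma_addr_t dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					DMA_TO_DEVICE);

	if (dma_mapping_error(jrdev, dma))
		dev_err(jrdev, "unable to map shared descriptor\n");

	return dma;
}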
465 
466 static int aead_set_sh_desc(struct crypto_aead *aead)
467 {
468 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
469 						 struct caam_aead_alg, aead);
470 	unsigned int ivsize = crypto_aead_ivsize(aead);
471 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
472 	struct device *jrdev = ctx->jrdev;
473 	bool keys_fit_inline;
474 	u32 geniv, moveiv;
475 	u32 ctx1_iv_off = 0;
476 	u32 *desc;
477 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
478 			       OP_ALG_AAI_CTR_MOD128);
479 	const bool is_rfc3686 = alg->caam.rfc3686;
480 
481 	if (!ctx->authsize)
482 		return 0;
483 
484 	/* NULL encryption / decryption */
485 	if (!ctx->enckeylen)
486 		return aead_null_set_sh_desc(aead);
487 
488 	/*
489 	 * AES-CTR needs to load IV in CONTEXT1 reg
490 	 * at an offset of 128bits (16bytes)
491 	 * CONTEXT1[255:128] = IV
492 	 */
493 	if (ctr_mode)
494 		ctx1_iv_off = 16;
495 
496 	/*
497 	 * RFC3686 specific:
498 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
499 	 */
500 	if (is_rfc3686)
501 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
502 
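	/*
	 * Resulting CONTEXT1 usage, per the two comments above:
	 *
	 *	plain AES-CTR: CONTEXT1[255:128] = IV       (offset 16)
	 *	RFC3686:       CONTEXT1[255:128] = {NONCE (4 bytes),
	 *	               IV (8 bytes), COUNTER (4 bytes)},
	 *	               i.e. the IV sits at offset 16 + 4
	 */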
503 	if (alg->caam.geniv)
504 		goto skip_enc;
505 
506 	/*
507 	 * Job Descriptor and Shared Descriptors
508 	 * must all fit into the 64-word Descriptor h/w Buffer
509 	 */
510 	keys_fit_inline = false;
511 	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
512 	    ctx->split_key_pad_len + ctx->enckeylen +
513 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
514 	    CAAM_DESC_BYTES_MAX)
515 		keys_fit_inline = true;
516 
517 	/* aead_encrypt shared descriptor */
518 	desc = ctx->sh_desc_enc;
519 
520 	/* Note: Context registers are saved. */
521 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
522 
523 	/* Class 2 operation */
524 	append_operation(desc, ctx->class2_alg_type |
525 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
526 
527 	/* Read and write assoclen bytes */
528 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
529 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
530 
531 	/* Skip assoc data */
532 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
533 
534 	/* read assoc before reading payload */
535 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
536 				      FIFOLDST_VLF);
537 
538 	/* Load Counter into CONTEXT1 reg */
539 	if (is_rfc3686)
540 		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
541 				     LDST_SRCDST_BYTE_CONTEXT |
542 				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
543 				      LDST_OFFSET_SHIFT));
544 
545 	/* Class 1 operation */
546 	append_operation(desc, ctx->class1_alg_type |
547 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
548 
549 	/* Read and write cryptlen bytes */
550 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
551 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
552 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
553 
554 	/* Write ICV */
555 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
556 			 LDST_SRCDST_BYTE_CONTEXT);
557 
558 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
559 					      desc_bytes(desc),
560 					      DMA_TO_DEVICE);
561 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
562 		dev_err(jrdev, "unable to map shared descriptor\n");
563 		return -ENOMEM;
564 	}
565 #ifdef DEBUG
566 	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
567 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
568 		       desc_bytes(desc), 1);
569 #endif
570 
571 skip_enc:
572 	/*
573 	 * Job Descriptor and Shared Descriptors
574 	 * must all fit into the 64-word Descriptor h/w Buffer
575 	 */
576 	keys_fit_inline = false;
577 	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
578 	    ctx->split_key_pad_len + ctx->enckeylen +
579 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
580 	    CAAM_DESC_BYTES_MAX)
581 		keys_fit_inline = true;
582 
583 	/* aead_decrypt shared descriptor */
584 	desc = ctx->sh_desc_dec;
585 
586 	/* Note: Context registers are saved. */
587 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
588 
589 	/* Class 2 operation */
590 	append_operation(desc, ctx->class2_alg_type |
591 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
592 
593 	/* Read and write assoclen bytes */
594 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
595 	if (alg->caam.geniv)
596 		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
597 	else
598 		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
599 
600 	/* Skip assoc data */
601 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
602 
603 	/* read assoc before reading payload */
604 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
605 			     KEY_VLF);
606 
607 	if (alg->caam.geniv) {
608 		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
609 				LDST_SRCDST_BYTE_CONTEXT |
610 				(ctx1_iv_off << LDST_OFFSET_SHIFT));
611 		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
612 			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
613 	}
614 
615 	/* Load Counter into CONTEXT1 reg */
616 	if (is_rfc3686)
617 		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
618 				     LDST_SRCDST_BYTE_CONTEXT |
619 				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
620 				      LDST_OFFSET_SHIFT));
621 
622 	/* Choose operation */
623 	if (ctr_mode)
624 		append_operation(desc, ctx->class1_alg_type |
625 				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
626 	else
627 		append_dec_op1(desc, ctx->class1_alg_type);
628 
629 	/* Read and write cryptlen bytes */
630 	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
631 	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
632 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
633 
634 	/* Load ICV */
635 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
636 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
637 
638 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
639 					      desc_bytes(desc),
640 					      DMA_TO_DEVICE);
641 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
642 		dev_err(jrdev, "unable to map shared descriptor\n");
643 		return -ENOMEM;
644 	}
645 #ifdef DEBUG
646 	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
647 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
648 		       desc_bytes(desc), 1);
649 #endif
650 
651 	if (!alg->caam.geniv)
652 		goto skip_givenc;
653 
654 	/*
655 	 * Job Descriptor and Shared Descriptors
656 	 * must all fit into the 64-word Descriptor h/w Buffer
657 	 */
658 	keys_fit_inline = false;
659 	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
660 	    ctx->split_key_pad_len + ctx->enckeylen +
661 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
662 	    CAAM_DESC_BYTES_MAX)
663 		keys_fit_inline = true;
664 
665 	/* aead_givencrypt shared descriptor */
666 	desc = ctx->sh_desc_enc;
667 
668 	/* Note: Context registers are saved. */
669 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
670 
671 	if (is_rfc3686)
672 		goto copy_iv;
673 
674 	/* Generate IV */
675 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
676 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
677 		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
678 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
679 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
680 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
681 	append_move(desc, MOVE_WAITCOMP |
682 		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
683 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
684 		    (ivsize << MOVE_LEN_SHIFT));
685 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
686 
687 copy_iv:
688 	/* Copy IV from class 1 context to OFIFO */
689 	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
690 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
691 		    (ivsize << MOVE_LEN_SHIFT));
692 
693 	/* Class 2 operation */
694 	append_operation(desc, ctx->class2_alg_type |
695 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
696 
697 	/* Read and write assoclen bytes */
698 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
699 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
700 
701 	/* ivsize + cryptlen = seqoutlen - authsize */
702 	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
703 
704 	/* Skip assoc data */
705 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
706 
707 	/* read assoc before reading payload */
708 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
709 			     KEY_VLF);
710 
711 	/* Copy iv from outfifo to class 2 fifo */
712 	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
713 		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
714 	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
715 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
716 	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
717 			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
718 
719 	/* Load Counter into CONTEXT1 reg */
720 	if (is_rfc3686)
721 		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
722 				     LDST_SRCDST_BYTE_CONTEXT |
723 				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
724 				      LDST_OFFSET_SHIFT));
725 
726 	/* Class 1 operation */
727 	append_operation(desc, ctx->class1_alg_type |
728 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
729 
730 	/* Will write ivsize + cryptlen */
731 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
732 
733 	/* No need to reload IV */
734 	append_seq_fifo_load(desc, ivsize,
735 			     FIFOLD_CLASS_SKIP);
736 
737 	/* Will read cryptlen */
738 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
739 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
740 
741 	/* Write ICV */
742 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
743 			 LDST_SRCDST_BYTE_CONTEXT);
744 
745 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
746 					      desc_bytes(desc),
747 					      DMA_TO_DEVICE);
748 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
749 		dev_err(jrdev, "unable to map shared descriptor\n");
750 		return -ENOMEM;
751 	}
752 #ifdef DEBUG
753 	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
754 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
755 		       desc_bytes(desc), 1);
756 #endif
757 
758 skip_givenc:
759 	return 0;
760 }
761 
762 static int aead_setauthsize(struct crypto_aead *authenc,
763 				    unsigned int authsize)
764 {
765 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
766 
767 	ctx->authsize = authsize;
768 	aead_set_sh_desc(authenc);
769 
770 	return 0;
771 }
772 
773 static int gcm_set_sh_desc(struct crypto_aead *aead)
774 {
775 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
776 	struct device *jrdev = ctx->jrdev;
777 	bool keys_fit_inline = false;
778 	u32 *key_jump_cmd, *zero_payload_jump_cmd,
779 	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
780 	u32 *desc;
781 
782 	if (!ctx->enckeylen || !ctx->authsize)
783 		return 0;
784 
785 	/*
786 	 * AES GCM encrypt shared descriptor
787 	 * Job Descriptor and Shared Descriptor
788 	 * must fit into the 64-word Descriptor h/w Buffer
789 	 */
790 	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
791 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
792 		keys_fit_inline = true;
793 
794 	desc = ctx->sh_desc_enc;
795 
796 	init_sh_desc(desc, HDR_SHARE_SERIAL);
797 
798 	/* skip key loading if keys are already loaded due to sharing */
799 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
800 				   JUMP_COND_SHRD | JUMP_COND_SELF);
801 	if (keys_fit_inline)
802 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
803 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
804 	else
805 		append_key(desc, ctx->key_dma, ctx->enckeylen,
806 			   CLASS_1 | KEY_DEST_CLASS_REG);
807 	set_jump_tgt_here(desc, key_jump_cmd);
808 
809 	/* class 1 operation */
810 	append_operation(desc, ctx->class1_alg_type |
811 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
812 
813 	/* if assoclen + cryptlen is ZERO, skip to ICV write */
814 	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
815 	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
816 						 JUMP_COND_MATH_Z);
817 
818 	/* if assoclen is ZERO, skip reading the assoc data */
819 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
820 	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
821 						 JUMP_COND_MATH_Z);
822 
823 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
824 
825 	/* skip assoc data */
826 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
827 
828 	/* cryptlen = seqinlen - assoclen */
829 	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
830 
831 	/* if cryptlen is ZERO jump to zero-payload commands */
832 	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
833 					    JUMP_COND_MATH_Z);
834 
835 	/* read assoc data */
836 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
837 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
838 	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
839 
840 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
841 
842 	/* write encrypted data */
843 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
844 
845 	/* read payload data */
846 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
847 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
848 
849 	/* jump the zero-payload commands */
850 	append_jump(desc, JUMP_TEST_ALL | 2);
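	/*
	 * The "| 2" above is a relative JUMP offset in descriptor words
	 * (counted from the JUMP command itself, an assumption based on
	 * the JUMP_OFFSET_SHIFT use later in this file): it hops over
	 * the zero-payload FIFO LOAD below.
	 */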
851 
852 	/* zero-payload commands */
853 	set_jump_tgt_here(desc, zero_payload_jump_cmd);
854 
855 	/* read assoc data */
856 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
857 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
858 
859 	/* There is no input data */
860 	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
861 
862 	/* write ICV */
863 	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
864 			 LDST_SRCDST_BYTE_CONTEXT);
865 
866 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
867 					      desc_bytes(desc),
868 					      DMA_TO_DEVICE);
869 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
870 		dev_err(jrdev, "unable to map shared descriptor\n");
871 		return -ENOMEM;
872 	}
873 #ifdef DEBUG
874 	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
875 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
876 		       desc_bytes(desc), 1);
877 #endif
878 
879 	/*
880 	 * Job Descriptor and Shared Descriptors
881 	 * must all fit into the 64-word Descriptor h/w Buffer
882 	 */
883 	keys_fit_inline = false;
884 	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
885 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
886 		keys_fit_inline = true;
887 
888 	desc = ctx->sh_desc_dec;
889 
890 	init_sh_desc(desc, HDR_SHARE_SERIAL);
891 
892 	/* skip key loading if keys are already loaded due to sharing */
893 	key_jump_cmd = append_jump(desc, JUMP_JSL |
894 				   JUMP_TEST_ALL | JUMP_COND_SHRD |
895 				   JUMP_COND_SELF);
896 	if (keys_fit_inline)
897 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
898 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
899 	else
900 		append_key(desc, ctx->key_dma, ctx->enckeylen,
901 			   CLASS_1 | KEY_DEST_CLASS_REG);
902 	set_jump_tgt_here(desc, key_jump_cmd);
903 
904 	/* class 1 operation */
905 	append_operation(desc, ctx->class1_alg_type |
906 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
907 
908 	/* if assoclen is ZERO, skip reading the assoc data */
909 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
910 	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
911 						 JUMP_COND_MATH_Z);
912 
913 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
914 
915 	/* skip assoc data */
916 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
917 
918 	/* read assoc data */
919 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
920 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
921 
922 	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
923 
924 	/* cryptlen = seqoutlen - assoclen */
925 	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
926 
927 	/* jump to zero-payload command if cryptlen is zero */
928 	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
929 					    JUMP_COND_MATH_Z);
930 
931 	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
932 
933 	/* store encrypted data */
934 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
935 
936 	/* read payload data */
937 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
938 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
939 
940 	/* zero-payload command */
941 	set_jump_tgt_here(desc, zero_payload_jump_cmd);
942 
943 	/* read ICV */
944 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
945 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
946 
947 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
948 					      desc_bytes(desc),
949 					      DMA_TO_DEVICE);
950 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
951 		dev_err(jrdev, "unable to map shared descriptor\n");
952 		return -ENOMEM;
953 	}
954 #ifdef DEBUG
955 	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
956 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
957 		       desc_bytes(desc), 1);
958 #endif
959 
960 	return 0;
961 }
962 
963 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
964 {
965 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
966 
967 	ctx->authsize = authsize;
968 	gcm_set_sh_desc(authenc);
969 
970 	return 0;
971 }
972 
973 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
974 {
975 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
976 	struct device *jrdev = ctx->jrdev;
977 	bool keys_fit_inline = false;
978 	u32 *key_jump_cmd;
979 	u32 *desc;
980 
981 	if (!ctx->enckeylen || !ctx->authsize)
982 		return 0;
983 
984 	/*
985 	 * RFC4106 encrypt shared descriptor
986 	 * Job Descriptor and Shared Descriptor
987 	 * must fit into the 64-word Descriptor h/w Buffer
988 	 */
989 	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
990 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
991 		keys_fit_inline = true;
992 
993 	desc = ctx->sh_desc_enc;
994 
995 	init_sh_desc(desc, HDR_SHARE_SERIAL);
996 
997 	/* Skip key loading if it is loaded due to sharing */
998 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
999 				   JUMP_COND_SHRD);
1000 	if (keys_fit_inline)
1001 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1002 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1003 	else
1004 		append_key(desc, ctx->key_dma, ctx->enckeylen,
1005 			   CLASS_1 | KEY_DEST_CLASS_REG);
1006 	set_jump_tgt_here(desc, key_jump_cmd);
1007 
1008 	/* Class 1 operation */
1009 	append_operation(desc, ctx->class1_alg_type |
1010 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1011 
1012 	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
1013 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1014 
1015 	/* Read assoc data */
1016 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1017 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1018 
1019 	/* Skip IV */
1020 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1021 
1022 	/* Will read cryptlen bytes */
1023 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1024 
1025 	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
1026 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
1027 
1028 	/* Skip assoc data */
1029 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
1030 
1031 	/* cryptlen = seqoutlen - assoclen */
1032 	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
1033 
1034 	/* Write encrypted data */
1035 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1036 
1037 	/* Read payload data */
1038 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1039 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
1040 
1041 	/* Write ICV */
1042 	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1043 			 LDST_SRCDST_BYTE_CONTEXT);
1044 
1045 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1046 					      desc_bytes(desc),
1047 					      DMA_TO_DEVICE);
1048 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1049 		dev_err(jrdev, "unable to map shared descriptor\n");
1050 		return -ENOMEM;
1051 	}
1052 #ifdef DEBUG
1053 	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
1054 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1055 		       desc_bytes(desc), 1);
1056 #endif
1057 
1058 	/*
1059 	 * Job Descriptor and Shared Descriptors
1060 	 * must all fit into the 64-word Descriptor h/w Buffer
1061 	 */
1062 	keys_fit_inline = false;
1063 	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
1064 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1065 		keys_fit_inline = true;
1066 
1067 	desc = ctx->sh_desc_dec;
1068 
1069 	init_sh_desc(desc, HDR_SHARE_SERIAL);
1070 
1071 	/* Skip key loading if it is loaded due to sharing */
1072 	key_jump_cmd = append_jump(desc, JUMP_JSL |
1073 				   JUMP_TEST_ALL | JUMP_COND_SHRD);
1074 	if (keys_fit_inline)
1075 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1076 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1077 	else
1078 		append_key(desc, ctx->key_dma, ctx->enckeylen,
1079 			   CLASS_1 | KEY_DEST_CLASS_REG);
1080 	set_jump_tgt_here(desc, key_jump_cmd);
1081 
1082 	/* Class 1 operation */
1083 	append_operation(desc, ctx->class1_alg_type |
1084 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1085 
1086 	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
1087 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
1088 
1089 	/* Read assoc data */
1090 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1091 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
1092 
1093 	/* Skip IV */
1094 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1095 
1096 	/* Will read cryptlen bytes */
1097 	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
1098 
1099 	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
1100 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
1101 
1102 	/* Skip assoc data */
1103 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
1104 
1105 	/* Will write cryptlen bytes */
1106 	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1107 
1108 	/* Store payload data */
1109 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1110 
1111 	/* Read encrypted data */
1112 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
1113 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
1114 
1115 	/* Read ICV */
1116 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1117 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1118 
1119 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1120 					      desc_bytes(desc),
1121 					      DMA_TO_DEVICE);
1122 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1123 		dev_err(jrdev, "unable to map shared descriptor\n");
1124 		return -ENOMEM;
1125 	}
1126 #ifdef DEBUG
1127 	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
1128 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1129 		       desc_bytes(desc), 1);
1130 #endif
1131 
1132 	return 0;
1133 }
1134 
1135 static int rfc4106_setauthsize(struct crypto_aead *authenc,
1136 			       unsigned int authsize)
1137 {
1138 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1139 
1140 	ctx->authsize = authsize;
1141 	rfc4106_set_sh_desc(authenc);
1142 
1143 	return 0;
1144 }
1145 
1146 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
1147 {
1148 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1149 	struct device *jrdev = ctx->jrdev;
1150 	bool keys_fit_inline = false;
1151 	u32 *key_jump_cmd;
1152 	u32 *read_move_cmd, *write_move_cmd;
1153 	u32 *desc;
1154 
1155 	if (!ctx->enckeylen || !ctx->authsize)
1156 		return 0;
1157 
1158 	/*
1159 	 * RFC4543 encrypt shared descriptor
1160 	 * Job Descriptor and Shared Descriptor
1161 	 * must fit into the 64-word Descriptor h/w Buffer
1162 	 */
1163 	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
1164 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1165 		keys_fit_inline = true;
1166 
1167 	desc = ctx->sh_desc_enc;
1168 
1169 	init_sh_desc(desc, HDR_SHARE_SERIAL);
1170 
1171 	/* Skip key loading if it is loaded due to sharing */
1172 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1173 				   JUMP_COND_SHRD);
1174 	if (keys_fit_inline)
1175 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1176 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1177 	else
1178 		append_key(desc, ctx->key_dma, ctx->enckeylen,
1179 			   CLASS_1 | KEY_DEST_CLASS_REG);
1180 	set_jump_tgt_here(desc, key_jump_cmd);
1181 
1182 	/* Class 1 operation */
1183 	append_operation(desc, ctx->class1_alg_type |
1184 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1185 
1186 	/* assoclen + cryptlen = seqinlen */
1187 	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
1188 
1189 	/*
1190 	 * MOVE_LEN opcode is not available in all SEC HW revisions,
1191 	 * thus need to do some magic, i.e. self-patch the descriptor
1192 	 * buffer.
1193 	 */
1194 	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1195 				    (0x6 << MOVE_LEN_SHIFT));
1196 	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1197 				     (0x8 << MOVE_LEN_SHIFT));
1198 
1199 	/* Will read assoclen + cryptlen bytes */
1200 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1201 
1202 	/* Will write assoclen + cryptlen bytes */
1203 	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1204 
1205 	/* Read and write assoclen + cryptlen bytes */
1206 	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
1207 
1208 	set_move_tgt_here(desc, read_move_cmd);
1209 	set_move_tgt_here(desc, write_move_cmd);
1210 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1211 	/* Move payload data to OFIFO */
1212 	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1213 
1214 	/* Write ICV */
1215 	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
1216 			 LDST_SRCDST_BYTE_CONTEXT);
1217 
1218 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1219 					      desc_bytes(desc),
1220 					      DMA_TO_DEVICE);
1221 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1222 		dev_err(jrdev, "unable to map shared descriptor\n");
1223 		return -ENOMEM;
1224 	}
1225 #ifdef DEBUG
1226 	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
1227 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1228 		       desc_bytes(desc), 1);
1229 #endif
1230 
1231 	/*
1232 	 * Job Descriptor and Shared Descriptors
1233 	 * must all fit into the 64-word Descriptor h/w Buffer
1234 	 */
1235 	keys_fit_inline = false;
1236 	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
1237 	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
1238 		keys_fit_inline = true;
1239 
1240 	desc = ctx->sh_desc_dec;
1241 
1242 	init_sh_desc(desc, HDR_SHARE_SERIAL);
1243 
1244 	/* Skip key loading if it is loaded due to sharing */
1245 	key_jump_cmd = append_jump(desc, JUMP_JSL |
1246 				   JUMP_TEST_ALL | JUMP_COND_SHRD);
1247 	if (keys_fit_inline)
1248 		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1249 				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1250 	else
1251 		append_key(desc, ctx->key_dma, ctx->enckeylen,
1252 			   CLASS_1 | KEY_DEST_CLASS_REG);
1253 	set_jump_tgt_here(desc, key_jump_cmd);
1254 
1255 	/* Class 1 operation */
1256 	append_operation(desc, ctx->class1_alg_type |
1257 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1258 
1259 	/* assoclen + cryptlen = seqoutlen */
1260 	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1261 
1262 	/*
1263 	 * MOVE_LEN opcode is not available in all SEC HW revisions,
1264 	 * thus need to do some magic, i.e. self-patch the descriptor
1265 	 * buffer.
1266 	 */
1267 	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
1268 				    (0x6 << MOVE_LEN_SHIFT));
1269 	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
1270 				     (0x8 << MOVE_LEN_SHIFT));
1271 
1272 	/* Will read assoclen + cryptlen bytes */
1273 	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1274 
1275 	/* Will write assoclen + cryptlen bytes */
1276 	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1277 
1278 	/* Store payload data */
1279 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
1280 
1281 	/* In-snoop assoclen + cryptlen data */
1282 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
1283 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
1284 
1285 	set_move_tgt_here(desc, read_move_cmd);
1286 	set_move_tgt_here(desc, write_move_cmd);
1287 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1288 	/* Move payload data to OFIFO */
1289 	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
1290 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1291 
1292 	/* Read ICV */
1293 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
1294 			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
1295 
1296 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1297 					      desc_bytes(desc),
1298 					      DMA_TO_DEVICE);
1299 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1300 		dev_err(jrdev, "unable to map shared descriptor\n");
1301 		return -ENOMEM;
1302 	}
1303 #ifdef DEBUG
1304 	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
1305 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1306 		       desc_bytes(desc), 1);
1307 #endif
1308 
1309 	return 0;
1310 }
1311 
1312 static int rfc4543_setauthsize(struct crypto_aead *authenc,
1313 			       unsigned int authsize)
1314 {
1315 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
1316 
1317 	ctx->authsize = authsize;
1318 	rfc4543_set_sh_desc(authenc);
1319 
1320 	return 0;
1321 }
1322 
1323 static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
1324 			      u32 authkeylen)
1325 {
1326 	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
1327 			       ctx->split_key_pad_len, key_in, authkeylen,
1328 			       ctx->alg_op);
1329 }
1330 
1331 static int aead_setkey(struct crypto_aead *aead,
1332 			       const u8 *key, unsigned int keylen)
1333 {
1334 	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
1335 	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
1336 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1337 	struct device *jrdev = ctx->jrdev;
1338 	struct crypto_authenc_keys keys;
1339 	int ret = 0;
1340 
1341 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1342 		goto badkey;
1343 
1344 	/* Pick class 2 key length from algorithm submask */
1345 	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1346 				      OP_ALG_ALGSEL_SHIFT] * 2;
1347 	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
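	/*
	 * Worked example: for HMAC-SHA256 the mdpadlen[] entry is 32, so
	 * split_key_len = 64 (the MDHA split key holds the ipad and opad
	 * hash states back to back) and split_key_pad_len = 64, already
	 * 16-byte aligned; for HMAC-MD5, 16 * 2 = 32, likewise unpadded.
	 */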
1348 
1349 	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1350 		goto badkey;
1351 
1352 #ifdef DEBUG
1353 	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
1354 	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
1355 	       keys.authkeylen);
1356 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
1357 	       ctx->split_key_len, ctx->split_key_pad_len);
1358 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1359 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1360 #endif
1361 
1362 	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
1363 	if (ret) {
1364 		goto badkey;
1365 	}
1366 
1367 	/* append encryption key after the auth split key */
1368 	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
1369 
1370 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
1371 				      keys.enckeylen, DMA_TO_DEVICE);
1372 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1373 		dev_err(jrdev, "unable to map key i/o memory\n");
1374 		return -ENOMEM;
1375 	}
1376 #ifdef DEBUG
1377 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1378 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1379 		       ctx->split_key_pad_len + keys.enckeylen, 1);
1380 #endif
1381 
1382 	ctx->enckeylen = keys.enckeylen;
1383 
1384 	ret = aead_set_sh_desc(aead);
1385 	if (ret) {
1386 		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
1387 				 keys.enckeylen, DMA_TO_DEVICE);
1388 	}
1389 
1390 	return ret;
1391 badkey:
1392 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1393 	return -EINVAL;
1394 }
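/*
 * Note on the key blob consumed by aead_setkey() above:
 * crypto_authenc_extractkeys() parses the rtattr-encoded authenc()
 * key format ({enckeylen parameter, authentication key, encryption
 * key}) and fills struct crypto_authenc_keys accordingly.
 */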
1395 
1396 static int gcm_setkey(struct crypto_aead *aead,
1397 		      const u8 *key, unsigned int keylen)
1398 {
1399 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1400 	struct device *jrdev = ctx->jrdev;
1401 	int ret = 0;
1402 
1403 #ifdef DEBUG
1404 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1405 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1406 #endif
1407 
1408 	memcpy(ctx->key, key, keylen);
1409 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1410 				      DMA_TO_DEVICE);
1411 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1412 		dev_err(jrdev, "unable to map key i/o memory\n");
1413 		return -ENOMEM;
1414 	}
1415 	ctx->enckeylen = keylen;
1416 
1417 	ret = gcm_set_sh_desc(aead);
1418 	if (ret) {
1419 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1420 				 DMA_TO_DEVICE);
1421 	}
1422 
1423 	return ret;
1424 }
1425 
1426 static int rfc4106_setkey(struct crypto_aead *aead,
1427 			  const u8 *key, unsigned int keylen)
1428 {
1429 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1430 	struct device *jrdev = ctx->jrdev;
1431 	int ret = 0;
1432 
1433 	if (keylen < 4)
1434 		return -EINVAL;
1435 
1436 #ifdef DEBUG
1437 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1438 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1439 #endif
1440 
1441 	memcpy(ctx->key, key, keylen);
1442 
1443 	/*
1444 	 * The last four bytes of the key material are used as the salt value
1445 	 * in the nonce. Update the AES key length.
1446 	 */
1447 	ctx->enckeylen = keylen - 4;
1448 
1449 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1450 				      DMA_TO_DEVICE);
1451 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1452 		dev_err(jrdev, "unable to map key i/o memory\n");
1453 		return -ENOMEM;
1454 	}
1455 
1456 	ret = rfc4106_set_sh_desc(aead);
1457 	if (ret) {
1458 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1459 				 DMA_TO_DEVICE);
1460 	}
1461 
1462 	return ret;
1463 }
1464 
1465 static int rfc4543_setkey(struct crypto_aead *aead,
1466 			  const u8 *key, unsigned int keylen)
1467 {
1468 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1469 	struct device *jrdev = ctx->jrdev;
1470 	int ret = 0;
1471 
1472 	if (keylen < 4)
1473 		return -EINVAL;
1474 
1475 #ifdef DEBUG
1476 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1477 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1478 #endif
1479 
1480 	memcpy(ctx->key, key, keylen);
1481 
1482 	/*
1483 	 * The last four bytes of the key material are used as the salt value
1484 	 * in the nonce. Update the AES key length.
1485 	 */
1486 	ctx->enckeylen = keylen - 4;
1487 
1488 	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
1489 				      DMA_TO_DEVICE);
1490 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1491 		dev_err(jrdev, "unable to map key i/o memory\n");
1492 		return -ENOMEM;
1493 	}
1494 
1495 	ret = rfc4543_set_sh_desc(aead);
1496 	if (ret) {
1497 		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
1498 				 DMA_TO_DEVICE);
1499 	}
1500 
1501 	return ret;
1502 }
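/*
 * Key layout shared by rfc4106_setkey() and rfc4543_setkey() above:
 * the caller passes {AES key, 4-byte salt}.  ctx->enckeylen counts
 * only the AES key; the trailing salt stays in ctx->key and is the
 * salt value of the nonce mentioned in the comments above.  E.g.
 * keylen == 20 means a 16-byte AES-128 key plus a 4-byte salt.
 */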
1503 
1504 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1505 			     const u8 *key, unsigned int keylen)
1506 {
1507 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1508 	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
1509 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
1510 	const char *alg_name = crypto_tfm_alg_name(tfm);
1511 	struct device *jrdev = ctx->jrdev;
1512 	int ret = 0;
1513 	u32 *key_jump_cmd;
1514 	u32 *desc;
1515 	u8 *nonce;
1516 	u32 geniv;
1517 	u32 ctx1_iv_off = 0;
1518 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
1519 			       OP_ALG_AAI_CTR_MOD128);
1520 	const bool is_rfc3686 = (ctr_mode &&
1521 				 (strstr(alg_name, "rfc3686") != NULL));
1522 
1523 #ifdef DEBUG
1524 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
1525 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1526 #endif
1527 	/*
1528 	 * AES-CTR needs to load IV in CONTEXT1 reg
1529 	 * at an offset of 128bits (16bytes)
1530 	 * CONTEXT1[255:128] = IV
1531 	 */
1532 	if (ctr_mode)
1533 		ctx1_iv_off = 16;
1534 
1535 	/*
1536 	 * RFC3686 specific:
1537 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1538 	 *	| *key = {KEY, NONCE}
1539 	 */
1540 	if (is_rfc3686) {
1541 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1542 		keylen -= CTR_RFC3686_NONCE_SIZE;
1543 	}
1544 
1545 	memcpy(ctx->key, key, keylen);
1546 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
1547 				      DMA_TO_DEVICE);
1548 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1549 		dev_err(jrdev, "unable to map key i/o memory\n");
1550 		return -ENOMEM;
1551 	}
1552 	ctx->enckeylen = keylen;
1553 
1554 	/* ablkcipher_encrypt shared descriptor */
1555 	desc = ctx->sh_desc_enc;
1556 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1557 	/* Skip if already shared */
1558 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1559 				   JUMP_COND_SHRD);
1560 
1561 	/* Load class1 key only */
1562 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1563 			  ctx->enckeylen, CLASS_1 |
1564 			  KEY_DEST_CLASS_REG);
1565 
1566 	/* Load nonce into CONTEXT1 reg */
1567 	if (is_rfc3686) {
1568 		nonce = (u8 *)key + keylen;
1569 		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1570 				   LDST_CLASS_IND_CCB |
1571 				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1572 		append_move(desc, MOVE_WAITCOMP |
1573 			    MOVE_SRC_OUTFIFO |
1574 			    MOVE_DEST_CLASS1CTX |
1575 			    (16 << MOVE_OFFSET_SHIFT) |
1576 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1577 	}
1578 
1579 	set_jump_tgt_here(desc, key_jump_cmd);
1580 
1581 	/* Load iv */
1582 	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1583 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1584 
1585 	/* Load counter into CONTEXT1 reg */
1586 	if (is_rfc3686)
1587 		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1588 				     LDST_SRCDST_BYTE_CONTEXT |
1589 				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1590 				      LDST_OFFSET_SHIFT));
1591 
1592 	/* Load operation */
1593 	append_operation(desc, ctx->class1_alg_type |
1594 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1595 
1596 	/* Perform operation */
1597 	ablkcipher_append_src_dst(desc);
1598 
1599 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
1600 					      desc_bytes(desc),
1601 					      DMA_TO_DEVICE);
1602 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1603 		dev_err(jrdev, "unable to map shared descriptor\n");
1604 		return -ENOMEM;
1605 	}
1606 #ifdef DEBUG
1607 	print_hex_dump(KERN_ERR,
1608 		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
1609 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1610 		       desc_bytes(desc), 1);
1611 #endif
1612 	/* ablkcipher_decrypt shared descriptor */
1613 	desc = ctx->sh_desc_dec;
1614 
1615 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1616 	/* Skip if already shared */
1617 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1618 				   JUMP_COND_SHRD);
1619 
1620 	/* Load class1 key only */
1621 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1622 			  ctx->enckeylen, CLASS_1 |
1623 			  KEY_DEST_CLASS_REG);
1624 
1625 	/* Load nonce into CONTEXT1 reg */
1626 	if (is_rfc3686) {
1627 		nonce = (u8 *)key + keylen;
1628 		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1629 				   LDST_CLASS_IND_CCB |
1630 				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1631 		append_move(desc, MOVE_WAITCOMP |
1632 			    MOVE_SRC_OUTFIFO |
1633 			    MOVE_DEST_CLASS1CTX |
1634 			    (16 << MOVE_OFFSET_SHIFT) |
1635 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1636 	}
1637 
1638 	set_jump_tgt_here(desc, key_jump_cmd);
1639 
1640 	/* load IV */
1641 	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
1642 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1643 
1644 	/* Load counter into CONTEXT1 reg */
1645 	if (is_rfc3686)
1646 		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1647 				     LDST_SRCDST_BYTE_CONTEXT |
1648 				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1649 				      LDST_OFFSET_SHIFT));
1650 
1651 	/* Choose operation */
1652 	if (ctr_mode)
1653 		append_operation(desc, ctx->class1_alg_type |
1654 				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
1655 	else
1656 		append_dec_op1(desc, ctx->class1_alg_type);
1657 
1658 	/* Perform operation */
1659 	ablkcipher_append_src_dst(desc);
1660 
1661 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
1662 					      desc_bytes(desc),
1663 					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
1668 
1669 #ifdef DEBUG
1670 	print_hex_dump(KERN_ERR,
1671 		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
1672 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1673 		       desc_bytes(desc), 1);
1674 #endif
1675 	/* ablkcipher_givencrypt shared descriptor */
1676 	desc = ctx->sh_desc_givenc;
1677 
1678 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1679 	/* Skip if already shared */
1680 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1681 				   JUMP_COND_SHRD);
1682 
1683 	/* Load class1 key only */
1684 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1685 			  ctx->enckeylen, CLASS_1 |
1686 			  KEY_DEST_CLASS_REG);
1687 
1688 	/* Load Nonce into CONTEXT1 reg */
1689 	if (is_rfc3686) {
1690 		nonce = (u8 *)key + keylen;
1691 		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1692 				   LDST_CLASS_IND_CCB |
1693 				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1694 		append_move(desc, MOVE_WAITCOMP |
1695 			    MOVE_SRC_OUTFIFO |
1696 			    MOVE_DEST_CLASS1CTX |
1697 			    (16 << MOVE_OFFSET_SHIFT) |
1698 			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1699 	}
1700 	set_jump_tgt_here(desc, key_jump_cmd);
1701 
1702 	/* Generate IV */
1703 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1704 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
1705 		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
1706 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1707 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1708 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1709 	append_move(desc, MOVE_WAITCOMP |
1710 		    MOVE_SRC_INFIFO |
1711 		    MOVE_DEST_CLASS1CTX |
1712 		    (crt->ivsize << MOVE_LEN_SHIFT) |
1713 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1714 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1715 
1716 	/* Copy generated IV to memory */
1717 	append_seq_store(desc, crt->ivsize,
1718 			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
1719 			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
1720 
1721 	/* Load Counter into CONTEXT1 reg */
1722 	if (is_rfc3686)
1723 		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1724 				     LDST_SRCDST_BYTE_CONTEXT |
1725 				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1726 				      LDST_OFFSET_SHIFT));
1727 
1728 	if (ctx1_iv_off)
1729 		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1730 			    (1 << JUMP_OFFSET_SHIFT));
1731 
1732 	/* Load operation */
1733 	append_operation(desc, ctx->class1_alg_type |
1734 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
1735 
1736 	/* Perform operation */
1737 	ablkcipher_append_src_dst(desc);
1738 
1739 	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
1740 						 desc_bytes(desc),
1741 						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dma_unmap_single(jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
1746 #ifdef DEBUG
1747 	print_hex_dump(KERN_ERR,
1748 		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
1749 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
1750 		       desc_bytes(desc), 1);
1751 #endif
1752 
1753 	return ret;
1754 }
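
/*
 * All three shared descriptors above guard their key material with the
 * same pattern (shown here for reference):
 *
 *	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 *				   JUMP_COND_SHRD);
 *	... key/nonce loads, executed only while the descriptor
 *	    is not yet shared ...
 *	set_jump_tgt_here(desc, key_jump_cmd);
 *
 * Once the SHRD condition holds, the jump skips the loads, so keys are
 * not re-fetched for every job that reuses the shared descriptor.
 */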
1755 
1756 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1757 				 const u8 *key, unsigned int keylen)
1758 {
1759 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1760 	struct device *jrdev = ctx->jrdev;
1761 	u32 *key_jump_cmd, *desc;
1762 	__be64 sector_size = cpu_to_be64(512);
1763 
1764 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
1765 		crypto_ablkcipher_set_flags(ablkcipher,
1766 					    CRYPTO_TFM_RES_BAD_KEY_LEN);
1767 		dev_err(jrdev, "key size mismatch\n");
1768 		return -EINVAL;
1769 	}
1770 
1771 	memcpy(ctx->key, key, keylen);
1772 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
1773 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
1774 		dev_err(jrdev, "unable to map key i/o memory\n");
1775 		return -ENOMEM;
1776 	}
1777 	ctx->enckeylen = keylen;
1778 
1779 	/* xts_ablkcipher_encrypt shared descriptor */
1780 	desc = ctx->sh_desc_enc;
1781 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1782 	/* Skip if already shared */
1783 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1784 				   JUMP_COND_SHRD);
1785 
1786 	/* Load class1 keys only */
1787 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1788 			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1789 
1790 	/* Load sector size with index 40 bytes (0x28) */
1791 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1792 		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1793 	append_data(desc, (void *)&sector_size, 8);
1794 
1795 	set_jump_tgt_here(desc, key_jump_cmd);
1796 
1797 	/*
1798 	 * create sequence for loading the sector index
1799 	 * Upper 8B of IV - will be used as sector index
1800 	 * Lower 8B of IV - will be discarded
1801 	 */
1802 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1803 		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1804 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1805 
1806 	/* Load operation */
1807 	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
1808 			 OP_ALG_ENCRYPT);
1809 
1810 	/* Perform operation */
1811 	ablkcipher_append_src_dst(desc);
1812 
1813 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1814 					      DMA_TO_DEVICE);
1815 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
1816 		dev_err(jrdev, "unable to map shared descriptor\n");
1817 		return -ENOMEM;
1818 	}
1819 #ifdef DEBUG
1820 	print_hex_dump(KERN_ERR,
1821 		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
1822 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1823 #endif
1824 
1825 	/* xts_ablkcipher_decrypt shared descriptor */
1826 	desc = ctx->sh_desc_dec;
1827 
1828 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1829 	/* Skip if already shared */
1830 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1831 				   JUMP_COND_SHRD);
1832 
1833 	/* Load class1 key only */
1834 	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
1835 			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
1836 
1837 	/* Load sector size with index 40 bytes (0x28) */
1838 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
1839 		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
1840 	append_data(desc, (void *)&sector_size, 8);
1841 
1842 	set_jump_tgt_here(desc, key_jump_cmd);
1843 
1844 	/*
1845 	 * create sequence for loading the sector index
1846 	 * Upper 8B of IV - will be used as sector index
1847 	 * Lower 8B of IV - will be discarded
1848 	 */
1849 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
1850 		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
1851 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
1852 
1853 	/* Load operation */
1854 	append_dec_op1(desc, ctx->class1_alg_type);
1855 
1856 	/* Perform operation */
1857 	ablkcipher_append_src_dst(desc);
1858 
1859 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
1860 					      DMA_TO_DEVICE);
1861 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
1862 		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
1863 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
1864 		dev_err(jrdev, "unable to map shared descriptor\n");
1865 		return -ENOMEM;
1866 	}
1867 #ifdef DEBUG
1868 	print_hex_dump(KERN_ERR,
1869 		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
1870 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1871 #endif
1872 
1873 	return 0;
1874 }
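
/*
 * Summary of the XTS context layout programmed above: the 64-bit sector
 * size (512, big endian) is loaded at CONTEXT1 byte offset 0x28, the
 * upper 8 bytes of the IV are loaded at offset 0x20 to serve as the
 * sector index, and the lower 8 bytes of the IV are dropped via
 * FIFOLD_CLASS_SKIP.
 */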
1875 
1876 /*
1877  * aead_edesc - s/w-extended aead descriptor
1878  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
1879  * @src_nents: number of segments in input scatterlist
1880  * @dst_nents: number of segments in output scatterlist
1881  * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: kernel virtual address of h/w link table
 * @hw_desc: the h/w job descriptor (variable length; must not exceed
 *	     MAX_CAAM_DESCSIZE) followed by any referenced link tables
1886  */
1887 struct aead_edesc {
1888 	int assoc_nents;
1889 	int src_nents;
1890 	int dst_nents;
1891 	dma_addr_t iv_dma;
1892 	int sec4_sg_bytes;
1893 	dma_addr_t sec4_sg_dma;
1894 	struct sec4_sg_entry *sec4_sg;
1895 	u32 hw_desc[];
1896 };
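
/*
 * hw_desc[] must stay the last member: the completion callbacks are
 * handed a pointer to the job descriptor and recover the enclosing
 * edesc with container_of(desc, struct aead_edesc, hw_desc[0]) (see
 * aead_encrypt_done() below).
 */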
1897 
1898 /*
1899  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
1900  * @src_nents: number of segments in input scatterlist
1901  * @dst_nents: number of segments in output scatterlist
1902  * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: kernel virtual address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
1907  */
1908 struct ablkcipher_edesc {
1909 	int src_nents;
1910 	int dst_nents;
1911 	dma_addr_t iv_dma;
1912 	int sec4_sg_bytes;
1913 	dma_addr_t sec4_sg_dma;
1914 	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
1916 };
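
/*
 * Both extended descriptors are carved out of a single allocation (see
 * the *_edesc_alloc() helpers below):
 *
 *	[ struct xxx_edesc | h/w job descriptor | sec4 link table ]
 *
 * so sec4_sg points desc_bytes past the end of the struct.
 */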
1917 
1918 static void caam_unmap(struct device *dev, struct scatterlist *src,
1919 		       struct scatterlist *dst, int src_nents,
1920 		       int dst_nents,
1921 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
1922 		       int sec4_sg_bytes)
1923 {
1924 	if (dst != src) {
1925 		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
1926 		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
1927 	} else {
1928 		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
1929 	}
1930 
1931 	if (iv_dma)
1932 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1933 	if (sec4_sg_bytes)
1934 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
1935 				 DMA_TO_DEVICE);
1936 }
1937 
1938 static void aead_unmap(struct device *dev,
1939 		       struct aead_edesc *edesc,
1940 		       struct aead_request *req)
1941 {
1942 	caam_unmap(dev, req->src, req->dst,
1943 		   edesc->src_nents, edesc->dst_nents, 0, 0,
1944 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1945 }
1946 
1947 static void ablkcipher_unmap(struct device *dev,
1948 			     struct ablkcipher_edesc *edesc,
1949 			     struct ablkcipher_request *req)
1950 {
1951 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1952 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1953 
1954 	caam_unmap(dev, req->src, req->dst,
1955 		   edesc->src_nents, edesc->dst_nents,
1956 		   edesc->iv_dma, ivsize,
1957 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
1958 }
1959 
1960 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1961 				   void *context)
1962 {
1963 	struct aead_request *req = context;
1964 	struct aead_edesc *edesc;
1965 
1966 #ifdef DEBUG
1967 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1968 #endif
1969 
1970 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1971 
1972 	if (err)
1973 		caam_jr_strstatus(jrdev, err);
1974 
1975 	aead_unmap(jrdev, edesc, req);
1976 
1977 	kfree(edesc);
1978 
1979 	aead_request_complete(req, err);
1980 }
1981 
1982 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1983 				   void *context)
1984 {
1985 	struct aead_request *req = context;
1986 	struct aead_edesc *edesc;
1987 
1988 #ifdef DEBUG
1989 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1990 #endif
1991 
1992 	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
1993 
1994 	if (err)
1995 		caam_jr_strstatus(jrdev, err);
1996 
1997 	aead_unmap(jrdev, edesc, req);
1998 
	/* if the h/w ICV (integrity) check failed, return -EBADMSG */
2002 	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
2003 		err = -EBADMSG;
2004 
2005 	kfree(edesc);
2006 
2007 	aead_request_complete(req, err);
2008 }
2009 
2010 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
2011 				   void *context)
2012 {
2013 	struct ablkcipher_request *req = context;
2014 	struct ablkcipher_edesc *edesc;
2015 #ifdef DEBUG
2016 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2017 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2018 
2019 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2020 #endif
2021 
	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
2024 
2025 	if (err)
2026 		caam_jr_strstatus(jrdev, err);
2027 
2028 #ifdef DEBUG
2029 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2030 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2031 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
2032 	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2033 		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
2034 		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
2035 #endif
2036 
2037 	ablkcipher_unmap(jrdev, edesc, req);
2038 	kfree(edesc);
2039 
2040 	ablkcipher_request_complete(req, err);
2041 }
2042 
2043 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
2044 				    void *context)
2045 {
2046 	struct ablkcipher_request *req = context;
2047 	struct ablkcipher_edesc *edesc;
2048 #ifdef DEBUG
2049 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2050 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2051 
2052 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2053 #endif
2054 
	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
2057 	if (err)
2058 		caam_jr_strstatus(jrdev, err);
2059 
2060 #ifdef DEBUG
2061 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
2062 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2063 		       ivsize, 1);
2064 	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
2065 		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
2066 		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
2067 #endif
2068 
2069 	ablkcipher_unmap(jrdev, edesc, req);
2070 	kfree(edesc);
2071 
2072 	ablkcipher_request_complete(req, err);
2073 }
2074 
2075 /*
2076  * Fill in aead job descriptor
2077  */
2078 static void init_aead_job(struct aead_request *req,
2079 			  struct aead_edesc *edesc,
2080 			  bool all_contig, bool encrypt)
2081 {
2082 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2083 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2084 	int authsize = ctx->authsize;
2085 	u32 *desc = edesc->hw_desc;
2086 	u32 out_options, in_options;
2087 	dma_addr_t dst_dma, src_dma;
2088 	int len, sec4_sg_index = 0;
2089 	dma_addr_t ptr;
2090 	u32 *sh_desc;
2091 
2092 	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
2093 	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
2094 
2095 	len = desc_len(sh_desc);
2096 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2097 
2098 	if (all_contig) {
2099 		src_dma = sg_dma_address(req->src);
2100 		in_options = 0;
2101 	} else {
2102 		src_dma = edesc->sec4_sg_dma;
2103 		sec4_sg_index += edesc->src_nents;
2104 		in_options = LDST_SGF;
2105 	}
2106 
2107 	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
2108 			  in_options);
2109 
2110 	dst_dma = src_dma;
2111 	out_options = in_options;
2112 
2113 	if (unlikely(req->src != req->dst)) {
2114 		if (!edesc->dst_nents) {
2115 			dst_dma = sg_dma_address(req->dst);
2116 		} else {
2117 			dst_dma = edesc->sec4_sg_dma +
2118 				  sec4_sg_index *
2119 				  sizeof(struct sec4_sg_entry);
2120 			out_options = LDST_SGF;
2121 		}
2122 	}
2123 
2124 	if (encrypt)
2125 		append_seq_out_ptr(desc, dst_dma,
2126 				   req->assoclen + req->cryptlen + authsize,
2127 				   out_options);
2128 	else
2129 		append_seq_out_ptr(desc, dst_dma,
2130 				   req->assoclen + req->cryptlen - authsize,
2131 				   out_options);
2132 
2133 	/* REG3 = assoclen */
2134 	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
2135 }
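
/*
 * Worked example for the link table indexing in init_aead_job(): with
 * src_nents = 2 and dst_nents = 3 the table is laid out as
 * [src0, src1, dst0, dst1, dst2], sec4_sg_index ends up at 2, and
 * dst_dma = edesc->sec4_sg_dma + 2 * sizeof(struct sec4_sg_entry),
 * i.e. it points at the first destination entry.
 */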
2136 
2137 static void init_gcm_job(struct aead_request *req,
2138 			 struct aead_edesc *edesc,
2139 			 bool all_contig, bool encrypt)
2140 {
2141 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2142 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2143 	unsigned int ivsize = crypto_aead_ivsize(aead);
2144 	u32 *desc = edesc->hw_desc;
2145 	bool generic_gcm = (ivsize == 12);
2146 	unsigned int last;
2147 
2148 	init_aead_job(req, edesc, all_contig, encrypt);
2149 
2150 	/* BUG This should not be specific to generic GCM. */
2151 	last = 0;
2152 	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
2153 		last = FIFOLD_TYPE_LAST1;
2154 
2155 	/* Read GCM IV */
2156 	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
2157 			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
2158 	/* Append Salt */
2159 	if (!generic_gcm)
2160 		append_data(desc, ctx->key + ctx->enckeylen, 4);
2161 	/* Append IV */
2162 	append_data(desc, req->iv, ivsize);
2163 	/* End of blank commands */
2164 }
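
/*
 * Note on the immediate sizes above: the FIFO LOAD always announces 12
 * IV bytes. For generic GCM the full 12-byte IV is appended; for
 * RFC4106/4543 the 4-byte salt (stored right after the key) plus the
 * 8-byte per-request IV add up to the same 12 bytes.
 */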
2165 
2166 static void init_authenc_job(struct aead_request *req,
2167 			     struct aead_edesc *edesc,
2168 			     bool all_contig, bool encrypt)
2169 {
2170 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2171 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
2172 						 struct caam_aead_alg, aead);
2173 	unsigned int ivsize = crypto_aead_ivsize(aead);
2174 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2175 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
2176 			       OP_ALG_AAI_CTR_MOD128);
2177 	const bool is_rfc3686 = alg->caam.rfc3686;
2178 	u32 *desc = edesc->hw_desc;
2179 	u32 ivoffset = 0;
2180 
2181 	/*
2182 	 * AES-CTR needs to load IV in CONTEXT1 reg
2183 	 * at an offset of 128bits (16bytes)
2184 	 * CONTEXT1[255:128] = IV
2185 	 */
2186 	if (ctr_mode)
2187 		ivoffset = 16;
2188 
2189 	/*
2190 	 * RFC3686 specific:
2191 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
2192 	 */
2193 	if (is_rfc3686)
2194 		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
2195 
2196 	init_aead_job(req, edesc, all_contig, encrypt);
2197 
2198 	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2199 		append_load_as_imm(desc, req->iv, ivsize,
2200 				   LDST_CLASS_1_CCB |
2201 				   LDST_SRCDST_BYTE_CONTEXT |
2202 				   (ivoffset << LDST_OFFSET_SHIFT));
2203 }
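
/*
 * Resulting CONTEXT1 byte layout for the counter-mode cases above (a
 * sketch derived from the ivoffset arithmetic; the counter placement
 * follows the shared-descriptor construction):
 *
 *	AES-CTR: [16..31] IV
 *	RFC3686: [16..19] nonce, [20..27] IV, [28..31] counter
 */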
2204 
2205 /*
2206  * Fill in ablkcipher job descriptor
2207  */
2208 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
2209 				struct ablkcipher_edesc *edesc,
2210 				struct ablkcipher_request *req,
2211 				bool iv_contig)
2212 {
2213 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2214 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2215 	u32 *desc = edesc->hw_desc;
2216 	u32 out_options = 0, in_options;
2217 	dma_addr_t dst_dma, src_dma;
2218 	int len, sec4_sg_index = 0;
2219 
2220 #ifdef DEBUG
2221 	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2222 					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
2223 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
2224 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2225 		       ivsize, 1);
	printk(KERN_ERR "asked=%d, nbytes=%d\n",
	       edesc->src_nents ? 100 : req->nbytes, req->nbytes);
2227 	dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
2228 		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
2229 		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
2230 #endif
2231 
2232 	len = desc_len(sh_desc);
2233 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2234 
2235 	if (iv_contig) {
2236 		src_dma = edesc->iv_dma;
2237 		in_options = 0;
2238 	} else {
2239 		src_dma = edesc->sec4_sg_dma;
2240 		sec4_sg_index += edesc->src_nents + 1;
2241 		in_options = LDST_SGF;
2242 	}
2243 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
2244 
2245 	if (likely(req->src == req->dst)) {
2246 		if (!edesc->src_nents && iv_contig) {
2247 			dst_dma = sg_dma_address(req->src);
2248 		} else {
2249 			dst_dma = edesc->sec4_sg_dma +
2250 				sizeof(struct sec4_sg_entry);
2251 			out_options = LDST_SGF;
2252 		}
2253 	} else {
2254 		if (!edesc->dst_nents) {
2255 			dst_dma = sg_dma_address(req->dst);
2256 		} else {
2257 			dst_dma = edesc->sec4_sg_dma +
2258 				sec4_sg_index * sizeof(struct sec4_sg_entry);
2259 			out_options = LDST_SGF;
2260 		}
2261 	}
2262 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
2263 }
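
/*
 * Sequence lengths set up above: the input sequence spans the IV plus
 * the payload (req->nbytes + ivsize), since the IV is fed in ahead of
 * the data (contiguously or via the link table), while the output
 * sequence is just req->nbytes.
 */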
2264 
2265 /*
2266  * Fill in ablkcipher givencrypt job descriptor
2267  */
2268 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
2269 				    struct ablkcipher_edesc *edesc,
2270 				    struct ablkcipher_request *req,
2271 				    bool iv_contig)
2272 {
2273 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2274 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2275 	u32 *desc = edesc->hw_desc;
2276 	u32 out_options, in_options;
2277 	dma_addr_t dst_dma, src_dma;
2278 	int len, sec4_sg_index = 0;
2279 
2280 #ifdef DEBUG
2281 	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2282 					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
2283 	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
2284 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
2285 		       ivsize, 1);
2286 	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
2287 		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
2288 		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
2289 #endif
2290 
2291 	len = desc_len(sh_desc);
2292 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
2293 
2294 	if (!edesc->src_nents) {
2295 		src_dma = sg_dma_address(req->src);
2296 		in_options = 0;
2297 	} else {
2298 		src_dma = edesc->sec4_sg_dma;
2299 		sec4_sg_index += edesc->src_nents;
2300 		in_options = LDST_SGF;
2301 	}
2302 	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
2303 
2304 	if (iv_contig) {
2305 		dst_dma = edesc->iv_dma;
2306 		out_options = 0;
2307 	} else {
2308 		dst_dma = edesc->sec4_sg_dma +
2309 			  sec4_sg_index * sizeof(struct sec4_sg_entry);
2310 		out_options = LDST_SGF;
2311 	}
2312 	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
2313 }
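
/*
 * Mirror image of init_ablkcipher_job(): the input sequence is only the
 * req->nbytes payload, while the output sequence is req->nbytes + ivsize
 * so the hardware-generated IV is written out in front of the
 * ciphertext.
 */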
2314 
2315 /*
2316  * allocate and map the aead extended descriptor
2317  */
2318 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
2319 					   int desc_bytes, bool *all_contig_ptr,
2320 					   bool encrypt)
2321 {
2322 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2323 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2324 	struct device *jrdev = ctx->jrdev;
2325 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2326 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2327 	int src_nents, dst_nents = 0;
2328 	struct aead_edesc *edesc;
2329 	int sgc;
2330 	bool all_contig = true;
2331 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
2332 	unsigned int authsize = ctx->authsize;
2333 
2334 	if (unlikely(req->dst != req->src)) {
2335 		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
2336 		dst_nents = sg_count(req->dst,
2337 				     req->assoclen + req->cryptlen +
2338 					(encrypt ? authsize : (-authsize)));
2339 	} else {
2340 		src_nents = sg_count(req->src,
2341 				     req->assoclen + req->cryptlen +
2342 					(encrypt ? authsize : 0));
2343 	}
2344 
2345 	/* Check if data are contiguous. */
2346 	all_contig = !src_nents;
2347 	if (!all_contig) {
2348 		src_nents = src_nents ? : 1;
2349 		sec4_sg_len = src_nents;
2350 	}
2351 
2352 	sec4_sg_len += dst_nents;
2353 
2354 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2355 
2356 	/* allocate space for base edesc and hw desc commands, link tables */
2357 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2358 			GFP_DMA | flags);
2359 	if (!edesc) {
2360 		dev_err(jrdev, "could not allocate extended descriptor\n");
2361 		return ERR_PTR(-ENOMEM);
2362 	}
2363 
2364 	if (likely(req->src == req->dst)) {
2365 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2366 				 DMA_BIDIRECTIONAL);
2367 		if (unlikely(!sgc)) {
2368 			dev_err(jrdev, "unable to map source\n");
2369 			kfree(edesc);
2370 			return ERR_PTR(-ENOMEM);
2371 		}
2372 	} else {
2373 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
2374 				 DMA_TO_DEVICE);
2375 		if (unlikely(!sgc)) {
2376 			dev_err(jrdev, "unable to map source\n");
2377 			kfree(edesc);
2378 			return ERR_PTR(-ENOMEM);
2379 		}
2380 
2381 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
2382 				 DMA_FROM_DEVICE);
2383 		if (unlikely(!sgc)) {
2384 			dev_err(jrdev, "unable to map destination\n");
2385 			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
2386 				     DMA_TO_DEVICE);
2387 			kfree(edesc);
2388 			return ERR_PTR(-ENOMEM);
2389 		}
2390 	}
2391 
2392 	edesc->src_nents = src_nents;
2393 	edesc->dst_nents = dst_nents;
2394 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
2395 			 desc_bytes;
2396 	*all_contig_ptr = all_contig;
2397 
2398 	sec4_sg_index = 0;
2399 	if (!all_contig) {
2400 		sg_to_sec4_sg_last(req->src, src_nents,
2401 			      edesc->sec4_sg + sec4_sg_index, 0);
2402 		sec4_sg_index += src_nents;
2403 	}
2404 	if (dst_nents) {
2405 		sg_to_sec4_sg_last(req->dst, dst_nents,
2406 				   edesc->sec4_sg + sec4_sg_index, 0);
2407 	}
2408 
2409 	if (!sec4_sg_bytes)
2410 		return edesc;
2411 
2412 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2413 					    sec4_sg_bytes, DMA_TO_DEVICE);
2414 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2415 		dev_err(jrdev, "unable to map S/G table\n");
2416 		aead_unmap(jrdev, edesc, req);
2417 		kfree(edesc);
2418 		return ERR_PTR(-ENOMEM);
2419 	}
2420 
2421 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2422 
2423 	return edesc;
2424 }
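
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the link table sizing computed by aead_edesc_alloc() above, with
 * scattered source entries first and destination entries after them.
 */
static inline int example_aead_sec4_sg_bytes(int src_nents, int dst_nents)
{
	int sec4_sg_len = 0;

	if (src_nents)		/* source scattered, i.e. !all_contig */
		sec4_sg_len = src_nents;
	sec4_sg_len += dst_nents;

	return sec4_sg_len * sizeof(struct sec4_sg_entry);
}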
2425 
2426 static int gcm_encrypt(struct aead_request *req)
2427 {
2428 	struct aead_edesc *edesc;
2429 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2430 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2431 	struct device *jrdev = ctx->jrdev;
2432 	bool all_contig;
2433 	u32 *desc;
2434 	int ret = 0;
2435 
2436 	/* allocate extended descriptor */
2437 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
2438 	if (IS_ERR(edesc))
2439 		return PTR_ERR(edesc);
2440 
2441 	/* Create and submit job descriptor */
2442 	init_gcm_job(req, edesc, all_contig, true);
2443 #ifdef DEBUG
2444 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2445 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2446 		       desc_bytes(edesc->hw_desc), 1);
2447 #endif
2448 
2449 	desc = edesc->hw_desc;
2450 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2451 	if (!ret) {
2452 		ret = -EINPROGRESS;
2453 	} else {
2454 		aead_unmap(jrdev, edesc, req);
2455 		kfree(edesc);
2456 	}
2457 
2458 	return ret;
2459 }
2460 
2461 static int ipsec_gcm_encrypt(struct aead_request *req)
2462 {
2463 	if (req->assoclen < 8)
2464 		return -EINVAL;
2465 
2466 	return gcm_encrypt(req);
2467 }
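
/*
 * The 8-byte assoclen floor here (and in ipsec_gcm_decrypt() below)
 * corresponds to the ESP SPI + sequence number that RFC4106/RFC4543
 * carry at the front of the associated data.
 */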
2468 
2469 static int aead_encrypt(struct aead_request *req)
2470 {
2471 	struct aead_edesc *edesc;
2472 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2473 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2474 	struct device *jrdev = ctx->jrdev;
2475 	bool all_contig;
2476 	u32 *desc;
2477 	int ret = 0;
2478 
2479 	/* allocate extended descriptor */
2480 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2481 				 &all_contig, true);
2482 	if (IS_ERR(edesc))
2483 		return PTR_ERR(edesc);
2484 
2485 	/* Create and submit job descriptor */
2486 	init_authenc_job(req, edesc, all_contig, true);
2487 #ifdef DEBUG
2488 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2489 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2490 		       desc_bytes(edesc->hw_desc), 1);
2491 #endif
2492 
2493 	desc = edesc->hw_desc;
2494 	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2495 	if (!ret) {
2496 		ret = -EINPROGRESS;
2497 	} else {
2498 		aead_unmap(jrdev, edesc, req);
2499 		kfree(edesc);
2500 	}
2501 
2502 	return ret;
2503 }
2504 
2505 static int gcm_decrypt(struct aead_request *req)
2506 {
2507 	struct aead_edesc *edesc;
2508 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2509 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2510 	struct device *jrdev = ctx->jrdev;
2511 	bool all_contig;
2512 	u32 *desc;
2513 	int ret = 0;
2514 
2515 	/* allocate extended descriptor */
2516 	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
2517 	if (IS_ERR(edesc))
2518 		return PTR_ERR(edesc);
2519 
2520 	/* Create and submit job descriptor*/
2521 	init_gcm_job(req, edesc, all_contig, false);
2522 #ifdef DEBUG
2523 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2524 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2525 		       desc_bytes(edesc->hw_desc), 1);
2526 #endif
2527 
2528 	desc = edesc->hw_desc;
2529 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2530 	if (!ret) {
2531 		ret = -EINPROGRESS;
2532 	} else {
2533 		aead_unmap(jrdev, edesc, req);
2534 		kfree(edesc);
2535 	}
2536 
2537 	return ret;
2538 }
2539 
2540 static int ipsec_gcm_decrypt(struct aead_request *req)
2541 {
2542 	if (req->assoclen < 8)
2543 		return -EINVAL;
2544 
2545 	return gcm_decrypt(req);
2546 }
2547 
2548 static int aead_decrypt(struct aead_request *req)
2549 {
2550 	struct aead_edesc *edesc;
2551 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2552 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
2553 	struct device *jrdev = ctx->jrdev;
2554 	bool all_contig;
2555 	u32 *desc;
2556 	int ret = 0;
2557 
2558 #ifdef DEBUG
2559 	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2560 					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
2561 	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
2562 		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
2563 		    req->assoclen + req->cryptlen, 1, may_sleep);
2564 #endif
2565 
2566 	/* allocate extended descriptor */
2567 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
2568 				 &all_contig, false);
2569 	if (IS_ERR(edesc))
2570 		return PTR_ERR(edesc);
2571 
2572 	/* Create and submit job descriptor*/
2573 	init_authenc_job(req, edesc, all_contig, false);
2574 #ifdef DEBUG
2575 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
2576 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2577 		       desc_bytes(edesc->hw_desc), 1);
2578 #endif
2579 
2580 	desc = edesc->hw_desc;
2581 	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2582 	if (!ret) {
2583 		ret = -EINPROGRESS;
2584 	} else {
2585 		aead_unmap(jrdev, edesc, req);
2586 		kfree(edesc);
2587 	}
2588 
2589 	return ret;
2590 }
2591 
/*
 * allocate and map the ablkcipher extended descriptor
 */
2595 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
2596 						       *req, int desc_bytes,
2597 						       bool *iv_contig_out)
2598 {
2599 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2600 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2601 	struct device *jrdev = ctx->jrdev;
2602 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2603 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2604 		       GFP_KERNEL : GFP_ATOMIC;
2605 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2606 	struct ablkcipher_edesc *edesc;
2607 	dma_addr_t iv_dma = 0;
2608 	bool iv_contig = false;
2609 	int sgc;
2610 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2611 	int sec4_sg_index;
2612 
2613 	src_nents = sg_count(req->src, req->nbytes);
2614 
2615 	if (req->dst != req->src)
2616 		dst_nents = sg_count(req->dst, req->nbytes);
2617 
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
2633 
2634 	/*
2635 	 * Check if iv can be contiguous with source and destination.
2636 	 * If so, include it. If not, create scatterlist.
2637 	 */
2638 	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
2639 		iv_contig = true;
2640 	else
2641 		src_nents = src_nents ? : 1;
2642 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2643 			sizeof(struct sec4_sg_entry);
2644 
2645 	/* allocate space for base edesc and hw desc commands, link tables */
2646 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2647 			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
2652 
2653 	edesc->src_nents = src_nents;
2654 	edesc->dst_nents = dst_nents;
2655 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2656 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2657 			 desc_bytes;
2658 
2659 	sec4_sg_index = 0;
2660 	if (!iv_contig) {
2661 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
2662 		sg_to_sec4_sg_last(req->src, src_nents,
2663 				   edesc->sec4_sg + 1, 0);
2664 		sec4_sg_index += 1 + src_nents;
2665 	}
2666 
2667 	if (dst_nents) {
2668 		sg_to_sec4_sg_last(req->dst, dst_nents,
2669 			edesc->sec4_sg + sec4_sg_index, 0);
2670 	}
2671 
2672 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2673 					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
2678 
2679 	edesc->iv_dma = iv_dma;
2680 
2681 #ifdef DEBUG
2682 	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
2683 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2684 		       sec4_sg_bytes, 1);
2685 #endif
2686 
2687 	*iv_contig_out = iv_contig;
2688 	return edesc;
2689 }
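
/*
 * Illustrative sketch (hypothetical helper): the IV contiguity test used
 * by the allocator above. The IV can be consumed without a link table
 * only when the source is a single segment and the IV's DMA mapping ends
 * exactly where that segment begins.
 */
static inline bool example_iv_contig(dma_addr_t iv_dma, int ivsize,
				     struct scatterlist *src, int src_nents)
{
	return !src_nents && iv_dma + ivsize == sg_dma_address(src);
}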
2690 
2691 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2692 {
2693 	struct ablkcipher_edesc *edesc;
2694 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2695 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2696 	struct device *jrdev = ctx->jrdev;
2697 	bool iv_contig;
2698 	u32 *desc;
2699 	int ret = 0;
2700 
2701 	/* allocate extended descriptor */
2702 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2703 				       CAAM_CMD_SZ, &iv_contig);
2704 	if (IS_ERR(edesc))
2705 		return PTR_ERR(edesc);
2706 
2707 	/* Create and submit job descriptor*/
2708 	init_ablkcipher_job(ctx->sh_desc_enc,
2709 		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
2710 #ifdef DEBUG
2711 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2712 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2713 		       desc_bytes(edesc->hw_desc), 1);
2714 #endif
2715 	desc = edesc->hw_desc;
2716 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2717 
2718 	if (!ret) {
2719 		ret = -EINPROGRESS;
2720 	} else {
2721 		ablkcipher_unmap(jrdev, edesc, req);
2722 		kfree(edesc);
2723 	}
2724 
2725 	return ret;
2726 }
2727 
2728 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2729 {
2730 	struct ablkcipher_edesc *edesc;
2731 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2732 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2733 	struct device *jrdev = ctx->jrdev;
2734 	bool iv_contig;
2735 	u32 *desc;
2736 	int ret = 0;
2737 
2738 	/* allocate extended descriptor */
2739 	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
2740 				       CAAM_CMD_SZ, &iv_contig);
2741 	if (IS_ERR(edesc))
2742 		return PTR_ERR(edesc);
2743 
2744 	/* Create and submit job descriptor*/
2745 	init_ablkcipher_job(ctx->sh_desc_dec,
2746 		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
2747 	desc = edesc->hw_desc;
2748 #ifdef DEBUG
2749 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
2750 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2751 		       desc_bytes(edesc->hw_desc), 1);
2752 #endif
2753 
2754 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
2755 	if (!ret) {
2756 		ret = -EINPROGRESS;
2757 	} else {
2758 		ablkcipher_unmap(jrdev, edesc, req);
2759 		kfree(edesc);
2760 	}
2761 
2762 	return ret;
2763 }
2764 
2765 /*
2766  * allocate and map the ablkcipher extended descriptor
2767  * for ablkcipher givencrypt
2768  */
2769 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
2770 				struct skcipher_givcrypt_request *greq,
2771 				int desc_bytes,
2772 				bool *iv_contig_out)
2773 {
2774 	struct ablkcipher_request *req = &greq->creq;
2775 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2776 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2777 	struct device *jrdev = ctx->jrdev;
2778 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2779 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
2780 		       GFP_KERNEL : GFP_ATOMIC;
2781 	int src_nents, dst_nents = 0, sec4_sg_bytes;
2782 	struct ablkcipher_edesc *edesc;
2783 	dma_addr_t iv_dma = 0;
2784 	bool iv_contig = false;
2785 	int sgc;
2786 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
2787 	int sec4_sg_index;
2788 
2789 	src_nents = sg_count(req->src, req->nbytes);
2790 
2791 	if (unlikely(req->dst != req->src))
2792 		dst_nents = sg_count(req->dst, req->nbytes);
2793 
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
2813 
2814 	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
2815 		iv_contig = true;
2816 	else
2817 		dst_nents = dst_nents ? : 1;
2818 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
2819 			sizeof(struct sec4_sg_entry);
2820 
2821 	/* allocate space for base edesc and hw desc commands, link tables */
2822 	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
2823 			GFP_DMA | flags);
2824 	if (!edesc) {
2825 		dev_err(jrdev, "could not allocate extended descriptor\n");
2826 		return ERR_PTR(-ENOMEM);
2827 	}
2828 
2829 	edesc->src_nents = src_nents;
2830 	edesc->dst_nents = dst_nents;
2831 	edesc->sec4_sg_bytes = sec4_sg_bytes;
2832 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
2833 			 desc_bytes;
2834 
2835 	sec4_sg_index = 0;
2836 	if (src_nents) {
2837 		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
2838 		sec4_sg_index += src_nents;
2839 	}
2840 
2841 	if (!iv_contig) {
2842 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
2843 				   iv_dma, ivsize, 0);
2844 		sec4_sg_index += 1;
2845 		sg_to_sec4_sg_last(req->dst, dst_nents,
2846 				   edesc->sec4_sg + sec4_sg_index, 0);
2847 	}
2848 
2849 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
2850 					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
2855 	edesc->iv_dma = iv_dma;
2856 
2857 #ifdef DEBUG
2858 	print_hex_dump(KERN_ERR,
2859 		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
2860 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
2861 		       sec4_sg_bytes, 1);
2862 #endif
2863 
2864 	*iv_contig_out = iv_contig;
2865 	return edesc;
2866 }
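
/*
 * Link table ordering differs between the two allocators: the plain
 * ablkcipher table is [IV, src...] (plus dst entries when dst != src,
 * since the IV is part of the input), while the givencrypt table built
 * above is [src..., IV, dst...], because the generated IV belongs to
 * the output sequence instead.
 */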
2867 
2868 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
2869 {
2870 	struct ablkcipher_request *req = &creq->creq;
2871 	struct ablkcipher_edesc *edesc;
2872 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
2873 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
2874 	struct device *jrdev = ctx->jrdev;
2875 	bool iv_contig;
2876 	u32 *desc;
2877 	int ret = 0;
2878 
2879 	/* allocate extended descriptor */
2880 	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
2881 				       CAAM_CMD_SZ, &iv_contig);
2882 	if (IS_ERR(edesc))
2883 		return PTR_ERR(edesc);
2884 
2885 	/* Create and submit job descriptor*/
2886 	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
2887 				edesc, req, iv_contig);
2888 #ifdef DEBUG
2889 	print_hex_dump(KERN_ERR,
2890 		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
2891 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
2892 		       desc_bytes(edesc->hw_desc), 1);
2893 #endif
2894 	desc = edesc->hw_desc;
2895 	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
2896 
2897 	if (!ret) {
2898 		ret = -EINPROGRESS;
2899 	} else {
2900 		ablkcipher_unmap(jrdev, edesc, req);
2901 		kfree(edesc);
2902 	}
2903 
2904 	return ret;
2905 }
2906 
#define template_ablkcipher	template_u.ablkcipher
2909 struct caam_alg_template {
2910 	char name[CRYPTO_MAX_ALG_NAME];
2911 	char driver_name[CRYPTO_MAX_ALG_NAME];
2912 	unsigned int blocksize;
2913 	u32 type;
2914 	union {
2915 		struct ablkcipher_alg ablkcipher;
2916 	} template_u;
2917 	u32 class1_alg_type;
2918 	u32 class2_alg_type;
2919 	u32 alg_op;
2920 };
2921 
2922 static struct caam_alg_template driver_algs[] = {
2923 	/* ablkcipher descriptor */
2924 	{
2925 		.name = "cbc(aes)",
2926 		.driver_name = "cbc-aes-caam",
2927 		.blocksize = AES_BLOCK_SIZE,
2928 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2929 		.template_ablkcipher = {
2930 			.setkey = ablkcipher_setkey,
2931 			.encrypt = ablkcipher_encrypt,
2932 			.decrypt = ablkcipher_decrypt,
2933 			.givencrypt = ablkcipher_givencrypt,
2934 			.geniv = "<built-in>",
2935 			.min_keysize = AES_MIN_KEY_SIZE,
2936 			.max_keysize = AES_MAX_KEY_SIZE,
2937 			.ivsize = AES_BLOCK_SIZE,
2938 			},
2939 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2940 	},
2941 	{
2942 		.name = "cbc(des3_ede)",
2943 		.driver_name = "cbc-3des-caam",
2944 		.blocksize = DES3_EDE_BLOCK_SIZE,
2945 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2946 		.template_ablkcipher = {
2947 			.setkey = ablkcipher_setkey,
2948 			.encrypt = ablkcipher_encrypt,
2949 			.decrypt = ablkcipher_decrypt,
2950 			.givencrypt = ablkcipher_givencrypt,
2951 			.geniv = "<built-in>",
2952 			.min_keysize = DES3_EDE_KEY_SIZE,
2953 			.max_keysize = DES3_EDE_KEY_SIZE,
2954 			.ivsize = DES3_EDE_BLOCK_SIZE,
2955 			},
2956 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2957 	},
2958 	{
2959 		.name = "cbc(des)",
2960 		.driver_name = "cbc-des-caam",
2961 		.blocksize = DES_BLOCK_SIZE,
2962 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2963 		.template_ablkcipher = {
2964 			.setkey = ablkcipher_setkey,
2965 			.encrypt = ablkcipher_encrypt,
2966 			.decrypt = ablkcipher_decrypt,
2967 			.givencrypt = ablkcipher_givencrypt,
2968 			.geniv = "<built-in>",
2969 			.min_keysize = DES_KEY_SIZE,
2970 			.max_keysize = DES_KEY_SIZE,
2971 			.ivsize = DES_BLOCK_SIZE,
2972 			},
2973 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2974 	},
2975 	{
2976 		.name = "ctr(aes)",
2977 		.driver_name = "ctr-aes-caam",
2978 		.blocksize = 1,
2979 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2980 		.template_ablkcipher = {
2981 			.setkey = ablkcipher_setkey,
2982 			.encrypt = ablkcipher_encrypt,
2983 			.decrypt = ablkcipher_decrypt,
2984 			.geniv = "chainiv",
2985 			.min_keysize = AES_MIN_KEY_SIZE,
2986 			.max_keysize = AES_MAX_KEY_SIZE,
2987 			.ivsize = AES_BLOCK_SIZE,
2988 			},
2989 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
2990 	},
2991 	{
2992 		.name = "rfc3686(ctr(aes))",
2993 		.driver_name = "rfc3686-ctr-aes-caam",
2994 		.blocksize = 1,
2995 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
2996 		.template_ablkcipher = {
2997 			.setkey = ablkcipher_setkey,
2998 			.encrypt = ablkcipher_encrypt,
2999 			.decrypt = ablkcipher_decrypt,
3000 			.givencrypt = ablkcipher_givencrypt,
3001 			.geniv = "<built-in>",
3002 			.min_keysize = AES_MIN_KEY_SIZE +
3003 				       CTR_RFC3686_NONCE_SIZE,
3004 			.max_keysize = AES_MAX_KEY_SIZE +
3005 				       CTR_RFC3686_NONCE_SIZE,
3006 			.ivsize = CTR_RFC3686_IV_SIZE,
3007 			},
3008 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
3009 	},
3010 	{
3011 		.name = "xts(aes)",
3012 		.driver_name = "xts-aes-caam",
3013 		.blocksize = AES_BLOCK_SIZE,
3014 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3015 		.template_ablkcipher = {
3016 			.setkey = xts_ablkcipher_setkey,
3017 			.encrypt = ablkcipher_encrypt,
3018 			.decrypt = ablkcipher_decrypt,
3019 			.geniv = "eseqiv",
3020 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
3021 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
3022 			.ivsize = AES_BLOCK_SIZE,
3023 			},
3024 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
3025 	},
3026 };
3027 
3028 static struct caam_aead_alg driver_aeads[] = {
3029 	{
3030 		.aead = {
3031 			.base = {
3032 				.cra_name = "rfc4106(gcm(aes))",
3033 				.cra_driver_name = "rfc4106-gcm-aes-caam",
3034 				.cra_blocksize = 1,
3035 			},
3036 			.setkey = rfc4106_setkey,
3037 			.setauthsize = rfc4106_setauthsize,
3038 			.encrypt = ipsec_gcm_encrypt,
3039 			.decrypt = ipsec_gcm_decrypt,
3040 			.ivsize = 8,
3041 			.maxauthsize = AES_BLOCK_SIZE,
3042 		},
3043 		.caam = {
3044 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3045 		},
3046 	},
3047 	{
3048 		.aead = {
3049 			.base = {
3050 				.cra_name = "rfc4543(gcm(aes))",
3051 				.cra_driver_name = "rfc4543-gcm-aes-caam",
3052 				.cra_blocksize = 1,
3053 			},
3054 			.setkey = rfc4543_setkey,
3055 			.setauthsize = rfc4543_setauthsize,
3056 			.encrypt = ipsec_gcm_encrypt,
3057 			.decrypt = ipsec_gcm_decrypt,
3058 			.ivsize = 8,
3059 			.maxauthsize = AES_BLOCK_SIZE,
3060 		},
3061 		.caam = {
3062 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3063 		},
3064 	},
3065 	/* Galois Counter Mode */
3066 	{
3067 		.aead = {
3068 			.base = {
3069 				.cra_name = "gcm(aes)",
3070 				.cra_driver_name = "gcm-aes-caam",
3071 				.cra_blocksize = 1,
3072 			},
3073 			.setkey = gcm_setkey,
3074 			.setauthsize = gcm_setauthsize,
3075 			.encrypt = gcm_encrypt,
3076 			.decrypt = gcm_decrypt,
3077 			.ivsize = 12,
3078 			.maxauthsize = AES_BLOCK_SIZE,
3079 		},
3080 		.caam = {
3081 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
3082 		},
3083 	},
3084 	/* single-pass ipsec_esp descriptor */
3085 	{
3086 		.aead = {
3087 			.base = {
3088 				.cra_name = "authenc(hmac(md5),"
3089 					    "ecb(cipher_null))",
3090 				.cra_driver_name = "authenc-hmac-md5-"
3091 						   "ecb-cipher_null-caam",
3092 				.cra_blocksize = NULL_BLOCK_SIZE,
3093 			},
3094 			.setkey = aead_setkey,
3095 			.setauthsize = aead_setauthsize,
3096 			.encrypt = aead_encrypt,
3097 			.decrypt = aead_decrypt,
3098 			.ivsize = NULL_IV_SIZE,
3099 			.maxauthsize = MD5_DIGEST_SIZE,
3100 		},
3101 		.caam = {
3102 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3103 					   OP_ALG_AAI_HMAC_PRECOMP,
3104 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3105 		},
3106 	},
3107 	{
3108 		.aead = {
3109 			.base = {
3110 				.cra_name = "authenc(hmac(sha1),"
3111 					    "ecb(cipher_null))",
3112 				.cra_driver_name = "authenc-hmac-sha1-"
3113 						   "ecb-cipher_null-caam",
3114 				.cra_blocksize = NULL_BLOCK_SIZE,
3115 			},
3116 			.setkey = aead_setkey,
3117 			.setauthsize = aead_setauthsize,
3118 			.encrypt = aead_encrypt,
3119 			.decrypt = aead_decrypt,
3120 			.ivsize = NULL_IV_SIZE,
3121 			.maxauthsize = SHA1_DIGEST_SIZE,
3122 		},
3123 		.caam = {
3124 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3125 					   OP_ALG_AAI_HMAC_PRECOMP,
3126 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3127 		},
3128 	},
3129 	{
3130 		.aead = {
3131 			.base = {
3132 				.cra_name = "authenc(hmac(sha224),"
3133 					    "ecb(cipher_null))",
3134 				.cra_driver_name = "authenc-hmac-sha224-"
3135 						   "ecb-cipher_null-caam",
3136 				.cra_blocksize = NULL_BLOCK_SIZE,
3137 			},
3138 			.setkey = aead_setkey,
3139 			.setauthsize = aead_setauthsize,
3140 			.encrypt = aead_encrypt,
3141 			.decrypt = aead_decrypt,
3142 			.ivsize = NULL_IV_SIZE,
3143 			.maxauthsize = SHA224_DIGEST_SIZE,
3144 		},
3145 		.caam = {
3146 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3147 					   OP_ALG_AAI_HMAC_PRECOMP,
3148 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3149 		},
3150 	},
3151 	{
3152 		.aead = {
3153 			.base = {
3154 				.cra_name = "authenc(hmac(sha256),"
3155 					    "ecb(cipher_null))",
3156 				.cra_driver_name = "authenc-hmac-sha256-"
3157 						   "ecb-cipher_null-caam",
3158 				.cra_blocksize = NULL_BLOCK_SIZE,
3159 			},
3160 			.setkey = aead_setkey,
3161 			.setauthsize = aead_setauthsize,
3162 			.encrypt = aead_encrypt,
3163 			.decrypt = aead_decrypt,
3164 			.ivsize = NULL_IV_SIZE,
3165 			.maxauthsize = SHA256_DIGEST_SIZE,
3166 		},
3167 		.caam = {
3168 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3169 					   OP_ALG_AAI_HMAC_PRECOMP,
3170 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3171 		},
3172 	},
3173 	{
3174 		.aead = {
3175 			.base = {
3176 				.cra_name = "authenc(hmac(sha384),"
3177 					    "ecb(cipher_null))",
3178 				.cra_driver_name = "authenc-hmac-sha384-"
3179 						   "ecb-cipher_null-caam",
3180 				.cra_blocksize = NULL_BLOCK_SIZE,
3181 			},
3182 			.setkey = aead_setkey,
3183 			.setauthsize = aead_setauthsize,
3184 			.encrypt = aead_encrypt,
3185 			.decrypt = aead_decrypt,
3186 			.ivsize = NULL_IV_SIZE,
3187 			.maxauthsize = SHA384_DIGEST_SIZE,
3188 		},
3189 		.caam = {
3190 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3191 					   OP_ALG_AAI_HMAC_PRECOMP,
3192 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3193 		},
3194 	},
3195 	{
3196 		.aead = {
3197 			.base = {
3198 				.cra_name = "authenc(hmac(sha512),"
3199 					    "ecb(cipher_null))",
3200 				.cra_driver_name = "authenc-hmac-sha512-"
3201 						   "ecb-cipher_null-caam",
3202 				.cra_blocksize = NULL_BLOCK_SIZE,
3203 			},
3204 			.setkey = aead_setkey,
3205 			.setauthsize = aead_setauthsize,
3206 			.encrypt = aead_encrypt,
3207 			.decrypt = aead_decrypt,
3208 			.ivsize = NULL_IV_SIZE,
3209 			.maxauthsize = SHA512_DIGEST_SIZE,
3210 		},
3211 		.caam = {
3212 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3213 					   OP_ALG_AAI_HMAC_PRECOMP,
3214 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3215 		},
3216 	},
3217 	{
3218 		.aead = {
3219 			.base = {
3220 				.cra_name = "authenc(hmac(md5),cbc(aes))",
3221 				.cra_driver_name = "authenc-hmac-md5-"
3222 						   "cbc-aes-caam",
3223 				.cra_blocksize = AES_BLOCK_SIZE,
3224 			},
3225 			.setkey = aead_setkey,
3226 			.setauthsize = aead_setauthsize,
3227 			.encrypt = aead_encrypt,
3228 			.decrypt = aead_decrypt,
3229 			.ivsize = AES_BLOCK_SIZE,
3230 			.maxauthsize = MD5_DIGEST_SIZE,
3231 		},
3232 		.caam = {
3233 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3234 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3235 					   OP_ALG_AAI_HMAC_PRECOMP,
3236 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3237 		},
3238 	},
3239 	{
3240 		.aead = {
3241 			.base = {
3242 				.cra_name = "echainiv(authenc(hmac(md5),"
3243 					    "cbc(aes)))",
3244 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3245 						   "cbc-aes-caam",
3246 				.cra_blocksize = AES_BLOCK_SIZE,
3247 			},
3248 			.setkey = aead_setkey,
3249 			.setauthsize = aead_setauthsize,
3250 			.encrypt = aead_encrypt,
3251 			.decrypt = aead_decrypt,
3252 			.ivsize = AES_BLOCK_SIZE,
3253 			.maxauthsize = MD5_DIGEST_SIZE,
3254 		},
3255 		.caam = {
3256 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3257 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3258 					   OP_ALG_AAI_HMAC_PRECOMP,
3259 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3260 			.geniv = true,
3261 		},
3262 	},
3263 	{
3264 		.aead = {
3265 			.base = {
3266 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3267 				.cra_driver_name = "authenc-hmac-sha1-"
3268 						   "cbc-aes-caam",
3269 				.cra_blocksize = AES_BLOCK_SIZE,
3270 			},
3271 			.setkey = aead_setkey,
3272 			.setauthsize = aead_setauthsize,
3273 			.encrypt = aead_encrypt,
3274 			.decrypt = aead_decrypt,
3275 			.ivsize = AES_BLOCK_SIZE,
3276 			.maxauthsize = SHA1_DIGEST_SIZE,
3277 		},
3278 		.caam = {
3279 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3280 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3281 					   OP_ALG_AAI_HMAC_PRECOMP,
3282 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3283 		},
3284 	},
3285 	{
3286 		.aead = {
3287 			.base = {
3288 				.cra_name = "echainiv(authenc(hmac(sha1),"
3289 					    "cbc(aes)))",
3290 				.cra_driver_name = "echainiv-authenc-"
3291 						   "hmac-sha1-cbc-aes-caam",
3292 				.cra_blocksize = AES_BLOCK_SIZE,
3293 			},
3294 			.setkey = aead_setkey,
3295 			.setauthsize = aead_setauthsize,
3296 			.encrypt = aead_encrypt,
3297 			.decrypt = aead_decrypt,
3298 			.ivsize = AES_BLOCK_SIZE,
3299 			.maxauthsize = SHA1_DIGEST_SIZE,
3300 		},
3301 		.caam = {
3302 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3303 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3304 					   OP_ALG_AAI_HMAC_PRECOMP,
3305 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3306 			.geniv = true,
3307 		},
3308 	},
3309 	{
3310 		.aead = {
3311 			.base = {
3312 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
3313 				.cra_driver_name = "authenc-hmac-sha224-"
3314 						   "cbc-aes-caam",
3315 				.cra_blocksize = AES_BLOCK_SIZE,
3316 			},
3317 			.setkey = aead_setkey,
3318 			.setauthsize = aead_setauthsize,
3319 			.encrypt = aead_encrypt,
3320 			.decrypt = aead_decrypt,
3321 			.ivsize = AES_BLOCK_SIZE,
3322 			.maxauthsize = SHA224_DIGEST_SIZE,
3323 		},
3324 		.caam = {
3325 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3326 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3327 					   OP_ALG_AAI_HMAC_PRECOMP,
3328 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3329 		},
3330 	},
3331 	{
3332 		.aead = {
3333 			.base = {
3334 				.cra_name = "echainiv(authenc(hmac(sha224),"
3335 					    "cbc(aes)))",
3336 				.cra_driver_name = "echainiv-authenc-"
3337 						   "hmac-sha224-cbc-aes-caam",
3338 				.cra_blocksize = AES_BLOCK_SIZE,
3339 			},
3340 			.setkey = aead_setkey,
3341 			.setauthsize = aead_setauthsize,
3342 			.encrypt = aead_encrypt,
3343 			.decrypt = aead_decrypt,
3344 			.ivsize = AES_BLOCK_SIZE,
3345 			.maxauthsize = SHA224_DIGEST_SIZE,
3346 		},
3347 		.caam = {
3348 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3349 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3350 					   OP_ALG_AAI_HMAC_PRECOMP,
3351 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3352 			.geniv = true,
3353 		},
3354 	},
3355 	{
3356 		.aead = {
3357 			.base = {
3358 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
3359 				.cra_driver_name = "authenc-hmac-sha256-"
3360 						   "cbc-aes-caam",
3361 				.cra_blocksize = AES_BLOCK_SIZE,
3362 			},
3363 			.setkey = aead_setkey,
3364 			.setauthsize = aead_setauthsize,
3365 			.encrypt = aead_encrypt,
3366 			.decrypt = aead_decrypt,
3367 			.ivsize = AES_BLOCK_SIZE,
3368 			.maxauthsize = SHA256_DIGEST_SIZE,
3369 		},
3370 		.caam = {
3371 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3372 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3373 					   OP_ALG_AAI_HMAC_PRECOMP,
3374 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3375 		},
3376 	},
3377 	{
3378 		.aead = {
3379 			.base = {
3380 				.cra_name = "echainiv(authenc(hmac(sha256),"
3381 					    "cbc(aes)))",
3382 				.cra_driver_name = "echainiv-authenc-"
3383 						   "hmac-sha256-cbc-aes-caam",
3384 				.cra_blocksize = AES_BLOCK_SIZE,
3385 			},
3386 			.setkey = aead_setkey,
3387 			.setauthsize = aead_setauthsize,
3388 			.encrypt = aead_encrypt,
3389 			.decrypt = aead_decrypt,
3390 			.ivsize = AES_BLOCK_SIZE,
3391 			.maxauthsize = SHA256_DIGEST_SIZE,
3392 		},
3393 		.caam = {
3394 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3395 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3396 					   OP_ALG_AAI_HMAC_PRECOMP,
3397 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3398 			.geniv = true,
3399 		},
3400 	},
3401 	{
3402 		.aead = {
3403 			.base = {
3404 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
3405 				.cra_driver_name = "authenc-hmac-sha384-"
3406 						   "cbc-aes-caam",
3407 				.cra_blocksize = AES_BLOCK_SIZE,
3408 			},
3409 			.setkey = aead_setkey,
3410 			.setauthsize = aead_setauthsize,
3411 			.encrypt = aead_encrypt,
3412 			.decrypt = aead_decrypt,
3413 			.ivsize = AES_BLOCK_SIZE,
3414 			.maxauthsize = SHA384_DIGEST_SIZE,
3415 		},
3416 		.caam = {
3417 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3418 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3419 					   OP_ALG_AAI_HMAC_PRECOMP,
3420 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3421 		},
3422 	},
3423 	{
3424 		.aead = {
3425 			.base = {
3426 				.cra_name = "echainiv(authenc(hmac(sha384),"
3427 					    "cbc(aes)))",
3428 				.cra_driver_name = "echainiv-authenc-"
3429 						   "hmac-sha384-cbc-aes-caam",
3430 				.cra_blocksize = AES_BLOCK_SIZE,
3431 			},
3432 			.setkey = aead_setkey,
3433 			.setauthsize = aead_setauthsize,
3434 			.encrypt = aead_encrypt,
3435 			.decrypt = aead_decrypt,
3436 			.ivsize = AES_BLOCK_SIZE,
3437 			.maxauthsize = SHA384_DIGEST_SIZE,
3438 		},
3439 		.caam = {
3440 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3441 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3442 					   OP_ALG_AAI_HMAC_PRECOMP,
3443 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3444 			.geniv = true,
3445 		},
3446 	},
3447 	{
3448 		.aead = {
3449 			.base = {
3450 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
3451 				.cra_driver_name = "authenc-hmac-sha512-"
3452 						   "cbc-aes-caam",
3453 				.cra_blocksize = AES_BLOCK_SIZE,
3454 			},
3455 			.setkey = aead_setkey,
3456 			.setauthsize = aead_setauthsize,
3457 			.encrypt = aead_encrypt,
3458 			.decrypt = aead_decrypt,
3459 			.ivsize = AES_BLOCK_SIZE,
3460 			.maxauthsize = SHA512_DIGEST_SIZE,
3461 		},
3462 		.caam = {
3463 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3464 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3465 					   OP_ALG_AAI_HMAC_PRECOMP,
3466 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3467 		},
3468 	},
3469 	{
3470 		.aead = {
3471 			.base = {
3472 				.cra_name = "echainiv(authenc(hmac(sha512),"
3473 					    "cbc(aes)))",
3474 				.cra_driver_name = "echainiv-authenc-"
3475 						   "hmac-sha512-cbc-aes-caam",
3476 				.cra_blocksize = AES_BLOCK_SIZE,
3477 			},
3478 			.setkey = aead_setkey,
3479 			.setauthsize = aead_setauthsize,
3480 			.encrypt = aead_encrypt,
3481 			.decrypt = aead_decrypt,
3482 			.ivsize = AES_BLOCK_SIZE,
3483 			.maxauthsize = SHA512_DIGEST_SIZE,
3484 		},
3485 		.caam = {
3486 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
3487 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3488 					   OP_ALG_AAI_HMAC_PRECOMP,
3489 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3490 			.geniv = true,
3491 		},
3492 	},
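	/* 3DES-EDE-CBC authenc variants, plain and echainiv */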
3493 	{
3494 		.aead = {
3495 			.base = {
3496 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3497 				.cra_driver_name = "authenc-hmac-md5-"
3498 						   "cbc-des3_ede-caam",
3499 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3500 			},
3501 			.setkey = aead_setkey,
3502 			.setauthsize = aead_setauthsize,
3503 			.encrypt = aead_encrypt,
3504 			.decrypt = aead_decrypt,
3505 			.ivsize = DES3_EDE_BLOCK_SIZE,
3506 			.maxauthsize = MD5_DIGEST_SIZE,
3507 		},
3508 		.caam = {
3509 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3510 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3511 					   OP_ALG_AAI_HMAC_PRECOMP,
3512 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3513 		}
3514 	},
3515 	{
3516 		.aead = {
3517 			.base = {
3518 				.cra_name = "echainiv(authenc(hmac(md5),"
3519 					    "cbc(des3_ede)))",
3520 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3521 						   "cbc-des3_ede-caam",
3522 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3523 			},
3524 			.setkey = aead_setkey,
3525 			.setauthsize = aead_setauthsize,
3526 			.encrypt = aead_encrypt,
3527 			.decrypt = aead_decrypt,
3528 			.ivsize = DES3_EDE_BLOCK_SIZE,
3529 			.maxauthsize = MD5_DIGEST_SIZE,
3530 		},
3531 		.caam = {
3532 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3533 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3534 					   OP_ALG_AAI_HMAC_PRECOMP,
3535 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3536 			.geniv = true,
3537 		}
3538 	},
3539 	{
3540 		.aead = {
3541 			.base = {
3542 				.cra_name = "authenc(hmac(sha1),"
3543 					    "cbc(des3_ede))",
3544 				.cra_driver_name = "authenc-hmac-sha1-"
3545 						   "cbc-des3_ede-caam",
3546 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3547 			},
3548 			.setkey = aead_setkey,
3549 			.setauthsize = aead_setauthsize,
3550 			.encrypt = aead_encrypt,
3551 			.decrypt = aead_decrypt,
3552 			.ivsize = DES3_EDE_BLOCK_SIZE,
3553 			.maxauthsize = SHA1_DIGEST_SIZE,
3554 		},
3555 		.caam = {
3556 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3557 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3558 					   OP_ALG_AAI_HMAC_PRECOMP,
3559 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3560 		},
3561 	},
3562 	{
3563 		.aead = {
3564 			.base = {
3565 				.cra_name = "echainiv(authenc(hmac(sha1),"
3566 					    "cbc(des3_ede)))",
3567 				.cra_driver_name = "echainiv-authenc-"
3568 						   "hmac-sha1-"
3569 						   "cbc-des3_ede-caam",
3570 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3571 			},
3572 			.setkey = aead_setkey,
3573 			.setauthsize = aead_setauthsize,
3574 			.encrypt = aead_encrypt,
3575 			.decrypt = aead_decrypt,
3576 			.ivsize = DES3_EDE_BLOCK_SIZE,
3577 			.maxauthsize = SHA1_DIGEST_SIZE,
3578 		},
3579 		.caam = {
3580 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3581 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3582 					   OP_ALG_AAI_HMAC_PRECOMP,
3583 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3584 			.geniv = true,
3585 		},
3586 	},
3587 	{
3588 		.aead = {
3589 			.base = {
3590 				.cra_name = "authenc(hmac(sha224),"
3591 					    "cbc(des3_ede))",
3592 				.cra_driver_name = "authenc-hmac-sha224-"
3593 						   "cbc-des3_ede-caam",
3594 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3595 			},
3596 			.setkey = aead_setkey,
3597 			.setauthsize = aead_setauthsize,
3598 			.encrypt = aead_encrypt,
3599 			.decrypt = aead_decrypt,
3600 			.ivsize = DES3_EDE_BLOCK_SIZE,
3601 			.maxauthsize = SHA224_DIGEST_SIZE,
3602 		},
3603 		.caam = {
3604 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3605 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3606 					   OP_ALG_AAI_HMAC_PRECOMP,
3607 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3608 		},
3609 	},
3610 	{
3611 		.aead = {
3612 			.base = {
3613 				.cra_name = "echainiv(authenc(hmac(sha224),"
3614 					    "cbc(des3_ede)))",
3615 				.cra_driver_name = "echainiv-authenc-"
3616 						   "hmac-sha224-"
3617 						   "cbc-des3_ede-caam",
3618 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3619 			},
3620 			.setkey = aead_setkey,
3621 			.setauthsize = aead_setauthsize,
3622 			.encrypt = aead_encrypt,
3623 			.decrypt = aead_decrypt,
3624 			.ivsize = DES3_EDE_BLOCK_SIZE,
3625 			.maxauthsize = SHA224_DIGEST_SIZE,
3626 		},
3627 		.caam = {
3628 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3629 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3630 					   OP_ALG_AAI_HMAC_PRECOMP,
3631 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3632 			.geniv = true,
3633 		},
3634 	},
3635 	{
3636 		.aead = {
3637 			.base = {
3638 				.cra_name = "authenc(hmac(sha256),"
3639 					    "cbc(des3_ede))",
3640 				.cra_driver_name = "authenc-hmac-sha256-"
3641 						   "cbc-des3_ede-caam",
3642 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3643 			},
3644 			.setkey = aead_setkey,
3645 			.setauthsize = aead_setauthsize,
3646 			.encrypt = aead_encrypt,
3647 			.decrypt = aead_decrypt,
3648 			.ivsize = DES3_EDE_BLOCK_SIZE,
3649 			.maxauthsize = SHA256_DIGEST_SIZE,
3650 		},
3651 		.caam = {
3652 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3653 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3654 					   OP_ALG_AAI_HMAC_PRECOMP,
3655 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3656 		},
3657 	},
3658 	{
3659 		.aead = {
3660 			.base = {
3661 				.cra_name = "echainiv(authenc(hmac(sha256),"
3662 					    "cbc(des3_ede)))",
3663 				.cra_driver_name = "echainiv-authenc-"
3664 						   "hmac-sha256-"
3665 						   "cbc-des3_ede-caam",
3666 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3667 			},
3668 			.setkey = aead_setkey,
3669 			.setauthsize = aead_setauthsize,
3670 			.encrypt = aead_encrypt,
3671 			.decrypt = aead_decrypt,
3672 			.ivsize = DES3_EDE_BLOCK_SIZE,
3673 			.maxauthsize = SHA256_DIGEST_SIZE,
3674 		},
3675 		.caam = {
3676 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3677 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3678 					   OP_ALG_AAI_HMAC_PRECOMP,
3679 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3680 			.geniv = true,
3681 		},
3682 	},
3683 	{
3684 		.aead = {
3685 			.base = {
3686 				.cra_name = "authenc(hmac(sha384),"
3687 					    "cbc(des3_ede))",
3688 				.cra_driver_name = "authenc-hmac-sha384-"
3689 						   "cbc-des3_ede-caam",
3690 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3691 			},
3692 			.setkey = aead_setkey,
3693 			.setauthsize = aead_setauthsize,
3694 			.encrypt = aead_encrypt,
3695 			.decrypt = aead_decrypt,
3696 			.ivsize = DES3_EDE_BLOCK_SIZE,
3697 			.maxauthsize = SHA384_DIGEST_SIZE,
3698 		},
3699 		.caam = {
3700 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3701 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3702 					   OP_ALG_AAI_HMAC_PRECOMP,
3703 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3704 		},
3705 	},
3706 	{
3707 		.aead = {
3708 			.base = {
3709 				.cra_name = "echainiv(authenc(hmac(sha384),"
3710 					    "cbc(des3_ede)))",
3711 				.cra_driver_name = "echainiv-authenc-"
3712 						   "hmac-sha384-"
3713 						   "cbc-des3_ede-caam",
3714 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3715 			},
3716 			.setkey = aead_setkey,
3717 			.setauthsize = aead_setauthsize,
3718 			.encrypt = aead_encrypt,
3719 			.decrypt = aead_decrypt,
3720 			.ivsize = DES3_EDE_BLOCK_SIZE,
3721 			.maxauthsize = SHA384_DIGEST_SIZE,
3722 		},
3723 		.caam = {
3724 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3725 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3726 					   OP_ALG_AAI_HMAC_PRECOMP,
3727 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3728 			.geniv = true,
3729 		},
3730 	},
3731 	{
3732 		.aead = {
3733 			.base = {
3734 				.cra_name = "authenc(hmac(sha512),"
3735 					    "cbc(des3_ede))",
3736 				.cra_driver_name = "authenc-hmac-sha512-"
3737 						   "cbc-des3_ede-caam",
3738 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3739 			},
3740 			.setkey = aead_setkey,
3741 			.setauthsize = aead_setauthsize,
3742 			.encrypt = aead_encrypt,
3743 			.decrypt = aead_decrypt,
3744 			.ivsize = DES3_EDE_BLOCK_SIZE,
3745 			.maxauthsize = SHA512_DIGEST_SIZE,
3746 		},
3747 		.caam = {
3748 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3749 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3750 					   OP_ALG_AAI_HMAC_PRECOMP,
3751 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3752 		},
3753 	},
3754 	{
3755 		.aead = {
3756 			.base = {
3757 				.cra_name = "echainiv(authenc(hmac(sha512),"
3758 					    "cbc(des3_ede)))",
3759 				.cra_driver_name = "echainiv-authenc-"
3760 						   "hmac-sha512-"
3761 						   "cbc-des3_ede-caam",
3762 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3763 			},
3764 			.setkey = aead_setkey,
3765 			.setauthsize = aead_setauthsize,
3766 			.encrypt = aead_encrypt,
3767 			.decrypt = aead_decrypt,
3768 			.ivsize = DES3_EDE_BLOCK_SIZE,
3769 			.maxauthsize = SHA512_DIGEST_SIZE,
3770 		},
3771 		.caam = {
3772 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
3773 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3774 					   OP_ALG_AAI_HMAC_PRECOMP,
3775 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
3776 			.geniv = true,
3777 		},
3778 	},
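	/* single-DES-CBC authenc variants, plain and echainiv */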
3779 	{
3780 		.aead = {
3781 			.base = {
3782 				.cra_name = "authenc(hmac(md5),cbc(des))",
3783 				.cra_driver_name = "authenc-hmac-md5-"
3784 						   "cbc-des-caam",
3785 				.cra_blocksize = DES_BLOCK_SIZE,
3786 			},
3787 			.setkey = aead_setkey,
3788 			.setauthsize = aead_setauthsize,
3789 			.encrypt = aead_encrypt,
3790 			.decrypt = aead_decrypt,
3791 			.ivsize = DES_BLOCK_SIZE,
3792 			.maxauthsize = MD5_DIGEST_SIZE,
3793 		},
3794 		.caam = {
3795 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3796 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3797 					   OP_ALG_AAI_HMAC_PRECOMP,
3798 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3799 		},
3800 	},
3801 	{
3802 		.aead = {
3803 			.base = {
3804 				.cra_name = "echainiv(authenc(hmac(md5),"
3805 					    "cbc(des)))",
3806 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
3807 						   "cbc-des-caam",
3808 				.cra_blocksize = DES_BLOCK_SIZE,
3809 			},
3810 			.setkey = aead_setkey,
3811 			.setauthsize = aead_setauthsize,
3812 			.encrypt = aead_encrypt,
3813 			.decrypt = aead_decrypt,
3814 			.ivsize = DES_BLOCK_SIZE,
3815 			.maxauthsize = MD5_DIGEST_SIZE,
3816 		},
3817 		.caam = {
3818 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3819 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
3820 					   OP_ALG_AAI_HMAC_PRECOMP,
3821 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
3822 			.geniv = true,
3823 		},
3824 	},
3825 	{
3826 		.aead = {
3827 			.base = {
3828 				.cra_name = "authenc(hmac(sha1),cbc(des))",
3829 				.cra_driver_name = "authenc-hmac-sha1-"
3830 						   "cbc-des-caam",
3831 				.cra_blocksize = DES_BLOCK_SIZE,
3832 			},
3833 			.setkey = aead_setkey,
3834 			.setauthsize = aead_setauthsize,
3835 			.encrypt = aead_encrypt,
3836 			.decrypt = aead_decrypt,
3837 			.ivsize = DES_BLOCK_SIZE,
3838 			.maxauthsize = SHA1_DIGEST_SIZE,
3839 		},
3840 		.caam = {
3841 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3842 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3843 					   OP_ALG_AAI_HMAC_PRECOMP,
3844 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3845 		},
3846 	},
3847 	{
3848 		.aead = {
3849 			.base = {
3850 				.cra_name = "echainiv(authenc(hmac(sha1),"
3851 					    "cbc(des)))",
3852 				.cra_driver_name = "echainiv-authenc-"
3853 						   "hmac-sha1-cbc-des-caam",
3854 				.cra_blocksize = DES_BLOCK_SIZE,
3855 			},
3856 			.setkey = aead_setkey,
3857 			.setauthsize = aead_setauthsize,
3858 			.encrypt = aead_encrypt,
3859 			.decrypt = aead_decrypt,
3860 			.ivsize = DES_BLOCK_SIZE,
3861 			.maxauthsize = SHA1_DIGEST_SIZE,
3862 		},
3863 		.caam = {
3864 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3865 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3866 					   OP_ALG_AAI_HMAC_PRECOMP,
3867 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
3868 			.geniv = true,
3869 		},
3870 	},
3871 	{
3872 		.aead = {
3873 			.base = {
3874 				.cra_name = "authenc(hmac(sha224),cbc(des))",
3875 				.cra_driver_name = "authenc-hmac-sha224-"
3876 						   "cbc-des-caam",
3877 				.cra_blocksize = DES_BLOCK_SIZE,
3878 			},
3879 			.setkey = aead_setkey,
3880 			.setauthsize = aead_setauthsize,
3881 			.encrypt = aead_encrypt,
3882 			.decrypt = aead_decrypt,
3883 			.ivsize = DES_BLOCK_SIZE,
3884 			.maxauthsize = SHA224_DIGEST_SIZE,
3885 		},
3886 		.caam = {
3887 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3888 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3889 					   OP_ALG_AAI_HMAC_PRECOMP,
3890 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3891 		},
3892 	},
3893 	{
3894 		.aead = {
3895 			.base = {
3896 				.cra_name = "echainiv(authenc(hmac(sha224),"
3897 					    "cbc(des)))",
3898 				.cra_driver_name = "echainiv-authenc-"
3899 						   "hmac-sha224-cbc-des-caam",
3900 				.cra_blocksize = DES_BLOCK_SIZE,
3901 			},
3902 			.setkey = aead_setkey,
3903 			.setauthsize = aead_setauthsize,
3904 			.encrypt = aead_encrypt,
3905 			.decrypt = aead_decrypt,
3906 			.ivsize = DES_BLOCK_SIZE,
3907 			.maxauthsize = SHA224_DIGEST_SIZE,
3908 		},
3909 		.caam = {
3910 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3911 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3912 					   OP_ALG_AAI_HMAC_PRECOMP,
3913 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
3914 			.geniv = true,
3915 		},
3916 	},
3917 	{
3918 		.aead = {
3919 			.base = {
3920 				.cra_name = "authenc(hmac(sha256),cbc(des))",
3921 				.cra_driver_name = "authenc-hmac-sha256-"
3922 						   "cbc-des-caam",
3923 				.cra_blocksize = DES_BLOCK_SIZE,
3924 			},
3925 			.setkey = aead_setkey,
3926 			.setauthsize = aead_setauthsize,
3927 			.encrypt = aead_encrypt,
3928 			.decrypt = aead_decrypt,
3929 			.ivsize = DES_BLOCK_SIZE,
3930 			.maxauthsize = SHA256_DIGEST_SIZE,
3931 		},
3932 		.caam = {
3933 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3934 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3935 					   OP_ALG_AAI_HMAC_PRECOMP,
3936 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3937 		},
3938 	},
3939 	{
3940 		.aead = {
3941 			.base = {
3942 				.cra_name = "echainiv(authenc(hmac(sha256),"
3943 					    "cbc(des)))",
3944 				.cra_driver_name = "echainiv-authenc-"
3945 						   "hmac-sha256-cbc-des-caam",
3946 				.cra_blocksize = DES_BLOCK_SIZE,
3947 			},
3948 			.setkey = aead_setkey,
3949 			.setauthsize = aead_setauthsize,
3950 			.encrypt = aead_encrypt,
3951 			.decrypt = aead_decrypt,
3952 			.ivsize = DES_BLOCK_SIZE,
3953 			.maxauthsize = SHA256_DIGEST_SIZE,
3954 		},
3955 		.caam = {
3956 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3957 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3958 					   OP_ALG_AAI_HMAC_PRECOMP,
3959 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
3960 			.geniv = true,
3961 		},
3962 	},
3963 	{
3964 		.aead = {
3965 			.base = {
3966 				.cra_name = "authenc(hmac(sha384),cbc(des))",
3967 				.cra_driver_name = "authenc-hmac-sha384-"
3968 						   "cbc-des-caam",
3969 				.cra_blocksize = DES_BLOCK_SIZE,
3970 			},
3971 			.setkey = aead_setkey,
3972 			.setauthsize = aead_setauthsize,
3973 			.encrypt = aead_encrypt,
3974 			.decrypt = aead_decrypt,
3975 			.ivsize = DES_BLOCK_SIZE,
3976 			.maxauthsize = SHA384_DIGEST_SIZE,
3977 		},
3978 		.caam = {
3979 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
3980 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3981 					   OP_ALG_AAI_HMAC_PRECOMP,
3982 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
3983 		},
3984 	},
3985 	{
3986 		.aead = {
3987 			.base = {
3988 				.cra_name = "echainiv(authenc(hmac(sha384),"
3989 					    "cbc(des)))",
3990 				.cra_driver_name = "echainiv-authenc-"
3991 						   "hmac-sha384-cbc-des-caam",
3992 				.cra_blocksize = DES_BLOCK_SIZE,
3993 			},
3994 			.setkey = aead_setkey,
3995 			.setauthsize = aead_setauthsize,
3996 			.encrypt = aead_encrypt,
3997 			.decrypt = aead_decrypt,
3998 			.ivsize = DES_BLOCK_SIZE,
3999 			.maxauthsize = SHA384_DIGEST_SIZE,
4000 		},
4001 		.caam = {
4002 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4003 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4004 					   OP_ALG_AAI_HMAC_PRECOMP,
4005 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4006 			.geniv = true,
4007 		},
4008 	},
4009 	{
4010 		.aead = {
4011 			.base = {
4012 				.cra_name = "authenc(hmac(sha512),cbc(des))",
4013 				.cra_driver_name = "authenc-hmac-sha512-"
4014 						   "cbc-des-caam",
4015 				.cra_blocksize = DES_BLOCK_SIZE,
4016 			},
4017 			.setkey = aead_setkey,
4018 			.setauthsize = aead_setauthsize,
4019 			.encrypt = aead_encrypt,
4020 			.decrypt = aead_decrypt,
4021 			.ivsize = DES_BLOCK_SIZE,
4022 			.maxauthsize = SHA512_DIGEST_SIZE,
4023 		},
4024 		.caam = {
4025 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4026 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4027 					   OP_ALG_AAI_HMAC_PRECOMP,
4028 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4029 		},
4030 	},
4031 	{
4032 		.aead = {
4033 			.base = {
4034 				.cra_name = "echainiv(authenc(hmac(sha512),"
4035 					    "cbc(des)))",
4036 				.cra_driver_name = "echainiv-authenc-"
4037 						   "hmac-sha512-cbc-des-caam",
4038 				.cra_blocksize = DES_BLOCK_SIZE,
4039 			},
4040 			.setkey = aead_setkey,
4041 			.setauthsize = aead_setauthsize,
4042 			.encrypt = aead_encrypt,
4043 			.decrypt = aead_decrypt,
4044 			.ivsize = DES_BLOCK_SIZE,
4045 			.maxauthsize = SHA512_DIGEST_SIZE,
4046 		},
4047 		.caam = {
4048 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
4049 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4050 					   OP_ALG_AAI_HMAC_PRECOMP,
4051 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4052 			.geniv = true,
4053 		},
4054 	},
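	/* AES-CTR (rfc3686) authenc variants; the seqiv entries generate the IV */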
4055 	{
4056 		.aead = {
4057 			.base = {
4058 				.cra_name = "authenc(hmac(md5),"
4059 					    "rfc3686(ctr(aes)))",
4060 				.cra_driver_name = "authenc-hmac-md5-"
4061 						   "rfc3686-ctr-aes-caam",
4062 				.cra_blocksize = 1,
4063 			},
4064 			.setkey = aead_setkey,
4065 			.setauthsize = aead_setauthsize,
4066 			.encrypt = aead_encrypt,
4067 			.decrypt = aead_decrypt,
4068 			.ivsize = CTR_RFC3686_IV_SIZE,
4069 			.maxauthsize = MD5_DIGEST_SIZE,
4070 		},
4071 		.caam = {
4072 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4073 					   OP_ALG_AAI_CTR_MOD128,
4074 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
4075 					   OP_ALG_AAI_HMAC_PRECOMP,
4076 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4077 			.rfc3686 = true,
4078 		},
4079 	},
4080 	{
4081 		.aead = {
4082 			.base = {
4083 				.cra_name = "seqiv(authenc("
4084 					    "hmac(md5),rfc3686(ctr(aes))))",
4085 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
4086 						   "rfc3686-ctr-aes-caam",
4087 				.cra_blocksize = 1,
4088 			},
4089 			.setkey = aead_setkey,
4090 			.setauthsize = aead_setauthsize,
4091 			.encrypt = aead_encrypt,
4092 			.decrypt = aead_decrypt,
4093 			.ivsize = CTR_RFC3686_IV_SIZE,
4094 			.maxauthsize = MD5_DIGEST_SIZE,
4095 		},
4096 		.caam = {
4097 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4098 					   OP_ALG_AAI_CTR_MOD128,
4099 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
4100 					   OP_ALG_AAI_HMAC_PRECOMP,
4101 			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
4102 			.rfc3686 = true,
4103 			.geniv = true,
4104 		},
4105 	},
4106 	{
4107 		.aead = {
4108 			.base = {
4109 				.cra_name = "authenc(hmac(sha1),"
4110 					    "rfc3686(ctr(aes)))",
4111 				.cra_driver_name = "authenc-hmac-sha1-"
4112 						   "rfc3686-ctr-aes-caam",
4113 				.cra_blocksize = 1,
4114 			},
4115 			.setkey = aead_setkey,
4116 			.setauthsize = aead_setauthsize,
4117 			.encrypt = aead_encrypt,
4118 			.decrypt = aead_decrypt,
4119 			.ivsize = CTR_RFC3686_IV_SIZE,
4120 			.maxauthsize = SHA1_DIGEST_SIZE,
4121 		},
4122 		.caam = {
4123 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4124 					   OP_ALG_AAI_CTR_MOD128,
4125 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4126 					   OP_ALG_AAI_HMAC_PRECOMP,
4127 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4128 			.rfc3686 = true,
4129 		},
4130 	},
4131 	{
4132 		.aead = {
4133 			.base = {
4134 				.cra_name = "seqiv(authenc("
4135 					    "hmac(sha1),rfc3686(ctr(aes))))",
4136 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
4137 						   "rfc3686-ctr-aes-caam",
4138 				.cra_blocksize = 1,
4139 			},
4140 			.setkey = aead_setkey,
4141 			.setauthsize = aead_setauthsize,
4142 			.encrypt = aead_encrypt,
4143 			.decrypt = aead_decrypt,
4144 			.ivsize = CTR_RFC3686_IV_SIZE,
4145 			.maxauthsize = SHA1_DIGEST_SIZE,
4146 		},
4147 		.caam = {
4148 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4149 					   OP_ALG_AAI_CTR_MOD128,
4150 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4151 					   OP_ALG_AAI_HMAC_PRECOMP,
4152 			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
4153 			.rfc3686 = true,
4154 			.geniv = true,
4155 		},
4156 	},
4157 	{
4158 		.aead = {
4159 			.base = {
4160 				.cra_name = "authenc(hmac(sha224),"
4161 					    "rfc3686(ctr(aes)))",
4162 				.cra_driver_name = "authenc-hmac-sha224-"
4163 						   "rfc3686-ctr-aes-caam",
4164 				.cra_blocksize = 1,
4165 			},
4166 			.setkey = aead_setkey,
4167 			.setauthsize = aead_setauthsize,
4168 			.encrypt = aead_encrypt,
4169 			.decrypt = aead_decrypt,
4170 			.ivsize = CTR_RFC3686_IV_SIZE,
4171 			.maxauthsize = SHA224_DIGEST_SIZE,
4172 		},
4173 		.caam = {
4174 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4175 					   OP_ALG_AAI_CTR_MOD128,
4176 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4177 					   OP_ALG_AAI_HMAC_PRECOMP,
4178 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4179 			.rfc3686 = true,
4180 		},
4181 	},
4182 	{
4183 		.aead = {
4184 			.base = {
4185 				.cra_name = "seqiv(authenc("
4186 					    "hmac(sha224),rfc3686(ctr(aes))))",
4187 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
4188 						   "rfc3686-ctr-aes-caam",
4189 				.cra_blocksize = 1,
4190 			},
4191 			.setkey = aead_setkey,
4192 			.setauthsize = aead_setauthsize,
4193 			.encrypt = aead_encrypt,
4194 			.decrypt = aead_decrypt,
4195 			.ivsize = CTR_RFC3686_IV_SIZE,
4196 			.maxauthsize = SHA224_DIGEST_SIZE,
4197 		},
4198 		.caam = {
4199 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4200 					   OP_ALG_AAI_CTR_MOD128,
4201 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
4202 					   OP_ALG_AAI_HMAC_PRECOMP,
4203 			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
4204 			.rfc3686 = true,
4205 			.geniv = true,
4206 		},
4207 	},
4208 	{
4209 		.aead = {
4210 			.base = {
4211 				.cra_name = "authenc(hmac(sha256),"
4212 					    "rfc3686(ctr(aes)))",
4213 				.cra_driver_name = "authenc-hmac-sha256-"
4214 						   "rfc3686-ctr-aes-caam",
4215 				.cra_blocksize = 1,
4216 			},
4217 			.setkey = aead_setkey,
4218 			.setauthsize = aead_setauthsize,
4219 			.encrypt = aead_encrypt,
4220 			.decrypt = aead_decrypt,
4221 			.ivsize = CTR_RFC3686_IV_SIZE,
4222 			.maxauthsize = SHA256_DIGEST_SIZE,
4223 		},
4224 		.caam = {
4225 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4226 					   OP_ALG_AAI_CTR_MOD128,
4227 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4228 					   OP_ALG_AAI_HMAC_PRECOMP,
4229 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4230 			.rfc3686 = true,
4231 		},
4232 	},
4233 	{
4234 		.aead = {
4235 			.base = {
4236 				.cra_name = "seqiv(authenc(hmac(sha256),"
4237 					    "rfc3686(ctr(aes))))",
4238 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
4239 						   "rfc3686-ctr-aes-caam",
4240 				.cra_blocksize = 1,
4241 			},
4242 			.setkey = aead_setkey,
4243 			.setauthsize = aead_setauthsize,
4244 			.encrypt = aead_encrypt,
4245 			.decrypt = aead_decrypt,
4246 			.ivsize = CTR_RFC3686_IV_SIZE,
4247 			.maxauthsize = SHA256_DIGEST_SIZE,
4248 		},
4249 		.caam = {
4250 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4251 					   OP_ALG_AAI_CTR_MOD128,
4252 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
4253 					   OP_ALG_AAI_HMAC_PRECOMP,
4254 			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
4255 			.rfc3686 = true,
4256 			.geniv = true,
4257 		},
4258 	},
4259 	{
4260 		.aead = {
4261 			.base = {
4262 				.cra_name = "authenc(hmac(sha384),"
4263 					    "rfc3686(ctr(aes)))",
4264 				.cra_driver_name = "authenc-hmac-sha384-"
4265 						   "rfc3686-ctr-aes-caam",
4266 				.cra_blocksize = 1,
4267 			},
4268 			.setkey = aead_setkey,
4269 			.setauthsize = aead_setauthsize,
4270 			.encrypt = aead_encrypt,
4271 			.decrypt = aead_decrypt,
4272 			.ivsize = CTR_RFC3686_IV_SIZE,
4273 			.maxauthsize = SHA384_DIGEST_SIZE,
4274 		},
4275 		.caam = {
4276 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4277 					   OP_ALG_AAI_CTR_MOD128,
4278 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4279 					   OP_ALG_AAI_HMAC_PRECOMP,
4280 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4281 			.rfc3686 = true,
4282 		},
4283 	},
4284 	{
4285 		.aead = {
4286 			.base = {
4287 				.cra_name = "seqiv(authenc(hmac(sha384),"
4288 					    "rfc3686(ctr(aes))))",
4289 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
4290 						   "rfc3686-ctr-aes-caam",
4291 				.cra_blocksize = 1,
4292 			},
4293 			.setkey = aead_setkey,
4294 			.setauthsize = aead_setauthsize,
4295 			.encrypt = aead_encrypt,
4296 			.decrypt = aead_decrypt,
4297 			.ivsize = CTR_RFC3686_IV_SIZE,
4298 			.maxauthsize = SHA384_DIGEST_SIZE,
4299 		},
4300 		.caam = {
4301 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4302 					   OP_ALG_AAI_CTR_MOD128,
4303 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
4304 					   OP_ALG_AAI_HMAC_PRECOMP,
4305 			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
4306 			.rfc3686 = true,
4307 			.geniv = true,
4308 		},
4309 	},
4310 	{
4311 		.aead = {
4312 			.base = {
4313 				.cra_name = "authenc(hmac(sha512),"
4314 					    "rfc3686(ctr(aes)))",
4315 				.cra_driver_name = "authenc-hmac-sha512-"
4316 						   "rfc3686-ctr-aes-caam",
4317 				.cra_blocksize = 1,
4318 			},
4319 			.setkey = aead_setkey,
4320 			.setauthsize = aead_setauthsize,
4321 			.encrypt = aead_encrypt,
4322 			.decrypt = aead_decrypt,
4323 			.ivsize = CTR_RFC3686_IV_SIZE,
4324 			.maxauthsize = SHA512_DIGEST_SIZE,
4325 		},
4326 		.caam = {
4327 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4328 					   OP_ALG_AAI_CTR_MOD128,
4329 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4330 					   OP_ALG_AAI_HMAC_PRECOMP,
4331 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4332 			.rfc3686 = true,
4333 		},
4334 	},
4335 	{
4336 		.aead = {
4337 			.base = {
4338 				.cra_name = "seqiv(authenc(hmac(sha512),"
4339 					    "rfc3686(ctr(aes))))",
4340 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
4341 						   "rfc3686-ctr-aes-caam",
4342 				.cra_blocksize = 1,
4343 			},
4344 			.setkey = aead_setkey,
4345 			.setauthsize = aead_setauthsize,
4346 			.encrypt = aead_encrypt,
4347 			.decrypt = aead_decrypt,
4348 			.ivsize = CTR_RFC3686_IV_SIZE,
4349 			.maxauthsize = SHA512_DIGEST_SIZE,
4350 		},
4351 		.caam = {
4352 			.class1_alg_type = OP_ALG_ALGSEL_AES |
4353 					   OP_ALG_AAI_CTR_MOD128,
4354 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
4355 					   OP_ALG_AAI_HMAC_PRECOMP,
4356 			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
4357 			.rfc3686 = true,
4358 			.geniv = true,
4359 		},
4360 	},
4361 };
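
/*
 * Illustrative sketch, not driver code: users reach the transforms above
 * through the generic AEAD API by cra_name, along the lines of
 *
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_aead(tfm);
 *
 * "key"/"keylen" are placeholders; for authenc the key blob must carry the
 * crypto_authenc_key_param header that aead_setkey() unpacks.
 */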
4362 
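/*
 * Registration wrapper for the legacy (give)ablkcipher templates in
 * driver_algs: one crypto_alg plus the CAAM descriptor-header settings it
 * needs.  Instances are allocated in caam_alg_alloc() and kept on alg_list
 * so caam_algapi_exit() can unregister and free them.
 */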
4363 struct caam_crypto_alg {
4364 	struct crypto_alg crypto_alg;
4365 	struct list_head entry;
4366 	struct caam_alg_entry caam;
4367 };
4368 
4369 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4370 {
4371 	ctx->jrdev = caam_jr_alloc();
4372 	if (IS_ERR(ctx->jrdev)) {
4373 		pr_err("Job Ring Device allocation for transform failed\n");
4374 		return PTR_ERR(ctx->jrdev);
4375 	}
4376 
4377 	/* copy descriptor header template value */
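	/* alg_op: class 2 (MDHA) op used when generating the split key */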
4378 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4379 	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4380 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
4381 
4382 	return 0;
4383 }
4384 
4385 static int caam_cra_init(struct crypto_tfm *tfm)
4386 {
4387 	struct crypto_alg *alg = tfm->__crt_alg;
4388 	struct caam_crypto_alg *caam_alg =
4389 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
4390 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4391 
4392 	return caam_init_common(ctx, &caam_alg->caam);
4393 }
4394 
4395 static int caam_aead_init(struct crypto_aead *tfm)
4396 {
4397 	struct aead_alg *alg = crypto_aead_alg(tfm);
4398 	struct caam_aead_alg *caam_alg =
4399 		 container_of(alg, struct caam_aead_alg, aead);
4400 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4401 
4402 	return caam_init_common(ctx, &caam_alg->caam);
4403 }
4404 
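/*
 * Release everything caam_init_common() and the setkey/descriptor paths
 * acquired: DMA mappings for the shared descriptors and key (if any),
 * then the job ring itself.
 */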
4405 static void caam_exit_common(struct caam_ctx *ctx)
4406 {
4407 	if (ctx->sh_desc_enc_dma &&
4408 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
4409 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
4410 				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
4411 	if (ctx->sh_desc_dec_dma &&
4412 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
4413 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
4414 				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
4415 	if (ctx->sh_desc_givenc_dma &&
4416 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
4417 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
4418 				 desc_bytes(ctx->sh_desc_givenc),
4419 				 DMA_TO_DEVICE);
4420 	if (ctx->key_dma &&
4421 	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
4422 		dma_unmap_single(ctx->jrdev, ctx->key_dma,
4423 				 ctx->enckeylen + ctx->split_key_pad_len,
4424 				 DMA_TO_DEVICE);
4425 
4426 	caam_jr_free(ctx->jrdev);
4427 }
4428 
4429 static void caam_cra_exit(struct crypto_tfm *tfm)
4430 {
4431 	caam_exit_common(crypto_tfm_ctx(tfm));
4432 }
4433 
4434 static void caam_aead_exit(struct crypto_aead *tfm)
4435 {
4436 	caam_exit_common(crypto_aead_ctx(tfm));
4437 }
4438 
4439 static void __exit caam_algapi_exit(void)
4440 {
4442 	struct caam_crypto_alg *t_alg, *n;
4443 	int i;
4444 
4445 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4446 		struct caam_aead_alg *t_alg = driver_aeads + i;
4447 
4448 		if (t_alg->registered)
4449 			crypto_unregister_aead(&t_alg->aead);
4450 	}
4451 
4452 	if (!alg_list.next)
4453 		return;
4454 
4455 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
4456 		crypto_unregister_alg(&t_alg->crypto_alg);
4457 		list_del(&t_alg->entry);
4458 		kfree(t_alg);
4459 	}
4460 }
4461 
4462 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
4463 					      *template)
4464 {
4465 	struct caam_crypto_alg *t_alg;
4466 	struct crypto_alg *alg;
4467 
4468 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4469 	if (!t_alg) {
4470 		pr_err("failed to allocate t_alg\n");
4471 		return ERR_PTR(-ENOMEM);
4472 	}
4473 
4474 	alg = &t_alg->crypto_alg;
4475 
4476 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
4477 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4478 		 template->driver_name);
4479 	alg->cra_module = THIS_MODULE;
4480 	alg->cra_init = caam_cra_init;
4481 	alg->cra_exit = caam_cra_exit;
4482 	alg->cra_priority = CAAM_CRA_PRIORITY;
4483 	alg->cra_blocksize = template->blocksize;
4484 	alg->cra_alignmask = 0;
4485 	alg->cra_ctxsize = sizeof(struct caam_ctx);
4486 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
4487 			 template->type;
4488 	switch (template->type) {
4489 	case CRYPTO_ALG_TYPE_GIVCIPHER:
4490 		alg->cra_type = &crypto_givcipher_type;
4491 		alg->cra_ablkcipher = template->template_ablkcipher;
4492 		break;
4493 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
4494 		alg->cra_type = &crypto_ablkcipher_type;
4495 		alg->cra_ablkcipher = template->template_ablkcipher;
4496 		break;
4497 	}
4498 
4499 	t_alg->caam.class1_alg_type = template->class1_alg_type;
4500 	t_alg->caam.class2_alg_type = template->class2_alg_type;
4501 	t_alg->caam.alg_op = template->alg_op;
4502 
4503 	return t_alg;
4504 }
4505 
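/* boilerplate common to every driver_aeads entry, filled in at registration */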
4506 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
4507 {
4508 	struct aead_alg *alg = &t_alg->aead;
4509 
4510 	alg->base.cra_module = THIS_MODULE;
4511 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
4512 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
4513 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
4514 
4515 	alg->init = caam_aead_init;
4516 	alg->exit = caam_aead_exit;
4517 }
4518 
4519 static int __init caam_algapi_init(void)
4520 {
4521 	struct device_node *dev_node;
4522 	struct platform_device *pdev;
4523 	struct device *ctrldev;
4524 	struct caam_drv_private *priv;
4525 	int i = 0, err = 0;
4526 	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4527 	unsigned int md_limit = SHA512_DIGEST_SIZE;
4528 	bool registered = false;
4529 
4530 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4531 	if (!dev_node) {
4532 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4533 		if (!dev_node)
4534 			return -ENODEV;
4535 	}
4536 
4537 	pdev = of_find_device_by_node(dev_node);
4538 	if (!pdev) {
4539 		of_node_put(dev_node);
4540 		return -ENODEV;
4541 	}
4542 
4543 	ctrldev = &pdev->dev;
4544 	priv = dev_get_drvdata(ctrldev);
4545 	of_node_put(dev_node);
4546 
4547 	/*
4548 	 * If priv is NULL, it's probably because the caam driver wasn't
4549 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4550 	 */
4551 	if (!priv)
4552 		return -ENODEV;
4553 
4555 	INIT_LIST_HEAD(&alg_list);
4556 
4557 	/*
4558 	 * Register crypto algorithms the device supports.
4559 	 * First, detect presence and attributes of DES, AES, and MD blocks.
4560 	 */
4561 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4562 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
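	/* per-class instance counts; a zero count means that block is absent */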
4563 	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4564 	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4565 	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4566 
4567 	/* if MD is present but is a low-power LP256 block, cap digests at SHA-256 */
4568 	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4569 		md_limit = SHA256_DIGEST_SIZE;
4570 
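	/* first pass: legacy crypto_alg templates from driver_algs */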
4571 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4572 		struct caam_crypto_alg *t_alg;
4573 		struct caam_alg_template *alg = driver_algs + i;
4574 		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
4575 
4576 		/* Skip DES algorithms if not supported by device */
4577 		if (!des_inst &&
4578 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
4579 		     (alg_sel == OP_ALG_ALGSEL_DES)))
4580 			continue;
4581 
4582 		/* Skip AES algorithms if not supported by device */
4583 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
4584 			continue;
4585 
4586 		t_alg = caam_alg_alloc(alg);
4587 		if (IS_ERR(t_alg)) {
4588 			err = PTR_ERR(t_alg);
4589 			pr_warn("%s alg allocation failed\n", alg->driver_name);
4590 			continue;
4591 		}
4592 
4593 		err = crypto_register_alg(&t_alg->crypto_alg);
4594 		if (err) {
4595 			pr_warn("%s alg registration failed\n",
4596 				t_alg->crypto_alg.cra_driver_name);
4597 			kfree(t_alg);
4598 			continue;
4599 		}
4600 
4601 		list_add_tail(&t_alg->entry, &alg_list);
4602 		registered = true;
4603 	}
4604 
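	/* second pass: aead_alg based templates from driver_aeads */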
4605 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4606 		struct caam_aead_alg *t_alg = driver_aeads + i;
4607 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4608 				 OP_ALG_ALGSEL_MASK;
4609 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4610 				 OP_ALG_ALGSEL_MASK;
4611 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
4612 
4613 		/* Skip DES algorithms if not supported by device */
4614 		if (!des_inst &&
4615 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
4616 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
4617 			continue;
4618 
4619 		/* Skip AES algorithms if not supported by device */
4620 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
4621 			continue;
4622 
4623 		/*
4624 		 * Low-power (LP) AES blocks do not implement GCM, so skip
4625 		 * GCM-based algorithms on those devices.
4626 		 */
4627 		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP &&
4628 		    alg_aai == OP_ALG_AAI_GCM)
4629 			continue;
4630 
4631 		/*
4632 		 * Skip algorithms requiring message digests if the MD block
4633 		 * is absent or the required digest size exceeds its limit.
4634 		 */
4635 		if (c2_alg_sel &&
4636 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
4637 			continue;
4638 
4639 		caam_aead_alg_init(t_alg);
4640 
4641 		err = crypto_register_aead(&t_alg->aead);
4642 		if (err) {
4643 			pr_warn("%s alg registration failed\n",
4644 				t_alg->aead.base.cra_driver_name);
4645 			continue;
4646 		}
4647 
4648 		t_alg->registered = true;
4649 		registered = true;
4650 	}
4651 
4652 	if (registered)
4653 		pr_info("caam algorithms registered in /proc/crypto\n");
4654 
4655 	return err;
4656 }
4657 
4658 module_init(caam_algapi_init);
4659 module_exit(caam_algapi_exit);
4660 
4661 MODULE_LICENSE("GPL");
4662 MODULE_DESCRIPTION("FSL CAAM support for crypto API");
4663 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
4664