/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

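/*
 * Note: DESC_MAX_USED_BYTES covers the largest shared descriptor built by
 * this driver (AEAD givencrypt) plus worst-case key material; dividing by
 * CAAM_CMD_SZ converts that byte count into 32-bit descriptor command words.
 */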
struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};
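/*
 * Note: drv_ctx[] entries are instantiated lazily, on first use, by
 * get_drv_ctx(); ->lock only serializes that one-time initialization.
 */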

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load the IV in the CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

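	/*
	 * desc_inline_query() reports via inl_mask which keys fit in the
	 * remaining descriptor space and may be inlined: bit 0 corresponds
	 * to data_len[0] (the auth key), bit 1 to data_len[1] (the cipher
	 * key).
	 */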
	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

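	/*
	 * Note: on Era < 6 there is no DKP, so the split key has to be
	 * computed up front by running a key-generation job on the CAAM.
	 */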
	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load the IV in the CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

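	/*
	 * XTS consumes two AES keys of equal size (data key and tweak key),
	 * hence only 2 * AES_MIN_KEY_SIZE and 2 * AES_MAX_KEY_SIZE are valid.
	 */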
	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
#define CAAM_QI_MAX_AEAD_SG						\
	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /	\
	 sizeof(struct qm_sg_entry))
	struct qm_sg_entry sgt[0];
};

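/*
 * The edesc and its trailing S/G table share a single allocation from the
 * QI memcache, so the table can hold at most CAAM_QI_MAX_AEAD_SG entries.
 */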
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
#define CAAM_QI_MAX_ABLKCIPHER_SG					    \
	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
	 sizeof(struct qm_sg_entry))
	struct qm_sg_entry sgt[0];
};

static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core initialized drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
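			/*
			 * Note: a failed init is cached as well (ERR_PTR or
			 * NULL), so callers must check IS_ERR_OR_NULL().
			 */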
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);
		/*
		 * verify the hw ICV check passed; if not, return -EBADMSG
		 */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
						(encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
		ivsize = crypto_aead_ivsize(aead);
		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, op_type, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
			qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

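	/*
	 * In the QI compound frame, fd_sgt[1] describes the input and
	 * fd_sgt[0] the output.
	 */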
	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

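	/*
	 * Note: caam_congested reflects QMan congestion notifications;
	 * backing off with -EAGAIN lets the caller throttle and retry.
	 */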
	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);
	qi_cache_free(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
				 ivsize, 0);

	ablkcipher_request_complete(req, status);
}

static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool in_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

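	/*
	 * If the IV happens to sit immediately before a single-segment
	 * source buffer in DMA space, hardware can consume IV + payload as
	 * one contiguous buffer and no input S/G table is needed.
	 */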
	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;
		qm_sg_ents = 0;
	} else {
		in_contig = false;
		qm_sg_ents = 1 + mapped_src_nents;
	}
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (!in_contig) {
		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
	}

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (!in_contig)
		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
					  ivsize + req->nbytes, 0);
	else
		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
				      0);

	if (req->src == req->dst) {
		if (!in_contig)
			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
					     sizeof(*sg_table), req->nbytes, 0);
		else
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 req->nbytes, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->nbytes, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->nbytes, 0);
	}

	return edesc;
}

static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool out_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct qm_sg_entry *sg_table, *fd_sgt;
	int dst_sg_idx, qm_sg_ents;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	}

	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;
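	/*
	 * Mirror image of the in_contig case: if the (hw-generated) IV lands
	 * immediately before a single-segment destination, the output can be
	 * described without an S/G table.
	 */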
	if (mapped_dst_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->dst)) {
		out_contig = true;
	} else {
		out_contig = false;
		qm_sg_ents += 1 + mapped_dst_nents;
	}

	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	if (!out_contig) {
		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx + 1, 0);
	}

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	if (!out_contig)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), ivsize + req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 ivsize + req->nbytes, 0);

	return edesc;
}

static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

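/*
 * Entries of type CRYPTO_ALG_TYPE_GIVCIPHER additionally provide built-in IV
 * generation through ->givencrypt; plain ABLKCIPHER entries rely on a generic
 * geniv template ("chainiv"/"eseqiv") instead.
 */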
static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
1902 			.setkey = aead_setkey,
1903 			.setauthsize = aead_setauthsize,
1904 			.encrypt = aead_encrypt,
1905 			.decrypt = aead_decrypt,
1906 			.ivsize = AES_BLOCK_SIZE,
1907 			.maxauthsize = SHA384_DIGEST_SIZE,
1908 		},
1909 		.caam = {
1910 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1911 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1912 					   OP_ALG_AAI_HMAC_PRECOMP,
1913 		}
1914 	},
1915 	{
1916 		.aead = {
1917 			.base = {
1918 				.cra_name = "echainiv(authenc(hmac(sha384),"
1919 					    "cbc(aes)))",
1920 				.cra_driver_name = "echainiv-authenc-"
1921 						   "hmac-sha384-cbc-aes-"
1922 						   "caam-qi",
1923 				.cra_blocksize = AES_BLOCK_SIZE,
1924 			},
1925 			.setkey = aead_setkey,
1926 			.setauthsize = aead_setauthsize,
1927 			.encrypt = aead_encrypt,
1928 			.decrypt = aead_decrypt,
1929 			.ivsize = AES_BLOCK_SIZE,
1930 			.maxauthsize = SHA384_DIGEST_SIZE,
1931 		},
1932 		.caam = {
1933 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1934 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1935 					   OP_ALG_AAI_HMAC_PRECOMP,
1936 			.geniv = true,
1937 		}
1938 	},
1939 	{
1940 		.aead = {
1941 			.base = {
1942 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1943 				.cra_driver_name = "authenc-hmac-sha512-"
1944 						   "cbc-aes-caam-qi",
1945 				.cra_blocksize = AES_BLOCK_SIZE,
1946 			},
1947 			.setkey = aead_setkey,
1948 			.setauthsize = aead_setauthsize,
1949 			.encrypt = aead_encrypt,
1950 			.decrypt = aead_decrypt,
1951 			.ivsize = AES_BLOCK_SIZE,
1952 			.maxauthsize = SHA512_DIGEST_SIZE,
1953 		},
1954 		.caam = {
1955 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1956 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1957 					   OP_ALG_AAI_HMAC_PRECOMP,
1958 		}
1959 	},
1960 	{
1961 		.aead = {
1962 			.base = {
1963 				.cra_name = "echainiv(authenc(hmac(sha512),"
1964 					    "cbc(aes)))",
1965 				.cra_driver_name = "echainiv-authenc-"
1966 						   "hmac-sha512-cbc-aes-"
1967 						   "caam-qi",
1968 				.cra_blocksize = AES_BLOCK_SIZE,
1969 			},
1970 			.setkey = aead_setkey,
1971 			.setauthsize = aead_setauthsize,
1972 			.encrypt = aead_encrypt,
1973 			.decrypt = aead_decrypt,
1974 			.ivsize = AES_BLOCK_SIZE,
1975 			.maxauthsize = SHA512_DIGEST_SIZE,
1976 		},
1977 		.caam = {
1978 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1979 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1980 					   OP_ALG_AAI_HMAC_PRECOMP,
1981 			.geniv = true,
1982 		}
1983 	},
1984 	{
1985 		.aead = {
1986 			.base = {
1987 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1988 				.cra_driver_name = "authenc-hmac-md5-"
1989 						   "cbc-des3_ede-caam-qi",
1990 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1991 			},
1992 			.setkey = aead_setkey,
1993 			.setauthsize = aead_setauthsize,
1994 			.encrypt = aead_encrypt,
1995 			.decrypt = aead_decrypt,
1996 			.ivsize = DES3_EDE_BLOCK_SIZE,
1997 			.maxauthsize = MD5_DIGEST_SIZE,
1998 		},
1999 		.caam = {
2000 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2001 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2002 					   OP_ALG_AAI_HMAC_PRECOMP,
2003 		}
2004 	},
2005 	{
2006 		.aead = {
2007 			.base = {
2008 				.cra_name = "echainiv(authenc(hmac(md5),"
2009 					    "cbc(des3_ede)))",
2010 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2011 						   "cbc-des3_ede-caam-qi",
2012 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2013 			},
2014 			.setkey = aead_setkey,
2015 			.setauthsize = aead_setauthsize,
2016 			.encrypt = aead_encrypt,
2017 			.decrypt = aead_decrypt,
2018 			.ivsize = DES3_EDE_BLOCK_SIZE,
2019 			.maxauthsize = MD5_DIGEST_SIZE,
2020 		},
2021 		.caam = {
2022 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2023 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2024 					   OP_ALG_AAI_HMAC_PRECOMP,
2025 			.geniv = true,
2026 		}
2027 	},
2028 	{
2029 		.aead = {
2030 			.base = {
2031 				.cra_name = "authenc(hmac(sha1),"
2032 					    "cbc(des3_ede))",
2033 				.cra_driver_name = "authenc-hmac-sha1-"
2034 						   "cbc-des3_ede-caam-qi",
2035 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2036 			},
2037 			.setkey = aead_setkey,
2038 			.setauthsize = aead_setauthsize,
2039 			.encrypt = aead_encrypt,
2040 			.decrypt = aead_decrypt,
2041 			.ivsize = DES3_EDE_BLOCK_SIZE,
2042 			.maxauthsize = SHA1_DIGEST_SIZE,
2043 		},
2044 		.caam = {
2045 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2046 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2047 					   OP_ALG_AAI_HMAC_PRECOMP,
2048 		},
2049 	},
2050 	{
2051 		.aead = {
2052 			.base = {
2053 				.cra_name = "echainiv(authenc(hmac(sha1),"
2054 					    "cbc(des3_ede)))",
2055 				.cra_driver_name = "echainiv-authenc-"
2056 						   "hmac-sha1-"
2057 						   "cbc-des3_ede-caam-qi",
2058 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2059 			},
2060 			.setkey = aead_setkey,
2061 			.setauthsize = aead_setauthsize,
2062 			.encrypt = aead_encrypt,
2063 			.decrypt = aead_decrypt,
2064 			.ivsize = DES3_EDE_BLOCK_SIZE,
2065 			.maxauthsize = SHA1_DIGEST_SIZE,
2066 		},
2067 		.caam = {
2068 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2069 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2070 					   OP_ALG_AAI_HMAC_PRECOMP,
2071 			.geniv = true,
2072 		}
2073 	},
2074 	{
2075 		.aead = {
2076 			.base = {
2077 				.cra_name = "authenc(hmac(sha224),"
2078 					    "cbc(des3_ede))",
2079 				.cra_driver_name = "authenc-hmac-sha224-"
2080 						   "cbc-des3_ede-caam-qi",
2081 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2082 			},
2083 			.setkey = aead_setkey,
2084 			.setauthsize = aead_setauthsize,
2085 			.encrypt = aead_encrypt,
2086 			.decrypt = aead_decrypt,
2087 			.ivsize = DES3_EDE_BLOCK_SIZE,
2088 			.maxauthsize = SHA224_DIGEST_SIZE,
2089 		},
2090 		.caam = {
2091 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2092 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2093 					   OP_ALG_AAI_HMAC_PRECOMP,
2094 		},
2095 	},
2096 	{
2097 		.aead = {
2098 			.base = {
2099 				.cra_name = "echainiv(authenc(hmac(sha224),"
2100 					    "cbc(des3_ede)))",
2101 				.cra_driver_name = "echainiv-authenc-"
2102 						   "hmac-sha224-"
2103 						   "cbc-des3_ede-caam-qi",
2104 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2105 			},
2106 			.setkey = aead_setkey,
2107 			.setauthsize = aead_setauthsize,
2108 			.encrypt = aead_encrypt,
2109 			.decrypt = aead_decrypt,
2110 			.ivsize = DES3_EDE_BLOCK_SIZE,
2111 			.maxauthsize = SHA224_DIGEST_SIZE,
2112 		},
2113 		.caam = {
2114 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2115 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2116 					   OP_ALG_AAI_HMAC_PRECOMP,
2117 			.geniv = true,
2118 		}
2119 	},
2120 	{
2121 		.aead = {
2122 			.base = {
2123 				.cra_name = "authenc(hmac(sha256),"
2124 					    "cbc(des3_ede))",
2125 				.cra_driver_name = "authenc-hmac-sha256-"
2126 						   "cbc-des3_ede-caam-qi",
2127 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2128 			},
2129 			.setkey = aead_setkey,
2130 			.setauthsize = aead_setauthsize,
2131 			.encrypt = aead_encrypt,
2132 			.decrypt = aead_decrypt,
2133 			.ivsize = DES3_EDE_BLOCK_SIZE,
2134 			.maxauthsize = SHA256_DIGEST_SIZE,
2135 		},
2136 		.caam = {
2137 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2138 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2139 					   OP_ALG_AAI_HMAC_PRECOMP,
2140 		},
2141 	},
2142 	{
2143 		.aead = {
2144 			.base = {
2145 				.cra_name = "echainiv(authenc(hmac(sha256),"
2146 					    "cbc(des3_ede)))",
2147 				.cra_driver_name = "echainiv-authenc-"
2148 						   "hmac-sha256-"
2149 						   "cbc-des3_ede-caam-qi",
2150 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2151 			},
2152 			.setkey = aead_setkey,
2153 			.setauthsize = aead_setauthsize,
2154 			.encrypt = aead_encrypt,
2155 			.decrypt = aead_decrypt,
2156 			.ivsize = DES3_EDE_BLOCK_SIZE,
2157 			.maxauthsize = SHA256_DIGEST_SIZE,
2158 		},
2159 		.caam = {
2160 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2161 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2162 					   OP_ALG_AAI_HMAC_PRECOMP,
2163 			.geniv = true,
2164 		}
2165 	},
2166 	{
2167 		.aead = {
2168 			.base = {
2169 				.cra_name = "authenc(hmac(sha384),"
2170 					    "cbc(des3_ede))",
2171 				.cra_driver_name = "authenc-hmac-sha384-"
2172 						   "cbc-des3_ede-caam-qi",
2173 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2174 			},
2175 			.setkey = aead_setkey,
2176 			.setauthsize = aead_setauthsize,
2177 			.encrypt = aead_encrypt,
2178 			.decrypt = aead_decrypt,
2179 			.ivsize = DES3_EDE_BLOCK_SIZE,
2180 			.maxauthsize = SHA384_DIGEST_SIZE,
2181 		},
2182 		.caam = {
2183 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2184 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2185 					   OP_ALG_AAI_HMAC_PRECOMP,
2186 		},
2187 	},
2188 	{
2189 		.aead = {
2190 			.base = {
2191 				.cra_name = "echainiv(authenc(hmac(sha384),"
2192 					    "cbc(des3_ede)))",
2193 				.cra_driver_name = "echainiv-authenc-"
2194 						   "hmac-sha384-"
2195 						   "cbc-des3_ede-caam-qi",
2196 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2197 			},
2198 			.setkey = aead_setkey,
2199 			.setauthsize = aead_setauthsize,
2200 			.encrypt = aead_encrypt,
2201 			.decrypt = aead_decrypt,
2202 			.ivsize = DES3_EDE_BLOCK_SIZE,
2203 			.maxauthsize = SHA384_DIGEST_SIZE,
2204 		},
2205 		.caam = {
2206 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2207 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2208 					   OP_ALG_AAI_HMAC_PRECOMP,
2209 			.geniv = true,
2210 		}
2211 	},
2212 	{
2213 		.aead = {
2214 			.base = {
2215 				.cra_name = "authenc(hmac(sha512),"
2216 					    "cbc(des3_ede))",
2217 				.cra_driver_name = "authenc-hmac-sha512-"
2218 						   "cbc-des3_ede-caam-qi",
2219 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2220 			},
2221 			.setkey = aead_setkey,
2222 			.setauthsize = aead_setauthsize,
2223 			.encrypt = aead_encrypt,
2224 			.decrypt = aead_decrypt,
2225 			.ivsize = DES3_EDE_BLOCK_SIZE,
2226 			.maxauthsize = SHA512_DIGEST_SIZE,
2227 		},
2228 		.caam = {
2229 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2230 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2231 					   OP_ALG_AAI_HMAC_PRECOMP,
2232 		},
2233 	},
2234 	{
2235 		.aead = {
2236 			.base = {
2237 				.cra_name = "echainiv(authenc(hmac(sha512),"
2238 					    "cbc(des3_ede)))",
2239 				.cra_driver_name = "echainiv-authenc-"
2240 						   "hmac-sha512-"
2241 						   "cbc-des3_ede-caam-qi",
2242 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2243 			},
2244 			.setkey = aead_setkey,
2245 			.setauthsize = aead_setauthsize,
2246 			.encrypt = aead_encrypt,
2247 			.decrypt = aead_decrypt,
2248 			.ivsize = DES3_EDE_BLOCK_SIZE,
2249 			.maxauthsize = SHA512_DIGEST_SIZE,
2250 		},
2251 		.caam = {
2252 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2253 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2254 					   OP_ALG_AAI_HMAC_PRECOMP,
2255 			.geniv = true,
2256 		}
2257 	},
2258 	{
2259 		.aead = {
2260 			.base = {
2261 				.cra_name = "authenc(hmac(md5),cbc(des))",
2262 				.cra_driver_name = "authenc-hmac-md5-"
2263 						   "cbc-des-caam-qi",
2264 				.cra_blocksize = DES_BLOCK_SIZE,
2265 			},
2266 			.setkey = aead_setkey,
2267 			.setauthsize = aead_setauthsize,
2268 			.encrypt = aead_encrypt,
2269 			.decrypt = aead_decrypt,
2270 			.ivsize = DES_BLOCK_SIZE,
2271 			.maxauthsize = MD5_DIGEST_SIZE,
2272 		},
2273 		.caam = {
2274 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2275 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2276 					   OP_ALG_AAI_HMAC_PRECOMP,
2277 		},
2278 	},
2279 	{
2280 		.aead = {
2281 			.base = {
2282 				.cra_name = "echainiv(authenc(hmac(md5),"
2283 					    "cbc(des)))",
2284 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2285 						   "cbc-des-caam-qi",
2286 				.cra_blocksize = DES_BLOCK_SIZE,
2287 			},
2288 			.setkey = aead_setkey,
2289 			.setauthsize = aead_setauthsize,
2290 			.encrypt = aead_encrypt,
2291 			.decrypt = aead_decrypt,
2292 			.ivsize = DES_BLOCK_SIZE,
2293 			.maxauthsize = MD5_DIGEST_SIZE,
2294 		},
2295 		.caam = {
2296 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2297 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2298 					   OP_ALG_AAI_HMAC_PRECOMP,
2299 			.geniv = true,
2300 		}
2301 	},
2302 	{
2303 		.aead = {
2304 			.base = {
2305 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2306 				.cra_driver_name = "authenc-hmac-sha1-"
2307 						   "cbc-des-caam-qi",
2308 				.cra_blocksize = DES_BLOCK_SIZE,
2309 			},
2310 			.setkey = aead_setkey,
2311 			.setauthsize = aead_setauthsize,
2312 			.encrypt = aead_encrypt,
2313 			.decrypt = aead_decrypt,
2314 			.ivsize = DES_BLOCK_SIZE,
2315 			.maxauthsize = SHA1_DIGEST_SIZE,
2316 		},
2317 		.caam = {
2318 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2319 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2320 					   OP_ALG_AAI_HMAC_PRECOMP,
2321 		},
2322 	},
2323 	{
2324 		.aead = {
2325 			.base = {
2326 				.cra_name = "echainiv(authenc(hmac(sha1),"
2327 					    "cbc(des)))",
2328 				.cra_driver_name = "echainiv-authenc-"
2329 						   "hmac-sha1-cbc-des-caam-qi",
2330 				.cra_blocksize = DES_BLOCK_SIZE,
2331 			},
2332 			.setkey = aead_setkey,
2333 			.setauthsize = aead_setauthsize,
2334 			.encrypt = aead_encrypt,
2335 			.decrypt = aead_decrypt,
2336 			.ivsize = DES_BLOCK_SIZE,
2337 			.maxauthsize = SHA1_DIGEST_SIZE,
2338 		},
2339 		.caam = {
2340 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2341 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2342 					   OP_ALG_AAI_HMAC_PRECOMP,
2343 			.geniv = true,
2344 		}
2345 	},
2346 	{
2347 		.aead = {
2348 			.base = {
2349 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2350 				.cra_driver_name = "authenc-hmac-sha224-"
2351 						   "cbc-des-caam-qi",
2352 				.cra_blocksize = DES_BLOCK_SIZE,
2353 			},
2354 			.setkey = aead_setkey,
2355 			.setauthsize = aead_setauthsize,
2356 			.encrypt = aead_encrypt,
2357 			.decrypt = aead_decrypt,
2358 			.ivsize = DES_BLOCK_SIZE,
2359 			.maxauthsize = SHA224_DIGEST_SIZE,
2360 		},
2361 		.caam = {
2362 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2363 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2364 					   OP_ALG_AAI_HMAC_PRECOMP,
2365 		},
2366 	},
2367 	{
2368 		.aead = {
2369 			.base = {
2370 				.cra_name = "echainiv(authenc(hmac(sha224),"
2371 					    "cbc(des)))",
2372 				.cra_driver_name = "echainiv-authenc-"
2373 						   "hmac-sha224-cbc-des-"
2374 						   "caam-qi",
2375 				.cra_blocksize = DES_BLOCK_SIZE,
2376 			},
2377 			.setkey = aead_setkey,
2378 			.setauthsize = aead_setauthsize,
2379 			.encrypt = aead_encrypt,
2380 			.decrypt = aead_decrypt,
2381 			.ivsize = DES_BLOCK_SIZE,
2382 			.maxauthsize = SHA224_DIGEST_SIZE,
2383 		},
2384 		.caam = {
2385 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2386 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2387 					   OP_ALG_AAI_HMAC_PRECOMP,
2388 			.geniv = true,
2389 		}
2390 	},
2391 	{
2392 		.aead = {
2393 			.base = {
2394 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2395 				.cra_driver_name = "authenc-hmac-sha256-"
2396 						   "cbc-des-caam-qi",
2397 				.cra_blocksize = DES_BLOCK_SIZE,
2398 			},
2399 			.setkey = aead_setkey,
2400 			.setauthsize = aead_setauthsize,
2401 			.encrypt = aead_encrypt,
2402 			.decrypt = aead_decrypt,
2403 			.ivsize = DES_BLOCK_SIZE,
2404 			.maxauthsize = SHA256_DIGEST_SIZE,
2405 		},
2406 		.caam = {
2407 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2408 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2409 					   OP_ALG_AAI_HMAC_PRECOMP,
2410 		},
2411 	},
2412 	{
2413 		.aead = {
2414 			.base = {
2415 				.cra_name = "echainiv(authenc(hmac(sha256),"
2416 					    "cbc(des)))",
2417 				.cra_driver_name = "echainiv-authenc-"
2418 						   "hmac-sha256-cbc-des-"
2419 						   "caam-qi",
2420 				.cra_blocksize = DES_BLOCK_SIZE,
2421 			},
2422 			.setkey = aead_setkey,
2423 			.setauthsize = aead_setauthsize,
2424 			.encrypt = aead_encrypt,
2425 			.decrypt = aead_decrypt,
2426 			.ivsize = DES_BLOCK_SIZE,
2427 			.maxauthsize = SHA256_DIGEST_SIZE,
2428 		},
2429 		.caam = {
2430 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2431 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2432 					   OP_ALG_AAI_HMAC_PRECOMP,
2433 			.geniv = true,
2434 		},
2435 	},
2436 	{
2437 		.aead = {
2438 			.base = {
2439 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2440 				.cra_driver_name = "authenc-hmac-sha384-"
2441 						   "cbc-des-caam-qi",
2442 				.cra_blocksize = DES_BLOCK_SIZE,
2443 			},
2444 			.setkey = aead_setkey,
2445 			.setauthsize = aead_setauthsize,
2446 			.encrypt = aead_encrypt,
2447 			.decrypt = aead_decrypt,
2448 			.ivsize = DES_BLOCK_SIZE,
2449 			.maxauthsize = SHA384_DIGEST_SIZE,
2450 		},
2451 		.caam = {
2452 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2453 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2454 					   OP_ALG_AAI_HMAC_PRECOMP,
2455 		},
2456 	},
2457 	{
2458 		.aead = {
2459 			.base = {
2460 				.cra_name = "echainiv(authenc(hmac(sha384),"
2461 					    "cbc(des)))",
2462 				.cra_driver_name = "echainiv-authenc-"
2463 						   "hmac-sha384-cbc-des-"
2464 						   "caam-qi",
2465 				.cra_blocksize = DES_BLOCK_SIZE,
2466 			},
2467 			.setkey = aead_setkey,
2468 			.setauthsize = aead_setauthsize,
2469 			.encrypt = aead_encrypt,
2470 			.decrypt = aead_decrypt,
2471 			.ivsize = DES_BLOCK_SIZE,
2472 			.maxauthsize = SHA384_DIGEST_SIZE,
2473 		},
2474 		.caam = {
2475 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2476 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2477 					   OP_ALG_AAI_HMAC_PRECOMP,
2478 			.geniv = true,
2479 		}
2480 	},
2481 	{
2482 		.aead = {
2483 			.base = {
2484 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2485 				.cra_driver_name = "authenc-hmac-sha512-"
2486 						   "cbc-des-caam-qi",
2487 				.cra_blocksize = DES_BLOCK_SIZE,
2488 			},
2489 			.setkey = aead_setkey,
2490 			.setauthsize = aead_setauthsize,
2491 			.encrypt = aead_encrypt,
2492 			.decrypt = aead_decrypt,
2493 			.ivsize = DES_BLOCK_SIZE,
2494 			.maxauthsize = SHA512_DIGEST_SIZE,
2495 		},
2496 		.caam = {
2497 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2498 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2499 					   OP_ALG_AAI_HMAC_PRECOMP,
2500 		}
2501 	},
2502 	{
2503 		.aead = {
2504 			.base = {
2505 				.cra_name = "echainiv(authenc(hmac(sha512),"
2506 					    "cbc(des)))",
2507 				.cra_driver_name = "echainiv-authenc-"
2508 						   "hmac-sha512-cbc-des-"
2509 						   "caam-qi",
2510 				.cra_blocksize = DES_BLOCK_SIZE,
2511 			},
2512 			.setkey = aead_setkey,
2513 			.setauthsize = aead_setauthsize,
2514 			.encrypt = aead_encrypt,
2515 			.decrypt = aead_decrypt,
2516 			.ivsize = DES_BLOCK_SIZE,
2517 			.maxauthsize = SHA512_DIGEST_SIZE,
2518 		},
2519 		.caam = {
2520 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2521 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2522 					   OP_ALG_AAI_HMAC_PRECOMP,
2523 			.geniv = true,
2524 		}
2525 	},
2526 };
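/*
 * Consumer-side sketch (illustrative, not part of this driver): each
 * entry above is registered under both its generic cra_name and the
 * caam-qi specific cra_driver_name. A kernel user requests the generic
 * name and the crypto API picks this backend whenever CAAM_CRA_PRIORITY
 * (2000) outranks competing implementations:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	...
 *	crypto_free_aead(tfm);
 */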
2527 
2528 struct caam_crypto_alg {
2529 	struct list_head entry;
2530 	struct crypto_alg crypto_alg;
2531 	struct caam_alg_entry caam;
2532 };
2533 
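/*
 * Common per-tfm init: the key buffer is DMA-mapped once here and then
 * reused for the whole lifetime of the tfm. On Era >= 6 parts, split
 * keys are derived in-place by DKP (the Derived Key Protocol), i.e. the
 * device writes the derived key back into ctx->key, hence the
 * DMA_BIDIRECTIONAL mapping; otherwise DMA_TO_DEVICE suffices. The
 * per-operation driver contexts in drv_ctx[] are created lazily, on
 * first use.
 */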
2534 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2535 			    bool uses_dkp)
2536 {
2537 	struct caam_drv_private *priv;
2538 
2539 	/*
2540 	 * distribute tfms across job rings to ensure in-order
2541 	 * crypto request processing per tfm
2542 	 */
2543 	ctx->jrdev = caam_jr_alloc();
2544 	if (IS_ERR(ctx->jrdev)) {
2545 		pr_err("Job Ring Device allocation for transform failed\n");
2546 		return PTR_ERR(ctx->jrdev);
2547 	}
2548 
2549 	priv = dev_get_drvdata(ctx->jrdev->parent);
2550 	if (priv->era >= 6 && uses_dkp)
2551 		ctx->dir = DMA_BIDIRECTIONAL;
2552 	else
2553 		ctx->dir = DMA_TO_DEVICE;
2554 
2555 	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
2556 				      ctx->dir);
2557 	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
2558 		dev_err(ctx->jrdev, "unable to map key\n");
2559 		caam_jr_free(ctx->jrdev);
2560 		return -ENOMEM;
2561 	}
2562 
2563 	/* copy descriptor header template value */
2564 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2565 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2566 
2567 	ctx->qidev = priv->qidev;
2568 
2569 	spin_lock_init(&ctx->lock);
2570 	ctx->drv_ctx[ENCRYPT] = NULL;
2571 	ctx->drv_ctx[DECRYPT] = NULL;
2572 	ctx->drv_ctx[GIVENCRYPT] = NULL;
2573 
2574 	return 0;
2575 }
2576 
2577 static int caam_cra_init(struct crypto_tfm *tfm)
2578 {
2579 	struct crypto_alg *alg = tfm->__crt_alg;
2580 	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2581 							crypto_alg);
2582 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2583 
2584 	return caam_init_common(ctx, &caam_alg->caam, false);
2585 }
2586 
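/*
 * Only the authenc-style AEADs (those wired to aead_setkey()) carry a
 * split (authentication + encryption) key and therefore use DKP; the
 * GCM variants install raw keys through their own setkey handlers, so
 * uses_dkp can be inferred from the setkey callback.
 */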
2587 static int caam_aead_init(struct crypto_aead *tfm)
2588 {
2589 	struct aead_alg *alg = crypto_aead_alg(tfm);
2590 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2591 						      aead);
2592 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2593 
2594 	return caam_init_common(ctx, &caam_alg->caam,
2595 				alg->setkey == aead_setkey);
2596 }
2597 
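/*
 * Teardown mirrors caam_init_common(): release whichever driver
 * contexts were instantiated (caam_drv_ctx_rel() is expected to
 * tolerate NULL and error-pointer values, so the slots can be released
 * unconditionally), unmap the key buffer with the same direction it was
 * mapped with, and hand the job ring back.
 */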
2598 static void caam_exit_common(struct caam_ctx *ctx)
2599 {
2600 	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2601 	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2602 	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
2603 
2604 	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
2605 
2606 	caam_jr_free(ctx->jrdev);
2607 }
2608 
2609 static void caam_cra_exit(struct crypto_tfm *tfm)
2610 {
2611 	caam_exit_common(crypto_tfm_ctx(tfm));
2612 }
2613 
2614 static void caam_aead_exit(struct crypto_aead *tfm)
2615 {
2616 	caam_exit_common(crypto_aead_ctx(tfm));
2617 }
2618 
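/*
 * alg_list tracks only the dynamically allocated ablkcipher/givcipher
 * algs; the statically defined AEADs are unwound via their ->registered
 * flag instead. The !alg_list.next test below is a defensive guard
 * against running the exit path before the list head was initialized.
 */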
2619 static struct list_head alg_list;
2620 static void __exit caam_qi_algapi_exit(void)
2621 {
2622 	struct caam_crypto_alg *t_alg, *n;
2623 	int i;
2624 
2625 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2626 		struct caam_aead_alg *t_alg = driver_aeads + i;
2627 
2628 		if (t_alg->registered)
2629 			crypto_unregister_aead(&t_alg->aead);
2630 	}
2631 
2632 	if (!alg_list.next)
2633 		return;
2634 
2635 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2636 		crypto_unregister_alg(&t_alg->crypto_alg);
2637 		list_del(&t_alg->entry);
2638 		kfree(t_alg);
2639 	}
2640 }
2641 
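/*
 * Wrap one ablkcipher/givcipher template in a freshly allocated
 * crypto_alg. Unlike the AEAD entries, these templates do not embed a
 * complete crypto_alg, so the common fields (module, priority, context
 * size, flags) are filled in here; registration and freeing happen in
 * the module init/exit paths via alg_list.
 */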
2642 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2643 					      *template)
2644 {
2645 	struct caam_crypto_alg *t_alg;
2646 	struct crypto_alg *alg;
2647 
2648 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2649 	if (!t_alg)
2650 		return ERR_PTR(-ENOMEM);
2651 
2652 	alg = &t_alg->crypto_alg;
2653 
2654 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2655 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2656 		 template->driver_name);
2657 	alg->cra_module = THIS_MODULE;
2658 	alg->cra_init = caam_cra_init;
2659 	alg->cra_exit = caam_cra_exit;
2660 	alg->cra_priority = CAAM_CRA_PRIORITY;
2661 	alg->cra_blocksize = template->blocksize;
2662 	alg->cra_alignmask = 0;
2663 	alg->cra_ctxsize = sizeof(struct caam_ctx);
2664 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2665 			 template->type;
2666 	switch (template->type) {
2667 	case CRYPTO_ALG_TYPE_GIVCIPHER:
2668 		alg->cra_type = &crypto_givcipher_type;
2669 		alg->cra_ablkcipher = template->template_ablkcipher;
2670 		break;
2671 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2672 		alg->cra_type = &crypto_ablkcipher_type;
2673 		alg->cra_ablkcipher = template->template_ablkcipher;
2674 		break;
2675 	}
2676 
2677 	t_alg->caam.class1_alg_type = template->class1_alg_type;
2678 	t_alg->caam.class2_alg_type = template->class2_alg_type;
2679 
2680 	return t_alg;
2681 }
2682 
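/*
 * AEAD counterpart of caam_alg_alloc(): driver_aeads[] already declares
 * the full aead_alg, so only the shared base fields and the init/exit
 * hooks remain to be set before registration.
 */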
2683 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2684 {
2685 	struct aead_alg *alg = &t_alg->aead;
2686 
2687 	alg->base.cra_module = THIS_MODULE;
2688 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2689 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2690 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2691 
2692 	alg->init = caam_aead_init;
2693 	alg->exit = caam_aead_exit;
2694 }
2695 
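/*
 * Module init: locate the CAAM controller, then gate registration on
 * the controller's CHA instantiation/version registers. Illustrative
 * decode with a made-up cha_num_ls value (the masks and shifts are the
 * real ones from regs.h):
 *
 *	cha_inst = 0x00215301;          (hypothetical readout)
 *	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
 *	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
 *
 * A zero instance count means the block is absent, and every algorithm
 * that needs it is skipped below. Note that of_find_device_by_node()
 * takes a reference on the returned platform device, which this
 * function keeps rather than dropping.
 */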
2696 static int __init caam_qi_algapi_init(void)
2697 {
2698 	struct device_node *dev_node;
2699 	struct platform_device *pdev;
2700 	struct device *ctrldev;
2701 	struct caam_drv_private *priv;
2702 	int i = 0, err = 0;
2703 	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2704 	unsigned int md_limit = SHA512_DIGEST_SIZE;
2705 	bool registered = false;
2706 
2707 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2708 	if (!dev_node) {
2709 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2710 		if (!dev_node)
2711 			return -ENODEV;
2712 	}
2713 
2714 	pdev = of_find_device_by_node(dev_node);
2715 	of_node_put(dev_node);
2716 	if (!pdev)
2717 		return -ENODEV;
2718 
2719 	ctrldev = &pdev->dev;
2720 	priv = dev_get_drvdata(ctrldev);
2721 
2722 	/*
2723 	 * If priv is NULL, it's probably because the caam driver wasn't
2724 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2725 	 */
2726 	if (!priv || !priv->qi_present)
2727 		return -ENODEV;
2728 
2729 	if (caam_dpaa2) {
2730 		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
2731 		return -ENODEV;
2732 	}
2733 
2734 	INIT_LIST_HEAD(&alg_list);
2735 
2736 	/*
2737 	 * Register crypto algorithms the device supports.
2738 	 * First, detect presence and attributes of DES, AES, and MD blocks.
2739 	 */
2740 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2741 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2742 	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2743 	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2744 	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2745 
2746 	/* If the MD block is the low-power LP256 version, cap digests at SHA-256 */
2747 	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2748 		md_limit = SHA256_DIGEST_SIZE;
2749 
2750 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2751 		struct caam_crypto_alg *t_alg;
2752 		struct caam_alg_template *alg = driver_algs + i;
2753 		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
2754 
2755 		/* Skip DES algorithms if not supported by device */
2756 		if (!des_inst &&
2757 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
2758 		     (alg_sel == OP_ALG_ALGSEL_DES)))
2759 			continue;
2760 
2761 		/* Skip AES algorithms if not supported by device */
2762 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2763 			continue;
2764 
2765 		t_alg = caam_alg_alloc(alg);
2766 		if (IS_ERR(t_alg)) {
2767 			err = PTR_ERR(t_alg);
2768 			dev_warn(priv->qidev, "%s alg allocation failed\n",
2769 				 alg->driver_name);
2770 			continue;
2771 		}
2772 
2773 		err = crypto_register_alg(&t_alg->crypto_alg);
2774 		if (err) {
2775 			dev_warn(priv->qidev, "%s alg registration failed\n",
2776 				 t_alg->crypto_alg.cra_driver_name);
2777 			kfree(t_alg);
2778 			continue;
2779 		}
2780 
2781 		list_add_tail(&t_alg->entry, &alg_list);
2782 		registered = true;
2783 	}
2784 
2785 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2786 		struct caam_aead_alg *t_alg = driver_aeads + i;
2787 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
2788 				 OP_ALG_ALGSEL_MASK;
2789 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
2790 				 OP_ALG_ALGSEL_MASK;
2791 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
2792 
2793 		/* Skip DES algorithms if not supported by device */
2794 		if (!des_inst &&
2795 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
2796 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
2797 			continue;
2798 
2799 		/* Skip AES algorithms if not supported by device */
2800 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2801 			continue;
2802 
2803 		/*
2804 		 * Skip GCM-based algorithms on low-power (LP) AES
2805 		 * blocks, which do not support GCM.
2806 		 */
2807 		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
2808 		    (alg_aai == OP_ALG_AAI_GCM))
2809 			continue;
2810 
2811 		/*
2812 		 * Skip algorithms requiring message digests
2813 		 * if MD or MD size is not supported by device.
2814 		 */
2815 		if (c2_alg_sel &&
2816 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2817 			continue;
2818 
2819 		caam_aead_alg_init(t_alg);
2820 
2821 		err = crypto_register_aead(&t_alg->aead);
2822 		if (err) {
2823 			pr_warn("%s alg registration failed\n",
2824 				t_alg->aead.base.cra_driver_name);
2825 			continue;
2826 		}
2827 
2828 		t_alg->registered = true;
2829 		registered = true;
2830 	}
2831 
2832 	if (registered)
2833 		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
2834 
2835 	return err;
2836 }
2837 
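/*
 * Illustrative check after a successful insmod: the algorithms appear
 * in /proc/crypto with driver names ending in "-caam-qi", e.g.:
 *
 *	$ grep -B1 -A2 caam-qi /proc/crypto
 */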
2838 module_init(caam_qi_algapi_init);
2839 module_exit(caam_qi_algapi_exit);
2840 
2841 MODULE_LICENSE("GPL");
2842 MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
2843 MODULE_AUTHOR("Freescale Semiconductor");
2844