xref: /linux/drivers/crypto/caam/caamalg_qi.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /*
2  * Freescale FSL CAAM support for crypto API over the QI backend.
3  * Based on caamalg.c
4  *
5  * Copyright 2013-2016 Freescale Semiconductor, Inc.
6  * Copyright 2016-2017 NXP
7  */
8 
9 #include "compat.h"
10 
11 #include "regs.h"
12 #include "intern.h"
13 #include "desc_constr.h"
14 #include "error.h"
15 #include "sg_sw_sec4.h"
16 #include "sg_sw_qm.h"
17 #include "key_gen.h"
18 #include "qi.h"
19 #include "jr.h"
20 #include "caamalg_desc.h"
21 
22 /*
23  * crypto alg
24  */
25 #define CAAM_CRA_PRIORITY		2000
26 /* max key is sum of AES_MAX_KEY_SIZE and max split key size */
27 #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
28 					 SHA512_DIGEST_SIZE * 2)
29 
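/*
 * Size the shared descriptor buffers for the worst case: the givencrypt
 * template (presumably the largest of the QI AEAD templates) with the full
 * key material carried inline. DESC_MAX_USED_LEN expresses the same budget
 * in CAAM command words of CAAM_CMD_SZ bytes each.
 */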
30 #define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
31 					 CAAM_MAX_KEY_SIZE)
32 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
33 
34 struct caam_alg_entry {
35 	int class1_alg_type;
36 	int class2_alg_type;
37 	bool rfc3686;
38 	bool geniv;
39 };
40 
41 struct caam_aead_alg {
42 	struct aead_alg aead;
43 	struct caam_alg_entry caam;
44 	bool registered;
45 };
46 
47 /*
48  * per-session context
49  */
50 struct caam_ctx {
51 	struct device *jrdev;
52 	u32 sh_desc_enc[DESC_MAX_USED_LEN];
53 	u32 sh_desc_dec[DESC_MAX_USED_LEN];
54 	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
55 	u8 key[CAAM_MAX_KEY_SIZE];
56 	dma_addr_t key_dma;
57 	struct alginfo adata;
58 	struct alginfo cdata;
59 	unsigned int authsize;
60 	struct device *qidev;
61 	spinlock_t lock;	/* Protects concurrent init of the driver contexts */
62 	struct caam_drv_ctx *drv_ctx[NUM_OP];
63 };
64 
65 static int aead_set_sh_desc(struct crypto_aead *aead)
66 {
67 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
68 						 typeof(*alg), aead);
69 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
70 	unsigned int ivsize = crypto_aead_ivsize(aead);
71 	u32 ctx1_iv_off = 0;
72 	u32 *nonce = NULL;
73 	unsigned int data_len[2];
74 	u32 inl_mask;
75 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
76 			       OP_ALG_AAI_CTR_MOD128);
77 	const bool is_rfc3686 = alg->caam.rfc3686;
78 
79 	if (!ctx->cdata.keylen || !ctx->authsize)
80 		return 0;
81 
82 	/*
83 	 * AES-CTR needs to load the IV in the CONTEXT1 register
84 	 * at an offset of 128 bits (16 bytes):
85 	 * CONTEXT1[255:128] = IV
86 	 */
87 	if (ctr_mode)
88 		ctx1_iv_off = 16;
89 
90 	/*
91 	 * RFC3686 specific:
92 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
93 	 */
94 	if (is_rfc3686) {
95 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
96 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
97 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
98 	}
99 
100 	data_len[0] = ctx->adata.keylen_pad;
101 	data_len[1] = ctx->cdata.keylen;
102 
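	/*
	 * The desc_inline_query() calls below decide which keys still fit in
	 * the shared descriptor: bit i of inl_mask is set when data_len[i]
	 * can be inlined, so bit 0 governs the split auth key and bit 1 the
	 * cipher key; a key that does not fit is referenced by DMA address
	 * instead.
	 */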
103 	if (alg->caam.geniv)
104 		goto skip_enc;
105 
106 	/* aead_encrypt shared descriptor */
107 	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
108 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
109 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
110 			      ARRAY_SIZE(data_len)) < 0)
111 		return -EINVAL;
112 
113 	if (inl_mask & 1)
114 		ctx->adata.key_virt = ctx->key;
115 	else
116 		ctx->adata.key_dma = ctx->key_dma;
117 
118 	if (inl_mask & 2)
119 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
120 	else
121 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
122 
123 	ctx->adata.key_inline = !!(inl_mask & 1);
124 	ctx->cdata.key_inline = !!(inl_mask & 2);
125 
126 	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
127 			       ivsize, ctx->authsize, is_rfc3686, nonce,
128 			       ctx1_iv_off, true);
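	/*
	 * Here and in the decap/givencap constructions below, the trailing
	 * 'true' is the is_qi flag of the caamalg_desc helpers, selecting
	 * the QI-specific variant of the shared descriptor.
	 */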
129 
130 skip_enc:
131 	/* aead_decrypt shared descriptor */
132 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
133 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
134 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
135 			      ARRAY_SIZE(data_len)) < 0)
136 		return -EINVAL;
137 
138 	if (inl_mask & 1)
139 		ctx->adata.key_virt = ctx->key;
140 	else
141 		ctx->adata.key_dma = ctx->key_dma;
142 
143 	if (inl_mask & 2)
144 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
145 	else
146 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
147 
148 	ctx->adata.key_inline = !!(inl_mask & 1);
149 	ctx->cdata.key_inline = !!(inl_mask & 2);
150 
151 	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
152 			       ivsize, ctx->authsize, alg->caam.geniv,
153 			       is_rfc3686, nonce, ctx1_iv_off, true);
154 
155 	if (!alg->caam.geniv)
156 		goto skip_givenc;
157 
158 	/* aead_givencrypt shared descriptor */
159 	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
160 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
161 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
162 			      ARRAY_SIZE(data_len)) < 0)
163 		return -EINVAL;
164 
165 	if (inl_mask & 1)
166 		ctx->adata.key_virt = ctx->key;
167 	else
168 		ctx->adata.key_dma = ctx->key_dma;
169 
170 	if (inl_mask & 2)
171 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
172 	else
173 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
174 
175 	ctx->adata.key_inline = !!(inl_mask & 1);
176 	ctx->cdata.key_inline = !!(inl_mask & 2);
177 
178 	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
179 				  ivsize, ctx->authsize, is_rfc3686, nonce,
180 				  ctx1_iv_off, true);
181 
182 skip_givenc:
183 	return 0;
184 }
185 
186 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
187 {
188 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
189 
190 	ctx->authsize = authsize;
191 	aead_set_sh_desc(authenc);
192 
193 	return 0;
194 }
195 
196 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
197 		       unsigned int keylen)
198 {
199 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
200 	struct device *jrdev = ctx->jrdev;
201 	struct crypto_authenc_keys keys;
202 	int ret = 0;
203 
204 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
205 		goto badkey;
206 
207 #ifdef DEBUG
208 	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
209 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
210 		keys.authkeylen);
211 	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
212 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
213 #endif
214 
215 	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
216 			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
217 			    keys.enckeylen);
218 	if (ret)
219 		goto badkey;
220 
221 	/* append encryption key to auth split key */
222 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
223 	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
224 				   keys.enckeylen, DMA_TO_DEVICE);
225 #ifdef DEBUG
226 	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
227 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
228 		       ctx->adata.keylen_pad + keys.enckeylen, 1);
229 #endif
230 
231 	ctx->cdata.keylen = keys.enckeylen;
232 
233 	ret = aead_set_sh_desc(aead);
234 	if (ret)
235 		goto badkey;
236 
237 	/* Now update the driver contexts with the new shared descriptor */
238 	if (ctx->drv_ctx[ENCRYPT]) {
239 		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
240 					  ctx->sh_desc_enc);
241 		if (ret) {
242 			dev_err(jrdev, "driver enc context update failed\n");
243 			goto badkey;
244 		}
245 	}
246 
247 	if (ctx->drv_ctx[DECRYPT]) {
248 		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
249 					  ctx->sh_desc_dec);
250 		if (ret) {
251 			dev_err(jrdev, "driver dec context update failed\n");
252 			goto badkey;
253 		}
254 	}
255 
256 	return ret;
257 badkey:
258 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
259 	return -EINVAL;
260 }
261 
262 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
263 			     const u8 *key, unsigned int keylen)
264 {
265 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
266 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
267 	const char *alg_name = crypto_tfm_alg_name(tfm);
268 	struct device *jrdev = ctx->jrdev;
269 	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
270 	u32 ctx1_iv_off = 0;
271 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
272 			       OP_ALG_AAI_CTR_MOD128);
273 	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
274 	int ret = 0;
275 
276 	memcpy(ctx->key, key, keylen);
277 #ifdef DEBUG
278 	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
279 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
280 #endif
281 	/*
282 	 * AES-CTR needs to load the IV in the CONTEXT1 register
283 	 * at an offset of 128 bits (16 bytes):
284 	 * CONTEXT1[255:128] = IV
285 	 */
286 	if (ctr_mode)
287 		ctx1_iv_off = 16;
288 
289 	/*
290 	 * RFC3686 specific:
291 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
292 	 *	| *key = {KEY, NONCE}
293 	 */
294 	if (is_rfc3686) {
295 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
296 		keylen -= CTR_RFC3686_NONCE_SIZE;
297 	}
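	/*
	 * The nonce (last CTR_RFC3686_NONCE_SIZE bytes of the key material
	 * copied above) stays in ctx->key right after the AES key proper;
	 * only keylen is trimmed, so cdata describes the AES key alone while
	 * the descriptor constructors can still reach the trailing nonce.
	 */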
298 
299 	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
300 	ctx->cdata.keylen = keylen;
301 	ctx->cdata.key_virt = ctx->key;
302 	ctx->cdata.key_inline = true;
303 
304 	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
305 	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
306 				     is_rfc3686, ctx1_iv_off);
307 	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
308 				     is_rfc3686, ctx1_iv_off);
309 	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
310 					ivsize, is_rfc3686, ctx1_iv_off);
311 
312 	/* Now update the driver contexts with the new shared descriptor */
313 	if (ctx->drv_ctx[ENCRYPT]) {
314 		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
315 					  ctx->sh_desc_enc);
316 		if (ret) {
317 			dev_err(jrdev, "driver enc context update failed\n");
318 			goto badkey;
319 		}
320 	}
321 
322 	if (ctx->drv_ctx[DECRYPT]) {
323 		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
324 					  ctx->sh_desc_dec);
325 		if (ret) {
326 			dev_err(jrdev, "driver dec context update failed\n");
327 			goto badkey;
328 		}
329 	}
330 
331 	if (ctx->drv_ctx[GIVENCRYPT]) {
332 		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
333 					  ctx->sh_desc_givenc);
334 		if (ret) {
335 			dev_err(jrdev, "driver givenc context update failed\n");
336 			goto badkey;
337 		}
338 	}
339 
340 	return ret;
341 badkey:
342 	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
343 	return -EINVAL;
344 }
345 
346 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
347 				 const u8 *key, unsigned int keylen)
348 {
349 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
350 	struct device *jrdev = ctx->jrdev;
351 	int ret = 0;
352 
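	/* XTS keys are two concatenated AES keys, hence the 2 * key sizes */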
353 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
354 		crypto_ablkcipher_set_flags(ablkcipher,
355 					    CRYPTO_TFM_RES_BAD_KEY_LEN);
356 		dev_err(jrdev, "key size mismatch\n");
357 		return -EINVAL;
358 	}
359 
360 	memcpy(ctx->key, key, keylen);
361 	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
362 	ctx->cdata.keylen = keylen;
363 	ctx->cdata.key_virt = ctx->key;
364 	ctx->cdata.key_inline = true;
365 
366 	/* xts ablkcipher encrypt, decrypt shared descriptors */
367 	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
368 	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
369 
370 	/* Now update the driver contexts with the new shared descriptor */
371 	if (ctx->drv_ctx[ENCRYPT]) {
372 		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
373 					  ctx->sh_desc_enc);
374 		if (ret) {
375 			dev_err(jrdev, "driver enc context update failed\n");
376 			goto badkey;
377 		}
378 	}
379 
380 	if (ctx->drv_ctx[DECRYPT]) {
381 		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
382 					  ctx->sh_desc_dec);
383 		if (ret) {
384 			dev_err(jrdev, "driver dec context update failed\n");
385 			goto badkey;
386 		}
387 	}
388 
389 	return ret;
390 badkey:
391 	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
392 	return -EINVAL;
393 }
394 
395 /*
396  * aead_edesc - s/w-extended aead descriptor
397  * @src_nents: number of segments in input scatterlist
398  * @dst_nents: number of segments in output scatterlist
399  * @iv_dma: dma address of iv, used for contiguity checks and in the link table
400  * @qm_sg_bytes: length of dma mapped h/w link table
401  * @qm_sg_dma: bus physical mapped address of h/w link table
402  * @assoclen_dma: bus physical mapped address of req->assoclen
403  * @drv_req: driver-specific request structure
404  * @sgt: the h/w link table
405  */
406 struct aead_edesc {
407 	int src_nents;
408 	int dst_nents;
409 	dma_addr_t iv_dma;
410 	int qm_sg_bytes;
411 	dma_addr_t qm_sg_dma;
412 	dma_addr_t assoclen_dma;
413 	struct caam_drv_req drv_req;
414 	struct qm_sg_entry sgt[];
415 };
416 
417 /*
418  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
419  * @src_nents: number of segments in input scatterlist
420  * @dst_nents: number of segments in output scatterlist
421  * @iv_dma: dma address of iv, used for contiguity checks and in the link table
422  * @qm_sg_bytes: length of dma mapped h/w link table
423  * @qm_sg_dma: bus physical mapped address of h/w link table
424  * @drv_req: driver-specific request structure
425  * @sgt: the h/w link table
426  */
427 struct ablkcipher_edesc {
428 	int src_nents;
429 	int dst_nents;
430 	dma_addr_t iv_dma;
431 	int qm_sg_bytes;
432 	dma_addr_t qm_sg_dma;
433 	struct caam_drv_req drv_req;
434 	struct qm_sg_entry sgt[];
435 };
436 
437 static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
438 					enum optype type)
439 {
440 	/*
441 	 * This function is called on the fast path with values of 'type'
442 	 * known at compile time. Invalid arguments are not expected and
443 	 * thus no checks are made.
444 	 */
445 	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
446 	u32 *desc;
447 
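	/*
	 * Double-checked locking: the unlocked read above is the fast path;
	 * ctx->lock only serializes the one-time creation of each
	 * per-operation driver context.
	 */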
448 	if (unlikely(!drv_ctx)) {
449 		spin_lock(&ctx->lock);
450 
451 		/* Read again to check if some other core initialized drv_ctx */
452 		drv_ctx = ctx->drv_ctx[type];
453 		if (!drv_ctx) {
454 			int cpu;
455 
456 			if (type == ENCRYPT)
457 				desc = ctx->sh_desc_enc;
458 			else if (type == DECRYPT)
459 				desc = ctx->sh_desc_dec;
460 			else /* (type == GIVENCRYPT) */
461 				desc = ctx->sh_desc_givenc;
462 
463 			cpu = smp_processor_id();
464 			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
465 			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
466 				drv_ctx->op_type = type;
467 
468 			ctx->drv_ctx[type] = drv_ctx;
469 		}
470 
471 		spin_unlock(&ctx->lock);
472 	}
473 
474 	return drv_ctx;
475 }
476 
477 static void caam_unmap(struct device *dev, struct scatterlist *src,
478 		       struct scatterlist *dst, int src_nents,
479 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
480 		       enum optype op_type, dma_addr_t qm_sg_dma,
481 		       int qm_sg_bytes)
482 {
483 	if (dst != src) {
484 		if (src_nents)
485 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
486 		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
487 	} else {
488 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
489 	}
490 
491 	if (iv_dma)
492 		dma_unmap_single(dev, iv_dma, ivsize,
493 				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
494 							 DMA_TO_DEVICE);
495 	if (qm_sg_bytes)
496 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
497 }
498 
499 static void aead_unmap(struct device *dev,
500 		       struct aead_edesc *edesc,
501 		       struct aead_request *req)
502 {
503 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
504 	int ivsize = crypto_aead_ivsize(aead);
505 
506 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
507 		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
508 		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
509 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
510 }
511 
512 static void ablkcipher_unmap(struct device *dev,
513 			     struct ablkcipher_edesc *edesc,
514 			     struct ablkcipher_request *req)
515 {
516 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
517 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
518 
519 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
520 		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
521 		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
522 }
523 
524 static void aead_done(struct caam_drv_req *drv_req, u32 status)
525 {
526 	struct device *qidev;
527 	struct aead_edesc *edesc;
528 	struct aead_request *aead_req = drv_req->app_ctx;
529 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
530 	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
531 	int ecode = 0;
532 
533 	qidev = caam_ctx->qidev;
534 
535 	if (unlikely(status)) {
536 		caam_jr_strstatus(qidev, status);
537 		ecode = -EIO;
538 	}
539 
540 	edesc = container_of(drv_req, typeof(*edesc), drv_req);
541 	aead_unmap(qidev, edesc, aead_req);
542 
543 	aead_request_complete(aead_req, ecode);
544 	qi_cache_free(edesc);
545 }
546 
547 /*
548  * allocate and map the aead extended descriptor
549  */
550 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
551 					   bool encrypt)
552 {
553 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
554 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
555 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
556 						 typeof(*alg), aead);
557 	struct device *qidev = ctx->qidev;
558 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
559 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
560 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
561 	struct aead_edesc *edesc;
562 	dma_addr_t qm_sg_dma, iv_dma = 0;
563 	int ivsize = 0;
564 	unsigned int authsize = ctx->authsize;
565 	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
566 	int in_len, out_len;
567 	struct qm_sg_entry *sg_table, *fd_sgt;
568 	struct caam_drv_ctx *drv_ctx;
569 	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
570 
571 	drv_ctx = get_drv_ctx(ctx, op_type);
572 	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
573 		return (struct aead_edesc *)drv_ctx;
574 
575 	/* allocate space for base edesc, hw desc commands and link tables */
576 	edesc = qi_cache_alloc(GFP_DMA | flags);
577 	if (unlikely(!edesc)) {
578 		dev_err(qidev, "could not allocate extended descriptor\n");
579 		return ERR_PTR(-ENOMEM);
580 	}
581 
582 	if (likely(req->src == req->dst)) {
583 		src_nents = sg_nents_for_len(req->src, req->assoclen +
584 					     req->cryptlen +
585 						(encrypt ? authsize : 0));
586 		if (unlikely(src_nents < 0)) {
587 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
588 				req->assoclen + req->cryptlen +
589 				(encrypt ? authsize : 0));
590 			qi_cache_free(edesc);
591 			return ERR_PTR(src_nents);
592 		}
593 
594 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
595 					      DMA_BIDIRECTIONAL);
596 		if (unlikely(!mapped_src_nents)) {
597 			dev_err(qidev, "unable to map source\n");
598 			qi_cache_free(edesc);
599 			return ERR_PTR(-ENOMEM);
600 		}
601 	} else {
602 		src_nents = sg_nents_for_len(req->src, req->assoclen +
603 					     req->cryptlen);
604 		if (unlikely(src_nents < 0)) {
605 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
606 				req->assoclen + req->cryptlen);
607 			qi_cache_free(edesc);
608 			return ERR_PTR(src_nents);
609 		}
610 
611 		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
612 					     req->cryptlen +
613 					     (encrypt ? authsize :
614 							(-authsize)));
615 		if (unlikely(dst_nents < 0)) {
616 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
617 				req->assoclen + req->cryptlen +
618 				(encrypt ? authsize : (-authsize)));
619 			qi_cache_free(edesc);
620 			return ERR_PTR(dst_nents);
621 		}
622 
623 		if (src_nents) {
624 			mapped_src_nents = dma_map_sg(qidev, req->src,
625 						      src_nents, DMA_TO_DEVICE);
626 			if (unlikely(!mapped_src_nents)) {
627 				dev_err(qidev, "unable to map source\n");
628 				qi_cache_free(edesc);
629 				return ERR_PTR(-ENOMEM);
630 			}
631 		} else {
632 			mapped_src_nents = 0;
633 		}
634 
635 		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
636 					      DMA_FROM_DEVICE);
637 		if (unlikely(!mapped_dst_nents)) {
638 			dev_err(qidev, "unable to map destination\n");
639 			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
640 			qi_cache_free(edesc);
641 			return ERR_PTR(-ENOMEM);
642 		}
643 	}
644 
645 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
646 		ivsize = crypto_aead_ivsize(aead);
647 		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
648 		if (dma_mapping_error(qidev, iv_dma)) {
649 			dev_err(qidev, "unable to map IV\n");
650 			caam_unmap(qidev, req->src, req->dst, src_nents,
651 				   dst_nents, 0, 0, op_type, 0, 0);
652 			qi_cache_free(edesc);
653 			return ERR_PTR(-ENOMEM);
654 		}
655 	}
656 
657 	/*
658 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
659 	 * Input is not contiguous.
660 	 */
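	/*
	 * Entry budget: one for the DMA-mapped assoclen, one for the IV when
	 * present, all mapped source segments, plus destination segments
	 * only when the output is scattered (a single dst segment is
	 * referenced directly from the frame descriptor instead).
	 */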
661 	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
662 		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
663 	sg_table = &edesc->sgt[0];
664 	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
665 
666 	edesc->src_nents = src_nents;
667 	edesc->dst_nents = dst_nents;
668 	edesc->iv_dma = iv_dma;
669 	edesc->drv_req.app_ctx = req;
670 	edesc->drv_req.cbk = aead_done;
671 	edesc->drv_req.drv_ctx = drv_ctx;
672 
673 	edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4,
674 					     DMA_TO_DEVICE);
675 	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
676 		dev_err(qidev, "unable to map assoclen\n");
677 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
678 			   iv_dma, ivsize, op_type, 0, 0);
679 		qi_cache_free(edesc);
680 		return ERR_PTR(-ENOMEM);
681 	}
682 
683 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
684 	qm_sg_index++;
685 	if (ivsize) {
686 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
687 		qm_sg_index++;
688 	}
689 	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
690 	qm_sg_index += mapped_src_nents;
691 
692 	if (mapped_dst_nents > 1)
693 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
694 				 qm_sg_index, 0);
695 
696 	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
697 	if (dma_mapping_error(qidev, qm_sg_dma)) {
698 		dev_err(qidev, "unable to map S/G table\n");
699 		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
700 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
701 			   iv_dma, ivsize, op_type, 0, 0);
702 		qi_cache_free(edesc);
703 		return ERR_PTR(-ENOMEM);
704 	}
705 
706 	edesc->qm_sg_dma = qm_sg_dma;
707 	edesc->qm_sg_bytes = qm_sg_bytes;
708 
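	/*
	 * Input spans the 4-byte assoclen word mapped above, the IV (when
	 * present) and the assoclen + cryptlen payload; output grows by the
	 * ICV on encryption and shrinks by it on decryption.
	 */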
709 	out_len = req->assoclen + req->cryptlen +
710 		  (encrypt ? ctx->authsize : (-ctx->authsize));
711 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
712 
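	/* QI compound frames: fd_sgt[0] describes the output, fd_sgt[1] the input */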
713 	fd_sgt = &edesc->drv_req.fd_sgt[0];
714 	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
715 
716 	if (req->dst == req->src) {
717 		if (mapped_src_nents == 1)
718 			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
719 					 out_len, 0);
720 		else
721 			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
722 					     (1 + !!ivsize) * sizeof(*sg_table),
723 					     out_len, 0);
724 	} else if (mapped_dst_nents == 1) {
725 		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
726 				 0);
727 	} else {
728 		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
729 				     qm_sg_index, out_len, 0);
730 	}
731 
732 	return edesc;
733 }
734 
735 static inline int aead_crypt(struct aead_request *req, bool encrypt)
736 {
737 	struct aead_edesc *edesc;
738 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
739 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
740 	int ret;
741 
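	/*
	 * caam_congested is raised by the QI backend when its congestion
	 * group fills up; back off with -EAGAIN rather than enqueue more
	 * work.
	 */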
742 	if (unlikely(caam_congested))
743 		return -EAGAIN;
744 
745 	/* allocate extended descriptor */
746 	edesc = aead_edesc_alloc(req, encrypt);
747 	if (IS_ERR_OR_NULL(edesc))
748 		return PTR_ERR(edesc);
749 
750 	/* Create and submit job descriptor */
751 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
752 	if (!ret) {
753 		ret = -EINPROGRESS;
754 	} else {
755 		aead_unmap(ctx->qidev, edesc, req);
756 		qi_cache_free(edesc);
757 	}
758 
759 	return ret;
760 }
761 
762 static int aead_encrypt(struct aead_request *req)
763 {
764 	return aead_crypt(req, true);
765 }
766 
767 static int aead_decrypt(struct aead_request *req)
768 {
769 	return aead_crypt(req, false);
770 }
771 
772 static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
773 {
774 	struct ablkcipher_edesc *edesc;
775 	struct ablkcipher_request *req = drv_req->app_ctx;
776 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
777 	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
778 	struct device *qidev = caam_ctx->qidev;
779 #ifdef DEBUG
780 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
781 
782 	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
783 #endif
784 
785 	edesc = container_of(drv_req, typeof(*edesc), drv_req);
786 
787 	if (status)
788 		caam_jr_strstatus(qidev, status);
789 
790 #ifdef DEBUG
791 	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
792 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
793 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
794 	dbg_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
795 		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
796 		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
797 #endif
798 
799 	ablkcipher_unmap(qidev, edesc, req);
800 	qi_cache_free(edesc);
801 
802 	ablkcipher_request_complete(req, status);
803 }
804 
805 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
806 						       *req, bool encrypt)
807 {
808 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
809 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
810 	struct device *qidev = ctx->qidev;
811 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
812 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
813 		       GFP_KERNEL : GFP_ATOMIC;
814 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
815 	struct ablkcipher_edesc *edesc;
816 	dma_addr_t iv_dma;
817 	bool in_contig;
818 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
819 	int dst_sg_idx, qm_sg_ents;
820 	struct qm_sg_entry *sg_table, *fd_sgt;
821 	struct caam_drv_ctx *drv_ctx;
822 	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
823 
824 	drv_ctx = get_drv_ctx(ctx, op_type);
825 	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
826 		return (struct ablkcipher_edesc *)drv_ctx;
827 
828 	src_nents = sg_nents_for_len(req->src, req->nbytes);
829 	if (unlikely(src_nents < 0)) {
830 		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
831 			req->nbytes);
832 		return ERR_PTR(src_nents);
833 	}
834 
835 	if (unlikely(req->src != req->dst)) {
836 		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
837 		if (unlikely(dst_nents < 0)) {
838 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
839 				req->nbytes);
840 			return ERR_PTR(dst_nents);
841 		}
842 
843 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
844 					      DMA_TO_DEVICE);
845 		if (unlikely(!mapped_src_nents)) {
846 			dev_err(qidev, "unable to map source\n");
847 			return ERR_PTR(-ENOMEM);
848 		}
849 
850 		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
851 					      DMA_FROM_DEVICE);
852 		if (unlikely(!mapped_dst_nents)) {
853 			dev_err(qidev, "unable to map destination\n");
854 			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
855 			return ERR_PTR(-ENOMEM);
856 		}
857 	} else {
858 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
859 					      DMA_BIDIRECTIONAL);
860 		if (unlikely(!mapped_src_nents)) {
861 			dev_err(qidev, "unable to map source\n");
862 			return ERR_PTR(-ENOMEM);
863 		}
864 	}
865 
866 	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
867 	if (dma_mapping_error(qidev, iv_dma)) {
868 		dev_err(qidev, "unable to map IV\n");
869 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
870 			   0, 0, 0, 0);
871 		return ERR_PTR(-ENOMEM);
872 	}
873 
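	/*
	 * If the lone source segment happens to sit right after the IV in
	 * DMA space, the input can be described by a single contiguous entry
	 * and no input S/G table is needed.
	 */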
874 	if (mapped_src_nents == 1 &&
875 	    iv_dma + ivsize == sg_dma_address(req->src)) {
876 		in_contig = true;
877 		qm_sg_ents = 0;
878 	} else {
879 		in_contig = false;
880 		qm_sg_ents = 1 + mapped_src_nents;
881 	}
882 	dst_sg_idx = qm_sg_ents;
883 
884 	/* allocate space for base edesc and link tables */
885 	edesc = qi_cache_alloc(GFP_DMA | flags);
886 	if (unlikely(!edesc)) {
887 		dev_err(qidev, "could not allocate extended descriptor\n");
888 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
889 			   iv_dma, ivsize, op_type, 0, 0);
890 		return ERR_PTR(-ENOMEM);
891 	}
892 
893 	edesc->src_nents = src_nents;
894 	edesc->dst_nents = dst_nents;
895 	edesc->iv_dma = iv_dma;
896 	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
897 	sg_table = &edesc->sgt[0];
898 	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
899 	edesc->drv_req.app_ctx = req;
900 	edesc->drv_req.cbk = ablkcipher_done;
901 	edesc->drv_req.drv_ctx = drv_ctx;
902 
903 	if (!in_contig) {
904 		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
905 		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
906 	}
907 
908 	if (mapped_dst_nents > 1)
909 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
910 				 dst_sg_idx, 0);
911 
912 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
913 					  DMA_TO_DEVICE);
914 	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
915 		dev_err(qidev, "unable to map S/G table\n");
916 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
917 			   iv_dma, ivsize, op_type, 0, 0);
918 		qi_cache_free(edesc);
919 		return ERR_PTR(-ENOMEM);
920 	}
921 
922 	fd_sgt = &edesc->drv_req.fd_sgt[0];
923 
924 	if (!in_contig)
925 		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
926 					  ivsize + req->nbytes, 0);
927 	else
928 		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
929 				      0);
930 
931 	if (req->src == req->dst) {
932 		if (!in_contig)
933 			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
934 					     sizeof(*sg_table), req->nbytes, 0);
935 		else
936 			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
937 					 req->nbytes, 0);
938 	} else if (mapped_dst_nents > 1) {
939 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
940 				     sizeof(*sg_table), req->nbytes, 0);
941 	} else {
942 		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
943 				 req->nbytes, 0);
944 	}
945 
946 	return edesc;
947 }
948 
949 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
950 	struct skcipher_givcrypt_request *creq)
951 {
952 	struct ablkcipher_request *req = &creq->creq;
953 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
954 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
955 	struct device *qidev = ctx->qidev;
956 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
957 					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
958 		       GFP_KERNEL : GFP_ATOMIC;
959 	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
960 	struct ablkcipher_edesc *edesc;
961 	dma_addr_t iv_dma;
962 	bool out_contig;
963 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
964 	struct qm_sg_entry *sg_table, *fd_sgt;
965 	int dst_sg_idx, qm_sg_ents;
966 	struct caam_drv_ctx *drv_ctx;
967 
968 	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
969 	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
970 		return (struct ablkcipher_edesc *)drv_ctx;
971 
972 	src_nents = sg_nents_for_len(req->src, req->nbytes);
973 	if (unlikely(src_nents < 0)) {
974 		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
975 			req->nbytes);
976 		return ERR_PTR(src_nents);
977 	}
978 
979 	if (unlikely(req->src != req->dst)) {
980 		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
981 		if (unlikely(dst_nents < 0)) {
982 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
983 				req->nbytes);
984 			return ERR_PTR(dst_nents);
985 		}
986 
987 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
988 					      DMA_TO_DEVICE);
989 		if (unlikely(!mapped_src_nents)) {
990 			dev_err(qidev, "unable to map source\n");
991 			return ERR_PTR(-ENOMEM);
992 		}
993 
994 		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
995 					      DMA_FROM_DEVICE);
996 		if (unlikely(!mapped_dst_nents)) {
997 			dev_err(qidev, "unable to map destination\n");
998 			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
999 			return ERR_PTR(-ENOMEM);
1000 		}
1001 	} else {
1002 		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1003 					      DMA_BIDIRECTIONAL);
1004 		if (unlikely(!mapped_src_nents)) {
1005 			dev_err(qidev, "unable to map source\n");
1006 			return ERR_PTR(-ENOMEM);
1007 		}
1008 
1009 		dst_nents = src_nents;
1010 		mapped_dst_nents = mapped_src_nents;
1011 	}
1012 
1013 	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
1014 	if (dma_mapping_error(qidev, iv_dma)) {
1015 		dev_err(qidev, "unable to map IV\n");
1016 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1017 			   0, 0, 0, 0);
1018 		return ERR_PTR(-ENOMEM);
1019 	}
1020 
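	/*
	 * Mirror of the input-side contiguity check: if the IV to be
	 * generated immediately precedes the lone destination segment, the
	 * output needs no S/G table entries of its own.
	 */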
1021 	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1022 	dst_sg_idx = qm_sg_ents;
1023 	if (mapped_dst_nents == 1 &&
1024 	    iv_dma + ivsize == sg_dma_address(req->dst)) {
1025 		out_contig = true;
1026 	} else {
1027 		out_contig = false;
1028 		qm_sg_ents += 1 + mapped_dst_nents;
1029 	}
1030 
1031 	/* allocate space for base edesc and link tables */
1032 	edesc = qi_cache_alloc(GFP_DMA | flags);
1033 	if (unlikely(!edesc)) {
1034 		dev_err(qidev, "could not allocate extended descriptor\n");
1035 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1036 			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
1037 		return ERR_PTR(-ENOMEM);
1038 	}
1039 
1040 	edesc->src_nents = src_nents;
1041 	edesc->dst_nents = dst_nents;
1042 	edesc->iv_dma = iv_dma;
1043 	sg_table = &edesc->sgt[0];
1044 	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1045 	edesc->drv_req.app_ctx = req;
1046 	edesc->drv_req.cbk = ablkcipher_done;
1047 	edesc->drv_req.drv_ctx = drv_ctx;
1048 
1049 	if (mapped_src_nents > 1)
1050 		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
1051 
1052 	if (!out_contig) {
1053 		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1054 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1055 				 dst_sg_idx + 1, 0);
1056 	}
1057 
1058 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1059 					  DMA_TO_DEVICE);
1060 	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1061 		dev_err(qidev, "unable to map S/G table\n");
1062 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1063 			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
1064 		qi_cache_free(edesc);
1065 		return ERR_PTR(-ENOMEM);
1066 	}
1067 
1068 	fd_sgt = &edesc->drv_req.fd_sgt[0];
1069 
1070 	if (mapped_src_nents > 1)
1071 		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
1072 				     0);
1073 	else
1074 		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
1075 				 req->nbytes, 0);
1076 
1077 	if (!out_contig)
1078 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1079 				     sizeof(*sg_table), ivsize + req->nbytes,
1080 				     0);
1081 	else
1082 		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1083 				 ivsize + req->nbytes, 0);
1084 
1085 	return edesc;
1086 }
1087 
1088 static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1089 {
1090 	struct ablkcipher_edesc *edesc;
1091 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1092 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1093 	int ret;
1094 
1095 	if (unlikely(caam_congested))
1096 		return -EAGAIN;
1097 
1098 	/* allocate extended descriptor */
1099 	edesc = ablkcipher_edesc_alloc(req, encrypt);
1100 	if (IS_ERR(edesc))
1101 		return PTR_ERR(edesc);
1102 
1103 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1104 	if (!ret) {
1105 		ret = -EINPROGRESS;
1106 	} else {
1107 		ablkcipher_unmap(ctx->qidev, edesc, req);
1108 		qi_cache_free(edesc);
1109 	}
1110 
1111 	return ret;
1112 }
1113 
1114 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1115 {
1116 	return ablkcipher_crypt(req, true);
1117 }
1118 
1119 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1120 {
1121 	return ablkcipher_crypt(req, false);
1122 }
1123 
1124 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1125 {
1126 	struct ablkcipher_request *req = &creq->creq;
1127 	struct ablkcipher_edesc *edesc;
1128 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1129 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1130 	int ret;
1131 
1132 	if (unlikely(caam_congested))
1133 		return -EAGAIN;
1134 
1135 	/* allocate extended descriptor */
1136 	edesc = ablkcipher_giv_edesc_alloc(creq);
1137 	if (IS_ERR(edesc))
1138 		return PTR_ERR(edesc);
1139 
1140 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1141 	if (!ret) {
1142 		ret = -EINPROGRESS;
1143 	} else {
1144 		ablkcipher_unmap(ctx->qidev, edesc, req);
1145 		qi_cache_free(edesc);
1146 	}
1147 
1148 	return ret;
1149 }
1150 
1151 #define template_ablkcipher	template_u.ablkcipher
1152 struct caam_alg_template {
1153 	char name[CRYPTO_MAX_ALG_NAME];
1154 	char driver_name[CRYPTO_MAX_ALG_NAME];
1155 	unsigned int blocksize;
1156 	u32 type;
1157 	union {
1158 		struct ablkcipher_alg ablkcipher;
1159 	} template_u;
1160 	u32 class1_alg_type;
1161 	u32 class2_alg_type;
1162 };
1163 
1164 static struct caam_alg_template driver_algs[] = {
1165 	/* ablkcipher descriptor */
1166 	{
1167 		.name = "cbc(aes)",
1168 		.driver_name = "cbc-aes-caam-qi",
1169 		.blocksize = AES_BLOCK_SIZE,
1170 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
1171 		.template_ablkcipher = {
1172 			.setkey = ablkcipher_setkey,
1173 			.encrypt = ablkcipher_encrypt,
1174 			.decrypt = ablkcipher_decrypt,
1175 			.givencrypt = ablkcipher_givencrypt,
1176 			.geniv = "<built-in>",
1177 			.min_keysize = AES_MIN_KEY_SIZE,
1178 			.max_keysize = AES_MAX_KEY_SIZE,
1179 			.ivsize = AES_BLOCK_SIZE,
1180 		},
1181 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1182 	},
1183 	{
1184 		.name = "cbc(des3_ede)",
1185 		.driver_name = "cbc-3des-caam-qi",
1186 		.blocksize = DES3_EDE_BLOCK_SIZE,
1187 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
1188 		.template_ablkcipher = {
1189 			.setkey = ablkcipher_setkey,
1190 			.encrypt = ablkcipher_encrypt,
1191 			.decrypt = ablkcipher_decrypt,
1192 			.givencrypt = ablkcipher_givencrypt,
1193 			.geniv = "<built-in>",
1194 			.min_keysize = DES3_EDE_KEY_SIZE,
1195 			.max_keysize = DES3_EDE_KEY_SIZE,
1196 			.ivsize = DES3_EDE_BLOCK_SIZE,
1197 		},
1198 		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1199 	},
1200 	{
1201 		.name = "cbc(des)",
1202 		.driver_name = "cbc-des-caam-qi",
1203 		.blocksize = DES_BLOCK_SIZE,
1204 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
1205 		.template_ablkcipher = {
1206 			.setkey = ablkcipher_setkey,
1207 			.encrypt = ablkcipher_encrypt,
1208 			.decrypt = ablkcipher_decrypt,
1209 			.givencrypt = ablkcipher_givencrypt,
1210 			.geniv = "<built-in>",
1211 			.min_keysize = DES_KEY_SIZE,
1212 			.max_keysize = DES_KEY_SIZE,
1213 			.ivsize = DES_BLOCK_SIZE,
1214 		},
1215 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1216 	},
1217 	{
1218 		.name = "ctr(aes)",
1219 		.driver_name = "ctr-aes-caam-qi",
1220 		.blocksize = 1,
1221 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1222 		.template_ablkcipher = {
1223 			.setkey = ablkcipher_setkey,
1224 			.encrypt = ablkcipher_encrypt,
1225 			.decrypt = ablkcipher_decrypt,
1226 			.geniv = "chainiv",
1227 			.min_keysize = AES_MIN_KEY_SIZE,
1228 			.max_keysize = AES_MAX_KEY_SIZE,
1229 			.ivsize = AES_BLOCK_SIZE,
1230 		},
1231 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1232 	},
1233 	{
1234 		.name = "rfc3686(ctr(aes))",
1235 		.driver_name = "rfc3686-ctr-aes-caam-qi",
1236 		.blocksize = 1,
1237 		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
1238 		.template_ablkcipher = {
1239 			.setkey = ablkcipher_setkey,
1240 			.encrypt = ablkcipher_encrypt,
1241 			.decrypt = ablkcipher_decrypt,
1242 			.givencrypt = ablkcipher_givencrypt,
1243 			.geniv = "<built-in>",
1244 			.min_keysize = AES_MIN_KEY_SIZE +
1245 				       CTR_RFC3686_NONCE_SIZE,
1246 			.max_keysize = AES_MAX_KEY_SIZE +
1247 				       CTR_RFC3686_NONCE_SIZE,
1248 			.ivsize = CTR_RFC3686_IV_SIZE,
1249 		},
1250 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1251 	},
1252 	{
1253 		.name = "xts(aes)",
1254 		.driver_name = "xts-aes-caam-qi",
1255 		.blocksize = AES_BLOCK_SIZE,
1256 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1257 		.template_ablkcipher = {
1258 			.setkey = xts_ablkcipher_setkey,
1259 			.encrypt = ablkcipher_encrypt,
1260 			.decrypt = ablkcipher_decrypt,
1261 			.geniv = "eseqiv",
1262 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1263 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1264 			.ivsize = AES_BLOCK_SIZE,
1265 		},
1266 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1267 	},
1268 };
1269 
1270 static struct caam_aead_alg driver_aeads[] = {
1271 	/* single-pass ipsec_esp descriptor */
1272 	{
1273 		.aead = {
1274 			.base = {
1275 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1276 				.cra_driver_name = "authenc-hmac-md5-"
1277 						   "cbc-aes-caam-qi",
1278 				.cra_blocksize = AES_BLOCK_SIZE,
1279 			},
1280 			.setkey = aead_setkey,
1281 			.setauthsize = aead_setauthsize,
1282 			.encrypt = aead_encrypt,
1283 			.decrypt = aead_decrypt,
1284 			.ivsize = AES_BLOCK_SIZE,
1285 			.maxauthsize = MD5_DIGEST_SIZE,
1286 		},
1287 		.caam = {
1288 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1289 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1290 					   OP_ALG_AAI_HMAC_PRECOMP,
1291 		}
1292 	},
1293 	{
1294 		.aead = {
1295 			.base = {
1296 				.cra_name = "echainiv(authenc(hmac(md5),"
1297 					    "cbc(aes)))",
1298 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1299 						   "cbc-aes-caam-qi",
1300 				.cra_blocksize = AES_BLOCK_SIZE,
1301 			},
1302 			.setkey = aead_setkey,
1303 			.setauthsize = aead_setauthsize,
1304 			.encrypt = aead_encrypt,
1305 			.decrypt = aead_decrypt,
1306 			.ivsize = AES_BLOCK_SIZE,
1307 			.maxauthsize = MD5_DIGEST_SIZE,
1308 		},
1309 		.caam = {
1310 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1311 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1312 					   OP_ALG_AAI_HMAC_PRECOMP,
1313 			.geniv = true,
1314 		}
1315 	},
1316 	{
1317 		.aead = {
1318 			.base = {
1319 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1320 				.cra_driver_name = "authenc-hmac-sha1-"
1321 						   "cbc-aes-caam-qi",
1322 				.cra_blocksize = AES_BLOCK_SIZE,
1323 			},
1324 			.setkey = aead_setkey,
1325 			.setauthsize = aead_setauthsize,
1326 			.encrypt = aead_encrypt,
1327 			.decrypt = aead_decrypt,
1328 			.ivsize = AES_BLOCK_SIZE,
1329 			.maxauthsize = SHA1_DIGEST_SIZE,
1330 		},
1331 		.caam = {
1332 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1333 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1334 					   OP_ALG_AAI_HMAC_PRECOMP,
1335 		}
1336 	},
1337 	{
1338 		.aead = {
1339 			.base = {
1340 				.cra_name = "echainiv(authenc(hmac(sha1),"
1341 					    "cbc(aes)))",
1342 				.cra_driver_name = "echainiv-authenc-"
1343 						   "hmac-sha1-cbc-aes-caam-qi",
1344 				.cra_blocksize = AES_BLOCK_SIZE,
1345 			},
1346 			.setkey = aead_setkey,
1347 			.setauthsize = aead_setauthsize,
1348 			.encrypt = aead_encrypt,
1349 			.decrypt = aead_decrypt,
1350 			.ivsize = AES_BLOCK_SIZE,
1351 			.maxauthsize = SHA1_DIGEST_SIZE,
1352 		},
1353 		.caam = {
1354 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1355 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1356 					   OP_ALG_AAI_HMAC_PRECOMP,
1357 			.geniv = true,
1358 		},
1359 	},
1360 	{
1361 		.aead = {
1362 			.base = {
1363 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1364 				.cra_driver_name = "authenc-hmac-sha224-"
1365 						   "cbc-aes-caam-qi",
1366 				.cra_blocksize = AES_BLOCK_SIZE,
1367 			},
1368 			.setkey = aead_setkey,
1369 			.setauthsize = aead_setauthsize,
1370 			.encrypt = aead_encrypt,
1371 			.decrypt = aead_decrypt,
1372 			.ivsize = AES_BLOCK_SIZE,
1373 			.maxauthsize = SHA224_DIGEST_SIZE,
1374 		},
1375 		.caam = {
1376 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1377 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1378 					   OP_ALG_AAI_HMAC_PRECOMP,
1379 		}
1380 	},
1381 	{
1382 		.aead = {
1383 			.base = {
1384 				.cra_name = "echainiv(authenc(hmac(sha224),"
1385 					    "cbc(aes)))",
1386 				.cra_driver_name = "echainiv-authenc-"
1387 						   "hmac-sha224-cbc-aes-caam-qi",
1388 				.cra_blocksize = AES_BLOCK_SIZE,
1389 			},
1390 			.setkey = aead_setkey,
1391 			.setauthsize = aead_setauthsize,
1392 			.encrypt = aead_encrypt,
1393 			.decrypt = aead_decrypt,
1394 			.ivsize = AES_BLOCK_SIZE,
1395 			.maxauthsize = SHA224_DIGEST_SIZE,
1396 		},
1397 		.caam = {
1398 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1399 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1400 					   OP_ALG_AAI_HMAC_PRECOMP,
1401 			.geniv = true,
1402 		}
1403 	},
1404 	{
1405 		.aead = {
1406 			.base = {
1407 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1408 				.cra_driver_name = "authenc-hmac-sha256-"
1409 						   "cbc-aes-caam-qi",
1410 				.cra_blocksize = AES_BLOCK_SIZE,
1411 			},
1412 			.setkey = aead_setkey,
1413 			.setauthsize = aead_setauthsize,
1414 			.encrypt = aead_encrypt,
1415 			.decrypt = aead_decrypt,
1416 			.ivsize = AES_BLOCK_SIZE,
1417 			.maxauthsize = SHA256_DIGEST_SIZE,
1418 		},
1419 		.caam = {
1420 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1421 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1422 					   OP_ALG_AAI_HMAC_PRECOMP,
1423 		}
1424 	},
1425 	{
1426 		.aead = {
1427 			.base = {
1428 				.cra_name = "echainiv(authenc(hmac(sha256),"
1429 					    "cbc(aes)))",
1430 				.cra_driver_name = "echainiv-authenc-"
1431 						   "hmac-sha256-cbc-aes-"
1432 						   "caam-qi",
1433 				.cra_blocksize = AES_BLOCK_SIZE,
1434 			},
1435 			.setkey = aead_setkey,
1436 			.setauthsize = aead_setauthsize,
1437 			.encrypt = aead_encrypt,
1438 			.decrypt = aead_decrypt,
1439 			.ivsize = AES_BLOCK_SIZE,
1440 			.maxauthsize = SHA256_DIGEST_SIZE,
1441 		},
1442 		.caam = {
1443 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1444 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1445 					   OP_ALG_AAI_HMAC_PRECOMP,
1446 			.geniv = true,
1447 		}
1448 	},
1449 	{
1450 		.aead = {
1451 			.base = {
1452 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
1453 				.cra_driver_name = "authenc-hmac-sha384-"
1454 						   "cbc-aes-caam-qi",
1455 				.cra_blocksize = AES_BLOCK_SIZE,
1456 			},
1457 			.setkey = aead_setkey,
1458 			.setauthsize = aead_setauthsize,
1459 			.encrypt = aead_encrypt,
1460 			.decrypt = aead_decrypt,
1461 			.ivsize = AES_BLOCK_SIZE,
1462 			.maxauthsize = SHA384_DIGEST_SIZE,
1463 		},
1464 		.caam = {
1465 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1466 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1467 					   OP_ALG_AAI_HMAC_PRECOMP,
1468 		}
1469 	},
1470 	{
1471 		.aead = {
1472 			.base = {
1473 				.cra_name = "echainiv(authenc(hmac(sha384),"
1474 					    "cbc(aes)))",
1475 				.cra_driver_name = "echainiv-authenc-"
1476 						   "hmac-sha384-cbc-aes-"
1477 						   "caam-qi",
1478 				.cra_blocksize = AES_BLOCK_SIZE,
1479 			},
1480 			.setkey = aead_setkey,
1481 			.setauthsize = aead_setauthsize,
1482 			.encrypt = aead_encrypt,
1483 			.decrypt = aead_decrypt,
1484 			.ivsize = AES_BLOCK_SIZE,
1485 			.maxauthsize = SHA384_DIGEST_SIZE,
1486 		},
1487 		.caam = {
1488 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1489 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1490 					   OP_ALG_AAI_HMAC_PRECOMP,
1491 			.geniv = true,
1492 		}
1493 	},
1494 	{
1495 		.aead = {
1496 			.base = {
1497 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1498 				.cra_driver_name = "authenc-hmac-sha512-"
1499 						   "cbc-aes-caam-qi",
1500 				.cra_blocksize = AES_BLOCK_SIZE,
1501 			},
1502 			.setkey = aead_setkey,
1503 			.setauthsize = aead_setauthsize,
1504 			.encrypt = aead_encrypt,
1505 			.decrypt = aead_decrypt,
1506 			.ivsize = AES_BLOCK_SIZE,
1507 			.maxauthsize = SHA512_DIGEST_SIZE,
1508 		},
1509 		.caam = {
1510 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1511 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1512 					   OP_ALG_AAI_HMAC_PRECOMP,
1513 		}
1514 	},
1515 	{
1516 		.aead = {
1517 			.base = {
1518 				.cra_name = "echainiv(authenc(hmac(sha512),"
1519 					    "cbc(aes)))",
1520 				.cra_driver_name = "echainiv-authenc-"
1521 						   "hmac-sha512-cbc-aes-"
1522 						   "caam-qi",
1523 				.cra_blocksize = AES_BLOCK_SIZE,
1524 			},
1525 			.setkey = aead_setkey,
1526 			.setauthsize = aead_setauthsize,
1527 			.encrypt = aead_encrypt,
1528 			.decrypt = aead_decrypt,
1529 			.ivsize = AES_BLOCK_SIZE,
1530 			.maxauthsize = SHA512_DIGEST_SIZE,
1531 		},
1532 		.caam = {
1533 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1534 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1535 					   OP_ALG_AAI_HMAC_PRECOMP,
1536 			.geniv = true,
1537 		}
1538 	},
1539 	{
1540 		.aead = {
1541 			.base = {
1542 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1543 				.cra_driver_name = "authenc-hmac-md5-"
1544 						   "cbc-des3_ede-caam-qi",
1545 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1546 			},
1547 			.setkey = aead_setkey,
1548 			.setauthsize = aead_setauthsize,
1549 			.encrypt = aead_encrypt,
1550 			.decrypt = aead_decrypt,
1551 			.ivsize = DES3_EDE_BLOCK_SIZE,
1552 			.maxauthsize = MD5_DIGEST_SIZE,
1553 		},
1554 		.caam = {
1555 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1556 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1557 					   OP_ALG_AAI_HMAC_PRECOMP,
1558 		}
1559 	},
1560 	{
1561 		.aead = {
1562 			.base = {
1563 				.cra_name = "echainiv(authenc(hmac(md5),"
1564 					    "cbc(des3_ede)))",
1565 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1566 						   "cbc-des3_ede-caam-qi",
1567 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1568 			},
1569 			.setkey = aead_setkey,
1570 			.setauthsize = aead_setauthsize,
1571 			.encrypt = aead_encrypt,
1572 			.decrypt = aead_decrypt,
1573 			.ivsize = DES3_EDE_BLOCK_SIZE,
1574 			.maxauthsize = MD5_DIGEST_SIZE,
1575 		},
1576 		.caam = {
1577 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1578 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1579 					   OP_ALG_AAI_HMAC_PRECOMP,
1580 			.geniv = true,
1581 		}
1582 	},
1583 	{
1584 		.aead = {
1585 			.base = {
1586 				.cra_name = "authenc(hmac(sha1),"
1587 					    "cbc(des3_ede))",
1588 				.cra_driver_name = "authenc-hmac-sha1-"
1589 						   "cbc-des3_ede-caam-qi",
1590 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1591 			},
1592 			.setkey = aead_setkey,
1593 			.setauthsize = aead_setauthsize,
1594 			.encrypt = aead_encrypt,
1595 			.decrypt = aead_decrypt,
1596 			.ivsize = DES3_EDE_BLOCK_SIZE,
1597 			.maxauthsize = SHA1_DIGEST_SIZE,
1598 		},
1599 		.caam = {
1600 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1601 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1602 					   OP_ALG_AAI_HMAC_PRECOMP,
1603 		},
1604 	},
1605 	{
1606 		.aead = {
1607 			.base = {
1608 				.cra_name = "echainiv(authenc(hmac(sha1),"
1609 					    "cbc(des3_ede)))",
1610 				.cra_driver_name = "echainiv-authenc-"
1611 						   "hmac-sha1-"
1612 						   "cbc-des3_ede-caam-qi",
1613 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1614 			},
1615 			.setkey = aead_setkey,
1616 			.setauthsize = aead_setauthsize,
1617 			.encrypt = aead_encrypt,
1618 			.decrypt = aead_decrypt,
1619 			.ivsize = DES3_EDE_BLOCK_SIZE,
1620 			.maxauthsize = SHA1_DIGEST_SIZE,
1621 		},
1622 		.caam = {
1623 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1624 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1625 					   OP_ALG_AAI_HMAC_PRECOMP,
1626 			.geniv = true,
1627 		}
1628 	},
1629 	{
1630 		.aead = {
1631 			.base = {
1632 				.cra_name = "authenc(hmac(sha224),"
1633 					    "cbc(des3_ede))",
1634 				.cra_driver_name = "authenc-hmac-sha224-"
1635 						   "cbc-des3_ede-caam-qi",
1636 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1637 			},
1638 			.setkey = aead_setkey,
1639 			.setauthsize = aead_setauthsize,
1640 			.encrypt = aead_encrypt,
1641 			.decrypt = aead_decrypt,
1642 			.ivsize = DES3_EDE_BLOCK_SIZE,
1643 			.maxauthsize = SHA224_DIGEST_SIZE,
1644 		},
1645 		.caam = {
1646 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1647 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1648 					   OP_ALG_AAI_HMAC_PRECOMP,
1649 		},
1650 	},
1651 	{
1652 		.aead = {
1653 			.base = {
1654 				.cra_name = "echainiv(authenc(hmac(sha224),"
1655 					    "cbc(des3_ede)))",
1656 				.cra_driver_name = "echainiv-authenc-"
1657 						   "hmac-sha224-"
1658 						   "cbc-des3_ede-caam-qi",
1659 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1660 			},
1661 			.setkey = aead_setkey,
1662 			.setauthsize = aead_setauthsize,
1663 			.encrypt = aead_encrypt,
1664 			.decrypt = aead_decrypt,
1665 			.ivsize = DES3_EDE_BLOCK_SIZE,
1666 			.maxauthsize = SHA224_DIGEST_SIZE,
1667 		},
1668 		.caam = {
1669 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1670 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1671 					   OP_ALG_AAI_HMAC_PRECOMP,
1672 			.geniv = true,
1673 		}
1674 	},
1675 	{
1676 		.aead = {
1677 			.base = {
1678 				.cra_name = "authenc(hmac(sha256),"
1679 					    "cbc(des3_ede))",
1680 				.cra_driver_name = "authenc-hmac-sha256-"
1681 						   "cbc-des3_ede-caam-qi",
1682 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1683 			},
1684 			.setkey = aead_setkey,
1685 			.setauthsize = aead_setauthsize,
1686 			.encrypt = aead_encrypt,
1687 			.decrypt = aead_decrypt,
1688 			.ivsize = DES3_EDE_BLOCK_SIZE,
1689 			.maxauthsize = SHA256_DIGEST_SIZE,
1690 		},
1691 		.caam = {
1692 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1693 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1694 					   OP_ALG_AAI_HMAC_PRECOMP,
1695 		},
1696 	},
1697 	{
1698 		.aead = {
1699 			.base = {
1700 				.cra_name = "echainiv(authenc(hmac(sha256),"
1701 					    "cbc(des3_ede)))",
1702 				.cra_driver_name = "echainiv-authenc-"
1703 						   "hmac-sha256-"
1704 						   "cbc-des3_ede-caam-qi",
1705 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1706 			},
1707 			.setkey = aead_setkey,
1708 			.setauthsize = aead_setauthsize,
1709 			.encrypt = aead_encrypt,
1710 			.decrypt = aead_decrypt,
1711 			.ivsize = DES3_EDE_BLOCK_SIZE,
1712 			.maxauthsize = SHA256_DIGEST_SIZE,
1713 		},
1714 		.caam = {
1715 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1716 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1717 					   OP_ALG_AAI_HMAC_PRECOMP,
1718 			.geniv = true,
1719 		}
1720 	},
1721 	{
1722 		.aead = {
1723 			.base = {
1724 				.cra_name = "authenc(hmac(sha384),"
1725 					    "cbc(des3_ede))",
1726 				.cra_driver_name = "authenc-hmac-sha384-"
1727 						   "cbc-des3_ede-caam-qi",
1728 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1729 			},
1730 			.setkey = aead_setkey,
1731 			.setauthsize = aead_setauthsize,
1732 			.encrypt = aead_encrypt,
1733 			.decrypt = aead_decrypt,
1734 			.ivsize = DES3_EDE_BLOCK_SIZE,
1735 			.maxauthsize = SHA384_DIGEST_SIZE,
1736 		},
1737 		.caam = {
1738 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1739 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1740 					   OP_ALG_AAI_HMAC_PRECOMP,
1741 		},
1742 	},
1743 	{
1744 		.aead = {
1745 			.base = {
1746 				.cra_name = "echainiv(authenc(hmac(sha384),"
1747 					    "cbc(des3_ede)))",
1748 				.cra_driver_name = "echainiv-authenc-"
1749 						   "hmac-sha384-"
1750 						   "cbc-des3_ede-caam-qi",
1751 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1752 			},
1753 			.setkey = aead_setkey,
1754 			.setauthsize = aead_setauthsize,
1755 			.encrypt = aead_encrypt,
1756 			.decrypt = aead_decrypt,
1757 			.ivsize = DES3_EDE_BLOCK_SIZE,
1758 			.maxauthsize = SHA384_DIGEST_SIZE,
1759 		},
1760 		.caam = {
1761 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1762 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1763 					   OP_ALG_AAI_HMAC_PRECOMP,
1764 			.geniv = true,
1765 		}
1766 	},
1767 	{
1768 		.aead = {
1769 			.base = {
1770 				.cra_name = "authenc(hmac(sha512),"
1771 					    "cbc(des3_ede))",
1772 				.cra_driver_name = "authenc-hmac-sha512-"
1773 						   "cbc-des3_ede-caam-qi",
1774 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1775 			},
1776 			.setkey = aead_setkey,
1777 			.setauthsize = aead_setauthsize,
1778 			.encrypt = aead_encrypt,
1779 			.decrypt = aead_decrypt,
1780 			.ivsize = DES3_EDE_BLOCK_SIZE,
1781 			.maxauthsize = SHA512_DIGEST_SIZE,
1782 		},
1783 		.caam = {
1784 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1785 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1786 					   OP_ALG_AAI_HMAC_PRECOMP,
1787 		},
1788 	},
1789 	{
1790 		.aead = {
1791 			.base = {
1792 				.cra_name = "echainiv(authenc(hmac(sha512),"
1793 					    "cbc(des3_ede)))",
1794 				.cra_driver_name = "echainiv-authenc-"
1795 						   "hmac-sha512-"
1796 						   "cbc-des3_ede-caam-qi",
1797 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1798 			},
1799 			.setkey = aead_setkey,
1800 			.setauthsize = aead_setauthsize,
1801 			.encrypt = aead_encrypt,
1802 			.decrypt = aead_decrypt,
1803 			.ivsize = DES3_EDE_BLOCK_SIZE,
1804 			.maxauthsize = SHA512_DIGEST_SIZE,
1805 		},
1806 		.caam = {
1807 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1808 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1809 					   OP_ALG_AAI_HMAC_PRECOMP,
1810 			.geniv = true,
1811 		},
1812 	},
1813 	{
1814 		.aead = {
1815 			.base = {
1816 				.cra_name = "authenc(hmac(md5),cbc(des))",
1817 				.cra_driver_name = "authenc-hmac-md5-"
1818 						   "cbc-des-caam-qi",
1819 				.cra_blocksize = DES_BLOCK_SIZE,
1820 			},
1821 			.setkey = aead_setkey,
1822 			.setauthsize = aead_setauthsize,
1823 			.encrypt = aead_encrypt,
1824 			.decrypt = aead_decrypt,
1825 			.ivsize = DES_BLOCK_SIZE,
1826 			.maxauthsize = MD5_DIGEST_SIZE,
1827 		},
1828 		.caam = {
1829 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1830 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1831 					   OP_ALG_AAI_HMAC_PRECOMP,
1832 		},
1833 	},
1834 	{
1835 		.aead = {
1836 			.base = {
1837 				.cra_name = "echainiv(authenc(hmac(md5),"
1838 					    "cbc(des)))",
1839 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1840 						   "cbc-des-caam-qi",
1841 				.cra_blocksize = DES_BLOCK_SIZE,
1842 			},
1843 			.setkey = aead_setkey,
1844 			.setauthsize = aead_setauthsize,
1845 			.encrypt = aead_encrypt,
1846 			.decrypt = aead_decrypt,
1847 			.ivsize = DES_BLOCK_SIZE,
1848 			.maxauthsize = MD5_DIGEST_SIZE,
1849 		},
1850 		.caam = {
1851 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1852 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1853 					   OP_ALG_AAI_HMAC_PRECOMP,
1854 			.geniv = true,
1855 		},
1856 	},
1857 	{
1858 		.aead = {
1859 			.base = {
1860 				.cra_name = "authenc(hmac(sha1),cbc(des))",
1861 				.cra_driver_name = "authenc-hmac-sha1-"
1862 						   "cbc-des-caam-qi",
1863 				.cra_blocksize = DES_BLOCK_SIZE,
1864 			},
1865 			.setkey = aead_setkey,
1866 			.setauthsize = aead_setauthsize,
1867 			.encrypt = aead_encrypt,
1868 			.decrypt = aead_decrypt,
1869 			.ivsize = DES_BLOCK_SIZE,
1870 			.maxauthsize = SHA1_DIGEST_SIZE,
1871 		},
1872 		.caam = {
1873 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1874 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1875 					   OP_ALG_AAI_HMAC_PRECOMP,
1876 		},
1877 	},
1878 	{
1879 		.aead = {
1880 			.base = {
1881 				.cra_name = "echainiv(authenc(hmac(sha1),"
1882 					    "cbc(des)))",
1883 				.cra_driver_name = "echainiv-authenc-"
1884 						   "hmac-sha1-cbc-des-caam-qi",
1885 				.cra_blocksize = DES_BLOCK_SIZE,
1886 			},
1887 			.setkey = aead_setkey,
1888 			.setauthsize = aead_setauthsize,
1889 			.encrypt = aead_encrypt,
1890 			.decrypt = aead_decrypt,
1891 			.ivsize = DES_BLOCK_SIZE,
1892 			.maxauthsize = SHA1_DIGEST_SIZE,
1893 		},
1894 		.caam = {
1895 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1896 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1897 					   OP_ALG_AAI_HMAC_PRECOMP,
1898 			.geniv = true,
1899 		},
1900 	},
1901 	{
1902 		.aead = {
1903 			.base = {
1904 				.cra_name = "authenc(hmac(sha224),cbc(des))",
1905 				.cra_driver_name = "authenc-hmac-sha224-"
1906 						   "cbc-des-caam-qi",
1907 				.cra_blocksize = DES_BLOCK_SIZE,
1908 			},
1909 			.setkey = aead_setkey,
1910 			.setauthsize = aead_setauthsize,
1911 			.encrypt = aead_encrypt,
1912 			.decrypt = aead_decrypt,
1913 			.ivsize = DES_BLOCK_SIZE,
1914 			.maxauthsize = SHA224_DIGEST_SIZE,
1915 		},
1916 		.caam = {
1917 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1918 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1919 					   OP_ALG_AAI_HMAC_PRECOMP,
1920 		},
1921 	},
1922 	{
1923 		.aead = {
1924 			.base = {
1925 				.cra_name = "echainiv(authenc(hmac(sha224),"
1926 					    "cbc(des)))",
1927 				.cra_driver_name = "echainiv-authenc-"
1928 						   "hmac-sha224-cbc-des-"
1929 						   "caam-qi",
1930 				.cra_blocksize = DES_BLOCK_SIZE,
1931 			},
1932 			.setkey = aead_setkey,
1933 			.setauthsize = aead_setauthsize,
1934 			.encrypt = aead_encrypt,
1935 			.decrypt = aead_decrypt,
1936 			.ivsize = DES_BLOCK_SIZE,
1937 			.maxauthsize = SHA224_DIGEST_SIZE,
1938 		},
1939 		.caam = {
1940 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1941 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1942 					   OP_ALG_AAI_HMAC_PRECOMP,
1943 			.geniv = true,
1944 		},
1945 	},
1946 	{
1947 		.aead = {
1948 			.base = {
1949 				.cra_name = "authenc(hmac(sha256),cbc(des))",
1950 				.cra_driver_name = "authenc-hmac-sha256-"
1951 						   "cbc-des-caam-qi",
1952 				.cra_blocksize = DES_BLOCK_SIZE,
1953 			},
1954 			.setkey = aead_setkey,
1955 			.setauthsize = aead_setauthsize,
1956 			.encrypt = aead_encrypt,
1957 			.decrypt = aead_decrypt,
1958 			.ivsize = DES_BLOCK_SIZE,
1959 			.maxauthsize = SHA256_DIGEST_SIZE,
1960 		},
1961 		.caam = {
1962 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1963 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1964 					   OP_ALG_AAI_HMAC_PRECOMP,
1965 		},
1966 	},
1967 	{
1968 		.aead = {
1969 			.base = {
1970 				.cra_name = "echainiv(authenc(hmac(sha256),"
1971 					    "cbc(des)))",
1972 				.cra_driver_name = "echainiv-authenc-"
1973 						   "hmac-sha256-cbc-des-"
1974 						   "caam-qi",
1975 				.cra_blocksize = DES_BLOCK_SIZE,
1976 			},
1977 			.setkey = aead_setkey,
1978 			.setauthsize = aead_setauthsize,
1979 			.encrypt = aead_encrypt,
1980 			.decrypt = aead_decrypt,
1981 			.ivsize = DES_BLOCK_SIZE,
1982 			.maxauthsize = SHA256_DIGEST_SIZE,
1983 		},
1984 		.caam = {
1985 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1986 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1987 					   OP_ALG_AAI_HMAC_PRECOMP,
1988 			.geniv = true,
1989 		},
1990 	},
1991 	{
1992 		.aead = {
1993 			.base = {
1994 				.cra_name = "authenc(hmac(sha384),cbc(des))",
1995 				.cra_driver_name = "authenc-hmac-sha384-"
1996 						   "cbc-des-caam-qi",
1997 				.cra_blocksize = DES_BLOCK_SIZE,
1998 			},
1999 			.setkey = aead_setkey,
2000 			.setauthsize = aead_setauthsize,
2001 			.encrypt = aead_encrypt,
2002 			.decrypt = aead_decrypt,
2003 			.ivsize = DES_BLOCK_SIZE,
2004 			.maxauthsize = SHA384_DIGEST_SIZE,
2005 		},
2006 		.caam = {
2007 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2008 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2009 					   OP_ALG_AAI_HMAC_PRECOMP,
2010 		},
2011 	},
2012 	{
2013 		.aead = {
2014 			.base = {
2015 				.cra_name = "echainiv(authenc(hmac(sha384),"
2016 					    "cbc(des)))",
2017 				.cra_driver_name = "echainiv-authenc-"
2018 						   "hmac-sha384-cbc-des-"
2019 						   "caam-qi",
2020 				.cra_blocksize = DES_BLOCK_SIZE,
2021 			},
2022 			.setkey = aead_setkey,
2023 			.setauthsize = aead_setauthsize,
2024 			.encrypt = aead_encrypt,
2025 			.decrypt = aead_decrypt,
2026 			.ivsize = DES_BLOCK_SIZE,
2027 			.maxauthsize = SHA384_DIGEST_SIZE,
2028 		},
2029 		.caam = {
2030 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2031 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2032 					   OP_ALG_AAI_HMAC_PRECOMP,
2033 			.geniv = true,
2034 		},
2035 	},
2036 	{
2037 		.aead = {
2038 			.base = {
2039 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2040 				.cra_driver_name = "authenc-hmac-sha512-"
2041 						   "cbc-des-caam-qi",
2042 				.cra_blocksize = DES_BLOCK_SIZE,
2043 			},
2044 			.setkey = aead_setkey,
2045 			.setauthsize = aead_setauthsize,
2046 			.encrypt = aead_encrypt,
2047 			.decrypt = aead_decrypt,
2048 			.ivsize = DES_BLOCK_SIZE,
2049 			.maxauthsize = SHA512_DIGEST_SIZE,
2050 		},
2051 		.caam = {
2052 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2053 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2054 					   OP_ALG_AAI_HMAC_PRECOMP,
2055 		},
2056 	},
2057 	{
2058 		.aead = {
2059 			.base = {
2060 				.cra_name = "echainiv(authenc(hmac(sha512),"
2061 					    "cbc(des)))",
2062 				.cra_driver_name = "echainiv-authenc-"
2063 						   "hmac-sha512-cbc-des-"
2064 						   "caam-qi",
2065 				.cra_blocksize = DES_BLOCK_SIZE,
2066 			},
2067 			.setkey = aead_setkey,
2068 			.setauthsize = aead_setauthsize,
2069 			.encrypt = aead_encrypt,
2070 			.decrypt = aead_decrypt,
2071 			.ivsize = DES_BLOCK_SIZE,
2072 			.maxauthsize = SHA512_DIGEST_SIZE,
2073 		},
2074 		.caam = {
2075 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2076 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2077 					   OP_ALG_AAI_HMAC_PRECOMP,
2078 			.geniv = true,
2079 		},
2080 	},
2081 };
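
/*
 * Illustrative only, not part of the driver: a kernel client reaches one
 * of the AEADs above through the generic crypto API by cra_name, and the
 * CAAM-QI implementation wins the lookup when its priority is highest.
 * The helper name is hypothetical, and the sketch omits the rtattr-wrapped
 * authenc() key blob and the request/scatterlist setup.
 */
static int __maybe_unused example_alloc_caam_qi_aead(void)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(des3_ede))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Truncate the HMAC-SHA256 tag; must not exceed maxauthsize */
	ret = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE / 2);

	crypto_free_aead(tfm);
	return ret;
}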
2082 
2083 struct caam_crypto_alg {
2084 	struct list_head entry;
2085 	struct crypto_alg crypto_alg;
2086 	struct caam_alg_entry caam;
2087 };
2088 
2089 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
2090 {
2091 	struct caam_drv_private *priv;
2092 
2093 	/*
2094 	 * distribute tfms across job rings to ensure in-order
2095 	 * crypto request processing per tfm
2096 	 */
2097 	ctx->jrdev = caam_jr_alloc();
2098 	if (IS_ERR(ctx->jrdev)) {
2099 		pr_err("Job Ring Device allocation for transform failed\n");
2100 		return PTR_ERR(ctx->jrdev);
2101 	}
2102 
2103 	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
2104 				      DMA_TO_DEVICE);
2105 	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
2106 		dev_err(ctx->jrdev, "unable to map key\n");
2107 		caam_jr_free(ctx->jrdev);
2108 		return -ENOMEM;
2109 	}
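
	/*
	 * The shared descriptors reference the key material through this
	 * single mapping; any setkey path that rewrites ctx->key is
	 * expected to sync the buffer back to the device before the
	 * descriptors are used again.
	 */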
2110 
2111 	/* copy descriptor header template value */
2112 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2113 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2114 
2115 	priv = dev_get_drvdata(ctx->jrdev->parent);
2116 	ctx->qidev = priv->qidev;
2117 
2118 	spin_lock_init(&ctx->lock);
2119 	ctx->drv_ctx[ENCRYPT] = NULL;
2120 	ctx->drv_ctx[DECRYPT] = NULL;
2121 	ctx->drv_ctx[GIVENCRYPT] = NULL;
2122 
2123 	return 0;
2124 }
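
/*
 * Condensed sketch, for illustration only (helper name hypothetical,
 * based on this file's QI interface): the NULL drv_ctx slots initialized
 * above are populated lazily on the first request of each operation
 * type, using the double-checked locking that ctx->lock enables.
 */
static struct caam_drv_ctx * __maybe_unused
example_get_drv_ctx(struct caam_ctx *ctx, enum optype type, u32 *sh_desc)
{
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Re-read: another core may have created it meanwhile */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu = smp_processor_id();

			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu,
						    sh_desc);
			if (!IS_ERR_OR_NULL(drv_ctx))
				ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}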
2125 
2126 static int caam_cra_init(struct crypto_tfm *tfm)
2127 {
2128 	struct crypto_alg *alg = tfm->__crt_alg;
2129 	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2130 							crypto_alg);
2131 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2132 
2133 	return caam_init_common(ctx, &caam_alg->caam);
2134 }
2135 
2136 static int caam_aead_init(struct crypto_aead *tfm)
2137 {
2138 	struct aead_alg *alg = crypto_aead_alg(tfm);
2139 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2140 						      aead);
2141 	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2142 
2143 	return caam_init_common(ctx, &caam_alg->caam);
2144 }
2145 
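/* Tear down in reverse of caam_init_common(): QI contexts, key DMA, JR */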
2146 static void caam_exit_common(struct caam_ctx *ctx)
2147 {
2148 	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2149 	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2150 	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
2151 
2152 	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
2153 			 DMA_TO_DEVICE);
2154 
2155 	caam_jr_free(ctx->jrdev);
2156 }
2157 
2158 static void caam_cra_exit(struct crypto_tfm *tfm)
2159 {
2160 	caam_exit_common(crypto_tfm_ctx(tfm));
2161 }
2162 
2163 static void caam_aead_exit(struct crypto_aead *tfm)
2164 {
2165 	caam_exit_common(crypto_aead_ctx(tfm));
2166 }
2167 
2168 static struct list_head alg_list;
2169 static void __exit caam_qi_algapi_exit(void)
2170 {
2171 	struct caam_crypto_alg *t_alg, *n;
2172 	int i;
2173 
2174 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2175 		struct caam_aead_alg *t_alg = driver_aeads + i;
2176 
2177 		if (t_alg->registered)
2178 			crypto_unregister_aead(&t_alg->aead);
2179 	}
2180 
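	/* Defensive: skip if alg_list was never initialized */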
2181 	if (!alg_list.next)
2182 		return;
2183 
2184 	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2185 		crypto_unregister_alg(&t_alg->crypto_alg);
2186 		list_del(&t_alg->entry);
2187 		kfree(t_alg);
2188 	}
2189 }
2190 
2191 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2192 					      *template)
2193 {
2194 	struct caam_crypto_alg *t_alg;
2195 	struct crypto_alg *alg;
2196 
2197 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2198 	if (!t_alg)
2199 		return ERR_PTR(-ENOMEM);
2200 
2201 	alg = &t_alg->crypto_alg;
2202 
2203 	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2204 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2205 		 template->driver_name);
2206 	alg->cra_module = THIS_MODULE;
2207 	alg->cra_init = caam_cra_init;
2208 	alg->cra_exit = caam_cra_exit;
2209 	alg->cra_priority = CAAM_CRA_PRIORITY;
2210 	alg->cra_blocksize = template->blocksize;
2211 	alg->cra_alignmask = 0;
2212 	alg->cra_ctxsize = sizeof(struct caam_ctx);
2213 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2214 			 template->type;
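	/*
	 * GIVCIPHER templates generate their IV internally, while plain
	 * ABLKCIPHER ones take a caller-provided IV; both share the same
	 * ablkcipher ops here.
	 */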
2215 	switch (template->type) {
2216 	case CRYPTO_ALG_TYPE_GIVCIPHER:
2217 		alg->cra_type = &crypto_givcipher_type;
2218 		alg->cra_ablkcipher = template->template_ablkcipher;
2219 		break;
2220 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2221 		alg->cra_type = &crypto_ablkcipher_type;
2222 		alg->cra_ablkcipher = template->template_ablkcipher;
2223 		break;
2224 	}
2225 
2226 	t_alg->caam.class1_alg_type = template->class1_alg_type;
2227 	t_alg->caam.class2_alg_type = template->class2_alg_type;
2228 
2229 	return t_alg;
2230 }
2231 
2232 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2233 {
2234 	struct aead_alg *alg = &t_alg->aead;
2235 
2236 	alg->base.cra_module = THIS_MODULE;
2237 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2238 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2239 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2240 
2241 	alg->init = caam_aead_init;
2242 	alg->exit = caam_aead_exit;
2243 }
2244 
2245 static int __init caam_qi_algapi_init(void)
2246 {
2247 	struct device_node *dev_node;
2248 	struct platform_device *pdev;
2249 	struct device *ctrldev;
2250 	struct caam_drv_private *priv;
2251 	int i = 0, err = 0;
2252 	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2253 	unsigned int md_limit = SHA512_DIGEST_SIZE;
2254 	bool registered = false;
2255 
2256 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2257 	if (!dev_node) {
2258 		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2259 		if (!dev_node)
2260 			return -ENODEV;
2261 	}
2262 
2263 	pdev = of_find_device_by_node(dev_node);
2264 	of_node_put(dev_node);
2265 	if (!pdev)
2266 		return -ENODEV;
2267 
2268 	ctrldev = &pdev->dev;
2269 	priv = dev_get_drvdata(ctrldev);
2270 
2271 	/*
2272 	 * If priv is NULL, it's probably because the caam driver wasn't
2273 	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2274 	 */
2275 	if (!priv || !priv->qi_present)
2276 		return -ENODEV;
2277 
2278 	INIT_LIST_HEAD(&alg_list);
2279 
2280 	/*
2281 	 * Register crypto algorithms the device supports.
2282 	 * First, detect presence and attributes of DES, AES, and MD blocks.
2283 	 */
2284 	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2285 	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2286 	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2287 	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2288 	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2289 
2290 	/* If MD is present, limit digest size based on LP256 */
2291 	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2292 		md_limit = SHA256_DIGEST_SIZE;
2293 
2294 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2295 		struct caam_crypto_alg *t_alg;
2296 		struct caam_alg_template *alg = driver_algs + i;
2297 		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
2298 
2299 		/* Skip DES algorithms if not supported by device */
2300 		if (!des_inst &&
2301 		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
2302 		     (alg_sel == OP_ALG_ALGSEL_DES)))
2303 			continue;
2304 
2305 		/* Skip AES algorithms if not supported by device */
2306 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2307 			continue;
2308 
2309 		t_alg = caam_alg_alloc(alg);
2310 		if (IS_ERR(t_alg)) {
2311 			err = PTR_ERR(t_alg);
2312 			dev_warn(priv->qidev, "%s alg allocation failed\n",
2313 				 alg->driver_name);
2314 			continue;
2315 		}
2316 
2317 		err = crypto_register_alg(&t_alg->crypto_alg);
2318 		if (err) {
2319 			dev_warn(priv->qidev, "%s alg registration failed\n",
2320 				 t_alg->crypto_alg.cra_driver_name);
2321 			kfree(t_alg);
2322 			continue;
2323 		}
2324 
2325 		list_add_tail(&t_alg->entry, &alg_list);
2326 		registered = true;
2327 	}
2328 
2329 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2330 		struct caam_aead_alg *t_alg = driver_aeads + i;
2331 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
2332 				 OP_ALG_ALGSEL_MASK;
2333 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
2334 				 OP_ALG_ALGSEL_MASK;
2335 		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
2336 
2337 		/* Skip DES algorithms if not supported by device */
2338 		if (!des_inst &&
2339 		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
2340 		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
2341 			continue;
2342 
2343 		/* Skip AES algorithms if not supported by device */
2344 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2345 			continue;
2346 
2347 		/*
2348 		 * Check support for AES algorithms not available
2349 		 * on LP devices.
2350 		 */
2351 		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
2352 		    (alg_aai == OP_ALG_AAI_GCM))
2353 			continue;
2354 
2355 		/*
2356 		 * Skip algorithms requiring message digests
2357 		 * if MD or MD size is not supported by device.
2358 		 */
2359 		if (c2_alg_sel &&
2360 		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2361 			continue;
2362 
2363 		caam_aead_alg_init(t_alg);
2364 
2365 		err = crypto_register_aead(&t_alg->aead);
2366 		if (err) {
2367 			pr_warn("%s alg registration failed\n",
2368 				t_alg->aead.base.cra_driver_name);
2369 			continue;
2370 		}
2371 
2372 		t_alg->registered = true;
2373 		registered = true;
2374 	}
2375 
2376 	if (registered)
2377 		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
2378 
2379 	return err;
2380 }
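
/*
 * Illustrative check, not part of the driver (helper name hypothetical):
 * a specific QI implementation can be confirmed after init by looking it
 * up under its cra_driver_name, which bypasses priority-based resolution
 * by cra_name.
 */
static bool __maybe_unused example_caam_qi_aead_present(void)
{
	return crypto_has_alg("authenc-hmac-sha1-cbc-des-caam-qi", 0, 0);
}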
2381 
2382 module_init(caam_qi_algapi_init);
2383 module_exit(caam_qi_algapi_exit);
2384 
2385 MODULE_LICENSE("GPL");
2386 MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
2387 MODULE_AUTHOR("Freescale Semiconductor");
2388