xref: /linux/drivers/crypto/caam/caamalg_qi2.c (revision 0b8061c340b643e01da431dd60c75a41bb1d31ec)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2019 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include "dpseci-debugfs.h"
19 #include <linux/fsl/mc.h>
20 #include <soc/fsl/dpaa2-io.h>
21 #include <soc/fsl/dpaa2-fd.h>
22 #include <crypto/xts.h>
23 #include <asm/unaligned.h>
24 
25 #define CAAM_CRA_PRIORITY	2000
26 
27 /* max key is the sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
28 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
29 				 SHA512_DIGEST_SIZE * 2)
30 
31 /*
32  * This is a cache of buffers from which users of the CAAM QI driver
33  * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
34  * NOTE: A more elegant solution would be to have some headroom in the frames
35  *       being processed. This can be added by the dpaa2-eth driver. This would
36  *       pose a problem for userspace application processing which cannot
37  *       know of this limitation. So for now, this will work.
38  * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
39  */
40 static struct kmem_cache *qi_cache;
41 
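/**
 * struct caam_alg_entry - per-algorithm CAAM settings
 * @dev: dpseci device
 * @class1_alg_type: class 1 (encryption) algorithm selector and mode (AAI)
 * @class2_alg_type: class 2 (authentication) algorithm selector and mode (AAI)
 * @rfc3686: true if the algorithm uses the RFC3686 CTR construction
 * @geniv: true if the encrypt shared descriptor generates the IV itself
 * @nodkp: true if the shared descriptors do not use the Derived Key Protocol
 */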
42 struct caam_alg_entry {
43 	struct device *dev;
44 	int class1_alg_type;
45 	int class2_alg_type;
46 	bool rfc3686;
47 	bool geniv;
48 	bool nodkp;
49 };
50 
51 struct caam_aead_alg {
52 	struct aead_alg aead;
53 	struct caam_alg_entry caam;
54 	bool registered;
55 };
56 
57 struct caam_skcipher_alg {
58 	struct skcipher_alg skcipher;
59 	struct caam_alg_entry caam;
60 	bool registered;
61 };
62 
63 /**
64  * struct caam_ctx - per-session context
65  * @flc: Flow Contexts array
66  * @key: [authentication key], encryption key
67  * @flc_dma: I/O virtual addresses of the Flow Contexts
68  * @key_dma: I/O virtual address of the key
69  * @dir: DMA direction for mapping key and Flow Contexts
70  * @dev: dpseci device
71  * @adata: authentication algorithm details
72  * @cdata: encryption algorithm details
73  * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if the fallback tfm needs to be used due to an
 *                    unsupported XTS key length
 * @fallback: xts fallback tfm
74  */
75 struct caam_ctx {
76 	struct caam_flc flc[NUM_OP];
77 	u8 key[CAAM_MAX_KEY_SIZE];
78 	dma_addr_t flc_dma[NUM_OP];
79 	dma_addr_t key_dma;
80 	enum dma_data_direction dir;
81 	struct device *dev;
82 	struct alginfo adata;
83 	struct alginfo cdata;
84 	unsigned int authsize;
85 	bool xts_key_fallback;
86 	struct crypto_skcipher *fallback;
87 };
88 
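/*
 * Translate an I/O virtual address to a CPU virtual address: behind an
 * IOMMU the IOVA must first be resolved to a physical address; otherwise
 * the address is already physical.
 */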
89 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
90 				     dma_addr_t iova_addr)
91 {
92 	phys_addr_t phys_addr;
93 
94 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
95 				   iova_addr;
96 
97 	return phys_to_virt(phys_addr);
98 }
99 
100 /*
101  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
102  *
103  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
104  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
105  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
106  * hosting 16 SG entries.
107  *
108  * @flags - flags that would be used for the equivalent kmalloc(..) call
109  *
110  * Returns a pointer to a retrieved buffer on success or NULL on failure.
111  */
112 static inline void *qi_cache_zalloc(gfp_t flags)
113 {
114 	return kmem_cache_zalloc(qi_cache, flags);
115 }
116 
117 /*
118  * qi_cache_free - Frees buffers allocated from CAAM-QI cache
119  *
120  * @obj - buffer previously allocated by qi_cache_zalloc
121  *
122  * No checking is done; the call is passed straight through to
123  * kmem_cache_free(...)
124  */
125 static inline void qi_cache_free(void *obj)
126 {
127 	kmem_cache_free(qi_cache, obj);
128 }
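
/*
 * Typical hotpath usage of the cache (sketch, mirroring the pattern in
 * aead_edesc_alloc() and skcipher_edesc_alloc() below):
 *
 *	edesc = qi_cache_zalloc(GFP_DMA | flags);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */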
129 
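/*
 * Map a generic crypto API request to the driver's caam_request, which is
 * stored in the request context of the corresponding transform type.
 */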
130 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
131 {
132 	switch (crypto_tfm_alg_type(areq->tfm)) {
133 	case CRYPTO_ALG_TYPE_SKCIPHER:
134 		return skcipher_request_ctx(skcipher_request_cast(areq));
135 	case CRYPTO_ALG_TYPE_AEAD:
136 		return aead_request_ctx(container_of(areq, struct aead_request,
137 						     base));
138 	case CRYPTO_ALG_TYPE_AHASH:
139 		return ahash_request_ctx(ahash_request_cast(areq));
140 	default:
141 		return ERR_PTR(-EINVAL);
142 	}
143 }
144 
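/*
 * Unmap the DMA mappings of a completed request: source/destination
 * scatterlists (handling the in-place src == dst case), the IV buffer
 * and the HW S/G table, if any.
 */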
145 static void caam_unmap(struct device *dev, struct scatterlist *src,
146 		       struct scatterlist *dst, int src_nents,
147 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
148 		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
149 		       int qm_sg_bytes)
150 {
151 	if (dst != src) {
152 		if (src_nents)
153 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
154 		if (dst_nents)
155 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
156 	} else {
157 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
158 	}
159 
160 	if (iv_dma)
161 		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
162 
163 	if (qm_sg_bytes)
164 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
165 }
166 
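/*
 * (Re)construct the AEAD encrypt and decrypt shared descriptors and sync
 * them to the device. Called from setkey() and setauthsize(); does nothing
 * until both the key and the authsize are known.
 */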
167 static int aead_set_sh_desc(struct crypto_aead *aead)
168 {
169 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
170 						 typeof(*alg), aead);
171 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
172 	unsigned int ivsize = crypto_aead_ivsize(aead);
173 	struct device *dev = ctx->dev;
174 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
175 	struct caam_flc *flc;
176 	u32 *desc;
177 	u32 ctx1_iv_off = 0;
178 	u32 *nonce = NULL;
179 	unsigned int data_len[2];
180 	u32 inl_mask;
181 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
182 			       OP_ALG_AAI_CTR_MOD128);
183 	const bool is_rfc3686 = alg->caam.rfc3686;
184 
185 	if (!ctx->cdata.keylen || !ctx->authsize)
186 		return 0;
187 
188 	/*
189 	 * AES-CTR needs to load IV in CONTEXT1 reg
190 	 * at an offset of 128 bits (16 bytes)
191 	 * CONTEXT1[255:128] = IV
192 	 */
193 	if (ctr_mode)
194 		ctx1_iv_off = 16;
195 
196 	/*
197 	 * RFC3686 specific:
198 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
199 	 */
200 	if (is_rfc3686) {
201 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
202 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
203 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
204 	}
205 
206 	/*
207 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
208 	 * in invalid opcodes (last bytes of user key) in the resulting
209 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
210 	 * addresses are needed.
211 	 */
212 	ctx->adata.key_virt = ctx->key;
213 	ctx->adata.key_dma = ctx->key_dma;
214 
215 	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
216 	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
217 
218 	data_len[0] = ctx->adata.keylen_pad;
219 	data_len[1] = ctx->cdata.keylen;
220 
221 	/* aead_encrypt shared descriptor */
222 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
223 						 DESC_QI_AEAD_ENC_LEN) +
224 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
225 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
226 			      ARRAY_SIZE(data_len)) < 0)
227 		return -EINVAL;
228 
229 	ctx->adata.key_inline = !!(inl_mask & 1);
230 	ctx->cdata.key_inline = !!(inl_mask & 2);
231 
232 	flc = &ctx->flc[ENCRYPT];
233 	desc = flc->sh_desc;
234 
235 	if (alg->caam.geniv)
236 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
237 					  ivsize, ctx->authsize, is_rfc3686,
238 					  nonce, ctx1_iv_off, true,
239 					  priv->sec_attr.era);
240 	else
241 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
242 				       ivsize, ctx->authsize, is_rfc3686, nonce,
243 				       ctx1_iv_off, true, priv->sec_attr.era);
244 
245 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
246 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
247 				   sizeof(flc->flc) + desc_bytes(desc),
248 				   ctx->dir);
249 
250 	/* aead_decrypt shared descriptor */
251 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
252 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
253 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
254 			      ARRAY_SIZE(data_len)) < 0)
255 		return -EINVAL;
256 
257 	ctx->adata.key_inline = !!(inl_mask & 1);
258 	ctx->cdata.key_inline = !!(inl_mask & 2);
259 
260 	flc = &ctx->flc[DECRYPT];
261 	desc = flc->sh_desc;
262 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
263 			       ivsize, ctx->authsize, alg->caam.geniv,
264 			       is_rfc3686, nonce, ctx1_iv_off, true,
265 			       priv->sec_attr.era);
266 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
267 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
268 				   sizeof(flc->flc) + desc_bytes(desc),
269 				   ctx->dir);
270 
271 	return 0;
272 }
273 
274 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
275 {
276 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
277 
278 	ctx->authsize = authsize;
279 	aead_set_sh_desc(authenc);
280 
281 	return 0;
282 }
283 
284 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
285 		       unsigned int keylen)
286 {
287 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
288 	struct device *dev = ctx->dev;
289 	struct crypto_authenc_keys keys;
290 
291 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
292 		goto badkey;
293 
294 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
295 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
296 		keys.authkeylen);
297 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
298 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
299 
300 	ctx->adata.keylen = keys.authkeylen;
301 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
302 					      OP_ALG_ALGSEL_MASK);
303 
304 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
305 		goto badkey;
306 
307 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
308 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
309 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
310 				   keys.enckeylen, ctx->dir);
311 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
312 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
313 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
314 
315 	ctx->cdata.keylen = keys.enckeylen;
316 
317 	memzero_explicit(&keys, sizeof(keys));
318 	return aead_set_sh_desc(aead);
319 badkey:
320 	memzero_explicit(&keys, sizeof(keys));
321 	return -EINVAL;
322 }
323 
324 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
325 			    unsigned int keylen)
326 {
327 	struct crypto_authenc_keys keys;
328 	int err;
329 
330 	err = crypto_authenc_extractkeys(&keys, key, keylen);
331 	if (unlikely(err))
332 		goto out;
333 
334 	err = -EINVAL;
335 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
336 		goto out;
337 
338 	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
339 	      aead_setkey(aead, key, keylen);
340 
341 out:
342 	memzero_explicit(&keys, sizeof(keys));
343 	return err;
344 }
345 
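/*
 * Allocate and fill an extended descriptor for an AEAD request: DMA-map
 * source/destination, IV and assoclen, build the HW S/G table and set up
 * the input/output frame list entries.
 */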
346 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
347 					   bool encrypt)
348 {
349 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
350 	struct caam_request *req_ctx = aead_request_ctx(req);
351 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
352 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
353 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
354 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
355 						 typeof(*alg), aead);
356 	struct device *dev = ctx->dev;
357 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
358 		      GFP_KERNEL : GFP_ATOMIC;
359 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
360 	int src_len, dst_len = 0;
361 	struct aead_edesc *edesc;
362 	dma_addr_t qm_sg_dma, iv_dma = 0;
363 	int ivsize = 0;
364 	unsigned int authsize = ctx->authsize;
365 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
366 	int in_len, out_len;
367 	struct dpaa2_sg_entry *sg_table;
368 
369 	/* allocate space for base edesc, link tables and IV */
370 	edesc = qi_cache_zalloc(GFP_DMA | flags);
371 	if (unlikely(!edesc)) {
372 		dev_err(dev, "could not allocate extended descriptor\n");
373 		return ERR_PTR(-ENOMEM);
374 	}
375 
376 	if (unlikely(req->dst != req->src)) {
377 		src_len = req->assoclen + req->cryptlen;
378 		dst_len = src_len + (encrypt ? authsize : (-authsize));
379 
380 		src_nents = sg_nents_for_len(req->src, src_len);
381 		if (unlikely(src_nents < 0)) {
382 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
383 				src_len);
384 			qi_cache_free(edesc);
385 			return ERR_PTR(src_nents);
386 		}
387 
388 		dst_nents = sg_nents_for_len(req->dst, dst_len);
389 		if (unlikely(dst_nents < 0)) {
390 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
391 				dst_len);
392 			qi_cache_free(edesc);
393 			return ERR_PTR(dst_nents);
394 		}
395 
396 		if (src_nents) {
397 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
398 						      DMA_TO_DEVICE);
399 			if (unlikely(!mapped_src_nents)) {
400 				dev_err(dev, "unable to map source\n");
401 				qi_cache_free(edesc);
402 				return ERR_PTR(-ENOMEM);
403 			}
404 		} else {
405 			mapped_src_nents = 0;
406 		}
407 
408 		if (dst_nents) {
409 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
410 						      DMA_FROM_DEVICE);
411 			if (unlikely(!mapped_dst_nents)) {
412 				dev_err(dev, "unable to map destination\n");
413 				dma_unmap_sg(dev, req->src, src_nents,
414 					     DMA_TO_DEVICE);
415 				qi_cache_free(edesc);
416 				return ERR_PTR(-ENOMEM);
417 			}
418 		} else {
419 			mapped_dst_nents = 0;
420 		}
421 	} else {
422 		src_len = req->assoclen + req->cryptlen +
423 			  (encrypt ? authsize : 0);
424 
425 		src_nents = sg_nents_for_len(req->src, src_len);
426 		if (unlikely(src_nents < 0)) {
427 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
428 				src_len);
429 			qi_cache_free(edesc);
430 			return ERR_PTR(src_nents);
431 		}
432 
433 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
434 					      DMA_BIDIRECTIONAL);
435 		if (unlikely(!mapped_src_nents)) {
436 			dev_err(dev, "unable to map source\n");
437 			qi_cache_free(edesc);
438 			return ERR_PTR(-ENOMEM);
439 		}
440 	}
441 
442 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
443 		ivsize = crypto_aead_ivsize(aead);
444 
445 	/*
446 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
447 	 * Input is not contiguous.
448 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
449 	 * the end of the table by allocating more S/G entries. Logic:
450 	 * if (src != dst && output S/G)
451 	 *      pad output S/G, if needed
452 	 * else if (src == dst && S/G)
453 	 *      overlapping S/Gs; pad one of them
454 	 * else if (input S/G) ...
455 	 *      pad input S/G, if needed
456 	 */
457 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
458 	if (mapped_dst_nents > 1)
459 		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
460 	else if ((req->src == req->dst) && (mapped_src_nents > 1))
461 		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
462 				  1 + !!ivsize +
463 				  pad_sg_nents(mapped_src_nents));
464 	else
465 		qm_sg_nents = pad_sg_nents(qm_sg_nents);
466 
467 	sg_table = &edesc->sgt[0];
468 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
469 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
470 		     CAAM_QI_MEMCACHE_SIZE)) {
471 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
472 			qm_sg_nents, ivsize);
473 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
474 			   0, DMA_NONE, 0, 0);
475 		qi_cache_free(edesc);
476 		return ERR_PTR(-ENOMEM);
477 	}
478 
479 	if (ivsize) {
480 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
481 
482 		/* Make sure IV is located in a DMAable area */
483 		memcpy(iv, req->iv, ivsize);
484 
485 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
486 		if (dma_mapping_error(dev, iv_dma)) {
487 			dev_err(dev, "unable to map IV\n");
488 			caam_unmap(dev, req->src, req->dst, src_nents,
489 				   dst_nents, 0, 0, DMA_NONE, 0, 0);
490 			qi_cache_free(edesc);
491 			return ERR_PTR(-ENOMEM);
492 		}
493 	}
494 
495 	edesc->src_nents = src_nents;
496 	edesc->dst_nents = dst_nents;
497 	edesc->iv_dma = iv_dma;
498 
499 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
500 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
501 		/*
502 		 * The associated data already comes with the IV, but we need
503 		 * to skip it when we authenticate or encrypt...
504 		 */
505 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
506 	else
507 		edesc->assoclen = cpu_to_caam32(req->assoclen);
508 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
509 					     DMA_TO_DEVICE);
510 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
511 		dev_err(dev, "unable to map assoclen\n");
512 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
513 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
514 		qi_cache_free(edesc);
515 		return ERR_PTR(-ENOMEM);
516 	}
517 
518 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
519 	qm_sg_index++;
520 	if (ivsize) {
521 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
522 		qm_sg_index++;
523 	}
524 	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
525 	qm_sg_index += mapped_src_nents;
526 
527 	if (mapped_dst_nents > 1)
528 		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
529 
530 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
531 	if (dma_mapping_error(dev, qm_sg_dma)) {
532 		dev_err(dev, "unable to map S/G table\n");
533 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
534 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
535 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
536 		qi_cache_free(edesc);
537 		return ERR_PTR(-ENOMEM);
538 	}
539 
540 	edesc->qm_sg_dma = qm_sg_dma;
541 	edesc->qm_sg_bytes = qm_sg_bytes;
542 
543 	out_len = req->assoclen + req->cryptlen +
544 		  (encrypt ? ctx->authsize : (-ctx->authsize));
545 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
546 
547 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
548 	dpaa2_fl_set_final(in_fle, true);
549 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
550 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
551 	dpaa2_fl_set_len(in_fle, in_len);
552 
553 	if (req->dst == req->src) {
554 		if (mapped_src_nents == 1) {
555 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
556 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
557 		} else {
558 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
559 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
560 					  (1 + !!ivsize) * sizeof(*sg_table));
561 		}
562 	} else if (!mapped_dst_nents) {
563 		/*
564 		 * The crypto engine requires the output entry to be present when
565 		 * a "frame list" FD is used.
566 		 * Since the engine does not support FMT=2'b11 (unused entry type),
567 		 * leaving out_fle zeroized is the best option.
568 		 */
569 		goto skip_out_fle;
570 	} else if (mapped_dst_nents == 1) {
571 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
572 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
573 	} else {
574 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
575 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
576 				  sizeof(*sg_table));
577 	}
578 
579 	dpaa2_fl_set_len(out_fle, out_len);
580 
581 skip_out_fle:
582 	return edesc;
583 }
584 
585 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
586 {
587 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
588 	unsigned int ivsize = crypto_aead_ivsize(aead);
589 	struct device *dev = ctx->dev;
590 	struct caam_flc *flc;
591 	u32 *desc;
592 
593 	if (!ctx->cdata.keylen || !ctx->authsize)
594 		return 0;
595 
596 	flc = &ctx->flc[ENCRYPT];
597 	desc = flc->sh_desc;
598 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
599 			       ctx->authsize, true, true);
600 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
601 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
602 				   sizeof(flc->flc) + desc_bytes(desc),
603 				   ctx->dir);
604 
605 	flc = &ctx->flc[DECRYPT];
606 	desc = flc->sh_desc;
607 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
608 			       ctx->authsize, false, true);
609 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
610 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
611 				   sizeof(flc->flc) + desc_bytes(desc),
612 				   ctx->dir);
613 
614 	return 0;
615 }
616 
617 static int chachapoly_setauthsize(struct crypto_aead *aead,
618 				  unsigned int authsize)
619 {
620 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
621 
622 	if (authsize != POLY1305_DIGEST_SIZE)
623 		return -EINVAL;
624 
625 	ctx->authsize = authsize;
626 	return chachapoly_set_sh_desc(aead);
627 }
628 
629 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
630 			     unsigned int keylen)
631 {
632 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
633 	unsigned int ivsize = crypto_aead_ivsize(aead);
634 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
635 
636 	if (keylen != CHACHA_KEY_SIZE + saltlen)
637 		return -EINVAL;
638 
639 	ctx->cdata.key_virt = key;
640 	ctx->cdata.keylen = keylen - saltlen;
641 
642 	return chachapoly_set_sh_desc(aead);
643 }
644 
645 static int gcm_set_sh_desc(struct crypto_aead *aead)
646 {
647 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
648 	struct device *dev = ctx->dev;
649 	unsigned int ivsize = crypto_aead_ivsize(aead);
650 	struct caam_flc *flc;
651 	u32 *desc;
652 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
653 			ctx->cdata.keylen;
654 
655 	if (!ctx->cdata.keylen || !ctx->authsize)
656 		return 0;
657 
658 	/*
659 	 * AES GCM encrypt shared descriptor
660 	 * Job Descriptor and Shared Descriptor
661 	 * must fit into the 64-word Descriptor h/w Buffer
662 	 */
663 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
664 		ctx->cdata.key_inline = true;
665 		ctx->cdata.key_virt = ctx->key;
666 	} else {
667 		ctx->cdata.key_inline = false;
668 		ctx->cdata.key_dma = ctx->key_dma;
669 	}
670 
671 	flc = &ctx->flc[ENCRYPT];
672 	desc = flc->sh_desc;
673 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
674 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
675 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
676 				   sizeof(flc->flc) + desc_bytes(desc),
677 				   ctx->dir);
678 
679 	/*
680 	 * Job Descriptor and Shared Descriptors
681 	 * must all fit into the 64-word Descriptor h/w Buffer
682 	 */
683 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
684 		ctx->cdata.key_inline = true;
685 		ctx->cdata.key_virt = ctx->key;
686 	} else {
687 		ctx->cdata.key_inline = false;
688 		ctx->cdata.key_dma = ctx->key_dma;
689 	}
690 
691 	flc = &ctx->flc[DECRYPT];
692 	desc = flc->sh_desc;
693 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
694 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
695 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
696 				   sizeof(flc->flc) + desc_bytes(desc),
697 				   ctx->dir);
698 
699 	return 0;
700 }
701 
702 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
703 {
704 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
705 	int err;
706 
707 	err = crypto_gcm_check_authsize(authsize);
708 	if (err)
709 		return err;
710 
711 	ctx->authsize = authsize;
712 	gcm_set_sh_desc(authenc);
713 
714 	return 0;
715 }
716 
717 static int gcm_setkey(struct crypto_aead *aead,
718 		      const u8 *key, unsigned int keylen)
719 {
720 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
721 	struct device *dev = ctx->dev;
722 	int ret;
723 
724 	ret = aes_check_keylen(keylen);
725 	if (ret)
726 		return ret;
727 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
728 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
729 
730 	memcpy(ctx->key, key, keylen);
731 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
732 	ctx->cdata.keylen = keylen;
733 
734 	return gcm_set_sh_desc(aead);
735 }
736 
737 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
738 {
739 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
740 	struct device *dev = ctx->dev;
741 	unsigned int ivsize = crypto_aead_ivsize(aead);
742 	struct caam_flc *flc;
743 	u32 *desc;
744 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
745 			ctx->cdata.keylen;
746 
747 	if (!ctx->cdata.keylen || !ctx->authsize)
748 		return 0;
749 
750 	ctx->cdata.key_virt = ctx->key;
751 
752 	/*
753 	 * RFC4106 encrypt shared descriptor
754 	 * Job Descriptor and Shared Descriptor
755 	 * must fit into the 64-word Descriptor h/w Buffer
756 	 */
757 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
758 		ctx->cdata.key_inline = true;
759 	} else {
760 		ctx->cdata.key_inline = false;
761 		ctx->cdata.key_dma = ctx->key_dma;
762 	}
763 
764 	flc = &ctx->flc[ENCRYPT];
765 	desc = flc->sh_desc;
766 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
767 				  true);
768 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
769 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
770 				   sizeof(flc->flc) + desc_bytes(desc),
771 				   ctx->dir);
772 
773 	/*
774 	 * Job Descriptor and Shared Descriptors
775 	 * must all fit into the 64-word Descriptor h/w Buffer
776 	 */
777 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
778 		ctx->cdata.key_inline = true;
779 	} else {
780 		ctx->cdata.key_inline = false;
781 		ctx->cdata.key_dma = ctx->key_dma;
782 	}
783 
784 	flc = &ctx->flc[DECRYPT];
785 	desc = flc->sh_desc;
786 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
787 				  true);
788 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
789 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
790 				   sizeof(flc->flc) + desc_bytes(desc),
791 				   ctx->dir);
792 
793 	return 0;
794 }
795 
796 static int rfc4106_setauthsize(struct crypto_aead *authenc,
797 			       unsigned int authsize)
798 {
799 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
800 	int err;
801 
802 	err = crypto_rfc4106_check_authsize(authsize);
803 	if (err)
804 		return err;
805 
806 	ctx->authsize = authsize;
807 	rfc4106_set_sh_desc(authenc);
808 
809 	return 0;
810 }
811 
812 static int rfc4106_setkey(struct crypto_aead *aead,
813 			  const u8 *key, unsigned int keylen)
814 {
815 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
816 	struct device *dev = ctx->dev;
817 	int ret;
818 
819 	ret = aes_check_keylen(keylen - 4);
820 	if (ret)
821 		return ret;
822 
823 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
824 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
825 
826 	memcpy(ctx->key, key, keylen);
827 	/*
828 	 * The last four bytes of the key material are used as the salt value
829 	 * in the nonce. Update the AES key length.
830 	 */
831 	ctx->cdata.keylen = keylen - 4;
832 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
833 				   ctx->dir);
834 
835 	return rfc4106_set_sh_desc(aead);
836 }
837 
838 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
839 {
840 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
841 	struct device *dev = ctx->dev;
842 	unsigned int ivsize = crypto_aead_ivsize(aead);
843 	struct caam_flc *flc;
844 	u32 *desc;
845 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
846 			ctx->cdata.keylen;
847 
848 	if (!ctx->cdata.keylen || !ctx->authsize)
849 		return 0;
850 
851 	ctx->cdata.key_virt = ctx->key;
852 
853 	/*
854 	 * RFC4543 encrypt shared descriptor
855 	 * Job Descriptor and Shared Descriptor
856 	 * must fit into the 64-word Descriptor h/w Buffer
857 	 */
858 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
859 		ctx->cdata.key_inline = true;
860 	} else {
861 		ctx->cdata.key_inline = false;
862 		ctx->cdata.key_dma = ctx->key_dma;
863 	}
864 
865 	flc = &ctx->flc[ENCRYPT];
866 	desc = flc->sh_desc;
867 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
868 				  true);
869 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
870 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
871 				   sizeof(flc->flc) + desc_bytes(desc),
872 				   ctx->dir);
873 
874 	/*
875 	 * Job Descriptor and Shared Descriptors
876 	 * must all fit into the 64-word Descriptor h/w Buffer
877 	 */
878 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
879 		ctx->cdata.key_inline = true;
880 	} else {
881 		ctx->cdata.key_inline = false;
882 		ctx->cdata.key_dma = ctx->key_dma;
883 	}
884 
885 	flc = &ctx->flc[DECRYPT];
886 	desc = flc->sh_desc;
887 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
888 				  true);
889 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
890 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
891 				   sizeof(flc->flc) + desc_bytes(desc),
892 				   ctx->dir);
893 
894 	return 0;
895 }
896 
897 static int rfc4543_setauthsize(struct crypto_aead *authenc,
898 			       unsigned int authsize)
899 {
900 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
901 
902 	if (authsize != 16)
903 		return -EINVAL;
904 
905 	ctx->authsize = authsize;
906 	rfc4543_set_sh_desc(authenc);
907 
908 	return 0;
909 }
910 
911 static int rfc4543_setkey(struct crypto_aead *aead,
912 			  const u8 *key, unsigned int keylen)
913 {
914 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
915 	struct device *dev = ctx->dev;
916 	int ret;
917 
918 	ret = aes_check_keylen(keylen - 4);
919 	if (ret)
920 		return ret;
921 
922 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
923 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
924 
925 	memcpy(ctx->key, key, keylen);
926 	/*
927 	 * The last four bytes of the key material are used as the salt value
928 	 * in the nonce. Update the AES key length.
929 	 */
930 	ctx->cdata.keylen = keylen - 4;
931 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
932 				   ctx->dir);
933 
934 	return rfc4543_set_sh_desc(aead);
935 }
936 
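/*
 * Build and sync the skcipher encrypt/decrypt shared descriptors for the
 * given key. @ctx1_iv_off accounts for the IV placement in the CONTEXT1
 * register (non-zero for CTR and RFC3686 modes).
 */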
937 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
938 			   unsigned int keylen, const u32 ctx1_iv_off)
939 {
940 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
941 	struct caam_skcipher_alg *alg =
942 		container_of(crypto_skcipher_alg(skcipher),
943 			     struct caam_skcipher_alg, skcipher);
944 	struct device *dev = ctx->dev;
945 	struct caam_flc *flc;
946 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
947 	u32 *desc;
948 	const bool is_rfc3686 = alg->caam.rfc3686;
949 
950 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
951 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
952 
953 	ctx->cdata.keylen = keylen;
954 	ctx->cdata.key_virt = key;
955 	ctx->cdata.key_inline = true;
956 
957 	/* skcipher_encrypt shared descriptor */
958 	flc = &ctx->flc[ENCRYPT];
959 	desc = flc->sh_desc;
960 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
961 				   ctx1_iv_off);
962 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
963 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
964 				   sizeof(flc->flc) + desc_bytes(desc),
965 				   ctx->dir);
966 
967 	/* skcipher_decrypt shared descriptor */
968 	flc = &ctx->flc[DECRYPT];
969 	desc = flc->sh_desc;
970 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
971 				   ctx1_iv_off);
972 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
973 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
974 				   sizeof(flc->flc) + desc_bytes(desc),
975 				   ctx->dir);
976 
977 	return 0;
978 }
979 
980 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
981 			       const u8 *key, unsigned int keylen)
982 {
983 	int err;
984 
985 	err = aes_check_keylen(keylen);
986 	if (err)
987 		return err;
988 
989 	return skcipher_setkey(skcipher, key, keylen, 0);
990 }
991 
992 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
993 				   const u8 *key, unsigned int keylen)
994 {
995 	u32 ctx1_iv_off;
996 	int err;
997 
998 	/*
999 	 * RFC3686 specific:
1000 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1001 	 *	| *key = {KEY, NONCE}
1002 	 */
1003 	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1004 	keylen -= CTR_RFC3686_NONCE_SIZE;
1005 
1006 	err = aes_check_keylen(keylen);
1007 	if (err)
1008 		return err;
1009 
1010 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1011 }
1012 
1013 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1014 			       const u8 *key, unsigned int keylen)
1015 {
1016 	u32 ctx1_iv_off;
1017 	int err;
1018 
1019 	/*
1020 	 * AES-CTR needs to load IV in CONTEXT1 reg
1021 	 * at an offset of 128 bits (16 bytes)
1022 	 * CONTEXT1[255:128] = IV
1023 	 */
1024 	ctx1_iv_off = 16;
1025 
1026 	err = aes_check_keylen(keylen);
1027 	if (err)
1028 		return err;
1029 
1030 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1031 }
1032 
1033 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1034 				    const u8 *key, unsigned int keylen)
1035 {
1036 	if (keylen != CHACHA_KEY_SIZE)
1037 		return -EINVAL;
1038 
1039 	return skcipher_setkey(skcipher, key, keylen, 0);
1040 }
1041 
1042 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1043 			       const u8 *key, unsigned int keylen)
1044 {
1045 	return verify_skcipher_des_key(skcipher, key) ?:
1046 	       skcipher_setkey(skcipher, key, keylen, 0);
1047 }
1048 
1049 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1050 			        const u8 *key, unsigned int keylen)
1051 {
1052 	return verify_skcipher_des3_key(skcipher, key) ?:
1053 	       skcipher_setkey(skcipher, key, keylen, 0);
1054 }
1055 
1056 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1057 			       unsigned int keylen)
1058 {
1059 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1060 	struct device *dev = ctx->dev;
1061 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
1062 	struct caam_flc *flc;
1063 	u32 *desc;
1064 	int err;
1065 
1066 	err = xts_verify_key(skcipher, key, keylen);
1067 	if (err) {
1068 		dev_dbg(dev, "key size mismatch\n");
1069 		return err;
1070 	}
1071 
1072 	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
1073 		ctx->xts_key_fallback = true;
1074 
1075 	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
1076 		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
1077 		if (err)
1078 			return err;
1079 	}
1080 
1081 	ctx->cdata.keylen = keylen;
1082 	ctx->cdata.key_virt = key;
1083 	ctx->cdata.key_inline = true;
1084 
1085 	/* xts_skcipher_encrypt shared descriptor */
1086 	flc = &ctx->flc[ENCRYPT];
1087 	desc = flc->sh_desc;
1088 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1089 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1090 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1091 				   sizeof(flc->flc) + desc_bytes(desc),
1092 				   ctx->dir);
1093 
1094 	/* xts_skcipher_decrypt shared descriptor */
1095 	flc = &ctx->flc[DECRYPT];
1096 	desc = flc->sh_desc;
1097 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1098 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1099 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1100 				   sizeof(flc->flc) + desc_bytes(desc),
1101 				   ctx->dir);
1102 
1103 	return 0;
1104 }
1105 
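/*
 * Allocate and fill an extended descriptor for an skcipher request:
 * DMA-map source/destination and IV, build the overlapping input/output
 * HW S/G tables and set up the frame list entries.
 */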
1106 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1107 {
1108 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1109 	struct caam_request *req_ctx = skcipher_request_ctx(req);
1110 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1111 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1112 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1113 	struct device *dev = ctx->dev;
1114 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1115 		       GFP_KERNEL : GFP_ATOMIC;
1116 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1117 	struct skcipher_edesc *edesc;
1118 	dma_addr_t iv_dma;
1119 	u8 *iv;
1120 	int ivsize = crypto_skcipher_ivsize(skcipher);
1121 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1122 	struct dpaa2_sg_entry *sg_table;
1123 
1124 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1125 	if (unlikely(src_nents < 0)) {
1126 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1127 			req->cryptlen);
1128 		return ERR_PTR(src_nents);
1129 	}
1130 
1131 	if (unlikely(req->dst != req->src)) {
1132 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1133 		if (unlikely(dst_nents < 0)) {
1134 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1135 				req->cryptlen);
1136 			return ERR_PTR(dst_nents);
1137 		}
1138 
1139 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1140 					      DMA_TO_DEVICE);
1141 		if (unlikely(!mapped_src_nents)) {
1142 			dev_err(dev, "unable to map source\n");
1143 			return ERR_PTR(-ENOMEM);
1144 		}
1145 
1146 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1147 					      DMA_FROM_DEVICE);
1148 		if (unlikely(!mapped_dst_nents)) {
1149 			dev_err(dev, "unable to map destination\n");
1150 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1151 			return ERR_PTR(-ENOMEM);
1152 		}
1153 	} else {
1154 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1155 					      DMA_BIDIRECTIONAL);
1156 		if (unlikely(!mapped_src_nents)) {
1157 			dev_err(dev, "unable to map source\n");
1158 			return ERR_PTR(-ENOMEM);
1159 		}
1160 	}
1161 
1162 	qm_sg_ents = 1 + mapped_src_nents;
1163 	dst_sg_idx = qm_sg_ents;
1164 
1165 	/*
1166 	 * Input, output HW S/G tables: [IV, src][dst, IV]
1167 	 * IV entries point to the same buffer
1168 	 * If src == dst, S/G entries are reused (S/G tables overlap)
1169 	 *
1170 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1171 	 * the end of the table by allocating more S/G entries.
1172 	 */
1173 	if (req->src != req->dst)
1174 		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1175 	else
1176 		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1177 
1178 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1179 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1180 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1181 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1182 			qm_sg_ents, ivsize);
1183 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1184 			   0, DMA_NONE, 0, 0);
1185 		return ERR_PTR(-ENOMEM);
1186 	}
1187 
1188 	/* allocate space for base edesc, link tables and IV */
1189 	edesc = qi_cache_zalloc(GFP_DMA | flags);
1190 	if (unlikely(!edesc)) {
1191 		dev_err(dev, "could not allocate extended descriptor\n");
1192 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1193 			   0, DMA_NONE, 0, 0);
1194 		return ERR_PTR(-ENOMEM);
1195 	}
1196 
1197 	/* Make sure IV is located in a DMAable area */
1198 	sg_table = &edesc->sgt[0];
1199 	iv = (u8 *)(sg_table + qm_sg_ents);
1200 	memcpy(iv, req->iv, ivsize);
1201 
1202 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1203 	if (dma_mapping_error(dev, iv_dma)) {
1204 		dev_err(dev, "unable to map IV\n");
1205 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1206 			   0, DMA_NONE, 0, 0);
1207 		qi_cache_free(edesc);
1208 		return ERR_PTR(-ENOMEM);
1209 	}
1210 
1211 	edesc->src_nents = src_nents;
1212 	edesc->dst_nents = dst_nents;
1213 	edesc->iv_dma = iv_dma;
1214 	edesc->qm_sg_bytes = qm_sg_bytes;
1215 
1216 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1217 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1218 
1219 	if (req->src != req->dst)
1220 		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1221 
1222 	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1223 			 ivsize, 0);
1224 
1225 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1226 					  DMA_TO_DEVICE);
1227 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1228 		dev_err(dev, "unable to map S/G table\n");
1229 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1230 			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1231 		qi_cache_free(edesc);
1232 		return ERR_PTR(-ENOMEM);
1233 	}
1234 
1235 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1236 	dpaa2_fl_set_final(in_fle, true);
1237 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1238 	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1239 
1240 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1241 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1242 
1243 	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1244 
1245 	if (req->src == req->dst)
1246 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1247 				  sizeof(*sg_table));
1248 	else
1249 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1250 				  sizeof(*sg_table));
1251 
1252 	return edesc;
1253 }
1254 
1255 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1256 		       struct aead_request *req)
1257 {
1258 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1259 	int ivsize = crypto_aead_ivsize(aead);
1260 
1261 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1262 		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1263 		   edesc->qm_sg_bytes);
1264 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1265 }
1266 
1267 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1268 			   struct skcipher_request *req)
1269 {
1270 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1271 	int ivsize = crypto_skcipher_ivsize(skcipher);
1272 
1273 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1274 		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1275 		   edesc->qm_sg_bytes);
1276 }
1277 
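/*
 * DPAA2 completion callback: decode the frame status, unmap the request
 * resources and complete the crypto API request.
 */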
1278 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1279 {
1280 	struct crypto_async_request *areq = cbk_ctx;
1281 	struct aead_request *req = container_of(areq, struct aead_request,
1282 						base);
1283 	struct caam_request *req_ctx = to_caam_req(areq);
1284 	struct aead_edesc *edesc = req_ctx->edesc;
1285 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1286 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1287 	int ecode = 0;
1288 
1289 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1290 
1291 	if (unlikely(status))
1292 		ecode = caam_qi2_strstatus(ctx->dev, status);
1293 
1294 	aead_unmap(ctx->dev, edesc, req);
1295 	qi_cache_free(edesc);
1296 	aead_request_complete(req, ecode);
1297 }
1298 
1299 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1300 {
1301 	struct crypto_async_request *areq = cbk_ctx;
1302 	struct aead_request *req = container_of(areq, struct aead_request,
1303 						base);
1304 	struct caam_request *req_ctx = to_caam_req(areq);
1305 	struct aead_edesc *edesc = req_ctx->edesc;
1306 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1307 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1308 	int ecode = 0;
1309 
1310 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1311 
1312 	if (unlikely(status))
1313 		ecode = caam_qi2_strstatus(ctx->dev, status);
1314 
1315 	aead_unmap(ctx->dev, edesc, req);
1316 	qi_cache_free(edesc);
1317 	aead_request_complete(req, ecode);
1318 }
1319 
1320 static int aead_encrypt(struct aead_request *req)
1321 {
1322 	struct aead_edesc *edesc;
1323 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1324 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1325 	struct caam_request *caam_req = aead_request_ctx(req);
1326 	int ret;
1327 
1328 	/* allocate extended descriptor */
1329 	edesc = aead_edesc_alloc(req, true);
1330 	if (IS_ERR(edesc))
1331 		return PTR_ERR(edesc);
1332 
1333 	caam_req->flc = &ctx->flc[ENCRYPT];
1334 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1335 	caam_req->cbk = aead_encrypt_done;
1336 	caam_req->ctx = &req->base;
1337 	caam_req->edesc = edesc;
1338 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1339 	if (ret != -EINPROGRESS &&
1340 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1341 		aead_unmap(ctx->dev, edesc, req);
1342 		qi_cache_free(edesc);
1343 	}
1344 
1345 	return ret;
1346 }
1347 
1348 static int aead_decrypt(struct aead_request *req)
1349 {
1350 	struct aead_edesc *edesc;
1351 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1352 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1353 	struct caam_request *caam_req = aead_request_ctx(req);
1354 	int ret;
1355 
1356 	/* allocate extended descriptor */
1357 	edesc = aead_edesc_alloc(req, false);
1358 	if (IS_ERR(edesc))
1359 		return PTR_ERR(edesc);
1360 
1361 	caam_req->flc = &ctx->flc[DECRYPT];
1362 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1363 	caam_req->cbk = aead_decrypt_done;
1364 	caam_req->ctx = &req->base;
1365 	caam_req->edesc = edesc;
1366 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1367 	if (ret != -EINPROGRESS &&
1368 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1369 		aead_unmap(ctx->dev, edesc, req);
1370 		qi_cache_free(edesc);
1371 	}
1372 
1373 	return ret;
1374 }
1375 
1376 static int ipsec_gcm_encrypt(struct aead_request *req)
1377 {
1378 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1379 }
1380 
1381 static int ipsec_gcm_decrypt(struct aead_request *req)
1382 {
1383 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1384 }
1385 
1386 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1387 {
1388 	struct crypto_async_request *areq = cbk_ctx;
1389 	struct skcipher_request *req = skcipher_request_cast(areq);
1390 	struct caam_request *req_ctx = to_caam_req(areq);
1391 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1392 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1393 	struct skcipher_edesc *edesc = req_ctx->edesc;
1394 	int ecode = 0;
1395 	int ivsize = crypto_skcipher_ivsize(skcipher);
1396 
1397 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1398 
1399 	if (unlikely(status))
1400 		ecode = caam_qi2_strstatus(ctx->dev, status);
1401 
1402 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1403 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1404 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1405 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1406 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1407 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1408 
1409 	skcipher_unmap(ctx->dev, edesc, req);
1410 
1411 	/*
1412 	 * The crypto API expects us to set the IV (req->iv) to the last
1413 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1414 	 * This is used e.g. by the CTS mode.
1415 	 */
1416 	if (!ecode)
1417 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1418 		       ivsize);
1419 
1420 	qi_cache_free(edesc);
1421 	skcipher_request_complete(req, ecode);
1422 }
1423 
1424 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1425 {
1426 	struct crypto_async_request *areq = cbk_ctx;
1427 	struct skcipher_request *req = skcipher_request_cast(areq);
1428 	struct caam_request *req_ctx = to_caam_req(areq);
1429 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1430 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1431 	struct skcipher_edesc *edesc = req_ctx->edesc;
1432 	int ecode = 0;
1433 	int ivsize = crypto_skcipher_ivsize(skcipher);
1434 
1435 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1436 
1437 	if (unlikely(status))
1438 		ecode = caam_qi2_strstatus(ctx->dev, status);
1439 
1440 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1441 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1442 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1443 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1444 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1445 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1446 
1447 	skcipher_unmap(ctx->dev, edesc, req);
1448 
1449 	/*
1450 	 * The crypto API expects us to set the IV (req->iv) to the last
1451 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1452 	 * This is used e.g. by the CTS mode.
1453 	 */
1454 	if (!ecode)
1455 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1456 		       ivsize);
1457 
1458 	qi_cache_free(edesc);
1459 	skcipher_request_complete(req, ecode);
1460 }
1461 
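/*
 * Check whether the second half of the XTS IV is non-zero; used to decide
 * when hardware that only supports 8-byte XTS IVs (SEC era <= 8) must fall
 * back to the software implementation.
 */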
1462 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1463 {
1464 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1465 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1466 
1467 	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1468 }
1469 
1470 static int skcipher_encrypt(struct skcipher_request *req)
1471 {
1472 	struct skcipher_edesc *edesc;
1473 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1474 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1475 	struct caam_request *caam_req = skcipher_request_ctx(req);
1476 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1477 	int ret;
1478 
1479 	/*
1480 	 * XTS is expected to return an error even for input length = 0.
1481 	 * Note that the case input length < block size will be caught by the
1482 	 * HW during offloading, and an error will be returned.
1483 	 */
1484 	if (!req->cryptlen && !ctx->fallback)
1485 		return 0;
1486 
1487 	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1488 			      ctx->xts_key_fallback)) {
1489 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1490 		skcipher_request_set_callback(&caam_req->fallback_req,
1491 					      req->base.flags,
1492 					      req->base.complete,
1493 					      req->base.data);
1494 		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1495 					   req->dst, req->cryptlen, req->iv);
1496 
1497 		return crypto_skcipher_encrypt(&caam_req->fallback_req);
1498 	}
1499 
1500 	/* allocate extended descriptor */
1501 	edesc = skcipher_edesc_alloc(req);
1502 	if (IS_ERR(edesc))
1503 		return PTR_ERR(edesc);
1504 
1505 	caam_req->flc = &ctx->flc[ENCRYPT];
1506 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1507 	caam_req->cbk = skcipher_encrypt_done;
1508 	caam_req->ctx = &req->base;
1509 	caam_req->edesc = edesc;
1510 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1511 	if (ret != -EINPROGRESS &&
1512 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1513 		skcipher_unmap(ctx->dev, edesc, req);
1514 		qi_cache_free(edesc);
1515 	}
1516 
1517 	return ret;
1518 }
1519 
1520 static int skcipher_decrypt(struct skcipher_request *req)
1521 {
1522 	struct skcipher_edesc *edesc;
1523 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1524 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1525 	struct caam_request *caam_req = skcipher_request_ctx(req);
1526 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1527 	int ret;
1528 
1529 	/*
1530 	 * XTS is expected to return an error even for input length = 0.
1531 	 * Note that the case input length < block size will be caught by the
1532 	 * HW during offloading, and an error will be returned.
1533 	 */
1534 	if (!req->cryptlen && !ctx->fallback)
1535 		return 0;
1536 
1537 	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1538 			      ctx->xts_key_fallback)) {
1539 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1540 		skcipher_request_set_callback(&caam_req->fallback_req,
1541 					      req->base.flags,
1542 					      req->base.complete,
1543 					      req->base.data);
1544 		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1545 					   req->dst, req->cryptlen, req->iv);
1546 
1547 		return crypto_skcipher_decrypt(&caam_req->fallback_req);
1548 	}
1549 
1550 	/* allocate extended descriptor */
1551 	edesc = skcipher_edesc_alloc(req);
1552 	if (IS_ERR(edesc))
1553 		return PTR_ERR(edesc);
1554 
1555 	caam_req->flc = &ctx->flc[DECRYPT];
1556 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1557 	caam_req->cbk = skcipher_decrypt_done;
1558 	caam_req->ctx = &req->base;
1559 	caam_req->edesc = edesc;
1560 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1561 	if (ret != -EINPROGRESS &&
1562 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1563 		skcipher_unmap(ctx->dev, edesc, req);
1564 		qi_cache_free(edesc);
1565 	}
1566 
1567 	return ret;
1568 }
1569 
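/*
 * Common transform init: fill in the descriptor header templates and map
 * the Flow Contexts together with the key buffer as a single DMA region.
 */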
1570 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1571 			 bool uses_dkp)
1572 {
1573 	dma_addr_t dma_addr;
1574 	int i;
1575 
1576 	/* copy descriptor header template value */
1577 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1578 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1579 
1580 	ctx->dev = caam->dev;
1581 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1582 
1583 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1584 					offsetof(struct caam_ctx, flc_dma),
1585 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1586 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1587 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1588 		return -ENOMEM;
1589 	}
1590 
1591 	for (i = 0; i < NUM_OP; i++)
1592 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1593 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1594 
1595 	return 0;
1596 }
1597 
1598 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1599 {
1600 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1601 	struct caam_skcipher_alg *caam_alg =
1602 		container_of(alg, typeof(*caam_alg), skcipher);
1603 	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1604 	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1605 	int ret = 0;
1606 
1607 	if (alg_aai == OP_ALG_AAI_XTS) {
1608 		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1609 		struct crypto_skcipher *fallback;
1610 
1611 		fallback = crypto_alloc_skcipher(tfm_name, 0,
1612 						 CRYPTO_ALG_NEED_FALLBACK);
1613 		if (IS_ERR(fallback)) {
1614 			dev_err(caam_alg->caam.dev,
1615 				"Failed to allocate %s fallback: %ld\n",
1616 				tfm_name, PTR_ERR(fallback));
1617 			return PTR_ERR(fallback);
1618 		}
1619 
1620 		ctx->fallback = fallback;
1621 		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
1622 					    crypto_skcipher_reqsize(fallback));
1623 	} else {
1624 		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1625 	}
1626 
1627 	ret = caam_cra_init(ctx, &caam_alg->caam, false);
1628 	if (ret && ctx->fallback)
1629 		crypto_free_skcipher(ctx->fallback);
1630 
1631 	return ret;
1632 }
1633 
1634 static int caam_cra_init_aead(struct crypto_aead *tfm)
1635 {
1636 	struct aead_alg *alg = crypto_aead_alg(tfm);
1637 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1638 						      aead);
1639 
1640 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1641 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1642 			     !caam_alg->caam.nodkp);
1643 }
1644 
1645 static void caam_exit_common(struct caam_ctx *ctx)
1646 {
1647 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1648 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1649 			       DMA_ATTR_SKIP_CPU_SYNC);
1650 }
1651 
1652 static void caam_cra_exit(struct crypto_skcipher *tfm)
1653 {
1654 	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1655 
1656 	if (ctx->fallback)
1657 		crypto_free_skcipher(ctx->fallback);
1658 	caam_exit_common(ctx);
1659 }
1660 
1661 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1662 {
1663 	caam_exit_common(crypto_aead_ctx(tfm));
1664 }
1665 
1666 static struct caam_skcipher_alg driver_algs[] = {
1667 	{
1668 		.skcipher = {
1669 			.base = {
1670 				.cra_name = "cbc(aes)",
1671 				.cra_driver_name = "cbc-aes-caam-qi2",
1672 				.cra_blocksize = AES_BLOCK_SIZE,
1673 			},
1674 			.setkey = aes_skcipher_setkey,
1675 			.encrypt = skcipher_encrypt,
1676 			.decrypt = skcipher_decrypt,
1677 			.min_keysize = AES_MIN_KEY_SIZE,
1678 			.max_keysize = AES_MAX_KEY_SIZE,
1679 			.ivsize = AES_BLOCK_SIZE,
1680 		},
1681 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1682 	},
1683 	{
1684 		.skcipher = {
1685 			.base = {
1686 				.cra_name = "cbc(des3_ede)",
1687 				.cra_driver_name = "cbc-3des-caam-qi2",
1688 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1689 			},
1690 			.setkey = des3_skcipher_setkey,
1691 			.encrypt = skcipher_encrypt,
1692 			.decrypt = skcipher_decrypt,
1693 			.min_keysize = DES3_EDE_KEY_SIZE,
1694 			.max_keysize = DES3_EDE_KEY_SIZE,
1695 			.ivsize = DES3_EDE_BLOCK_SIZE,
1696 		},
1697 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1698 	},
1699 	{
1700 		.skcipher = {
1701 			.base = {
1702 				.cra_name = "cbc(des)",
1703 				.cra_driver_name = "cbc-des-caam-qi2",
1704 				.cra_blocksize = DES_BLOCK_SIZE,
1705 			},
1706 			.setkey = des_skcipher_setkey,
1707 			.encrypt = skcipher_encrypt,
1708 			.decrypt = skcipher_decrypt,
1709 			.min_keysize = DES_KEY_SIZE,
1710 			.max_keysize = DES_KEY_SIZE,
1711 			.ivsize = DES_BLOCK_SIZE,
1712 		},
1713 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1714 	},
1715 	{
1716 		.skcipher = {
1717 			.base = {
1718 				.cra_name = "ctr(aes)",
1719 				.cra_driver_name = "ctr-aes-caam-qi2",
1720 				.cra_blocksize = 1,
1721 			},
1722 			.setkey = ctr_skcipher_setkey,
1723 			.encrypt = skcipher_encrypt,
1724 			.decrypt = skcipher_decrypt,
1725 			.min_keysize = AES_MIN_KEY_SIZE,
1726 			.max_keysize = AES_MAX_KEY_SIZE,
1727 			.ivsize = AES_BLOCK_SIZE,
1728 			.chunksize = AES_BLOCK_SIZE,
1729 		},
1730 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1731 					OP_ALG_AAI_CTR_MOD128,
1732 	},
1733 	{
1734 		.skcipher = {
1735 			.base = {
1736 				.cra_name = "rfc3686(ctr(aes))",
1737 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1738 				.cra_blocksize = 1,
1739 			},
1740 			.setkey = rfc3686_skcipher_setkey,
1741 			.encrypt = skcipher_encrypt,
1742 			.decrypt = skcipher_decrypt,
1743 			.min_keysize = AES_MIN_KEY_SIZE +
1744 				       CTR_RFC3686_NONCE_SIZE,
1745 			.max_keysize = AES_MAX_KEY_SIZE +
1746 				       CTR_RFC3686_NONCE_SIZE,
1747 			.ivsize = CTR_RFC3686_IV_SIZE,
1748 			.chunksize = AES_BLOCK_SIZE,
1749 		},
1750 		.caam = {
1751 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1752 					   OP_ALG_AAI_CTR_MOD128,
1753 			.rfc3686 = true,
1754 		},
1755 	},
1756 	{
1757 		.skcipher = {
1758 			.base = {
1759 				.cra_name = "xts(aes)",
1760 				.cra_driver_name = "xts-aes-caam-qi2",
1761 				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1762 				.cra_blocksize = AES_BLOCK_SIZE,
1763 			},
1764 			.setkey = xts_skcipher_setkey,
1765 			.encrypt = skcipher_encrypt,
1766 			.decrypt = skcipher_decrypt,
1767 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1768 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1769 			.ivsize = AES_BLOCK_SIZE,
1770 		},
1771 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1772 	},
1773 	{
1774 		.skcipher = {
1775 			.base = {
1776 				.cra_name = "chacha20",
1777 				.cra_driver_name = "chacha20-caam-qi2",
1778 				.cra_blocksize = 1,
1779 			},
1780 			.setkey = chacha20_skcipher_setkey,
1781 			.encrypt = skcipher_encrypt,
1782 			.decrypt = skcipher_decrypt,
1783 			.min_keysize = CHACHA_KEY_SIZE,
1784 			.max_keysize = CHACHA_KEY_SIZE,
1785 			.ivsize = CHACHA_IV_SIZE,
1786 		},
1787 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1788 	},
1789 };
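
/*
 * Illustrative usage sketch (not part of the driver): how a kernel-side
 * caller would reach one of the skciphers registered above through the
 * generic crypto API. The function name and buffer sizes are hypothetical,
 * and error handling is abbreviated; data buffers must be DMA-able (not on
 * the stack), hence the kzalloc. Requesting "cbc(aes)" resolves to
 * "cbc-aes-caam-qi2" whenever this driver wins the priority selection.
 */
static int __maybe_unused caam_qi2_skcipher_usage_sketch(void)
{
	u8 key[AES_MIN_KEY_SIZE] = { 0 }, iv[AES_BLOCK_SIZE] = { 0 };
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 *buf;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!req || !buf) {
		ret = -ENOMEM;
		goto out_req;
	}

	/* one in-place AES block; real callers chain real scatterlists */
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* the driver completes asynchronously (CRYPTO_ALG_ASYNC) */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_req:
	kfree(buf);
	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}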
1790 
1791 static struct caam_aead_alg driver_aeads[] = {
1792 	{
1793 		.aead = {
1794 			.base = {
1795 				.cra_name = "rfc4106(gcm(aes))",
1796 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1797 				.cra_blocksize = 1,
1798 			},
1799 			.setkey = rfc4106_setkey,
1800 			.setauthsize = rfc4106_setauthsize,
1801 			.encrypt = ipsec_gcm_encrypt,
1802 			.decrypt = ipsec_gcm_decrypt,
1803 			.ivsize = 8,	/* 64-bit explicit IV (RFC 4106) */
1804 			.maxauthsize = AES_BLOCK_SIZE,
1805 		},
1806 		.caam = {
1807 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1808 			.nodkp = true,
1809 		},
1810 	},
1811 	{
1812 		.aead = {
1813 			.base = {
1814 				.cra_name = "rfc4543(gcm(aes))",
1815 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1816 				.cra_blocksize = 1,
1817 			},
1818 			.setkey = rfc4543_setkey,
1819 			.setauthsize = rfc4543_setauthsize,
1820 			.encrypt = ipsec_gcm_encrypt,
1821 			.decrypt = ipsec_gcm_decrypt,
1822 			.ivsize = 8,	/* 64-bit explicit IV (RFC 4543) */
1823 			.maxauthsize = AES_BLOCK_SIZE,
1824 		},
1825 		.caam = {
1826 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1827 			.nodkp = true,
1828 		},
1829 	},
1830 	/* Galois Counter Mode */
1831 	{
1832 		.aead = {
1833 			.base = {
1834 				.cra_name = "gcm(aes)",
1835 				.cra_driver_name = "gcm-aes-caam-qi2",
1836 				.cra_blocksize = 1,
1837 			},
1838 			.setkey = gcm_setkey,
1839 			.setauthsize = gcm_setauthsize,
1840 			.encrypt = aead_encrypt,
1841 			.decrypt = aead_decrypt,
1842 			.ivsize = 12,	/* full 96-bit GCM nonce */
1843 			.maxauthsize = AES_BLOCK_SIZE,
1844 		},
1845 		.caam = {
1846 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1847 			.nodkp = true,
1848 		}
1849 	},
1850 	/* single-pass ipsec_esp descriptor */
1851 	{
1852 		.aead = {
1853 			.base = {
1854 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1855 				.cra_driver_name = "authenc-hmac-md5-"
1856 						   "cbc-aes-caam-qi2",
1857 				.cra_blocksize = AES_BLOCK_SIZE,
1858 			},
1859 			.setkey = aead_setkey,
1860 			.setauthsize = aead_setauthsize,
1861 			.encrypt = aead_encrypt,
1862 			.decrypt = aead_decrypt,
1863 			.ivsize = AES_BLOCK_SIZE,
1864 			.maxauthsize = MD5_DIGEST_SIZE,
1865 		},
1866 		.caam = {
1867 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1868 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1869 					   OP_ALG_AAI_HMAC_PRECOMP,
1870 		}
1871 	},
1872 	{
1873 		.aead = {
1874 			.base = {
1875 				.cra_name = "echainiv(authenc(hmac(md5),"
1876 					    "cbc(aes)))",
1877 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1878 						   "cbc-aes-caam-qi2",
1879 				.cra_blocksize = AES_BLOCK_SIZE,
1880 			},
1881 			.setkey = aead_setkey,
1882 			.setauthsize = aead_setauthsize,
1883 			.encrypt = aead_encrypt,
1884 			.decrypt = aead_decrypt,
1885 			.ivsize = AES_BLOCK_SIZE,
1886 			.maxauthsize = MD5_DIGEST_SIZE,
1887 		},
1888 		.caam = {
1889 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1890 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1891 					   OP_ALG_AAI_HMAC_PRECOMP,
1892 			.geniv = true,
1893 		}
1894 	},
1895 	{
1896 		.aead = {
1897 			.base = {
1898 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1899 				.cra_driver_name = "authenc-hmac-sha1-"
1900 						   "cbc-aes-caam-qi2",
1901 				.cra_blocksize = AES_BLOCK_SIZE,
1902 			},
1903 			.setkey = aead_setkey,
1904 			.setauthsize = aead_setauthsize,
1905 			.encrypt = aead_encrypt,
1906 			.decrypt = aead_decrypt,
1907 			.ivsize = AES_BLOCK_SIZE,
1908 			.maxauthsize = SHA1_DIGEST_SIZE,
1909 		},
1910 		.caam = {
1911 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1912 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1913 					   OP_ALG_AAI_HMAC_PRECOMP,
1914 		}
1915 	},
1916 	{
1917 		.aead = {
1918 			.base = {
1919 				.cra_name = "echainiv(authenc(hmac(sha1),"
1920 					    "cbc(aes)))",
1921 				.cra_driver_name = "echainiv-authenc-"
1922 						   "hmac-sha1-cbc-aes-caam-qi2",
1923 				.cra_blocksize = AES_BLOCK_SIZE,
1924 			},
1925 			.setkey = aead_setkey,
1926 			.setauthsize = aead_setauthsize,
1927 			.encrypt = aead_encrypt,
1928 			.decrypt = aead_decrypt,
1929 			.ivsize = AES_BLOCK_SIZE,
1930 			.maxauthsize = SHA1_DIGEST_SIZE,
1931 		},
1932 		.caam = {
1933 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1934 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1935 					   OP_ALG_AAI_HMAC_PRECOMP,
1936 			.geniv = true,
1937 		},
1938 	},
1939 	{
1940 		.aead = {
1941 			.base = {
1942 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1943 				.cra_driver_name = "authenc-hmac-sha224-"
1944 						   "cbc-aes-caam-qi2",
1945 				.cra_blocksize = AES_BLOCK_SIZE,
1946 			},
1947 			.setkey = aead_setkey,
1948 			.setauthsize = aead_setauthsize,
1949 			.encrypt = aead_encrypt,
1950 			.decrypt = aead_decrypt,
1951 			.ivsize = AES_BLOCK_SIZE,
1952 			.maxauthsize = SHA224_DIGEST_SIZE,
1953 		},
1954 		.caam = {
1955 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1956 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1957 					   OP_ALG_AAI_HMAC_PRECOMP,
1958 		}
1959 	},
1960 	{
1961 		.aead = {
1962 			.base = {
1963 				.cra_name = "echainiv(authenc(hmac(sha224),"
1964 					    "cbc(aes)))",
1965 				.cra_driver_name = "echainiv-authenc-"
1966 						   "hmac-sha224-cbc-aes-caam-qi2",
1967 				.cra_blocksize = AES_BLOCK_SIZE,
1968 			},
1969 			.setkey = aead_setkey,
1970 			.setauthsize = aead_setauthsize,
1971 			.encrypt = aead_encrypt,
1972 			.decrypt = aead_decrypt,
1973 			.ivsize = AES_BLOCK_SIZE,
1974 			.maxauthsize = SHA224_DIGEST_SIZE,
1975 		},
1976 		.caam = {
1977 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1978 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1979 					   OP_ALG_AAI_HMAC_PRECOMP,
1980 			.geniv = true,
1981 		}
1982 	},
1983 	{
1984 		.aead = {
1985 			.base = {
1986 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1987 				.cra_driver_name = "authenc-hmac-sha256-"
1988 						   "cbc-aes-caam-qi2",
1989 				.cra_blocksize = AES_BLOCK_SIZE,
1990 			},
1991 			.setkey = aead_setkey,
1992 			.setauthsize = aead_setauthsize,
1993 			.encrypt = aead_encrypt,
1994 			.decrypt = aead_decrypt,
1995 			.ivsize = AES_BLOCK_SIZE,
1996 			.maxauthsize = SHA256_DIGEST_SIZE,
1997 		},
1998 		.caam = {
1999 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2000 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2001 					   OP_ALG_AAI_HMAC_PRECOMP,
2002 		}
2003 	},
2004 	{
2005 		.aead = {
2006 			.base = {
2007 				.cra_name = "echainiv(authenc(hmac(sha256),"
2008 					    "cbc(aes)))",
2009 				.cra_driver_name = "echainiv-authenc-"
2010 						   "hmac-sha256-cbc-aes-"
2011 						   "caam-qi2",
2012 				.cra_blocksize = AES_BLOCK_SIZE,
2013 			},
2014 			.setkey = aead_setkey,
2015 			.setauthsize = aead_setauthsize,
2016 			.encrypt = aead_encrypt,
2017 			.decrypt = aead_decrypt,
2018 			.ivsize = AES_BLOCK_SIZE,
2019 			.maxauthsize = SHA256_DIGEST_SIZE,
2020 		},
2021 		.caam = {
2022 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2023 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2024 					   OP_ALG_AAI_HMAC_PRECOMP,
2025 			.geniv = true,
2026 		}
2027 	},
2028 	{
2029 		.aead = {
2030 			.base = {
2031 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2032 				.cra_driver_name = "authenc-hmac-sha384-"
2033 						   "cbc-aes-caam-qi2",
2034 				.cra_blocksize = AES_BLOCK_SIZE,
2035 			},
2036 			.setkey = aead_setkey,
2037 			.setauthsize = aead_setauthsize,
2038 			.encrypt = aead_encrypt,
2039 			.decrypt = aead_decrypt,
2040 			.ivsize = AES_BLOCK_SIZE,
2041 			.maxauthsize = SHA384_DIGEST_SIZE,
2042 		},
2043 		.caam = {
2044 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2045 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2046 					   OP_ALG_AAI_HMAC_PRECOMP,
2047 		}
2048 	},
2049 	{
2050 		.aead = {
2051 			.base = {
2052 				.cra_name = "echainiv(authenc(hmac(sha384),"
2053 					    "cbc(aes)))",
2054 				.cra_driver_name = "echainiv-authenc-"
2055 						   "hmac-sha384-cbc-aes-"
2056 						   "caam-qi2",
2057 				.cra_blocksize = AES_BLOCK_SIZE,
2058 			},
2059 			.setkey = aead_setkey,
2060 			.setauthsize = aead_setauthsize,
2061 			.encrypt = aead_encrypt,
2062 			.decrypt = aead_decrypt,
2063 			.ivsize = AES_BLOCK_SIZE,
2064 			.maxauthsize = SHA384_DIGEST_SIZE,
2065 		},
2066 		.caam = {
2067 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2068 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2069 					   OP_ALG_AAI_HMAC_PRECOMP,
2070 			.geniv = true,
2071 		}
2072 	},
2073 	{
2074 		.aead = {
2075 			.base = {
2076 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2077 				.cra_driver_name = "authenc-hmac-sha512-"
2078 						   "cbc-aes-caam-qi2",
2079 				.cra_blocksize = AES_BLOCK_SIZE,
2080 			},
2081 			.setkey = aead_setkey,
2082 			.setauthsize = aead_setauthsize,
2083 			.encrypt = aead_encrypt,
2084 			.decrypt = aead_decrypt,
2085 			.ivsize = AES_BLOCK_SIZE,
2086 			.maxauthsize = SHA512_DIGEST_SIZE,
2087 		},
2088 		.caam = {
2089 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2090 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2091 					   OP_ALG_AAI_HMAC_PRECOMP,
2092 		}
2093 	},
2094 	{
2095 		.aead = {
2096 			.base = {
2097 				.cra_name = "echainiv(authenc(hmac(sha512),"
2098 					    "cbc(aes)))",
2099 				.cra_driver_name = "echainiv-authenc-"
2100 						   "hmac-sha512-cbc-aes-"
2101 						   "caam-qi2",
2102 				.cra_blocksize = AES_BLOCK_SIZE,
2103 			},
2104 			.setkey = aead_setkey,
2105 			.setauthsize = aead_setauthsize,
2106 			.encrypt = aead_encrypt,
2107 			.decrypt = aead_decrypt,
2108 			.ivsize = AES_BLOCK_SIZE,
2109 			.maxauthsize = SHA512_DIGEST_SIZE,
2110 		},
2111 		.caam = {
2112 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2113 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2114 					   OP_ALG_AAI_HMAC_PRECOMP,
2115 			.geniv = true,
2116 		}
2117 	},
2118 	{
2119 		.aead = {
2120 			.base = {
2121 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2122 				.cra_driver_name = "authenc-hmac-md5-"
2123 						   "cbc-des3_ede-caam-qi2",
2124 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2125 			},
2126 			.setkey = des3_aead_setkey,
2127 			.setauthsize = aead_setauthsize,
2128 			.encrypt = aead_encrypt,
2129 			.decrypt = aead_decrypt,
2130 			.ivsize = DES3_EDE_BLOCK_SIZE,
2131 			.maxauthsize = MD5_DIGEST_SIZE,
2132 		},
2133 		.caam = {
2134 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2135 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2136 					   OP_ALG_AAI_HMAC_PRECOMP,
2137 		}
2138 	},
2139 	{
2140 		.aead = {
2141 			.base = {
2142 				.cra_name = "echainiv(authenc(hmac(md5),"
2143 					    "cbc(des3_ede)))",
2144 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2145 						   "cbc-des3_ede-caam-qi2",
2146 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2147 			},
2148 			.setkey = des3_aead_setkey,
2149 			.setauthsize = aead_setauthsize,
2150 			.encrypt = aead_encrypt,
2151 			.decrypt = aead_decrypt,
2152 			.ivsize = DES3_EDE_BLOCK_SIZE,
2153 			.maxauthsize = MD5_DIGEST_SIZE,
2154 		},
2155 		.caam = {
2156 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2157 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2158 					   OP_ALG_AAI_HMAC_PRECOMP,
2159 			.geniv = true,
2160 		}
2161 	},
2162 	{
2163 		.aead = {
2164 			.base = {
2165 				.cra_name = "authenc(hmac(sha1),"
2166 					    "cbc(des3_ede))",
2167 				.cra_driver_name = "authenc-hmac-sha1-"
2168 						   "cbc-des3_ede-caam-qi2",
2169 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2170 			},
2171 			.setkey = des3_aead_setkey,
2172 			.setauthsize = aead_setauthsize,
2173 			.encrypt = aead_encrypt,
2174 			.decrypt = aead_decrypt,
2175 			.ivsize = DES3_EDE_BLOCK_SIZE,
2176 			.maxauthsize = SHA1_DIGEST_SIZE,
2177 		},
2178 		.caam = {
2179 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2180 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2181 					   OP_ALG_AAI_HMAC_PRECOMP,
2182 		},
2183 	},
2184 	{
2185 		.aead = {
2186 			.base = {
2187 				.cra_name = "echainiv(authenc(hmac(sha1),"
2188 					    "cbc(des3_ede)))",
2189 				.cra_driver_name = "echainiv-authenc-"
2190 						   "hmac-sha1-"
2191 						   "cbc-des3_ede-caam-qi2",
2192 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2193 			},
2194 			.setkey = des3_aead_setkey,
2195 			.setauthsize = aead_setauthsize,
2196 			.encrypt = aead_encrypt,
2197 			.decrypt = aead_decrypt,
2198 			.ivsize = DES3_EDE_BLOCK_SIZE,
2199 			.maxauthsize = SHA1_DIGEST_SIZE,
2200 		},
2201 		.caam = {
2202 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2203 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2204 					   OP_ALG_AAI_HMAC_PRECOMP,
2205 			.geniv = true,
2206 		}
2207 	},
2208 	{
2209 		.aead = {
2210 			.base = {
2211 				.cra_name = "authenc(hmac(sha224),"
2212 					    "cbc(des3_ede))",
2213 				.cra_driver_name = "authenc-hmac-sha224-"
2214 						   "cbc-des3_ede-caam-qi2",
2215 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2216 			},
2217 			.setkey = des3_aead_setkey,
2218 			.setauthsize = aead_setauthsize,
2219 			.encrypt = aead_encrypt,
2220 			.decrypt = aead_decrypt,
2221 			.ivsize = DES3_EDE_BLOCK_SIZE,
2222 			.maxauthsize = SHA224_DIGEST_SIZE,
2223 		},
2224 		.caam = {
2225 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2226 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2227 					   OP_ALG_AAI_HMAC_PRECOMP,
2228 		},
2229 	},
2230 	{
2231 		.aead = {
2232 			.base = {
2233 				.cra_name = "echainiv(authenc(hmac(sha224),"
2234 					    "cbc(des3_ede)))",
2235 				.cra_driver_name = "echainiv-authenc-"
2236 						   "hmac-sha224-"
2237 						   "cbc-des3_ede-caam-qi2",
2238 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2239 			},
2240 			.setkey = des3_aead_setkey,
2241 			.setauthsize = aead_setauthsize,
2242 			.encrypt = aead_encrypt,
2243 			.decrypt = aead_decrypt,
2244 			.ivsize = DES3_EDE_BLOCK_SIZE,
2245 			.maxauthsize = SHA224_DIGEST_SIZE,
2246 		},
2247 		.caam = {
2248 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2249 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2250 					   OP_ALG_AAI_HMAC_PRECOMP,
2251 			.geniv = true,
2252 		}
2253 	},
2254 	{
2255 		.aead = {
2256 			.base = {
2257 				.cra_name = "authenc(hmac(sha256),"
2258 					    "cbc(des3_ede))",
2259 				.cra_driver_name = "authenc-hmac-sha256-"
2260 						   "cbc-des3_ede-caam-qi2",
2261 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2262 			},
2263 			.setkey = des3_aead_setkey,
2264 			.setauthsize = aead_setauthsize,
2265 			.encrypt = aead_encrypt,
2266 			.decrypt = aead_decrypt,
2267 			.ivsize = DES3_EDE_BLOCK_SIZE,
2268 			.maxauthsize = SHA256_DIGEST_SIZE,
2269 		},
2270 		.caam = {
2271 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2272 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2273 					   OP_ALG_AAI_HMAC_PRECOMP,
2274 		},
2275 	},
2276 	{
2277 		.aead = {
2278 			.base = {
2279 				.cra_name = "echainiv(authenc(hmac(sha256),"
2280 					    "cbc(des3_ede)))",
2281 				.cra_driver_name = "echainiv-authenc-"
2282 						   "hmac-sha256-"
2283 						   "cbc-des3_ede-caam-qi2",
2284 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2285 			},
2286 			.setkey = des3_aead_setkey,
2287 			.setauthsize = aead_setauthsize,
2288 			.encrypt = aead_encrypt,
2289 			.decrypt = aead_decrypt,
2290 			.ivsize = DES3_EDE_BLOCK_SIZE,
2291 			.maxauthsize = SHA256_DIGEST_SIZE,
2292 		},
2293 		.caam = {
2294 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2295 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2296 					   OP_ALG_AAI_HMAC_PRECOMP,
2297 			.geniv = true,
2298 		}
2299 	},
2300 	{
2301 		.aead = {
2302 			.base = {
2303 				.cra_name = "authenc(hmac(sha384),"
2304 					    "cbc(des3_ede))",
2305 				.cra_driver_name = "authenc-hmac-sha384-"
2306 						   "cbc-des3_ede-caam-qi2",
2307 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2308 			},
2309 			.setkey = des3_aead_setkey,
2310 			.setauthsize = aead_setauthsize,
2311 			.encrypt = aead_encrypt,
2312 			.decrypt = aead_decrypt,
2313 			.ivsize = DES3_EDE_BLOCK_SIZE,
2314 			.maxauthsize = SHA384_DIGEST_SIZE,
2315 		},
2316 		.caam = {
2317 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2318 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2319 					   OP_ALG_AAI_HMAC_PRECOMP,
2320 		},
2321 	},
2322 	{
2323 		.aead = {
2324 			.base = {
2325 				.cra_name = "echainiv(authenc(hmac(sha384),"
2326 					    "cbc(des3_ede)))",
2327 				.cra_driver_name = "echainiv-authenc-"
2328 						   "hmac-sha384-"
2329 						   "cbc-des3_ede-caam-qi2",
2330 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2331 			},
2332 			.setkey = des3_aead_setkey,
2333 			.setauthsize = aead_setauthsize,
2334 			.encrypt = aead_encrypt,
2335 			.decrypt = aead_decrypt,
2336 			.ivsize = DES3_EDE_BLOCK_SIZE,
2337 			.maxauthsize = SHA384_DIGEST_SIZE,
2338 		},
2339 		.caam = {
2340 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2341 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2342 					   OP_ALG_AAI_HMAC_PRECOMP,
2343 			.geniv = true,
2344 		}
2345 	},
2346 	{
2347 		.aead = {
2348 			.base = {
2349 				.cra_name = "authenc(hmac(sha512),"
2350 					    "cbc(des3_ede))",
2351 				.cra_driver_name = "authenc-hmac-sha512-"
2352 						   "cbc-des3_ede-caam-qi2",
2353 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2354 			},
2355 			.setkey = des3_aead_setkey,
2356 			.setauthsize = aead_setauthsize,
2357 			.encrypt = aead_encrypt,
2358 			.decrypt = aead_decrypt,
2359 			.ivsize = DES3_EDE_BLOCK_SIZE,
2360 			.maxauthsize = SHA512_DIGEST_SIZE,
2361 		},
2362 		.caam = {
2363 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2364 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2365 					   OP_ALG_AAI_HMAC_PRECOMP,
2366 		},
2367 	},
2368 	{
2369 		.aead = {
2370 			.base = {
2371 				.cra_name = "echainiv(authenc(hmac(sha512),"
2372 					    "cbc(des3_ede)))",
2373 				.cra_driver_name = "echainiv-authenc-"
2374 						   "hmac-sha512-"
2375 						   "cbc-des3_ede-caam-qi2",
2376 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2377 			},
2378 			.setkey = des3_aead_setkey,
2379 			.setauthsize = aead_setauthsize,
2380 			.encrypt = aead_encrypt,
2381 			.decrypt = aead_decrypt,
2382 			.ivsize = DES3_EDE_BLOCK_SIZE,
2383 			.maxauthsize = SHA512_DIGEST_SIZE,
2384 		},
2385 		.caam = {
2386 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2387 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2388 					   OP_ALG_AAI_HMAC_PRECOMP,
2389 			.geniv = true,
2390 		}
2391 	},
2392 	{
2393 		.aead = {
2394 			.base = {
2395 				.cra_name = "authenc(hmac(md5),cbc(des))",
2396 				.cra_driver_name = "authenc-hmac-md5-"
2397 						   "cbc-des-caam-qi2",
2398 				.cra_blocksize = DES_BLOCK_SIZE,
2399 			},
2400 			.setkey = aead_setkey,
2401 			.setauthsize = aead_setauthsize,
2402 			.encrypt = aead_encrypt,
2403 			.decrypt = aead_decrypt,
2404 			.ivsize = DES_BLOCK_SIZE,
2405 			.maxauthsize = MD5_DIGEST_SIZE,
2406 		},
2407 		.caam = {
2408 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2409 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2410 					   OP_ALG_AAI_HMAC_PRECOMP,
2411 		},
2412 	},
2413 	{
2414 		.aead = {
2415 			.base = {
2416 				.cra_name = "echainiv(authenc(hmac(md5),"
2417 					    "cbc(des)))",
2418 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2419 						   "cbc-des-caam-qi2",
2420 				.cra_blocksize = DES_BLOCK_SIZE,
2421 			},
2422 			.setkey = aead_setkey,
2423 			.setauthsize = aead_setauthsize,
2424 			.encrypt = aead_encrypt,
2425 			.decrypt = aead_decrypt,
2426 			.ivsize = DES_BLOCK_SIZE,
2427 			.maxauthsize = MD5_DIGEST_SIZE,
2428 		},
2429 		.caam = {
2430 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2431 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2432 					   OP_ALG_AAI_HMAC_PRECOMP,
2433 			.geniv = true,
2434 		}
2435 	},
2436 	{
2437 		.aead = {
2438 			.base = {
2439 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2440 				.cra_driver_name = "authenc-hmac-sha1-"
2441 						   "cbc-des-caam-qi2",
2442 				.cra_blocksize = DES_BLOCK_SIZE,
2443 			},
2444 			.setkey = aead_setkey,
2445 			.setauthsize = aead_setauthsize,
2446 			.encrypt = aead_encrypt,
2447 			.decrypt = aead_decrypt,
2448 			.ivsize = DES_BLOCK_SIZE,
2449 			.maxauthsize = SHA1_DIGEST_SIZE,
2450 		},
2451 		.caam = {
2452 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2453 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2454 					   OP_ALG_AAI_HMAC_PRECOMP,
2455 		},
2456 	},
2457 	{
2458 		.aead = {
2459 			.base = {
2460 				.cra_name = "echainiv(authenc(hmac(sha1),"
2461 					    "cbc(des)))",
2462 				.cra_driver_name = "echainiv-authenc-"
2463 						   "hmac-sha1-cbc-des-caam-qi2",
2464 				.cra_blocksize = DES_BLOCK_SIZE,
2465 			},
2466 			.setkey = aead_setkey,
2467 			.setauthsize = aead_setauthsize,
2468 			.encrypt = aead_encrypt,
2469 			.decrypt = aead_decrypt,
2470 			.ivsize = DES_BLOCK_SIZE,
2471 			.maxauthsize = SHA1_DIGEST_SIZE,
2472 		},
2473 		.caam = {
2474 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2475 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2476 					   OP_ALG_AAI_HMAC_PRECOMP,
2477 			.geniv = true,
2478 		}
2479 	},
2480 	{
2481 		.aead = {
2482 			.base = {
2483 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2484 				.cra_driver_name = "authenc-hmac-sha224-"
2485 						   "cbc-des-caam-qi2",
2486 				.cra_blocksize = DES_BLOCK_SIZE,
2487 			},
2488 			.setkey = aead_setkey,
2489 			.setauthsize = aead_setauthsize,
2490 			.encrypt = aead_encrypt,
2491 			.decrypt = aead_decrypt,
2492 			.ivsize = DES_BLOCK_SIZE,
2493 			.maxauthsize = SHA224_DIGEST_SIZE,
2494 		},
2495 		.caam = {
2496 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2497 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2498 					   OP_ALG_AAI_HMAC_PRECOMP,
2499 		},
2500 	},
2501 	{
2502 		.aead = {
2503 			.base = {
2504 				.cra_name = "echainiv(authenc(hmac(sha224),"
2505 					    "cbc(des)))",
2506 				.cra_driver_name = "echainiv-authenc-"
2507 						   "hmac-sha224-cbc-des-"
2508 						   "caam-qi2",
2509 				.cra_blocksize = DES_BLOCK_SIZE,
2510 			},
2511 			.setkey = aead_setkey,
2512 			.setauthsize = aead_setauthsize,
2513 			.encrypt = aead_encrypt,
2514 			.decrypt = aead_decrypt,
2515 			.ivsize = DES_BLOCK_SIZE,
2516 			.maxauthsize = SHA224_DIGEST_SIZE,
2517 		},
2518 		.caam = {
2519 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2520 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2521 					   OP_ALG_AAI_HMAC_PRECOMP,
2522 			.geniv = true,
2523 		}
2524 	},
2525 	{
2526 		.aead = {
2527 			.base = {
2528 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2529 				.cra_driver_name = "authenc-hmac-sha256-"
2530 						   "cbc-des-caam-qi2",
2531 				.cra_blocksize = DES_BLOCK_SIZE,
2532 			},
2533 			.setkey = aead_setkey,
2534 			.setauthsize = aead_setauthsize,
2535 			.encrypt = aead_encrypt,
2536 			.decrypt = aead_decrypt,
2537 			.ivsize = DES_BLOCK_SIZE,
2538 			.maxauthsize = SHA256_DIGEST_SIZE,
2539 		},
2540 		.caam = {
2541 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2542 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2543 					   OP_ALG_AAI_HMAC_PRECOMP,
2544 		},
2545 	},
2546 	{
2547 		.aead = {
2548 			.base = {
2549 				.cra_name = "echainiv(authenc(hmac(sha256),"
2550 					    "cbc(des)))",
2551 				.cra_driver_name = "echainiv-authenc-"
2552 						   "hmac-sha256-cbc-des-"
2553 						   "caam-qi2",
2554 				.cra_blocksize = DES_BLOCK_SIZE,
2555 			},
2556 			.setkey = aead_setkey,
2557 			.setauthsize = aead_setauthsize,
2558 			.encrypt = aead_encrypt,
2559 			.decrypt = aead_decrypt,
2560 			.ivsize = DES_BLOCK_SIZE,
2561 			.maxauthsize = SHA256_DIGEST_SIZE,
2562 		},
2563 		.caam = {
2564 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2565 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2566 					   OP_ALG_AAI_HMAC_PRECOMP,
2567 			.geniv = true,
2568 		},
2569 	},
2570 	{
2571 		.aead = {
2572 			.base = {
2573 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2574 				.cra_driver_name = "authenc-hmac-sha384-"
2575 						   "cbc-des-caam-qi2",
2576 				.cra_blocksize = DES_BLOCK_SIZE,
2577 			},
2578 			.setkey = aead_setkey,
2579 			.setauthsize = aead_setauthsize,
2580 			.encrypt = aead_encrypt,
2581 			.decrypt = aead_decrypt,
2582 			.ivsize = DES_BLOCK_SIZE,
2583 			.maxauthsize = SHA384_DIGEST_SIZE,
2584 		},
2585 		.caam = {
2586 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2587 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2588 					   OP_ALG_AAI_HMAC_PRECOMP,
2589 		},
2590 	},
2591 	{
2592 		.aead = {
2593 			.base = {
2594 				.cra_name = "echainiv(authenc(hmac(sha384),"
2595 					    "cbc(des)))",
2596 				.cra_driver_name = "echainiv-authenc-"
2597 						   "hmac-sha384-cbc-des-"
2598 						   "caam-qi2",
2599 				.cra_blocksize = DES_BLOCK_SIZE,
2600 			},
2601 			.setkey = aead_setkey,
2602 			.setauthsize = aead_setauthsize,
2603 			.encrypt = aead_encrypt,
2604 			.decrypt = aead_decrypt,
2605 			.ivsize = DES_BLOCK_SIZE,
2606 			.maxauthsize = SHA384_DIGEST_SIZE,
2607 		},
2608 		.caam = {
2609 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2610 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2611 					   OP_ALG_AAI_HMAC_PRECOMP,
2612 			.geniv = true,
2613 		}
2614 	},
2615 	{
2616 		.aead = {
2617 			.base = {
2618 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2619 				.cra_driver_name = "authenc-hmac-sha512-"
2620 						   "cbc-des-caam-qi2",
2621 				.cra_blocksize = DES_BLOCK_SIZE,
2622 			},
2623 			.setkey = aead_setkey,
2624 			.setauthsize = aead_setauthsize,
2625 			.encrypt = aead_encrypt,
2626 			.decrypt = aead_decrypt,
2627 			.ivsize = DES_BLOCK_SIZE,
2628 			.maxauthsize = SHA512_DIGEST_SIZE,
2629 		},
2630 		.caam = {
2631 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2632 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2633 					   OP_ALG_AAI_HMAC_PRECOMP,
2634 		}
2635 	},
2636 	{
2637 		.aead = {
2638 			.base = {
2639 				.cra_name = "echainiv(authenc(hmac(sha512),"
2640 					    "cbc(des)))",
2641 				.cra_driver_name = "echainiv-authenc-"
2642 						   "hmac-sha512-cbc-des-"
2643 						   "caam-qi2",
2644 				.cra_blocksize = DES_BLOCK_SIZE,
2645 			},
2646 			.setkey = aead_setkey,
2647 			.setauthsize = aead_setauthsize,
2648 			.encrypt = aead_encrypt,
2649 			.decrypt = aead_decrypt,
2650 			.ivsize = DES_BLOCK_SIZE,
2651 			.maxauthsize = SHA512_DIGEST_SIZE,
2652 		},
2653 		.caam = {
2654 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2655 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2656 					   OP_ALG_AAI_HMAC_PRECOMP,
2657 			.geniv = true,
2658 		}
2659 	},
2660 	{
2661 		.aead = {
2662 			.base = {
2663 				.cra_name = "authenc(hmac(md5),"
2664 					    "rfc3686(ctr(aes)))",
2665 				.cra_driver_name = "authenc-hmac-md5-"
2666 						   "rfc3686-ctr-aes-caam-qi2",
2667 				.cra_blocksize = 1,
2668 			},
2669 			.setkey = aead_setkey,
2670 			.setauthsize = aead_setauthsize,
2671 			.encrypt = aead_encrypt,
2672 			.decrypt = aead_decrypt,
2673 			.ivsize = CTR_RFC3686_IV_SIZE,
2674 			.maxauthsize = MD5_DIGEST_SIZE,
2675 		},
2676 		.caam = {
2677 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2678 					   OP_ALG_AAI_CTR_MOD128,
2679 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2680 					   OP_ALG_AAI_HMAC_PRECOMP,
2681 			.rfc3686 = true,
2682 		},
2683 	},
2684 	{
2685 		.aead = {
2686 			.base = {
2687 				.cra_name = "seqiv(authenc("
2688 					    "hmac(md5),rfc3686(ctr(aes))))",
2689 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2690 						   "rfc3686-ctr-aes-caam-qi2",
2691 				.cra_blocksize = 1,
2692 			},
2693 			.setkey = aead_setkey,
2694 			.setauthsize = aead_setauthsize,
2695 			.encrypt = aead_encrypt,
2696 			.decrypt = aead_decrypt,
2697 			.ivsize = CTR_RFC3686_IV_SIZE,
2698 			.maxauthsize = MD5_DIGEST_SIZE,
2699 		},
2700 		.caam = {
2701 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2702 					   OP_ALG_AAI_CTR_MOD128,
2703 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2704 					   OP_ALG_AAI_HMAC_PRECOMP,
2705 			.rfc3686 = true,
2706 			.geniv = true,
2707 		},
2708 	},
2709 	{
2710 		.aead = {
2711 			.base = {
2712 				.cra_name = "authenc(hmac(sha1),"
2713 					    "rfc3686(ctr(aes)))",
2714 				.cra_driver_name = "authenc-hmac-sha1-"
2715 						   "rfc3686-ctr-aes-caam-qi2",
2716 				.cra_blocksize = 1,
2717 			},
2718 			.setkey = aead_setkey,
2719 			.setauthsize = aead_setauthsize,
2720 			.encrypt = aead_encrypt,
2721 			.decrypt = aead_decrypt,
2722 			.ivsize = CTR_RFC3686_IV_SIZE,
2723 			.maxauthsize = SHA1_DIGEST_SIZE,
2724 		},
2725 		.caam = {
2726 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2727 					   OP_ALG_AAI_CTR_MOD128,
2728 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2729 					   OP_ALG_AAI_HMAC_PRECOMP,
2730 			.rfc3686 = true,
2731 		},
2732 	},
2733 	{
2734 		.aead = {
2735 			.base = {
2736 				.cra_name = "seqiv(authenc("
2737 					    "hmac(sha1),rfc3686(ctr(aes))))",
2738 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2739 						   "rfc3686-ctr-aes-caam-qi2",
2740 				.cra_blocksize = 1,
2741 			},
2742 			.setkey = aead_setkey,
2743 			.setauthsize = aead_setauthsize,
2744 			.encrypt = aead_encrypt,
2745 			.decrypt = aead_decrypt,
2746 			.ivsize = CTR_RFC3686_IV_SIZE,
2747 			.maxauthsize = SHA1_DIGEST_SIZE,
2748 		},
2749 		.caam = {
2750 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2751 					   OP_ALG_AAI_CTR_MOD128,
2752 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2753 					   OP_ALG_AAI_HMAC_PRECOMP,
2754 			.rfc3686 = true,
2755 			.geniv = true,
2756 		},
2757 	},
2758 	{
2759 		.aead = {
2760 			.base = {
2761 				.cra_name = "authenc(hmac(sha224),"
2762 					    "rfc3686(ctr(aes)))",
2763 				.cra_driver_name = "authenc-hmac-sha224-"
2764 						   "rfc3686-ctr-aes-caam-qi2",
2765 				.cra_blocksize = 1,
2766 			},
2767 			.setkey = aead_setkey,
2768 			.setauthsize = aead_setauthsize,
2769 			.encrypt = aead_encrypt,
2770 			.decrypt = aead_decrypt,
2771 			.ivsize = CTR_RFC3686_IV_SIZE,
2772 			.maxauthsize = SHA224_DIGEST_SIZE,
2773 		},
2774 		.caam = {
2775 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2776 					   OP_ALG_AAI_CTR_MOD128,
2777 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2778 					   OP_ALG_AAI_HMAC_PRECOMP,
2779 			.rfc3686 = true,
2780 		},
2781 	},
2782 	{
2783 		.aead = {
2784 			.base = {
2785 				.cra_name = "seqiv(authenc("
2786 					    "hmac(sha224),rfc3686(ctr(aes))))",
2787 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2788 						   "rfc3686-ctr-aes-caam-qi2",
2789 				.cra_blocksize = 1,
2790 			},
2791 			.setkey = aead_setkey,
2792 			.setauthsize = aead_setauthsize,
2793 			.encrypt = aead_encrypt,
2794 			.decrypt = aead_decrypt,
2795 			.ivsize = CTR_RFC3686_IV_SIZE,
2796 			.maxauthsize = SHA224_DIGEST_SIZE,
2797 		},
2798 		.caam = {
2799 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2800 					   OP_ALG_AAI_CTR_MOD128,
2801 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2802 					   OP_ALG_AAI_HMAC_PRECOMP,
2803 			.rfc3686 = true,
2804 			.geniv = true,
2805 		},
2806 	},
2807 	{
2808 		.aead = {
2809 			.base = {
2810 				.cra_name = "authenc(hmac(sha256),"
2811 					    "rfc3686(ctr(aes)))",
2812 				.cra_driver_name = "authenc-hmac-sha256-"
2813 						   "rfc3686-ctr-aes-caam-qi2",
2814 				.cra_blocksize = 1,
2815 			},
2816 			.setkey = aead_setkey,
2817 			.setauthsize = aead_setauthsize,
2818 			.encrypt = aead_encrypt,
2819 			.decrypt = aead_decrypt,
2820 			.ivsize = CTR_RFC3686_IV_SIZE,
2821 			.maxauthsize = SHA256_DIGEST_SIZE,
2822 		},
2823 		.caam = {
2824 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2825 					   OP_ALG_AAI_CTR_MOD128,
2826 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2827 					   OP_ALG_AAI_HMAC_PRECOMP,
2828 			.rfc3686 = true,
2829 		},
2830 	},
2831 	{
2832 		.aead = {
2833 			.base = {
2834 				.cra_name = "seqiv(authenc(hmac(sha256),"
2835 					    "rfc3686(ctr(aes))))",
2836 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2837 						   "rfc3686-ctr-aes-caam-qi2",
2838 				.cra_blocksize = 1,
2839 			},
2840 			.setkey = aead_setkey,
2841 			.setauthsize = aead_setauthsize,
2842 			.encrypt = aead_encrypt,
2843 			.decrypt = aead_decrypt,
2844 			.ivsize = CTR_RFC3686_IV_SIZE,
2845 			.maxauthsize = SHA256_DIGEST_SIZE,
2846 		},
2847 		.caam = {
2848 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2849 					   OP_ALG_AAI_CTR_MOD128,
2850 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2851 					   OP_ALG_AAI_HMAC_PRECOMP,
2852 			.rfc3686 = true,
2853 			.geniv = true,
2854 		},
2855 	},
2856 	{
2857 		.aead = {
2858 			.base = {
2859 				.cra_name = "authenc(hmac(sha384),"
2860 					    "rfc3686(ctr(aes)))",
2861 				.cra_driver_name = "authenc-hmac-sha384-"
2862 						   "rfc3686-ctr-aes-caam-qi2",
2863 				.cra_blocksize = 1,
2864 			},
2865 			.setkey = aead_setkey,
2866 			.setauthsize = aead_setauthsize,
2867 			.encrypt = aead_encrypt,
2868 			.decrypt = aead_decrypt,
2869 			.ivsize = CTR_RFC3686_IV_SIZE,
2870 			.maxauthsize = SHA384_DIGEST_SIZE,
2871 		},
2872 		.caam = {
2873 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2874 					   OP_ALG_AAI_CTR_MOD128,
2875 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2876 					   OP_ALG_AAI_HMAC_PRECOMP,
2877 			.rfc3686 = true,
2878 		},
2879 	},
2880 	{
2881 		.aead = {
2882 			.base = {
2883 				.cra_name = "seqiv(authenc(hmac(sha384),"
2884 					    "rfc3686(ctr(aes))))",
2885 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2886 						   "rfc3686-ctr-aes-caam-qi2",
2887 				.cra_blocksize = 1,
2888 			},
2889 			.setkey = aead_setkey,
2890 			.setauthsize = aead_setauthsize,
2891 			.encrypt = aead_encrypt,
2892 			.decrypt = aead_decrypt,
2893 			.ivsize = CTR_RFC3686_IV_SIZE,
2894 			.maxauthsize = SHA384_DIGEST_SIZE,
2895 		},
2896 		.caam = {
2897 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2898 					   OP_ALG_AAI_CTR_MOD128,
2899 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2900 					   OP_ALG_AAI_HMAC_PRECOMP,
2901 			.rfc3686 = true,
2902 			.geniv = true,
2903 		},
2904 	},
2905 	{
2906 		.aead = {
2907 			.base = {
2908 				.cra_name = "rfc7539(chacha20,poly1305)",
2909 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2910 						   "caam-qi2",
2911 				.cra_blocksize = 1,
2912 			},
2913 			.setkey = chachapoly_setkey,
2914 			.setauthsize = chachapoly_setauthsize,
2915 			.encrypt = aead_encrypt,
2916 			.decrypt = aead_decrypt,
2917 			.ivsize = CHACHAPOLY_IV_SIZE,
2918 			.maxauthsize = POLY1305_DIGEST_SIZE,
2919 		},
2920 		.caam = {
2921 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2922 					   OP_ALG_AAI_AEAD,
2923 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2924 					   OP_ALG_AAI_AEAD,
2925 			.nodkp = true,
2926 		},
2927 	},
2928 	{
2929 		.aead = {
2930 			.base = {
2931 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2932 				.cra_driver_name = "rfc7539esp-chacha20-"
2933 						   "poly1305-caam-qi2",
2934 				.cra_blocksize = 1,
2935 			},
2936 			.setkey = chachapoly_setkey,
2937 			.setauthsize = chachapoly_setauthsize,
2938 			.encrypt = aead_encrypt,
2939 			.decrypt = aead_decrypt,
2940 			.ivsize = 8,	/* 64-bit explicit IV; 32-bit salt completes the nonce */
2941 			.maxauthsize = POLY1305_DIGEST_SIZE,
2942 		},
2943 		.caam = {
2944 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2945 					   OP_ALG_AAI_AEAD,
2946 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2947 					   OP_ALG_AAI_AEAD,
2948 			.nodkp = true,
2949 		},
2950 	},
2951 	{
2952 		.aead = {
2953 			.base = {
2954 				.cra_name = "authenc(hmac(sha512),"
2955 					    "rfc3686(ctr(aes)))",
2956 				.cra_driver_name = "authenc-hmac-sha512-"
2957 						   "rfc3686-ctr-aes-caam-qi2",
2958 				.cra_blocksize = 1,
2959 			},
2960 			.setkey = aead_setkey,
2961 			.setauthsize = aead_setauthsize,
2962 			.encrypt = aead_encrypt,
2963 			.decrypt = aead_decrypt,
2964 			.ivsize = CTR_RFC3686_IV_SIZE,
2965 			.maxauthsize = SHA512_DIGEST_SIZE,
2966 		},
2967 		.caam = {
2968 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2969 					   OP_ALG_AAI_CTR_MOD128,
2970 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2971 					   OP_ALG_AAI_HMAC_PRECOMP,
2972 			.rfc3686 = true,
2973 		},
2974 	},
2975 	{
2976 		.aead = {
2977 			.base = {
2978 				.cra_name = "seqiv(authenc(hmac(sha512),"
2979 					    "rfc3686(ctr(aes))))",
2980 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2981 						   "rfc3686-ctr-aes-caam-qi2",
2982 				.cra_blocksize = 1,
2983 			},
2984 			.setkey = aead_setkey,
2985 			.setauthsize = aead_setauthsize,
2986 			.encrypt = aead_encrypt,
2987 			.decrypt = aead_decrypt,
2988 			.ivsize = CTR_RFC3686_IV_SIZE,
2989 			.maxauthsize = SHA512_DIGEST_SIZE,
2990 		},
2991 		.caam = {
2992 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2993 					   OP_ALG_AAI_CTR_MOD128,
2994 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2995 					   OP_ALG_AAI_HMAC_PRECOMP,
2996 			.rfc3686 = true,
2997 			.geniv = true,
2998 		},
2999 	},
3000 };
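
/*
 * Companion sketch for the AEADs above, again hypothetical and abbreviated:
 * one in-place gcm(aes) encryption with 16 bytes of associated data. For an
 * AEAD, src/dst must cover AD + payload, with room for the authentication
 * tag appended on output.
 */
static int __maybe_unused caam_qi2_aead_usage_sketch(void)
{
	u8 key[16] = { 0 }, iv[12] = { 0 };	/* 96-bit GCM nonce */
	struct crypto_aead *tfm;
	struct aead_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 *buf = NULL;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	buf = kzalloc(16 + 16 + 16, GFP_KERNEL);	/* AD + PT + tag */
	if (!req || !buf) {
		ret = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 16 + 16 + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 16);
	/* cryptlen counts only the plaintext for encryption */
	aead_request_set_crypt(req, &sg, &sg, 16, iv);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);
out:
	kfree(buf);
	aead_request_free(req);
	crypto_free_aead(tfm);
	return ret;
}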
3001 
3002 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3003 {
3004 	struct skcipher_alg *alg = &t_alg->skcipher;
3005 
3006 	alg->base.cra_module = THIS_MODULE;
3007 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3008 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	/*
	 * OR into cra_flags rather than assigning, so that per-algorithm
	 * flags already set in driver_algs[] (CRYPTO_ALG_NEED_FALLBACK for
	 * xts(aes)) survive.
	 */
3009 	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3010 			      CRYPTO_ALG_KERN_DRIVER_ONLY);
3011 
3012 	alg->init = caam_cra_init_skcipher;
3013 	alg->exit = caam_cra_exit;
3014 }
3015 
3016 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3017 {
3018 	struct aead_alg *alg = &t_alg->aead;
3019 
3020 	alg->base.cra_module = THIS_MODULE;
3021 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3022 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3023 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3024 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
3025 
3026 	alg->init = caam_cra_init_aead;
3027 	alg->exit = caam_cra_exit_aead;
3028 }
3029 
3030 /* max hash key is max split key size */
3031 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
3032 
3033 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
3034 
3035 /* caam context size for hashes: running digest + 8-byte message length */
3036 #define HASH_MSG_LEN			8
3037 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3038 
3039 enum hash_optype {
3040 	UPDATE = 0,
3041 	UPDATE_FIRST,
3042 	FINALIZE,
3043 	DIGEST,
3044 	HASH_NUM_OP
3045 };
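
/*
 * Each hash_optype above selects one of the four shared descriptors built in
 * ahash_set_sh_desc(): UPDATE and FINALIZE import the running state from the
 * Context Register, while UPDATE_FIRST and DIGEST start from the algorithm's
 * initial state. HASH_NUM_OP sizes the flc[]/flc_dma[] arrays in
 * struct caam_hash_ctx below.
 */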
3046 
3047 /**
3048  * struct caam_hash_ctx - ahash per-session context
3049  * @flc: Flow Contexts array
3050  * @key: authentication key
3051  * @flc_dma: I/O virtual addresses of the Flow Contexts
3052  * @dev: dpseci device
3053  * @ctx_len: size of Context Register
3054  * @adata: hashing algorithm details
3055  */
3056 struct caam_hash_ctx {
3057 	struct caam_flc flc[HASH_NUM_OP];
3058 	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3059 	dma_addr_t flc_dma[HASH_NUM_OP];
3060 	struct device *dev;
3061 	int ctx_len;
3062 	struct alginfo adata;
3063 };
3064 
3065 /* ahash state */
3066 struct caam_hash_state {
3067 	struct caam_request caam_req;
3068 	dma_addr_t buf_dma;
3069 	dma_addr_t ctx_dma;
3070 	int ctx_dma_len;
3071 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3072 	int buflen;
3073 	int next_buflen;
3074 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3075 	int (*update)(struct ahash_request *req);
3076 	int (*final)(struct ahash_request *req);
3077 	int (*finup)(struct ahash_request *req);
3078 };
3079 
3080 struct caam_export_state {
3081 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3082 	u8 caam_ctx[MAX_CTX_LEN];
3083 	int buflen;
3084 	int (*update)(struct ahash_request *req);
3085 	int (*final)(struct ahash_request *req);
3086 	int (*finup)(struct ahash_request *req);
3087 };
3088 
3089 /* Map current buffer in state (if length > 0) and put it in link table */
3090 static inline int buf_map_to_qm_sg(struct device *dev,
3091 				   struct dpaa2_sg_entry *qm_sg,
3092 				   struct caam_hash_state *state)
3093 {
3094 	int buflen = state->buflen;
3095 
3096 	if (!buflen)
3097 		return 0;
3098 
3099 	state->buf_dma = dma_map_single(dev, state->buf, buflen,
3100 					DMA_TO_DEVICE);
3101 	if (dma_mapping_error(dev, state->buf_dma)) {
3102 		dev_err(dev, "unable to map buf\n");
3103 		state->buf_dma = 0;
3104 		return -ENOMEM;
3105 	}
3106 
3107 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3108 
3109 	return 0;
3110 }
3111 
3112 /* Map state->caam_ctx and add it to the link table */
3113 static inline int ctx_map_to_qm_sg(struct device *dev,
3114 				   struct caam_hash_state *state, int ctx_len,
3115 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
3116 {
3117 	state->ctx_dma_len = ctx_len;
3118 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3119 	if (dma_mapping_error(dev, state->ctx_dma)) {
3120 		dev_err(dev, "unable to map ctx\n");
3121 		state->ctx_dma = 0;
3122 		return -ENOMEM;
3123 	}
3124 
3125 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3126 
3127 	return 0;
3128 }
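
/*
 * The two helpers above fill consecutive entries of a QI scatter/gather
 * ("link") table; ahash_update_ctx() below typically lays the input table
 * out as [running context][buffered partial block][new request data].
 */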
3129 
3130 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3131 {
3132 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3133 	int digestsize = crypto_ahash_digestsize(ahash);
3134 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3135 	struct caam_flc *flc;
3136 	u32 *desc;
3137 
3138 	/* ahash_update shared descriptor */
3139 	flc = &ctx->flc[UPDATE];
3140 	desc = flc->sh_desc;
3141 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3142 			  ctx->ctx_len, true, priv->sec_attr.era);
3143 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3144 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3145 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3146 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3147 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3148 			     1);
3149 
3150 	/* ahash_update_first shared descriptor */
3151 	flc = &ctx->flc[UPDATE_FIRST];
3152 	desc = flc->sh_desc;
3153 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3154 			  ctx->ctx_len, false, priv->sec_attr.era);
3155 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3156 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3157 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3158 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3159 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3160 			     1);
3161 
3162 	/* ahash_final shared descriptor */
3163 	flc = &ctx->flc[FINALIZE];
3164 	desc = flc->sh_desc;
3165 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3166 			  ctx->ctx_len, true, priv->sec_attr.era);
3167 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3168 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3169 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3170 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3171 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3172 			     1);
3173 
3174 	/* ahash_digest shared descriptor */
3175 	flc = &ctx->flc[DIGEST];
3176 	desc = flc->sh_desc;
3177 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3178 			  ctx->ctx_len, false, priv->sec_attr.era);
3179 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3180 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3181 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3182 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3183 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3184 			     1);
3185 
3186 	return 0;
3187 }
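
/*
 * For each descriptor built above, flc[1] carries the Shared Descriptor
 * Length (SDL) in the byte order the hardware expects, and the
 * dma_sync_single_for_device() call publishes the rewritten descriptor
 * through the flow context's pre-existing bidirectional DMA mapping.
 */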
3188 
3189 struct split_key_sh_result {
3190 	struct completion completion;
3191 	int err;
3192 	struct device *dev;
3193 };
3194 
3195 static void split_key_sh_done(void *cbk_ctx, u32 err)
3196 {
3197 	struct split_key_sh_result *res = cbk_ctx;
3198 
3199 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3200 
3201 	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3202 	complete(&res->completion);
3203 }
3204 
3205 /* Digest the key when it is too long, shrinking it to digestsize bytes */
3206 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3207 			   u32 digestsize)
3208 {
3209 	struct caam_request *req_ctx;
3210 	u32 *desc;
3211 	struct split_key_sh_result result;
3212 	dma_addr_t key_dma;
3213 	struct caam_flc *flc;
3214 	dma_addr_t flc_dma;
3215 	int ret = -ENOMEM;
3216 	struct dpaa2_fl_entry *in_fle, *out_fle;
3217 
3218 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3219 	if (!req_ctx)
3220 		return -ENOMEM;
3221 
3222 	in_fle = &req_ctx->fd_flt[1];
3223 	out_fle = &req_ctx->fd_flt[0];
3224 
3225 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3226 	if (!flc)
3227 		goto err_flc;
3228 
3229 	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3230 	if (dma_mapping_error(ctx->dev, key_dma)) {
3231 		dev_err(ctx->dev, "unable to map key memory\n");
3232 		goto err_key_dma;
3233 	}
3234 
3235 	desc = flc->sh_desc;
3236 
3237 	init_sh_desc(desc, 0);
3238 
3239 	/* descriptor to perform unkeyed hash on key_in */
3240 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3241 			 OP_ALG_AS_INITFINAL);
3242 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3243 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3244 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3245 			 LDST_SRCDST_BYTE_CONTEXT);
3246 
3247 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3248 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3249 				 desc_bytes(desc), DMA_TO_DEVICE);
3250 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3251 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3252 		goto err_flc_dma;
3253 	}
3254 
3255 	dpaa2_fl_set_final(in_fle, true);
3256 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3257 	dpaa2_fl_set_addr(in_fle, key_dma);
3258 	dpaa2_fl_set_len(in_fle, *keylen);
3259 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3260 	dpaa2_fl_set_addr(out_fle, key_dma);
3261 	dpaa2_fl_set_len(out_fle, digestsize);
3262 
3263 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3264 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3265 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3266 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3267 			     1);
3268 
3269 	result.err = 0;
3270 	init_completion(&result.completion);
3271 	result.dev = ctx->dev;
3272 
3273 	req_ctx->flc = flc;
3274 	req_ctx->flc_dma = flc_dma;
3275 	req_ctx->cbk = split_key_sh_done;
3276 	req_ctx->ctx = &result;
3277 
3278 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3279 	if (ret == -EINPROGRESS) {
3280 		/* frame enqueued to hw; wait for the response callback */
3281 		wait_for_completion(&result.completion);
3282 		ret = result.err;
3283 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3284 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
3285 				     digestsize, 1);
3286 	}
3287 
3288 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3289 			 DMA_TO_DEVICE);
3290 err_flc_dma:
3291 	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3292 err_key_dma:
3293 	kfree(flc);
3294 err_flc:
3295 	kfree(req_ctx);
3296 
3297 	*keylen = digestsize;
3298 
3299 	return ret;
3300 }
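
/*
 * hash_digest_key() shows the driver's synchronous-over-asynchronous
 * pattern: build a one-off flow context, enqueue it with
 * dpaa2_caam_enqueue(), and sleep on a completion that the
 * split_key_sh_done() callback fires from the response path. Any return
 * value other than -EINPROGRESS means the frame was never enqueued, so
 * the mappings are simply torn down.
 */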
3301 
3302 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3303 			unsigned int keylen)
3304 {
3305 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3306 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3307 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3308 	int ret;
3309 	u8 *hashed_key = NULL;
3310 
3311 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3312 
3313 	if (keylen > blocksize) {
3314 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3315 		if (!hashed_key)
3316 			return -ENOMEM;
3317 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3318 		if (ret)
3319 			goto bad_free_key;
3320 		key = hashed_key;
3321 	}
3322 
3323 	ctx->adata.keylen = keylen;
3324 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3325 					      OP_ALG_ALGSEL_MASK);
3326 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3327 		goto bad_free_key;
3328 
3329 	ctx->adata.key_virt = key;
3330 	ctx->adata.key_inline = true;
3331 
3332 	/*
3333 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3334 	 * in invalid opcodes (last bytes of user key) in the resulting
3335 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3336 	 * addresses are needed.
3337 	 */
3338 	if (keylen > ctx->adata.keylen_pad) {
3339 		memcpy(ctx->key, key, keylen);
3340 		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3341 					   ctx->adata.keylen_pad,
3342 					   DMA_TO_DEVICE);
3343 	}
3344 
3345 	ret = ahash_set_sh_desc(ahash);
3346 	kfree(hashed_key);
3347 	return ret;
3348 bad_free_key:
3349 	kfree(hashed_key);
3350 	return -EINVAL;
3351 }
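
/*
 * Two independent size rules meet in ahash_setkey() above. First, the
 * RFC 2104 HMAC rule: a key longer than the block size is pre-hashed, so
 * e.g. a 100-byte key for hmac(sha256) (64-byte blocks) is reduced to its
 * 32-byte digest before split key generation. Second, the DKP note in the
 * code: with hmac(md5), keylen_pad is 32 (ipad + opad states), so any user
 * key of 33..64 bytes forces the DKP<ptr,imm> form.
 */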
3352 
3353 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3354 			       struct ahash_request *req)
3355 {
3356 	struct caam_hash_state *state = ahash_request_ctx(req);
3357 
3358 	if (edesc->src_nents)
3359 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3360 
3361 	if (edesc->qm_sg_bytes)
3362 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3363 				 DMA_TO_DEVICE);
3364 
3365 	if (state->buf_dma) {
3366 		dma_unmap_single(dev, state->buf_dma, state->buflen,
3367 				 DMA_TO_DEVICE);
3368 		state->buf_dma = 0;
3369 	}
3370 }
3371 
3372 static inline void ahash_unmap_ctx(struct device *dev,
3373 				   struct ahash_edesc *edesc,
3374 				   struct ahash_request *req, u32 flag)
3375 {
3376 	struct caam_hash_state *state = ahash_request_ctx(req);
3377 
3378 	if (state->ctx_dma) {
3379 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3380 		state->ctx_dma = 0;
3381 	}
3382 	ahash_unmap(dev, edesc, req);
3383 }
3384 
3385 static void ahash_done(void *cbk_ctx, u32 status)
3386 {
3387 	struct crypto_async_request *areq = cbk_ctx;
3388 	struct ahash_request *req = ahash_request_cast(areq);
3389 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3390 	struct caam_hash_state *state = ahash_request_ctx(req);
3391 	struct ahash_edesc *edesc = state->caam_req.edesc;
3392 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3393 	int digestsize = crypto_ahash_digestsize(ahash);
3394 	int ecode = 0;
3395 
3396 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3397 
3398 	if (unlikely(status))
3399 		ecode = caam_qi2_strstatus(ctx->dev, status);
3400 
3401 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3402 	memcpy(req->result, state->caam_ctx, digestsize);
3403 	qi_cache_free(edesc);
3404 
3405 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3406 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3407 			     ctx->ctx_len, 1);
3408 
3409 	req->base.complete(&req->base, ecode);
3410 }
3411 
3412 static void ahash_done_bi(void *cbk_ctx, u32 status)
3413 {
3414 	struct crypto_async_request *areq = cbk_ctx;
3415 	struct ahash_request *req = ahash_request_cast(areq);
3416 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3417 	struct caam_hash_state *state = ahash_request_ctx(req);
3418 	struct ahash_edesc *edesc = state->caam_req.edesc;
3419 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3420 	int ecode = 0;
3421 
3422 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3423 
3424 	if (unlikely(status))
3425 		ecode = caam_qi2_strstatus(ctx->dev, status);
3426 
3427 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3428 	qi_cache_free(edesc);
3429 
3430 	scatterwalk_map_and_copy(state->buf, req->src,
3431 				 req->nbytes - state->next_buflen,
3432 				 state->next_buflen, 0);
3433 	state->buflen = state->next_buflen;
3434 
3435 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3436 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3437 			     state->buflen, 1);
3438 
3439 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3440 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3441 			     ctx->ctx_len, 1);
3442 	if (req->result)
3443 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3444 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3445 				     crypto_ahash_digestsize(ahash), 1);
3446 
3447 	req->base.complete(&req->base, ecode);
3448 }
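
/*
 * ahash_done_bi() above and ahash_done_ctx_dst() below both finish an
 * update operation: besides completing the request, they copy the trailing
 * state->next_buflen bytes of req->src into state->buf, so the partial
 * final block is carried over and prepended to the next update's data.
 */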
3449 
3450 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3451 {
3452 	struct crypto_async_request *areq = cbk_ctx;
3453 	struct ahash_request *req = ahash_request_cast(areq);
3454 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3455 	struct caam_hash_state *state = ahash_request_ctx(req);
3456 	struct ahash_edesc *edesc = state->caam_req.edesc;
3457 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3458 	int digestsize = crypto_ahash_digestsize(ahash);
3459 	int ecode = 0;
3460 
3461 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3462 
3463 	if (unlikely(status))
3464 		ecode = caam_qi2_strstatus(ctx->dev, status);
3465 
3466 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3467 	memcpy(req->result, state->caam_ctx, digestsize);
3468 	qi_cache_free(edesc);
3469 
3470 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3471 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3472 			     ctx->ctx_len, 1);
3473 
3474 	req->base.complete(&req->base, ecode);
3475 }
3476 
3477 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3478 {
3479 	struct crypto_async_request *areq = cbk_ctx;
3480 	struct ahash_request *req = ahash_request_cast(areq);
3481 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3482 	struct caam_hash_state *state = ahash_request_ctx(req);
3483 	struct ahash_edesc *edesc = state->caam_req.edesc;
3484 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3485 	int ecode = 0;
3486 
3487 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3488 
3489 	if (unlikely(status))
3490 		ecode = caam_qi2_strstatus(ctx->dev, status);
3491 
3492 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3493 	qi_cache_free(edesc);
3494 
3495 	scatterwalk_map_and_copy(state->buf, req->src,
3496 				 req->nbytes - state->next_buflen,
3497 				 state->next_buflen, 0);
3498 	state->buflen = state->next_buflen;
3499 
3500 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3501 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3502 			     state->buflen, 1);
3503 
3504 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3505 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3506 			     ctx->ctx_len, 1);
3507 	if (req->result)
3508 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3509 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3510 				     crypto_ahash_digestsize(ahash), 1);
3511 
3512 	req->base.complete(&req->base, ecode);
3513 }
3514 
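/*
 * update with a live running context: only whole blocks are sent to the
 * engine. The input S/G chains the running context, any previously
 * buffered bytes and the leading part of req->src through the UPDATE flow
 * context; the remainder modulo the block size stays behind in state->buf
 * for a later update()/final()/finup() call.
 */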
3515 static int ahash_update_ctx(struct ahash_request *req)
3516 {
3517 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3518 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3519 	struct caam_hash_state *state = ahash_request_ctx(req);
3520 	struct caam_request *req_ctx = &state->caam_req;
3521 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3522 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3523 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3524 		      GFP_KERNEL : GFP_ATOMIC;
3525 	u8 *buf = state->buf;
3526 	int *buflen = &state->buflen;
3527 	int *next_buflen = &state->next_buflen;
3528 	int in_len = *buflen + req->nbytes, to_hash;
3529 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3530 	struct ahash_edesc *edesc;
3531 	int ret = 0;
3532 
3533 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3534 	to_hash = in_len - *next_buflen;
3535 
3536 	if (to_hash) {
3537 		struct dpaa2_sg_entry *sg_table;
3538 		int src_len = req->nbytes - *next_buflen;
3539 
3540 		src_nents = sg_nents_for_len(req->src, src_len);
3541 		if (src_nents < 0) {
3542 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3543 			return src_nents;
3544 		}
3545 
3546 		if (src_nents) {
3547 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3548 						  DMA_TO_DEVICE);
3549 			if (!mapped_nents) {
3550 				dev_err(ctx->dev, "unable to DMA map source\n");
3551 				return -ENOMEM;
3552 			}
3553 		} else {
3554 			mapped_nents = 0;
3555 		}
3556 
3557 		/* allocate space for base edesc and link tables */
3558 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3559 		if (!edesc) {
3560 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3561 				     DMA_TO_DEVICE);
3562 			return -ENOMEM;
3563 		}
3564 
3565 		edesc->src_nents = src_nents;
3566 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3567 		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3568 			      sizeof(*sg_table);
3569 		sg_table = &edesc->sgt[0];
3570 
3571 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3572 				       DMA_BIDIRECTIONAL);
3573 		if (ret)
3574 			goto unmap_ctx;
3575 
3576 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3577 		if (ret)
3578 			goto unmap_ctx;
3579 
3580 		if (mapped_nents) {
3581 			sg_to_qm_sg_last(req->src, src_len,
3582 					 sg_table + qm_sg_src_index, 0);
3583 		} else {
3584 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3585 					   true);
3586 		}
3587 
3588 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3589 						  qm_sg_bytes, DMA_TO_DEVICE);
3590 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3591 			dev_err(ctx->dev, "unable to map S/G table\n");
3592 			ret = -ENOMEM;
3593 			goto unmap_ctx;
3594 		}
3595 		edesc->qm_sg_bytes = qm_sg_bytes;
3596 
3597 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3598 		dpaa2_fl_set_final(in_fle, true);
3599 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3600 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3601 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3602 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3603 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3604 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3605 
3606 		req_ctx->flc = &ctx->flc[UPDATE];
3607 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3608 		req_ctx->cbk = ahash_done_bi;
3609 		req_ctx->ctx = &req->base;
3610 		req_ctx->edesc = edesc;
3611 
3612 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3613 		if (ret != -EINPROGRESS &&
3614 		    !(ret == -EBUSY &&
3615 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3616 			goto unmap_ctx;
3617 	} else if (*next_buflen) {
3618 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3619 					 req->nbytes, 0);
3620 		*buflen = *next_buflen;
3621 
3622 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3623 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
3624 				     *buflen, 1);
3625 	}
3626 
3627 	return ret;
3628 unmap_ctx:
3629 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3630 	qi_cache_free(edesc);
3631 	return ret;
3632 }
3633 
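/*
 * final with a live running context: the input S/G holds the context plus
 * any buffered bytes, and the FINALIZE flow context writes the digest over
 * the context, from where ahash_done_ctx_src() copies it to req->result.
 */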
3634 static int ahash_final_ctx(struct ahash_request *req)
3635 {
3636 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3637 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3638 	struct caam_hash_state *state = ahash_request_ctx(req);
3639 	struct caam_request *req_ctx = &state->caam_req;
3640 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3641 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3642 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3643 		      GFP_KERNEL : GFP_ATOMIC;
3644 	int buflen = state->buflen;
3645 	int qm_sg_bytes;
3646 	int digestsize = crypto_ahash_digestsize(ahash);
3647 	struct ahash_edesc *edesc;
3648 	struct dpaa2_sg_entry *sg_table;
3649 	int ret;
3650 
3651 	/* allocate space for base edesc and link tables */
3652 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3653 	if (!edesc)
3654 		return -ENOMEM;
3655 
3656 	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3657 	sg_table = &edesc->sgt[0];
3658 
3659 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3660 			       DMA_BIDIRECTIONAL);
3661 	if (ret)
3662 		goto unmap_ctx;
3663 
3664 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3665 	if (ret)
3666 		goto unmap_ctx;
3667 
3668 	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3669 
3670 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3671 					  DMA_TO_DEVICE);
3672 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3673 		dev_err(ctx->dev, "unable to map S/G table\n");
3674 		ret = -ENOMEM;
3675 		goto unmap_ctx;
3676 	}
3677 	edesc->qm_sg_bytes = qm_sg_bytes;
3678 
3679 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3680 	dpaa2_fl_set_final(in_fle, true);
3681 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3682 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3683 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3684 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3685 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3686 	dpaa2_fl_set_len(out_fle, digestsize);
3687 
3688 	req_ctx->flc = &ctx->flc[FINALIZE];
3689 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3690 	req_ctx->cbk = ahash_done_ctx_src;
3691 	req_ctx->ctx = &req->base;
3692 	req_ctx->edesc = edesc;
3693 
3694 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3695 	if (ret == -EINPROGRESS ||
3696 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3697 		return ret;
3698 
3699 unmap_ctx:
3700 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3701 	qi_cache_free(edesc);
3702 	return ret;
3703 }
3704 
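/*
 * finup with a live running context: same as ahash_final_ctx(), except
 * that req->src is chained into the input S/G after the context and the
 * buffered bytes.
 */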
3705 static int ahash_finup_ctx(struct ahash_request *req)
3706 {
3707 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3708 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3709 	struct caam_hash_state *state = ahash_request_ctx(req);
3710 	struct caam_request *req_ctx = &state->caam_req;
3711 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3712 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3713 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3714 		      GFP_KERNEL : GFP_ATOMIC;
3715 	int buflen = state->buflen;
3716 	int qm_sg_bytes, qm_sg_src_index;
3717 	int src_nents, mapped_nents;
3718 	int digestsize = crypto_ahash_digestsize(ahash);
3719 	struct ahash_edesc *edesc;
3720 	struct dpaa2_sg_entry *sg_table;
3721 	int ret;
3722 
3723 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3724 	if (src_nents < 0) {
3725 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3726 		return src_nents;
3727 	}
3728 
3729 	if (src_nents) {
3730 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3731 					  DMA_TO_DEVICE);
3732 		if (!mapped_nents) {
3733 			dev_err(ctx->dev, "unable to DMA map source\n");
3734 			return -ENOMEM;
3735 		}
3736 	} else {
3737 		mapped_nents = 0;
3738 	}
3739 
3740 	/* allocate space for base edesc and link tables */
3741 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3742 	if (!edesc) {
3743 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3744 		return -ENOMEM;
3745 	}
3746 
3747 	edesc->src_nents = src_nents;
3748 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3749 	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3750 		      sizeof(*sg_table);
3751 	sg_table = &edesc->sgt[0];
3752 
3753 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3754 			       DMA_BIDIRECTIONAL);
3755 	if (ret)
3756 		goto unmap_ctx;
3757 
3758 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3759 	if (ret)
3760 		goto unmap_ctx;
3761 
3762 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3763 
3764 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3765 					  DMA_TO_DEVICE);
3766 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3767 		dev_err(ctx->dev, "unable to map S/G table\n");
3768 		ret = -ENOMEM;
3769 		goto unmap_ctx;
3770 	}
3771 	edesc->qm_sg_bytes = qm_sg_bytes;
3772 
3773 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3774 	dpaa2_fl_set_final(in_fle, true);
3775 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3776 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3777 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3778 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3779 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3780 	dpaa2_fl_set_len(out_fle, digestsize);
3781 
3782 	req_ctx->flc = &ctx->flc[FINALIZE];
3783 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3784 	req_ctx->cbk = ahash_done_ctx_src;
3785 	req_ctx->ctx = &req->base;
3786 	req_ctx->edesc = edesc;
3787 
3788 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3789 	if (ret == -EINPROGRESS ||
3790 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3791 		return ret;
3792 
3793 unmap_ctx:
3794 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3795 	qi_cache_free(edesc);
3796 	return ret;
3797 }
3798 
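/*
 * One-shot digest of req->src through the DIGEST flow context. A single
 * mapped segment is passed as a "single" frame list entry, more segments
 * go through a QMan S/G table. The digest is produced in state->caam_ctx
 * and copied into req->result by ahash_done().
 */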
3799 static int ahash_digest(struct ahash_request *req)
3800 {
3801 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3802 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3803 	struct caam_hash_state *state = ahash_request_ctx(req);
3804 	struct caam_request *req_ctx = &state->caam_req;
3805 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3806 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3807 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3808 		      GFP_KERNEL : GFP_ATOMIC;
3809 	int digestsize = crypto_ahash_digestsize(ahash);
3810 	int src_nents, mapped_nents;
3811 	struct ahash_edesc *edesc;
3812 	int ret = -ENOMEM;
3813 
3814 	state->buf_dma = 0;
3815 
3816 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3817 	if (src_nents < 0) {
3818 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3819 		return src_nents;
3820 	}
3821 
3822 	if (src_nents) {
3823 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3824 					  DMA_TO_DEVICE);
3825 		if (!mapped_nents) {
3826 			dev_err(ctx->dev, "unable to map source for DMA\n");
3827 			return ret;
3828 		}
3829 	} else {
3830 		mapped_nents = 0;
3831 	}
3832 
3833 	/* allocate space for base edesc and link tables */
3834 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3835 	if (!edesc) {
3836 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3837 		return ret;
3838 	}
3839 
3840 	edesc->src_nents = src_nents;
3841 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3842 
3843 	if (mapped_nents > 1) {
3844 		int qm_sg_bytes;
3845 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3846 
3847 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3848 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3849 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3850 						  qm_sg_bytes, DMA_TO_DEVICE);
3851 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3852 			dev_err(ctx->dev, "unable to map S/G table\n");
3853 			goto unmap;
3854 		}
3855 		edesc->qm_sg_bytes = qm_sg_bytes;
3856 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3857 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3858 	} else {
3859 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3860 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3861 	}
3862 
3863 	state->ctx_dma_len = digestsize;
3864 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3865 					DMA_FROM_DEVICE);
3866 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3867 		dev_err(ctx->dev, "unable to map ctx\n");
3868 		state->ctx_dma = 0;
3869 		goto unmap;
3870 	}
3871 
3872 	dpaa2_fl_set_final(in_fle, true);
3873 	dpaa2_fl_set_len(in_fle, req->nbytes);
3874 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3875 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3876 	dpaa2_fl_set_len(out_fle, digestsize);
3877 
3878 	req_ctx->flc = &ctx->flc[DIGEST];
3879 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3880 	req_ctx->cbk = ahash_done;
3881 	req_ctx->ctx = &req->base;
3882 	req_ctx->edesc = edesc;
3883 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3884 	if (ret == -EINPROGRESS ||
3885 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3886 		return ret;
3887 
3888 unmap:
3889 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3890 	qi_cache_free(edesc);
3891 	return ret;
3892 }
3893 
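/*
 * final before anything was sent to the engine: a one-shot DIGEST over
 * just the buffered bytes. The in_fle handling below covers the
 * zero-length message case.
 */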
3894 static int ahash_final_no_ctx(struct ahash_request *req)
3895 {
3896 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3897 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3898 	struct caam_hash_state *state = ahash_request_ctx(req);
3899 	struct caam_request *req_ctx = &state->caam_req;
3900 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3901 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3902 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3903 		      GFP_KERNEL : GFP_ATOMIC;
3904 	u8 *buf = state->buf;
3905 	int buflen = state->buflen;
3906 	int digestsize = crypto_ahash_digestsize(ahash);
3907 	struct ahash_edesc *edesc;
3908 	int ret = -ENOMEM;
3909 
3910 	/* allocate space for base edesc and link tables */
3911 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3912 	if (!edesc)
3913 		return ret;
3914 
3915 	if (buflen) {
3916 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3917 						DMA_TO_DEVICE);
3918 		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3919 			dev_err(ctx->dev, "unable to map src\n");
3920 			goto unmap;
3921 		}
3922 	}
3923 
3924 	state->ctx_dma_len = digestsize;
3925 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3926 					DMA_FROM_DEVICE);
3927 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3928 		dev_err(ctx->dev, "unable to map ctx\n");
3929 		state->ctx_dma = 0;
3930 		goto unmap;
3931 	}
3932 
3933 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3934 	dpaa2_fl_set_final(in_fle, true);
3935 	/*
3936 	 * The crypto engine requires the input entry to be present when the
3937 	 * "frame list" FD format is used.
3938 	 * Since the engine does not support FMT=2'b11 (unused entry type),
3939 	 * leaving in_fle zeroized (except for the "Final" flag) is the best option.
3940 	 */
3941 	if (buflen) {
3942 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3943 		dpaa2_fl_set_addr(in_fle, state->buf_dma);
3944 		dpaa2_fl_set_len(in_fle, buflen);
3945 	}
3946 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3947 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3948 	dpaa2_fl_set_len(out_fle, digestsize);
3949 
3950 	req_ctx->flc = &ctx->flc[DIGEST];
3951 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3952 	req_ctx->cbk = ahash_done;
3953 	req_ctx->ctx = &req->base;
3954 	req_ctx->edesc = edesc;
3955 
3956 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3957 	if (ret == -EINPROGRESS ||
3958 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3959 		return ret;
3960 
3961 unmap:
3962 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3963 	qi_cache_free(edesc);
3964 	return ret;
3965 }
3966 
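/*
 * First pass of an update that has buffered bytes: hash the whole blocks
 * with the UPDATE_FIRST flow context to seed the running context, then
 * promote the state handlers to the *_ctx variants.
 */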
3967 static int ahash_update_no_ctx(struct ahash_request *req)
3968 {
3969 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3970 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3971 	struct caam_hash_state *state = ahash_request_ctx(req);
3972 	struct caam_request *req_ctx = &state->caam_req;
3973 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3974 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3975 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3976 		      GFP_KERNEL : GFP_ATOMIC;
3977 	u8 *buf = state->buf;
3978 	int *buflen = &state->buflen;
3979 	int *next_buflen = &state->next_buflen;
3980 	int in_len = *buflen + req->nbytes, to_hash;
3981 	int qm_sg_bytes, src_nents, mapped_nents;
3982 	struct ahash_edesc *edesc;
3983 	int ret = 0;
3984 
3985 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3986 	to_hash = in_len - *next_buflen;
3987 
3988 	if (to_hash) {
3989 		struct dpaa2_sg_entry *sg_table;
3990 		int src_len = req->nbytes - *next_buflen;
3991 
3992 		src_nents = sg_nents_for_len(req->src, src_len);
3993 		if (src_nents < 0) {
3994 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3995 			return src_nents;
3996 		}
3997 
3998 		if (src_nents) {
3999 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4000 						  DMA_TO_DEVICE);
4001 			if (!mapped_nents) {
4002 				dev_err(ctx->dev, "unable to DMA map source\n");
4003 				return -ENOMEM;
4004 			}
4005 		} else {
4006 			mapped_nents = 0;
4007 		}
4008 
4009 		/* allocate space for base edesc and link tables */
4010 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4011 		if (!edesc) {
4012 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4013 				     DMA_TO_DEVICE);
4014 			return -ENOMEM;
4015 		}
4016 
4017 		edesc->src_nents = src_nents;
4018 		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4019 			      sizeof(*sg_table);
4020 		sg_table = &edesc->sgt[0];
4021 
4022 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4023 		if (ret)
4024 			goto unmap_ctx;
4025 
4026 		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4027 
4028 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4029 						  qm_sg_bytes, DMA_TO_DEVICE);
4030 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4031 			dev_err(ctx->dev, "unable to map S/G table\n");
4032 			ret = -ENOMEM;
4033 			goto unmap_ctx;
4034 		}
4035 		edesc->qm_sg_bytes = qm_sg_bytes;
4036 
4037 		state->ctx_dma_len = ctx->ctx_len;
4038 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4039 						ctx->ctx_len, DMA_FROM_DEVICE);
4040 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4041 			dev_err(ctx->dev, "unable to map ctx\n");
4042 			state->ctx_dma = 0;
4043 			ret = -ENOMEM;
4044 			goto unmap_ctx;
4045 		}
4046 
4047 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4048 		dpaa2_fl_set_final(in_fle, true);
4049 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4050 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4051 		dpaa2_fl_set_len(in_fle, to_hash);
4052 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4053 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4054 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4055 
4056 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4057 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4058 		req_ctx->cbk = ahash_done_ctx_dst;
4059 		req_ctx->ctx = &req->base;
4060 		req_ctx->edesc = edesc;
4061 
4062 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4063 		if (ret != -EINPROGRESS &&
4064 		    !(ret == -EBUSY &&
4065 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4066 			goto unmap_ctx;
4067 
4068 		state->update = ahash_update_ctx;
4069 		state->finup = ahash_finup_ctx;
4070 		state->final = ahash_final_ctx;
4071 	} else if (*next_buflen) {
4072 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4073 					 req->nbytes, 0);
4074 		*buflen = *next_buflen;
4075 
4076 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4077 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4078 				     *buflen, 1);
4079 	}
4080 
4081 	return ret;
4082 unmap_ctx:
4083 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4084 	qi_cache_free(edesc);
4085 	return ret;
4086 }
4087 
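/*
 * finup with no running context: a one-shot DIGEST over the buffered
 * bytes chained with req->src.
 */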
4088 static int ahash_finup_no_ctx(struct ahash_request *req)
4089 {
4090 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4091 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4092 	struct caam_hash_state *state = ahash_request_ctx(req);
4093 	struct caam_request *req_ctx = &state->caam_req;
4094 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4095 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4096 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4097 		      GFP_KERNEL : GFP_ATOMIC;
4098 	int buflen = state->buflen;
4099 	int qm_sg_bytes, src_nents, mapped_nents;
4100 	int digestsize = crypto_ahash_digestsize(ahash);
4101 	struct ahash_edesc *edesc;
4102 	struct dpaa2_sg_entry *sg_table;
4103 	int ret = -ENOMEM;
4104 
4105 	src_nents = sg_nents_for_len(req->src, req->nbytes);
4106 	if (src_nents < 0) {
4107 		dev_err(ctx->dev, "Invalid number of src SG.\n");
4108 		return src_nents;
4109 	}
4110 
4111 	if (src_nents) {
4112 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4113 					  DMA_TO_DEVICE);
4114 		if (!mapped_nents) {
4115 			dev_err(ctx->dev, "unable to DMA map source\n");
4116 			return ret;
4117 		}
4118 	} else {
4119 		mapped_nents = 0;
4120 	}
4121 
4122 	/* allocate space for base edesc and link tables */
4123 	edesc = qi_cache_zalloc(GFP_DMA | flags);
4124 	if (!edesc) {
4125 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4126 		return ret;
4127 	}
4128 
4129 	edesc->src_nents = src_nents;
4130 	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4131 	sg_table = &edesc->sgt[0];
4132 
4133 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4134 	if (ret)
4135 		goto unmap;
4136 
4137 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4138 
4139 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4140 					  DMA_TO_DEVICE);
4141 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4142 		dev_err(ctx->dev, "unable to map S/G table\n");
4143 		ret = -ENOMEM;
4144 		goto unmap;
4145 	}
4146 	edesc->qm_sg_bytes = qm_sg_bytes;
4147 
4148 	state->ctx_dma_len = digestsize;
4149 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4150 					DMA_FROM_DEVICE);
4151 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4152 		dev_err(ctx->dev, "unable to map ctx\n");
4153 		state->ctx_dma = 0;
4154 		ret = -ENOMEM;
4155 		goto unmap;
4156 	}
4157 
4158 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4159 	dpaa2_fl_set_final(in_fle, true);
4160 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4161 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4162 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4163 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4164 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4165 	dpaa2_fl_set_len(out_fle, digestsize);
4166 
4167 	req_ctx->flc = &ctx->flc[DIGEST];
4168 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4169 	req_ctx->cbk = ahash_done;
4170 	req_ctx->ctx = &req->base;
4171 	req_ctx->edesc = edesc;
4172 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4173 	if (ret != -EINPROGRESS &&
4174 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4175 		goto unmap;
4176 
4177 	return ret;
4178 unmap:
4179 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4180 	qi_cache_free(edesc);
4181 	return ret;
4182 }
4183 
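/*
 * Very first update(): nothing is buffered yet, so either hash the whole
 * blocks of req->src directly with UPDATE_FIRST and promote the state
 * handlers to the *_ctx variants, or - for less than a block - merely
 * buffer the data and keep the "no context" handlers.
 */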
4184 static int ahash_update_first(struct ahash_request *req)
4185 {
4186 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4187 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4188 	struct caam_hash_state *state = ahash_request_ctx(req);
4189 	struct caam_request *req_ctx = &state->caam_req;
4190 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4191 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4192 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4193 		      GFP_KERNEL : GFP_ATOMIC;
4194 	u8 *buf = state->buf;
4195 	int *buflen = &state->buflen;
4196 	int *next_buflen = &state->next_buflen;
4197 	int to_hash;
4198 	int src_nents, mapped_nents;
4199 	struct ahash_edesc *edesc;
4200 	int ret = 0;
4201 
4202 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4203 				      1);
4204 	to_hash = req->nbytes - *next_buflen;
4205 
4206 	if (to_hash) {
4207 		struct dpaa2_sg_entry *sg_table;
4208 		int src_len = req->nbytes - *next_buflen;
4209 
4210 		src_nents = sg_nents_for_len(req->src, src_len);
4211 		if (src_nents < 0) {
4212 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4213 			return src_nents;
4214 		}
4215 
4216 		if (src_nents) {
4217 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4218 						  DMA_TO_DEVICE);
4219 			if (!mapped_nents) {
4220 				dev_err(ctx->dev, "unable to map source for DMA\n");
4221 				return -ENOMEM;
4222 			}
4223 		} else {
4224 			mapped_nents = 0;
4225 		}
4226 
4227 		/* allocate space for base edesc and link tables */
4228 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4229 		if (!edesc) {
4230 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4231 				     DMA_TO_DEVICE);
4232 			return -ENOMEM;
4233 		}
4234 
4235 		edesc->src_nents = src_nents;
4236 		sg_table = &edesc->sgt[0];
4237 
4238 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4239 		dpaa2_fl_set_final(in_fle, true);
4240 		dpaa2_fl_set_len(in_fle, to_hash);
4241 
4242 		if (mapped_nents > 1) {
4243 			int qm_sg_bytes;
4244 
4245 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4246 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
4247 				      sizeof(*sg_table);
4248 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4249 							  qm_sg_bytes,
4250 							  DMA_TO_DEVICE);
4251 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4252 				dev_err(ctx->dev, "unable to map S/G table\n");
4253 				ret = -ENOMEM;
4254 				goto unmap_ctx;
4255 			}
4256 			edesc->qm_sg_bytes = qm_sg_bytes;
4257 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4258 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4259 		} else {
4260 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4261 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4262 		}
4263 
4264 		state->ctx_dma_len = ctx->ctx_len;
4265 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4266 						ctx->ctx_len, DMA_FROM_DEVICE);
4267 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4268 			dev_err(ctx->dev, "unable to map ctx\n");
4269 			state->ctx_dma = 0;
4270 			ret = -ENOMEM;
4271 			goto unmap_ctx;
4272 		}
4273 
4274 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4275 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4276 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4277 
4278 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4279 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4280 		req_ctx->cbk = ahash_done_ctx_dst;
4281 		req_ctx->ctx = &req->base;
4282 		req_ctx->edesc = edesc;
4283 
4284 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4285 		if (ret != -EINPROGRESS &&
4286 		    !(ret == -EBUSY && req->base.flags &
4287 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4288 			goto unmap_ctx;
4289 
4290 		state->update = ahash_update_ctx;
4291 		state->finup = ahash_finup_ctx;
4292 		state->final = ahash_final_ctx;
4293 	} else if (*next_buflen) {
4294 		state->update = ahash_update_no_ctx;
4295 		state->finup = ahash_finup_no_ctx;
4296 		state->final = ahash_final_no_ctx;
4297 		scatterwalk_map_and_copy(buf, req->src, 0,
4298 					 req->nbytes, 0);
4299 		*buflen = *next_buflen;
4300 
4301 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4302 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4303 				     *buflen, 1);
4304 	}
4305 
4306 	return ret;
4307 unmap_ctx:
4308 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4309 	qi_cache_free(edesc);
4310 	return ret;
4311 }
4312 
4313 static int ahash_finup_first(struct ahash_request *req)
4314 {
4315 	return ahash_digest(req);
4316 }
4317 
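/*
 * Reset the software state machine. update()/finup()/final() are
 * dispatched through per-request function pointers, starting at the
 * "first pass" handlers and promoted once the running context lives in
 * the engine. A typical caller sequence is:
 *
 *	ahash_init(req);
 *	ahash_update(req);	(dispatched via state->update)
 *	ahash_final(req);	(dispatched via state->final)
 */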
4318 static int ahash_init(struct ahash_request *req)
4319 {
4320 	struct caam_hash_state *state = ahash_request_ctx(req);
4321 
4322 	state->update = ahash_update_first;
4323 	state->finup = ahash_finup_first;
4324 	state->final = ahash_final_no_ctx;
4325 
4326 	state->ctx_dma = 0;
4327 	state->ctx_dma_len = 0;
4328 	state->buf_dma = 0;
4329 	state->buflen = 0;
4330 	state->next_buflen = 0;
4331 
4332 	return 0;
4333 }
4334 
4335 static int ahash_update(struct ahash_request *req)
4336 {
4337 	struct caam_hash_state *state = ahash_request_ctx(req);
4338 
4339 	return state->update(req);
4340 }
4341 
4342 static int ahash_finup(struct ahash_request *req)
4343 {
4344 	struct caam_hash_state *state = ahash_request_ctx(req);
4345 
4346 	return state->finup(req);
4347 }
4348 
4349 static int ahash_final(struct ahash_request *req)
4350 {
4351 	struct caam_hash_state *state = ahash_request_ctx(req);
4352 
4353 	return state->final(req);
4354 }
4355 
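/*
 * export/import serialize the partial-hash state - buffered bytes,
 * running context and the current state-machine handlers - into a
 * caam_export_state, so that an in-progress hash can be suspended and
 * resumed later.
 */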
4356 static int ahash_export(struct ahash_request *req, void *out)
4357 {
4358 	struct caam_hash_state *state = ahash_request_ctx(req);
4359 	struct caam_export_state *export = out;
4360 	u8 *buf = state->buf;
4361 	int len = state->buflen;
4362 
4363 	memcpy(export->buf, buf, len);
4364 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4365 	export->buflen = len;
4366 	export->update = state->update;
4367 	export->final = state->final;
4368 	export->finup = state->finup;
4369 
4370 	return 0;
4371 }
4372 
4373 static int ahash_import(struct ahash_request *req, const void *in)
4374 {
4375 	struct caam_hash_state *state = ahash_request_ctx(req);
4376 	const struct caam_export_state *export = in;
4377 
4378 	memset(state, 0, sizeof(*state));
4379 	memcpy(state->buf, export->buf, export->buflen);
4380 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4381 	state->buflen = export->buflen;
4382 	state->update = export->update;
4383 	state->final = export->final;
4384 	state->finup = export->finup;
4385 
4386 	return 0;
4387 }
4388 
4389 struct caam_hash_template {
4390 	char name[CRYPTO_MAX_ALG_NAME];
4391 	char driver_name[CRYPTO_MAX_ALG_NAME];
4392 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4393 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4394 	unsigned int blocksize;
4395 	struct ahash_alg template_ahash;
4396 	u32 alg_type;
4397 };
4398 
4399 /* ahash descriptors */
4400 static struct caam_hash_template driver_hash[] = {
4401 	{
4402 		.name = "sha1",
4403 		.driver_name = "sha1-caam-qi2",
4404 		.hmac_name = "hmac(sha1)",
4405 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4406 		.blocksize = SHA1_BLOCK_SIZE,
4407 		.template_ahash = {
4408 			.init = ahash_init,
4409 			.update = ahash_update,
4410 			.final = ahash_final,
4411 			.finup = ahash_finup,
4412 			.digest = ahash_digest,
4413 			.export = ahash_export,
4414 			.import = ahash_import,
4415 			.setkey = ahash_setkey,
4416 			.halg = {
4417 				.digestsize = SHA1_DIGEST_SIZE,
4418 				.statesize = sizeof(struct caam_export_state),
4419 			},
4420 		},
4421 		.alg_type = OP_ALG_ALGSEL_SHA1,
4422 	}, {
4423 		.name = "sha224",
4424 		.driver_name = "sha224-caam-qi2",
4425 		.hmac_name = "hmac(sha224)",
4426 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4427 		.blocksize = SHA224_BLOCK_SIZE,
4428 		.template_ahash = {
4429 			.init = ahash_init,
4430 			.update = ahash_update,
4431 			.final = ahash_final,
4432 			.finup = ahash_finup,
4433 			.digest = ahash_digest,
4434 			.export = ahash_export,
4435 			.import = ahash_import,
4436 			.setkey = ahash_setkey,
4437 			.halg = {
4438 				.digestsize = SHA224_DIGEST_SIZE,
4439 				.statesize = sizeof(struct caam_export_state),
4440 			},
4441 		},
4442 		.alg_type = OP_ALG_ALGSEL_SHA224,
4443 	}, {
4444 		.name = "sha256",
4445 		.driver_name = "sha256-caam-qi2",
4446 		.hmac_name = "hmac(sha256)",
4447 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4448 		.blocksize = SHA256_BLOCK_SIZE,
4449 		.template_ahash = {
4450 			.init = ahash_init,
4451 			.update = ahash_update,
4452 			.final = ahash_final,
4453 			.finup = ahash_finup,
4454 			.digest = ahash_digest,
4455 			.export = ahash_export,
4456 			.import = ahash_import,
4457 			.setkey = ahash_setkey,
4458 			.halg = {
4459 				.digestsize = SHA256_DIGEST_SIZE,
4460 				.statesize = sizeof(struct caam_export_state),
4461 			},
4462 		},
4463 		.alg_type = OP_ALG_ALGSEL_SHA256,
4464 	}, {
4465 		.name = "sha384",
4466 		.driver_name = "sha384-caam-qi2",
4467 		.hmac_name = "hmac(sha384)",
4468 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4469 		.blocksize = SHA384_BLOCK_SIZE,
4470 		.template_ahash = {
4471 			.init = ahash_init,
4472 			.update = ahash_update,
4473 			.final = ahash_final,
4474 			.finup = ahash_finup,
4475 			.digest = ahash_digest,
4476 			.export = ahash_export,
4477 			.import = ahash_import,
4478 			.setkey = ahash_setkey,
4479 			.halg = {
4480 				.digestsize = SHA384_DIGEST_SIZE,
4481 				.statesize = sizeof(struct caam_export_state),
4482 			},
4483 		},
4484 		.alg_type = OP_ALG_ALGSEL_SHA384,
4485 	}, {
4486 		.name = "sha512",
4487 		.driver_name = "sha512-caam-qi2",
4488 		.hmac_name = "hmac(sha512)",
4489 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4490 		.blocksize = SHA512_BLOCK_SIZE,
4491 		.template_ahash = {
4492 			.init = ahash_init,
4493 			.update = ahash_update,
4494 			.final = ahash_final,
4495 			.finup = ahash_finup,
4496 			.digest = ahash_digest,
4497 			.export = ahash_export,
4498 			.import = ahash_import,
4499 			.setkey = ahash_setkey,
4500 			.halg = {
4501 				.digestsize = SHA512_DIGEST_SIZE,
4502 				.statesize = sizeof(struct caam_export_state),
4503 			},
4504 		},
4505 		.alg_type = OP_ALG_ALGSEL_SHA512,
4506 	}, {
4507 		.name = "md5",
4508 		.driver_name = "md5-caam-qi2",
4509 		.hmac_name = "hmac(md5)",
4510 		.hmac_driver_name = "hmac-md5-caam-qi2",
4511 		.blocksize = MD5_BLOCK_WORDS * 4,
4512 		.template_ahash = {
4513 			.init = ahash_init,
4514 			.update = ahash_update,
4515 			.final = ahash_final,
4516 			.finup = ahash_finup,
4517 			.digest = ahash_digest,
4518 			.export = ahash_export,
4519 			.import = ahash_import,
4520 			.setkey = ahash_setkey,
4521 			.halg = {
4522 				.digestsize = MD5_DIGEST_SIZE,
4523 				.statesize = sizeof(struct caam_export_state),
4524 			},
4525 		},
4526 		.alg_type = OP_ALG_ALGSEL_MD5,
4527 	}
4528 };
4529 
4530 struct caam_hash_alg {
4531 	struct list_head entry;
4532 	struct device *dev;
4533 	int alg_type;
4534 	struct ahash_alg ahash_alg;
4535 };
4536 
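/*
 * Per-tfm setup: DMA-map the (keyed-hash only) key buffer and the flow
 * context array, look up the running digest length for the selected
 * algorithm and, for unkeyed hashes, build the shared descriptors right
 * away; keyed hashes defer that to setkey().
 */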
4537 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4538 {
4539 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4540 	struct crypto_alg *base = tfm->__crt_alg;
4541 	struct hash_alg_common *halg =
4542 		 container_of(base, struct hash_alg_common, base);
4543 	struct ahash_alg *alg =
4544 		 container_of(halg, struct ahash_alg, halg);
4545 	struct caam_hash_alg *caam_hash =
4546 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4547 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4548 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4549 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4550 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4551 					 HASH_MSG_LEN + 32,
4552 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4553 					 HASH_MSG_LEN + 64,
4554 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4555 	dma_addr_t dma_addr;
4556 	int i;
4557 
4558 	ctx->dev = caam_hash->dev;
4559 
4560 	if (alg->setkey) {
4561 		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4562 							  ARRAY_SIZE(ctx->key),
4563 							  DMA_TO_DEVICE,
4564 							  DMA_ATTR_SKIP_CPU_SYNC);
4565 		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4566 			dev_err(ctx->dev, "unable to map key\n");
4567 			return -ENOMEM;
4568 		}
4569 	}
4570 
4571 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4572 					DMA_BIDIRECTIONAL,
4573 					DMA_ATTR_SKIP_CPU_SYNC);
4574 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4575 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4576 		if (ctx->adata.key_dma)
4577 			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4578 					       ARRAY_SIZE(ctx->key),
4579 					       DMA_TO_DEVICE,
4580 					       DMA_ATTR_SKIP_CPU_SYNC);
4581 		return -ENOMEM;
4582 	}
4583 
4584 	for (i = 0; i < HASH_NUM_OP; i++)
4585 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4586 
4587 	/* copy descriptor header template value */
4588 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4589 
4590 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4591 				   OP_ALG_ALGSEL_SUBMASK) >>
4592 				  OP_ALG_ALGSEL_SHIFT];
4593 
4594 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4595 				 sizeof(struct caam_hash_state));
4596 
4597 	/*
4598 	 * For keyed hash algorithms, the shared descriptors
4599 	 * will be created later, in the setkey() callback.
4600 	 */
4601 	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
4602 }
4603 
4604 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4605 {
4606 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4607 
4608 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4609 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4610 	if (ctx->adata.key_dma)
4611 		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4612 				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4613 				       DMA_ATTR_SKIP_CPU_SYNC);
4614 }
4615 
4616 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4617 	struct caam_hash_template *template, bool keyed)
4618 {
4619 	struct caam_hash_alg *t_alg;
4620 	struct ahash_alg *halg;
4621 	struct crypto_alg *alg;
4622 
4623 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4624 	if (!t_alg)
4625 		return ERR_PTR(-ENOMEM);
4626 
4627 	t_alg->ahash_alg = template->template_ahash;
4628 	halg = &t_alg->ahash_alg;
4629 	alg = &halg->halg.base;
4630 
4631 	if (keyed) {
4632 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4633 			 template->hmac_name);
4634 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4635 			 template->hmac_driver_name);
4636 	} else {
4637 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4638 			 template->name);
4639 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4640 			 template->driver_name);
4641 		t_alg->ahash_alg.setkey = NULL;
4642 	}
4643 	alg->cra_module = THIS_MODULE;
4644 	alg->cra_init = caam_hash_cra_init;
4645 	alg->cra_exit = caam_hash_cra_exit;
4646 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4647 	alg->cra_priority = CAAM_CRA_PRIORITY;
4648 	alg->cra_blocksize = template->blocksize;
4649 	alg->cra_alignmask = 0;
4650 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4651 
4652 	t_alg->alg_type = template->alg_type;
4653 	t_alg->dev = dev;
4654 
4655 	return t_alg;
4656 }
4657 
4658 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4659 {
4660 	struct dpaa2_caam_priv_per_cpu *ppriv;
4661 
4662 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4663 	napi_schedule_irqoff(&ppriv->napi);
4664 }
4665 
4666 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4667 {
4668 	struct device *dev = priv->dev;
4669 	struct dpaa2_io_notification_ctx *nctx;
4670 	struct dpaa2_caam_priv_per_cpu *ppriv;
4671 	int err, i = 0, cpu;
4672 
4673 	for_each_online_cpu(cpu) {
4674 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4675 		ppriv->priv = priv;
4676 		nctx = &ppriv->nctx;
4677 		nctx->is_cdan = 0;
4678 		nctx->id = ppriv->rsp_fqid;
4679 		nctx->desired_cpu = cpu;
4680 		nctx->cb = dpaa2_caam_fqdan_cb;
4681 
4682 		/* Register notification callbacks */
4683 		ppriv->dpio = dpaa2_io_service_select(cpu);
4684 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4685 		if (unlikely(err)) {
4686 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4687 			nctx->cb = NULL;
4688 			/*
4689 			 * If there is no affine DPIO for this core, there is probably
4690 			 * none available for the remaining cores either. Signal that
4691 			 * we want to retry later, in case the DPIO devices weren't
4692 			 * probed yet.
4693 			 */
4694 			err = -EPROBE_DEFER;
4695 			goto err;
4696 		}
4697 
4698 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4699 						     dev);
4700 		if (unlikely(!ppriv->store)) {
4701 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4702 			err = -ENOMEM;
4703 			goto err;
4704 		}
4705 
4706 		if (++i == priv->num_pairs)
4707 			break;
4708 	}
4709 
4710 	return 0;
4711 
4712 err:
4713 	for_each_online_cpu(cpu) {
4714 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4715 		if (!ppriv->nctx.cb)
4716 			break;
4717 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4718 	}
4719 
4720 	for_each_online_cpu(cpu) {
4721 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4722 		if (!ppriv->store)
4723 			break;
4724 		dpaa2_io_store_destroy(ppriv->store);
4725 	}
4726 
4727 	return err;
4728 }
4729 
4730 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4731 {
4732 	struct dpaa2_caam_priv_per_cpu *ppriv;
4733 	int i = 0, cpu;
4734 
4735 	for_each_online_cpu(cpu) {
4736 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4737 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4738 					    priv->dev);
4739 		dpaa2_io_store_destroy(ppriv->store);
4740 
4741 		if (++i == priv->num_pairs)
4742 			return;
4743 	}
4744 }
4745 
4746 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4747 {
4748 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4749 	struct device *dev = priv->dev;
4750 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4751 	struct dpaa2_caam_priv_per_cpu *ppriv;
4752 	int err = 0, i = 0, cpu;
4753 
4754 	/* Configure Rx queues */
4755 	for_each_online_cpu(cpu) {
4756 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4757 
4758 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4759 				       DPSECI_QUEUE_OPT_USER_CTX;
4760 		rx_queue_cfg.order_preservation_en = 0;
4761 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4762 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4763 		/*
4764 		 * Rx priority (WQ) doesn't really matter, since we use
4765 		 * pull mode, i.e. volatile dequeues from specific FQs
4766 		 */
4767 		rx_queue_cfg.dest_cfg.priority = 0;
4768 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4769 
4770 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4771 					  &rx_queue_cfg);
4772 		if (err) {
4773 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4774 				err);
4775 			return err;
4776 		}
4777 
4778 		if (++i == priv->num_pairs)
4779 			break;
4780 	}
4781 
4782 	return err;
4783 }
4784 
4785 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4786 {
4787 	struct device *dev = priv->dev;
4788 
4789 	if (!priv->cscn_mem)
4790 		return;
4791 
4792 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4793 	kfree(priv->cscn_mem);
4794 }
4795 
4796 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4797 {
4798 	struct device *dev = priv->dev;
4799 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4800 	int err;
4801 
4802 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4803 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4804 		if (err)
4805 			dev_err(dev, "dpseci_reset() failed\n");
4806 	}
4807 
4808 	dpaa2_dpseci_congestion_free(priv);
4809 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4810 }
4811 
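/*
 * Response path: recover the originating caam_request from FD[ADDR]
 * (IOVA to virtual), unmap its frame list and pass FD[FRC] - the SEC
 * status word - to the request's callback.
 */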
4812 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4813 				  const struct dpaa2_fd *fd)
4814 {
4815 	struct caam_request *req;
4816 	u32 fd_err;
4817 
4818 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4819 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4820 		return;
4821 	}
4822 
4823 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4824 	if (unlikely(fd_err))
4825 		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4826 
4827 	/*
4828 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4829 	 * in FD[ERR] or FD[FRC].
4830 	 */
4831 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4832 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4833 			 DMA_BIDIRECTIONAL);
4834 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4835 }
4836 
4837 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4838 {
4839 	int err;
4840 
4841 	/* Retry while portal is busy */
4842 	do {
4843 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4844 					       ppriv->store);
4845 	} while (err == -EBUSY);
4846 
4847 	if (unlikely(err))
4848 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4849 
4850 	return err;
4851 }
4852 
4853 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4854 {
4855 	struct dpaa2_dq *dq;
4856 	int cleaned = 0, is_last;
4857 
4858 	do {
4859 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4860 		if (unlikely(!dq)) {
4861 			if (unlikely(!is_last)) {
4862 				dev_dbg(ppriv->priv->dev,
4863 					"FQ %d returned no valid frames\n",
4864 					ppriv->rsp_fqid);
4865 				/*
4866 				 * MUST retry until we get some sort of
4867 				 * valid response token (be it "empty dequeue"
4868 				 * or a valid frame).
4869 				 */
4870 				continue;
4871 			}
4872 			break;
4873 		}
4874 
4875 		/* Process FD */
4876 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4877 		cleaned++;
4878 	} while (!is_last);
4879 
4880 	return cleaned;
4881 }
4882 
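/*
 * NAPI poll: volatile-dequeue batches of responses from this core's FQ
 * into the dpaa2_io store and consume them until the store comes back
 * empty or another full store might exceed the budget; notifications are
 * rearmed only once the FQ has been drained within budget.
 */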
4883 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4884 {
4885 	struct dpaa2_caam_priv_per_cpu *ppriv;
4886 	struct dpaa2_caam_priv *priv;
4887 	int err, cleaned = 0, store_cleaned;
4888 
4889 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4890 	priv = ppriv->priv;
4891 
4892 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4893 		return 0;
4894 
4895 	do {
4896 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4897 		cleaned += store_cleaned;
4898 
4899 		if (store_cleaned == 0 ||
4900 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4901 			break;
4902 
4903 		/* Try to dequeue some more */
4904 		err = dpaa2_caam_pull_fq(ppriv);
4905 		if (unlikely(err))
4906 			break;
4907 	} while (1);
4908 
4909 	if (cleaned < budget) {
4910 		napi_complete_done(napi, cleaned);
4911 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4912 		if (unlikely(err))
4913 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4914 				err);
4915 	}
4916 
4917 	return cleaned;
4918 }
4919 
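/*
 * Set up the congestion state change notification (CSCN) area. The MC
 * writes the congestion group state into this DMA-able buffer on both
 * congestion entry and exit, so that enqueues can be rejected while the
 * group is congested (checked in dpaa2_caam_enqueue()).
 */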
4920 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4921 					 u16 token)
4922 {
4923 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4924 	struct device *dev = priv->dev;
4925 	int err;
4926 
4927 	/*
4928 	 * The congestion group feature is supported starting with DPSECI API
4929 	 * v5.1, and only when the object has been created with this capability.
4930 	 */
4931 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4932 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4933 		return 0;
4934 
4935 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4936 				 GFP_KERNEL | GFP_DMA);
4937 	if (!priv->cscn_mem)
4938 		return -ENOMEM;
4939 
4940 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4941 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4942 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4943 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4944 		dev_err(dev, "Error mapping CSCN memory area\n");
4945 		err = -ENOMEM;
4946 		goto err_dma_map;
4947 	}
4948 
4949 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4950 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4951 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4952 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4953 	cong_notif_cfg.message_iova = priv->cscn_dma;
4954 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4955 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4956 					DPSECI_CGN_MODE_COHERENT_WRITE;
4957 
4958 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4959 						 &cong_notif_cfg);
4960 	if (err) {
4961 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4962 		goto err_set_cong;
4963 	}
4964 
4965 	return 0;
4966 
4967 err_set_cong:
4968 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4969 err_dma_map:
4970 	kfree(priv->cscn_mem);
4971 
4972 	return err;
4973 }
4974 
4975 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4976 {
4977 	struct device *dev = &ls_dev->dev;
4978 	struct dpaa2_caam_priv *priv;
4979 	struct dpaa2_caam_priv_per_cpu *ppriv;
4980 	int err, cpu;
4981 	u8 i;
4982 
4983 	priv = dev_get_drvdata(dev);
4984 
4985 	priv->dev = dev;
4986 	priv->dpsec_id = ls_dev->obj_desc.id;
4987 
4988 	/* Get a handle for the DPSECI this interface is associated with */
4989 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4990 	if (err) {
4991 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4992 		goto err_open;
4993 	}
4994 
4995 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4996 				     &priv->minor_ver);
4997 	if (err) {
4998 		dev_err(dev, "dpseci_get_api_version() failed\n");
4999 		goto err_get_vers;
5000 	}
5001 
5002 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5003 
5004 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5005 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5006 		if (err) {
5007 			dev_err(dev, "dpseci_reset() failed\n");
5008 			goto err_get_vers;
5009 		}
5010 	}
5011 
5012 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5013 				    &priv->dpseci_attr);
5014 	if (err) {
5015 		dev_err(dev, "dpseci_get_attributes() failed\n");
5016 		goto err_get_vers;
5017 	}
5018 
5019 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5020 				  &priv->sec_attr);
5021 	if (err) {
5022 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
5023 		goto err_get_vers;
5024 	}
5025 
5026 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5027 	if (err) {
5028 		dev_err(dev, "setup_congestion() failed\n");
5029 		goto err_get_vers;
5030 	}
5031 
5032 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5033 			      priv->dpseci_attr.num_tx_queues);
5034 	if (priv->num_pairs > num_online_cpus()) {
5035 		dev_warn(dev, "%d queues won't be used\n",
5036 			 priv->num_pairs - num_online_cpus());
5037 		priv->num_pairs = num_online_cpus();
5038 	}
5039 
5040 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
5041 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5042 					  &priv->rx_queue_attr[i]);
5043 		if (err) {
5044 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
5045 			goto err_get_rx_queue;
5046 		}
5047 	}
5048 
5049 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5050 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5051 					  &priv->tx_queue_attr[i]);
5052 		if (err) {
5053 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
5054 			goto err_get_rx_queue;
5055 		}
5056 	}
5057 
5058 	i = 0;
5059 	for_each_online_cpu(cpu) {
5060 		u8 j;
5061 
5062 		j = i % priv->num_pairs;
5063 
5064 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
5065 		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5066 
5067 		/*
5068 		 * Allow all cores to enqueue, while only some of them
5069 		 * will take part in dequeuing.
5070 		 */
5071 		if (++i > priv->num_pairs)
5072 			continue;
5073 
5074 		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5075 		ppriv->prio = j;
5076 
5077 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5078 			priv->rx_queue_attr[j].fqid,
5079 			priv->tx_queue_attr[j].fqid);
5080 
5081 		ppriv->net_dev.dev = *dev;
5082 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
5083 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
5084 			       DPAA2_CAAM_NAPI_WEIGHT);
5085 	}
5086 
5087 	return 0;
5088 
5089 err_get_rx_queue:
5090 	dpaa2_dpseci_congestion_free(priv);
5091 err_get_vers:
5092 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5093 err_open:
5094 	return err;
5095 }
5096 
5097 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5098 {
5099 	struct device *dev = priv->dev;
5100 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5101 	struct dpaa2_caam_priv_per_cpu *ppriv;
5102 	int i;
5103 
5104 	for (i = 0; i < priv->num_pairs; i++) {
5105 		ppriv = per_cpu_ptr(priv->ppriv, i);
5106 		napi_enable(&ppriv->napi);
5107 	}
5108 
5109 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5110 }
5111 
5112 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5113 {
5114 	struct device *dev = priv->dev;
5115 	struct dpaa2_caam_priv_per_cpu *ppriv;
5116 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5117 	int i, err = 0, enabled;
5118 
5119 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5120 	if (err) {
5121 		dev_err(dev, "dpseci_disable() failed\n");
5122 		return err;
5123 	}
5124 
5125 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5126 	if (err) {
5127 		dev_err(dev, "dpseci_is_enabled() failed\n");
5128 		return err;
5129 	}
5130 
5131 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5132 
5133 	for (i = 0; i < priv->num_pairs; i++) {
5134 		ppriv = per_cpu_ptr(priv->ppriv, i);
5135 		napi_disable(&ppriv->napi);
5136 		netif_napi_del(&ppriv->napi);
5137 	}
5138 
5139 	return 0;
5140 }
5141 
5142 static struct list_head hash_list;
5143 
5144 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5145 {
5146 	struct device *dev;
5147 	struct dpaa2_caam_priv *priv;
5148 	int i, err = 0;
5149 	bool registered = false;
5150 
5151 	/*
5152 	 * There is no way to get CAAM endianness - there is no direct register
5153 	 * space access and MC f/w does not provide this attribute.
5154 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5155 	 * property.
5156 	 */
5157 	caam_little_end = true;
5158 
5159 	caam_imx = false;
5160 
5161 	dev = &dpseci_dev->dev;
5162 
5163 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5164 	if (!priv)
5165 		return -ENOMEM;
5166 
5167 	dev_set_drvdata(dev, priv);
5168 
5169 	priv->domain = iommu_get_domain_for_dev(dev);
5170 
5171 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5172 				     0, SLAB_CACHE_DMA, NULL);
5173 	if (!qi_cache) {
5174 		dev_err(dev, "Can't allocate SEC cache\n");
5175 		return -ENOMEM;
5176 	}
5177 
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain an MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register skcipher algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

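	/* register aead algorithms the device supports */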
	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests if MD is not
		 * supported by the device. The MDHA selector class is
		 * ALGSEL 0x4x; compare in the shifted ALGSEL domain.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) ==
		    (0x40 << OP_ALG_ALGSEL_SHIFT) &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->hmac_driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

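	/* Unwind in the reverse order of the setup steps above */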
err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}

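/*
 * Remove path: tear down in the reverse order of probe - debugfs first,
 * then the crypto API registrations, then the DPSECI object's resources.
 */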
static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpaa2_dpseci_debugfs_exit(priv);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

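	/* hash_list.next is only non-NULL if probe reached INIT_LIST_HEAD() */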
	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);

	return 0;
}

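/**
 * dpaa2_caam_enqueue - enqueue a crypto request towards the DPSECI object
 * @dev: dpseci device the request is issued against
 * @req: fully populated CAAM request
 *
 * Returns -EINPROGRESS if the frame was handed to hardware, -EBUSY if the
 * congestion group is congested, or -EIO on DMA mapping / enqueue failure.
 */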
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

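	/*
	 * Back-pressure: sync the Congestion State Change Notification
	 * (CSCN) area written by hardware and drop the request early if
	 * the congestion group is congested.
	 */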
	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		/* Nothing was mapped, so there is nothing to unmap */
		return -EIO;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

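	/*
	 * dpaa2_io_service_enqueue_fq() returns -EBUSY while the software
	 * portal is busy, so retry a bounded number of times (twice the
	 * number of Tx queues) before giving up.
	 */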
	ppriv = this_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);

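/* Bind to any "dpseci" object the MC bus exposes */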
static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);

static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.owner		= THIS_MODULE,
	},
	.probe		= dpaa2_caam_probe,
	.remove		= dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);