xref: /linux/drivers/crypto/caam/caamalg_qi2.c (revision 1c0ab408bb6e16285fcddc9b4ce74507081d053f)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2019 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include "dpseci-debugfs.h"
19 #include <linux/fsl/mc.h>
20 #include <soc/fsl/dpaa2-io.h>
21 #include <soc/fsl/dpaa2-fd.h>
22 
23 #define CAAM_CRA_PRIORITY	2000
24 
25 /* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
26 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
27 				 SHA512_DIGEST_SIZE * 2)
28 
29 /*
30  * This is a cache of buffers from which users of the CAAM QI driver
31  * can allocate short buffers. It is faster than doing kmalloc on the hotpath.
32  * NOTE: A more elegant solution would be to have some headroom in the frames
33  *       being processed. This could be added by the dpaa2-eth driver, but it
34  *       would pose a problem for userspace applications, which cannot know of
35  *       this limitation. So for now, this will do.
36  * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
37  */
38 static struct kmem_cache *qi_cache;
39 
40 struct caam_alg_entry {
41 	struct device *dev;
42 	int class1_alg_type;
43 	int class2_alg_type;
44 	bool rfc3686;
45 	bool geniv;
46 	bool nodkp;
47 };
48 
49 struct caam_aead_alg {
50 	struct aead_alg aead;
51 	struct caam_alg_entry caam;
52 	bool registered;
53 };
54 
55 struct caam_skcipher_alg {
56 	struct skcipher_alg skcipher;
57 	struct caam_alg_entry caam;
58 	bool registered;
59 };
60 
61 /**
62  * struct caam_ctx - per-session context
63  * @flc: Flow Contexts array
64  * @key: [authentication key], encryption key
65  * @flc_dma: I/O virtual addresses of the Flow Contexts
66  * @key_dma: I/O virtual address of the key
67  * @dir: DMA direction for mapping key and Flow Contexts
68  * @dev: dpseci device
69  * @adata: authentication algorithm details
70  * @cdata: encryption algorithm details
71  * @authsize: authentication tag (a.k.a. ICV / MAC) size
72  */
73 struct caam_ctx {
74 	struct caam_flc flc[NUM_OP];
75 	u8 key[CAAM_MAX_KEY_SIZE];
76 	dma_addr_t flc_dma[NUM_OP];
77 	dma_addr_t key_dma;
78 	enum dma_data_direction dir;
79 	struct device *dev;
80 	struct alginfo adata;
81 	struct alginfo cdata;
82 	unsigned int authsize;
83 };
84 
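/*
 * dpaa2_caam_iova_to_virt - translate an I/O virtual address handed to the
 * DPSECI object back into a CPU virtual address, going through the IOMMU
 * domain when one is attached.
 */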
85 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
86 				     dma_addr_t iova_addr)
87 {
88 	phys_addr_t phys_addr;
89 
90 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
91 				   iova_addr;
92 
93 	return phys_to_virt(phys_addr);
94 }
95 
96 /*
97  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
98  *
99  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
100  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
101  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
102  * hosting 16 SG entries.
103  *
104  * @flags - flags that would be used for the equivalent kzalloc(..) call
105  *
106  * Returns a pointer to a retrieved buffer on success or NULL on failure.
107  */
108 static inline void *qi_cache_zalloc(gfp_t flags)
109 {
110 	return kmem_cache_zalloc(qi_cache, flags);
111 }
112 
113 /*
114  * qi_cache_free - Frees buffers allocated from CAAM-QI cache
115  *
116  * @obj - buffer previously allocated by qi_cache_zalloc
117  *
118  * No checking is done; the call is a passthrough to
119  * kmem_cache_free(...)
120  */
121 static inline void qi_cache_free(void *obj)
122 {
123 	kmem_cache_free(qi_cache, obj);
124 }
125 
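/*
 * to_caam_req - retrieve the caam_request stored in the request context of
 * the skcipher/aead/ahash request embedding the given async request.
 */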
126 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
127 {
128 	switch (crypto_tfm_alg_type(areq->tfm)) {
129 	case CRYPTO_ALG_TYPE_SKCIPHER:
130 		return skcipher_request_ctx(skcipher_request_cast(areq));
131 	case CRYPTO_ALG_TYPE_AEAD:
132 		return aead_request_ctx(container_of(areq, struct aead_request,
133 						     base));
134 	case CRYPTO_ALG_TYPE_AHASH:
135 		return ahash_request_ctx(ahash_request_cast(areq));
136 	default:
137 		return ERR_PTR(-EINVAL);
138 	}
139 }
140 
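/*
 * caam_unmap - DMA-unmap the source/destination scatterlists, the IV buffer
 * and the HW S/G table that were mapped for a job, mirroring the mappings
 * done when the extended descriptor was allocated.
 */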
141 static void caam_unmap(struct device *dev, struct scatterlist *src,
142 		       struct scatterlist *dst, int src_nents,
143 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
144 		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
145 		       int qm_sg_bytes)
146 {
147 	if (dst != src) {
148 		if (src_nents)
149 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
150 		if (dst_nents)
151 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
152 	} else {
153 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
154 	}
155 
156 	if (iv_dma)
157 		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
158 
159 	if (qm_sg_bytes)
160 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
161 }
162 
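/*
 * aead_set_sh_desc - (re)build the encrypt and decrypt shared descriptors for
 * authenc-style AEADs and sync them to the device. Keys are inlined in the
 * descriptors when they fit, otherwise they are referenced by DMA address.
 */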
163 static int aead_set_sh_desc(struct crypto_aead *aead)
164 {
165 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
166 						 typeof(*alg), aead);
167 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
168 	unsigned int ivsize = crypto_aead_ivsize(aead);
169 	struct device *dev = ctx->dev;
170 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
171 	struct caam_flc *flc;
172 	u32 *desc;
173 	u32 ctx1_iv_off = 0;
174 	u32 *nonce = NULL;
175 	unsigned int data_len[2];
176 	u32 inl_mask;
177 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
178 			       OP_ALG_AAI_CTR_MOD128);
179 	const bool is_rfc3686 = alg->caam.rfc3686;
180 
181 	if (!ctx->cdata.keylen || !ctx->authsize)
182 		return 0;
183 
184 	/*
185 	 * AES-CTR needs to load the IV into the CONTEXT1 register
186 	 * at an offset of 128 bits (16 bytes):
187 	 * CONTEXT1[255:128] = IV
188 	 */
189 	if (ctr_mode)
190 		ctx1_iv_off = 16;
191 
192 	/*
193 	 * RFC3686 specific:
194 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
195 	 */
196 	if (is_rfc3686) {
197 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
198 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
199 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
200 	}
201 
202 	data_len[0] = ctx->adata.keylen_pad;
203 	data_len[1] = ctx->cdata.keylen;
204 
205 	/* aead_encrypt shared descriptor */
206 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
207 						 DESC_QI_AEAD_ENC_LEN) +
208 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
209 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
210 			      ARRAY_SIZE(data_len)) < 0)
211 		return -EINVAL;
212 
213 	if (inl_mask & 1)
214 		ctx->adata.key_virt = ctx->key;
215 	else
216 		ctx->adata.key_dma = ctx->key_dma;
217 
218 	if (inl_mask & 2)
219 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
220 	else
221 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
222 
223 	ctx->adata.key_inline = !!(inl_mask & 1);
224 	ctx->cdata.key_inline = !!(inl_mask & 2);
225 
226 	flc = &ctx->flc[ENCRYPT];
227 	desc = flc->sh_desc;
228 
229 	if (alg->caam.geniv)
230 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
231 					  ivsize, ctx->authsize, is_rfc3686,
232 					  nonce, ctx1_iv_off, true,
233 					  priv->sec_attr.era);
234 	else
235 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
236 				       ivsize, ctx->authsize, is_rfc3686, nonce,
237 				       ctx1_iv_off, true, priv->sec_attr.era);
238 
239 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
240 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
241 				   sizeof(flc->flc) + desc_bytes(desc),
242 				   ctx->dir);
243 
244 	/* aead_decrypt shared descriptor */
245 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
246 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
247 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
248 			      ARRAY_SIZE(data_len)) < 0)
249 		return -EINVAL;
250 
251 	if (inl_mask & 1)
252 		ctx->adata.key_virt = ctx->key;
253 	else
254 		ctx->adata.key_dma = ctx->key_dma;
255 
256 	if (inl_mask & 2)
257 		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
258 	else
259 		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
260 
261 	ctx->adata.key_inline = !!(inl_mask & 1);
262 	ctx->cdata.key_inline = !!(inl_mask & 2);
263 
264 	flc = &ctx->flc[DECRYPT];
265 	desc = flc->sh_desc;
266 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
267 			       ivsize, ctx->authsize, alg->caam.geniv,
268 			       is_rfc3686, nonce, ctx1_iv_off, true,
269 			       priv->sec_attr.era);
270 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
271 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
272 				   sizeof(flc->flc) + desc_bytes(desc),
273 				   ctx->dir);
274 
275 	return 0;
276 }
277 
278 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
279 {
280 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
281 
282 	ctx->authsize = authsize;
283 	aead_set_sh_desc(authenc);
284 
285 	return 0;
286 }
287 
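/*
 * aead_setkey - split the authenc key blob into authentication and encryption
 * keys, copy them into the context key buffer (authentication key first,
 * padded to the split key length), then rebuild the shared descriptors.
 */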
288 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
289 		       unsigned int keylen)
290 {
291 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
292 	struct device *dev = ctx->dev;
293 	struct crypto_authenc_keys keys;
294 
295 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
296 		goto badkey;
297 
298 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
299 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
300 		keys.authkeylen);
301 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
302 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
303 
304 	ctx->adata.keylen = keys.authkeylen;
305 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
306 					      OP_ALG_ALGSEL_MASK);
307 
308 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
309 		goto badkey;
310 
311 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
312 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
313 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
314 				   keys.enckeylen, ctx->dir);
315 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
316 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
317 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
318 
319 	ctx->cdata.keylen = keys.enckeylen;
320 
321 	memzero_explicit(&keys, sizeof(keys));
322 	return aead_set_sh_desc(aead);
323 badkey:
324 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
325 	memzero_explicit(&keys, sizeof(keys));
326 	return -EINVAL;
327 }
328 
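/*
 * des3_aead_setkey - authenc setkey variant that additionally runs the 3DES
 * key checks (__des3_verify_key) before delegating to aead_setkey().
 */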
329 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
330 			    unsigned int keylen)
331 {
332 	struct crypto_authenc_keys keys;
333 	u32 flags;
334 	int err;
335 
336 	err = crypto_authenc_extractkeys(&keys, key, keylen);
337 	if (unlikely(err))
338 		goto badkey;
339 
340 	err = -EINVAL;
341 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
342 		goto badkey;
343 
344 	flags = crypto_aead_get_flags(aead);
345 	err = __des3_verify_key(&flags, keys.enckey);
346 	if (unlikely(err)) {
347 		crypto_aead_set_flags(aead, flags);
348 		goto out;
349 	}
350 
351 	err = aead_setkey(aead, key, keylen);
352 
353 out:
354 	memzero_explicit(&keys, sizeof(keys));
355 	return err;
356 
357 badkey:
358 	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
359 	goto out;
360 }
361 
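/*
 * aead_edesc_alloc - allocate and DMA-map an extended descriptor for an AEAD
 * request: map the source/destination scatterlists, the IV (when needed) and
 * the assoclen, build the HW S/G table and fill in the frame list entries.
 */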
362 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
363 					   bool encrypt)
364 {
365 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
366 	struct caam_request *req_ctx = aead_request_ctx(req);
367 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
368 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
369 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
370 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
371 						 typeof(*alg), aead);
372 	struct device *dev = ctx->dev;
373 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
374 		      GFP_KERNEL : GFP_ATOMIC;
375 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
376 	int src_len, dst_len = 0;
377 	struct aead_edesc *edesc;
378 	dma_addr_t qm_sg_dma, iv_dma = 0;
379 	int ivsize = 0;
380 	unsigned int authsize = ctx->authsize;
381 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
382 	int in_len, out_len;
383 	struct dpaa2_sg_entry *sg_table;
384 
385 	/* allocate space for base edesc, link tables and IV */
386 	edesc = qi_cache_zalloc(GFP_DMA | flags);
387 	if (unlikely(!edesc)) {
388 		dev_err(dev, "could not allocate extended descriptor\n");
389 		return ERR_PTR(-ENOMEM);
390 	}
391 
392 	if (unlikely(req->dst != req->src)) {
393 		src_len = req->assoclen + req->cryptlen;
394 		dst_len = src_len + (encrypt ? authsize : (-authsize));
395 
396 		src_nents = sg_nents_for_len(req->src, src_len);
397 		if (unlikely(src_nents < 0)) {
398 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
399 				src_len);
400 			qi_cache_free(edesc);
401 			return ERR_PTR(src_nents);
402 		}
403 
404 		dst_nents = sg_nents_for_len(req->dst, dst_len);
405 		if (unlikely(dst_nents < 0)) {
406 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
407 				dst_len);
408 			qi_cache_free(edesc);
409 			return ERR_PTR(dst_nents);
410 		}
411 
412 		if (src_nents) {
413 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
414 						      DMA_TO_DEVICE);
415 			if (unlikely(!mapped_src_nents)) {
416 				dev_err(dev, "unable to map source\n");
417 				qi_cache_free(edesc);
418 				return ERR_PTR(-ENOMEM);
419 			}
420 		} else {
421 			mapped_src_nents = 0;
422 		}
423 
424 		if (dst_nents) {
425 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
426 						      DMA_FROM_DEVICE);
427 			if (unlikely(!mapped_dst_nents)) {
428 				dev_err(dev, "unable to map destination\n");
429 				dma_unmap_sg(dev, req->src, src_nents,
430 					     DMA_TO_DEVICE);
431 				qi_cache_free(edesc);
432 				return ERR_PTR(-ENOMEM);
433 			}
434 		} else {
435 			mapped_dst_nents = 0;
436 		}
437 	} else {
438 		src_len = req->assoclen + req->cryptlen +
439 			  (encrypt ? authsize : 0);
440 
441 		src_nents = sg_nents_for_len(req->src, src_len);
442 		if (unlikely(src_nents < 0)) {
443 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
444 				src_len);
445 			qi_cache_free(edesc);
446 			return ERR_PTR(src_nents);
447 		}
448 
449 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
450 					      DMA_BIDIRECTIONAL);
451 		if (unlikely(!mapped_src_nents)) {
452 			dev_err(dev, "unable to map source\n");
453 			qi_cache_free(edesc);
454 			return ERR_PTR(-ENOMEM);
455 		}
456 	}
457 
458 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
459 		ivsize = crypto_aead_ivsize(aead);
460 
461 	/*
462 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
463 	 * Input is not contiguous.
464 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
465 	 * the end of the table by allocating more S/G entries. Logic:
466 	 * if (src != dst && output S/G)
467 	 *      pad output S/G, if needed
468 	 * else if (src == dst && S/G)
469 	 *      overlapping S/Gs; pad one of them
470 	 * else if (input S/G) ...
471 	 *      pad input S/G, if needed
472 	 */
473 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
474 	if (mapped_dst_nents > 1)
475 		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
476 	else if ((req->src == req->dst) && (mapped_src_nents > 1))
477 		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
478 				  1 + !!ivsize +
479 				  pad_sg_nents(mapped_src_nents));
480 	else
481 		qm_sg_nents = pad_sg_nents(qm_sg_nents);
482 
483 	sg_table = &edesc->sgt[0];
484 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
485 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
486 		     CAAM_QI_MEMCACHE_SIZE)) {
487 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
488 			qm_sg_nents, ivsize);
489 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
490 			   0, DMA_NONE, 0, 0);
491 		qi_cache_free(edesc);
492 		return ERR_PTR(-ENOMEM);
493 	}
494 
495 	if (ivsize) {
496 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
497 
498 		/* Make sure IV is located in a DMAable area */
499 		memcpy(iv, req->iv, ivsize);
500 
501 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
502 		if (dma_mapping_error(dev, iv_dma)) {
503 			dev_err(dev, "unable to map IV\n");
504 			caam_unmap(dev, req->src, req->dst, src_nents,
505 				   dst_nents, 0, 0, DMA_NONE, 0, 0);
506 			qi_cache_free(edesc);
507 			return ERR_PTR(-ENOMEM);
508 		}
509 	}
510 
511 	edesc->src_nents = src_nents;
512 	edesc->dst_nents = dst_nents;
513 	edesc->iv_dma = iv_dma;
514 
515 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
516 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
517 		/*
518 		 * The associated data already includes the IV, but we need
519 		 * to skip it when we authenticate or encrypt...
520 		 */
521 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
522 	else
523 		edesc->assoclen = cpu_to_caam32(req->assoclen);
524 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
525 					     DMA_TO_DEVICE);
526 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
527 		dev_err(dev, "unable to map assoclen\n");
528 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
529 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
530 		qi_cache_free(edesc);
531 		return ERR_PTR(-ENOMEM);
532 	}
533 
534 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
535 	qm_sg_index++;
536 	if (ivsize) {
537 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
538 		qm_sg_index++;
539 	}
540 	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
541 	qm_sg_index += mapped_src_nents;
542 
543 	if (mapped_dst_nents > 1)
544 		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
545 
546 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
547 	if (dma_mapping_error(dev, qm_sg_dma)) {
548 		dev_err(dev, "unable to map S/G table\n");
549 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
550 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
551 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
552 		qi_cache_free(edesc);
553 		return ERR_PTR(-ENOMEM);
554 	}
555 
556 	edesc->qm_sg_dma = qm_sg_dma;
557 	edesc->qm_sg_bytes = qm_sg_bytes;
558 
559 	out_len = req->assoclen + req->cryptlen +
560 		  (encrypt ? ctx->authsize : (-ctx->authsize));
561 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
562 
563 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
564 	dpaa2_fl_set_final(in_fle, true);
565 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
566 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
567 	dpaa2_fl_set_len(in_fle, in_len);
568 
569 	if (req->dst == req->src) {
570 		if (mapped_src_nents == 1) {
571 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
572 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
573 		} else {
574 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
575 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
576 					  (1 + !!ivsize) * sizeof(*sg_table));
577 		}
578 	} else if (!mapped_dst_nents) {
579 		/*
580 		 * The crypto engine requires the output entry to be present when
581 		 * a "frame list" FD is used.
582 		 * Since the engine does not support FMT=2'b11 (unused entry type),
583 		 * leaving out_fle zeroized is the best option.
584 		 */
585 		goto skip_out_fle;
586 	} else if (mapped_dst_nents == 1) {
587 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
588 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
589 	} else {
590 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
591 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
592 				  sizeof(*sg_table));
593 	}
594 
595 	dpaa2_fl_set_len(out_fle, out_len);
596 
597 skip_out_fle:
598 	return edesc;
599 }
600 
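/*
 * chachapoly_set_sh_desc - build the ChaCha20-Poly1305 encrypt and decrypt
 * shared descriptors and sync them to the device.
 */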
601 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
602 {
603 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
604 	unsigned int ivsize = crypto_aead_ivsize(aead);
605 	struct device *dev = ctx->dev;
606 	struct caam_flc *flc;
607 	u32 *desc;
608 
609 	if (!ctx->cdata.keylen || !ctx->authsize)
610 		return 0;
611 
612 	flc = &ctx->flc[ENCRYPT];
613 	desc = flc->sh_desc;
614 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
615 			       ctx->authsize, true, true);
616 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
617 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
618 				   sizeof(flc->flc) + desc_bytes(desc),
619 				   ctx->dir);
620 
621 	flc = &ctx->flc[DECRYPT];
622 	desc = flc->sh_desc;
623 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
624 			       ctx->authsize, false, true);
625 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
626 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
627 				   sizeof(flc->flc) + desc_bytes(desc),
628 				   ctx->dir);
629 
630 	return 0;
631 }
632 
633 static int chachapoly_setauthsize(struct crypto_aead *aead,
634 				  unsigned int authsize)
635 {
636 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
637 
638 	if (authsize != POLY1305_DIGEST_SIZE)
639 		return -EINVAL;
640 
641 	ctx->authsize = authsize;
642 	return chachapoly_set_sh_desc(aead);
643 }
644 
645 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
646 			     unsigned int keylen)
647 {
648 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
649 	unsigned int ivsize = crypto_aead_ivsize(aead);
650 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
651 
652 	if (keylen != CHACHA_KEY_SIZE + saltlen) {
653 		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
654 		return -EINVAL;
655 	}
656 
657 	ctx->cdata.key_virt = key;
658 	ctx->cdata.keylen = keylen - saltlen;
659 
660 	return chachapoly_set_sh_desc(aead);
661 }
662 
663 static int gcm_set_sh_desc(struct crypto_aead *aead)
664 {
665 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
666 	struct device *dev = ctx->dev;
667 	unsigned int ivsize = crypto_aead_ivsize(aead);
668 	struct caam_flc *flc;
669 	u32 *desc;
670 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
671 			ctx->cdata.keylen;
672 
673 	if (!ctx->cdata.keylen || !ctx->authsize)
674 		return 0;
675 
676 	/*
677 	 * AES GCM encrypt shared descriptor
678 	 * Job Descriptor and Shared Descriptor
679 	 * must fit into the 64-word Descriptor h/w Buffer
680 	 */
681 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
682 		ctx->cdata.key_inline = true;
683 		ctx->cdata.key_virt = ctx->key;
684 	} else {
685 		ctx->cdata.key_inline = false;
686 		ctx->cdata.key_dma = ctx->key_dma;
687 	}
688 
689 	flc = &ctx->flc[ENCRYPT];
690 	desc = flc->sh_desc;
691 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
692 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
693 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
694 				   sizeof(flc->flc) + desc_bytes(desc),
695 				   ctx->dir);
696 
697 	/*
698 	 * Job Descriptor and Shared Descriptors
699 	 * must all fit into the 64-word Descriptor h/w Buffer
700 	 */
701 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
702 		ctx->cdata.key_inline = true;
703 		ctx->cdata.key_virt = ctx->key;
704 	} else {
705 		ctx->cdata.key_inline = false;
706 		ctx->cdata.key_dma = ctx->key_dma;
707 	}
708 
709 	flc = &ctx->flc[DECRYPT];
710 	desc = flc->sh_desc;
711 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
712 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
713 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
714 				   sizeof(flc->flc) + desc_bytes(desc),
715 				   ctx->dir);
716 
717 	return 0;
718 }
719 
720 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
721 {
722 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
723 
724 	ctx->authsize = authsize;
725 	gcm_set_sh_desc(authenc);
726 
727 	return 0;
728 }
729 
730 static int gcm_setkey(struct crypto_aead *aead,
731 		      const u8 *key, unsigned int keylen)
732 {
733 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
734 	struct device *dev = ctx->dev;
735 
736 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
737 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
738 
739 	memcpy(ctx->key, key, keylen);
740 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
741 	ctx->cdata.keylen = keylen;
742 
743 	return gcm_set_sh_desc(aead);
744 }
745 
746 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
747 {
748 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
749 	struct device *dev = ctx->dev;
750 	unsigned int ivsize = crypto_aead_ivsize(aead);
751 	struct caam_flc *flc;
752 	u32 *desc;
753 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
754 			ctx->cdata.keylen;
755 
756 	if (!ctx->cdata.keylen || !ctx->authsize)
757 		return 0;
758 
759 	ctx->cdata.key_virt = ctx->key;
760 
761 	/*
762 	 * RFC4106 encrypt shared descriptor
763 	 * Job Descriptor and Shared Descriptor
764 	 * must fit into the 64-word Descriptor h/w Buffer
765 	 */
766 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
767 		ctx->cdata.key_inline = true;
768 	} else {
769 		ctx->cdata.key_inline = false;
770 		ctx->cdata.key_dma = ctx->key_dma;
771 	}
772 
773 	flc = &ctx->flc[ENCRYPT];
774 	desc = flc->sh_desc;
775 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
776 				  true);
777 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
778 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
779 				   sizeof(flc->flc) + desc_bytes(desc),
780 				   ctx->dir);
781 
782 	/*
783 	 * Job Descriptor and Shared Descriptors
784 	 * must all fit into the 64-word Descriptor h/w Buffer
785 	 */
786 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
787 		ctx->cdata.key_inline = true;
788 	} else {
789 		ctx->cdata.key_inline = false;
790 		ctx->cdata.key_dma = ctx->key_dma;
791 	}
792 
793 	flc = &ctx->flc[DECRYPT];
794 	desc = flc->sh_desc;
795 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
796 				  true);
797 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
798 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
799 				   sizeof(flc->flc) + desc_bytes(desc),
800 				   ctx->dir);
801 
802 	return 0;
803 }
804 
805 static int rfc4106_setauthsize(struct crypto_aead *authenc,
806 			       unsigned int authsize)
807 {
808 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
809 
810 	ctx->authsize = authsize;
811 	rfc4106_set_sh_desc(authenc);
812 
813 	return 0;
814 }
815 
816 static int rfc4106_setkey(struct crypto_aead *aead,
817 			  const u8 *key, unsigned int keylen)
818 {
819 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
820 	struct device *dev = ctx->dev;
821 
822 	if (keylen < 4)
823 		return -EINVAL;
824 
825 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
826 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
827 
828 	memcpy(ctx->key, key, keylen);
829 	/*
830 	 * The last four bytes of the key material are used as the salt value
831 	 * in the nonce. Update the AES key length.
832 	 */
833 	ctx->cdata.keylen = keylen - 4;
834 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
835 				   ctx->dir);
836 
837 	return rfc4106_set_sh_desc(aead);
838 }
839 
840 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
841 {
842 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
843 	struct device *dev = ctx->dev;
844 	unsigned int ivsize = crypto_aead_ivsize(aead);
845 	struct caam_flc *flc;
846 	u32 *desc;
847 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
848 			ctx->cdata.keylen;
849 
850 	if (!ctx->cdata.keylen || !ctx->authsize)
851 		return 0;
852 
853 	ctx->cdata.key_virt = ctx->key;
854 
855 	/*
856 	 * RFC4543 encrypt shared descriptor
857 	 * Job Descriptor and Shared Descriptor
858 	 * must fit into the 64-word Descriptor h/w Buffer
859 	 */
860 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
861 		ctx->cdata.key_inline = true;
862 	} else {
863 		ctx->cdata.key_inline = false;
864 		ctx->cdata.key_dma = ctx->key_dma;
865 	}
866 
867 	flc = &ctx->flc[ENCRYPT];
868 	desc = flc->sh_desc;
869 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
870 				  true);
871 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
872 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
873 				   sizeof(flc->flc) + desc_bytes(desc),
874 				   ctx->dir);
875 
876 	/*
877 	 * Job Descriptor and Shared Descriptors
878 	 * must all fit into the 64-word Descriptor h/w Buffer
879 	 */
880 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
881 		ctx->cdata.key_inline = true;
882 	} else {
883 		ctx->cdata.key_inline = false;
884 		ctx->cdata.key_dma = ctx->key_dma;
885 	}
886 
887 	flc = &ctx->flc[DECRYPT];
888 	desc = flc->sh_desc;
889 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
890 				  true);
891 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
892 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
893 				   sizeof(flc->flc) + desc_bytes(desc),
894 				   ctx->dir);
895 
896 	return 0;
897 }
898 
899 static int rfc4543_setauthsize(struct crypto_aead *authenc,
900 			       unsigned int authsize)
901 {
902 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
903 
904 	ctx->authsize = authsize;
905 	rfc4543_set_sh_desc(authenc);
906 
907 	return 0;
908 }
909 
910 static int rfc4543_setkey(struct crypto_aead *aead,
911 			  const u8 *key, unsigned int keylen)
912 {
913 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
914 	struct device *dev = ctx->dev;
915 
916 	if (keylen < 4)
917 		return -EINVAL;
918 
919 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
920 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
921 
922 	memcpy(ctx->key, key, keylen);
923 	/*
924 	 * The last four bytes of the key material are used as the salt value
925 	 * in the nonce. Update the AES key length.
926 	 */
927 	ctx->cdata.keylen = keylen - 4;
928 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
929 				   ctx->dir);
930 
931 	return rfc4543_set_sh_desc(aead);
932 }
933 
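/*
 * skcipher_setkey - store the cipher key (always inlined in the descriptor)
 * and build the encrypt/decrypt shared descriptors. For RFC3686 the nonce
 * appended to the key is not counted in the key length; loading it into the
 * context register is handled by the descriptor constructor.
 */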
934 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
935 			   unsigned int keylen)
936 {
937 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
938 	struct caam_skcipher_alg *alg =
939 		container_of(crypto_skcipher_alg(skcipher),
940 			     struct caam_skcipher_alg, skcipher);
941 	struct device *dev = ctx->dev;
942 	struct caam_flc *flc;
943 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
944 	u32 *desc;
945 	u32 ctx1_iv_off = 0;
946 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
947 			       OP_ALG_AAI_CTR_MOD128) &&
948 			       ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
949 			       OP_ALG_ALGSEL_CHACHA20);
950 	const bool is_rfc3686 = alg->caam.rfc3686;
951 
952 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
953 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
954 
955 	/*
956 	 * AES-CTR needs to load the IV into the CONTEXT1 register
957 	 * at an offset of 128 bits (16 bytes):
958 	 * CONTEXT1[255:128] = IV
959 	 */
960 	if (ctr_mode)
961 		ctx1_iv_off = 16;
962 
963 	/*
964 	 * RFC3686 specific:
965 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
966 	 *	| *key = {KEY, NONCE}
967 	 */
968 	if (is_rfc3686) {
969 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
970 		keylen -= CTR_RFC3686_NONCE_SIZE;
971 	}
972 
973 	ctx->cdata.keylen = keylen;
974 	ctx->cdata.key_virt = key;
975 	ctx->cdata.key_inline = true;
976 
977 	/* skcipher_encrypt shared descriptor */
978 	flc = &ctx->flc[ENCRYPT];
979 	desc = flc->sh_desc;
980 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
981 				   ctx1_iv_off);
982 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
983 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
984 				   sizeof(flc->flc) + desc_bytes(desc),
985 				   ctx->dir);
986 
987 	/* skcipher_decrypt shared descriptor */
988 	flc = &ctx->flc[DECRYPT];
989 	desc = flc->sh_desc;
990 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
991 				   ctx1_iv_off);
992 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
993 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
994 				   sizeof(flc->flc) + desc_bytes(desc),
995 				   ctx->dir);
996 
997 	return 0;
998 }
999 
1000 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1001 				const u8 *key, unsigned int keylen)
1002 {
1003 	return unlikely(des3_verify_key(skcipher, key)) ?:
1004 	       skcipher_setkey(skcipher, key, keylen);
1005 }
1006 
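/*
 * xts_skcipher_setkey - validate that the key consists of two concatenated
 * AES keys (data and tweak) and build the XTS encrypt/decrypt shared
 * descriptors.
 */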
1007 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1008 			       unsigned int keylen)
1009 {
1010 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1011 	struct device *dev = ctx->dev;
1012 	struct caam_flc *flc;
1013 	u32 *desc;
1014 
1015 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
1016 		dev_err(dev, "key size mismatch\n");
1017 		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1018 		return -EINVAL;
1019 	}
1020 
1021 	ctx->cdata.keylen = keylen;
1022 	ctx->cdata.key_virt = key;
1023 	ctx->cdata.key_inline = true;
1024 
1025 	/* xts_skcipher_encrypt shared descriptor */
1026 	flc = &ctx->flc[ENCRYPT];
1027 	desc = flc->sh_desc;
1028 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1029 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1030 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1031 				   sizeof(flc->flc) + desc_bytes(desc),
1032 				   ctx->dir);
1033 
1034 	/* xts_skcipher_decrypt shared descriptor */
1035 	flc = &ctx->flc[DECRYPT];
1036 	desc = flc->sh_desc;
1037 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1038 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1039 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1040 				   sizeof(flc->flc) + desc_bytes(desc),
1041 				   ctx->dir);
1042 
1043 	return 0;
1044 }
1045 
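/*
 * skcipher_edesc_alloc - allocate and DMA-map an extended descriptor for a
 * skcipher request: map source/destination, copy the IV into a DMAable
 * buffer, build the input/output HW S/G tables and the frame list entries.
 */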
1046 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1047 {
1048 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1049 	struct caam_request *req_ctx = skcipher_request_ctx(req);
1050 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1051 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1052 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1053 	struct device *dev = ctx->dev;
1054 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1055 		       GFP_KERNEL : GFP_ATOMIC;
1056 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1057 	struct skcipher_edesc *edesc;
1058 	dma_addr_t iv_dma;
1059 	u8 *iv;
1060 	int ivsize = crypto_skcipher_ivsize(skcipher);
1061 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1062 	struct dpaa2_sg_entry *sg_table;
1063 
1064 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1065 	if (unlikely(src_nents < 0)) {
1066 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1067 			req->cryptlen);
1068 		return ERR_PTR(src_nents);
1069 	}
1070 
1071 	if (unlikely(req->dst != req->src)) {
1072 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1073 		if (unlikely(dst_nents < 0)) {
1074 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1075 				req->cryptlen);
1076 			return ERR_PTR(dst_nents);
1077 		}
1078 
1079 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1080 					      DMA_TO_DEVICE);
1081 		if (unlikely(!mapped_src_nents)) {
1082 			dev_err(dev, "unable to map source\n");
1083 			return ERR_PTR(-ENOMEM);
1084 		}
1085 
1086 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1087 					      DMA_FROM_DEVICE);
1088 		if (unlikely(!mapped_dst_nents)) {
1089 			dev_err(dev, "unable to map destination\n");
1090 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1091 			return ERR_PTR(-ENOMEM);
1092 		}
1093 	} else {
1094 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1095 					      DMA_BIDIRECTIONAL);
1096 		if (unlikely(!mapped_src_nents)) {
1097 			dev_err(dev, "unable to map source\n");
1098 			return ERR_PTR(-ENOMEM);
1099 		}
1100 	}
1101 
1102 	qm_sg_ents = 1 + mapped_src_nents;
1103 	dst_sg_idx = qm_sg_ents;
1104 
1105 	/*
1106 	 * Input, output HW S/G tables: [IV, src][dst, IV]
1107 	 * IV entries point to the same buffer
1108 	 * If src == dst, S/G entries are reused (S/G tables overlap)
1109 	 *
1110 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1111 	 * the end of the table by allocating more S/G entries.
1112 	 */
1113 	if (req->src != req->dst)
1114 		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1115 	else
1116 		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1117 
1118 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1119 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1120 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1121 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1122 			qm_sg_ents, ivsize);
1123 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1124 			   0, DMA_NONE, 0, 0);
1125 		return ERR_PTR(-ENOMEM);
1126 	}
1127 
1128 	/* allocate space for base edesc, link tables and IV */
1129 	edesc = qi_cache_zalloc(GFP_DMA | flags);
1130 	if (unlikely(!edesc)) {
1131 		dev_err(dev, "could not allocate extended descriptor\n");
1132 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1133 			   0, DMA_NONE, 0, 0);
1134 		return ERR_PTR(-ENOMEM);
1135 	}
1136 
1137 	/* Make sure IV is located in a DMAable area */
1138 	sg_table = &edesc->sgt[0];
1139 	iv = (u8 *)(sg_table + qm_sg_ents);
1140 	memcpy(iv, req->iv, ivsize);
1141 
1142 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1143 	if (dma_mapping_error(dev, iv_dma)) {
1144 		dev_err(dev, "unable to map IV\n");
1145 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1146 			   0, DMA_NONE, 0, 0);
1147 		qi_cache_free(edesc);
1148 		return ERR_PTR(-ENOMEM);
1149 	}
1150 
1151 	edesc->src_nents = src_nents;
1152 	edesc->dst_nents = dst_nents;
1153 	edesc->iv_dma = iv_dma;
1154 	edesc->qm_sg_bytes = qm_sg_bytes;
1155 
1156 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1157 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1158 
1159 	if (req->src != req->dst)
1160 		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1161 
1162 	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1163 			 ivsize, 0);
1164 
1165 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1166 					  DMA_TO_DEVICE);
1167 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1168 		dev_err(dev, "unable to map S/G table\n");
1169 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1170 			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1171 		qi_cache_free(edesc);
1172 		return ERR_PTR(-ENOMEM);
1173 	}
1174 
1175 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1176 	dpaa2_fl_set_final(in_fle, true);
1177 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1178 	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1179 
1180 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1181 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1182 
1183 	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1184 
1185 	if (req->src == req->dst)
1186 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1187 				  sizeof(*sg_table));
1188 	else
1189 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1190 				  sizeof(*sg_table));
1191 
1192 	return edesc;
1193 }
1194 
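/* Undo the DMA mappings performed by aead_edesc_alloc(), incl. assoclen. */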
1195 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1196 		       struct aead_request *req)
1197 {
1198 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1199 	int ivsize = crypto_aead_ivsize(aead);
1200 
1201 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1202 		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1203 		   edesc->qm_sg_bytes);
1204 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1205 }
1206 
1207 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1208 			   struct skcipher_request *req)
1209 {
1210 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1211 	int ivsize = crypto_skcipher_ivsize(skcipher);
1212 
1213 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1214 		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1215 		   edesc->qm_sg_bytes);
1216 }
1217 
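/*
 * aead_encrypt_done - DPSECI completion callback for AEAD encryption: report
 * any CAAM status as an error, unmap and free the extended descriptor and
 * complete the crypto request.
 */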
1218 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1219 {
1220 	struct crypto_async_request *areq = cbk_ctx;
1221 	struct aead_request *req = container_of(areq, struct aead_request,
1222 						base);
1223 	struct caam_request *req_ctx = to_caam_req(areq);
1224 	struct aead_edesc *edesc = req_ctx->edesc;
1225 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1226 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1227 	int ecode = 0;
1228 
1229 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1230 
1231 	if (unlikely(status)) {
1232 		caam_qi2_strstatus(ctx->dev, status);
1233 		ecode = -EIO;
1234 	}
1235 
1236 	aead_unmap(ctx->dev, edesc, req);
1237 	qi_cache_free(edesc);
1238 	aead_request_complete(req, ecode);
1239 }
1240 
1241 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1242 {
1243 	struct crypto_async_request *areq = cbk_ctx;
1244 	struct aead_request *req = container_of(areq, struct aead_request,
1245 						base);
1246 	struct caam_request *req_ctx = to_caam_req(areq);
1247 	struct aead_edesc *edesc = req_ctx->edesc;
1248 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1249 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1250 	int ecode = 0;
1251 
1252 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1253 
1254 	if (unlikely(status)) {
1255 		caam_qi2_strstatus(ctx->dev, status);
1256 		/*
1257 		 * verify that the hw auth check passed; else return -EBADMSG
1258 		 */
1259 		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
1260 		     JRSTA_CCBERR_ERRID_ICVCHK)
1261 			ecode = -EBADMSG;
1262 		else
1263 			ecode = -EIO;
1264 	}
1265 
1266 	aead_unmap(ctx->dev, edesc, req);
1267 	qi_cache_free(edesc);
1268 	aead_request_complete(req, ecode);
1269 }
1270 
1271 static int aead_encrypt(struct aead_request *req)
1272 {
1273 	struct aead_edesc *edesc;
1274 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1275 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1276 	struct caam_request *caam_req = aead_request_ctx(req);
1277 	int ret;
1278 
1279 	/* allocate extended descriptor */
1280 	edesc = aead_edesc_alloc(req, true);
1281 	if (IS_ERR(edesc))
1282 		return PTR_ERR(edesc);
1283 
1284 	caam_req->flc = &ctx->flc[ENCRYPT];
1285 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1286 	caam_req->cbk = aead_encrypt_done;
1287 	caam_req->ctx = &req->base;
1288 	caam_req->edesc = edesc;
1289 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1290 	if (ret != -EINPROGRESS &&
1291 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1292 		aead_unmap(ctx->dev, edesc, req);
1293 		qi_cache_free(edesc);
1294 	}
1295 
1296 	return ret;
1297 }
1298 
1299 static int aead_decrypt(struct aead_request *req)
1300 {
1301 	struct aead_edesc *edesc;
1302 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1303 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1304 	struct caam_request *caam_req = aead_request_ctx(req);
1305 	int ret;
1306 
1307 	/* allocate extended descriptor */
1308 	edesc = aead_edesc_alloc(req, false);
1309 	if (IS_ERR(edesc))
1310 		return PTR_ERR(edesc);
1311 
1312 	caam_req->flc = &ctx->flc[DECRYPT];
1313 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1314 	caam_req->cbk = aead_decrypt_done;
1315 	caam_req->ctx = &req->base;
1316 	caam_req->edesc = edesc;
1317 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1318 	if (ret != -EINPROGRESS &&
1319 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1320 		aead_unmap(ctx->dev, edesc, req);
1321 		qi_cache_free(edesc);
1322 	}
1323 
1324 	return ret;
1325 }
1326 
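/*
 * The IPsec ESP header contributes at least 8 bytes (SPI + sequence number)
 * of associated data, so reject shorter assoclen values up front.
 */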
1327 static int ipsec_gcm_encrypt(struct aead_request *req)
1328 {
1329 	if (req->assoclen < 8)
1330 		return -EINVAL;
1331 
1332 	return aead_encrypt(req);
1333 }
1334 
1335 static int ipsec_gcm_decrypt(struct aead_request *req)
1336 {
1337 	if (req->assoclen < 8)
1338 		return -EINVAL;
1339 
1340 	return aead_decrypt(req);
1341 }
1342 
1343 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1344 {
1345 	struct crypto_async_request *areq = cbk_ctx;
1346 	struct skcipher_request *req = skcipher_request_cast(areq);
1347 	struct caam_request *req_ctx = to_caam_req(areq);
1348 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1349 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1350 	struct skcipher_edesc *edesc = req_ctx->edesc;
1351 	int ecode = 0;
1352 	int ivsize = crypto_skcipher_ivsize(skcipher);
1353 
1354 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1355 
1356 	if (unlikely(status)) {
1357 		caam_qi2_strstatus(ctx->dev, status);
1358 		ecode = -EIO;
1359 	}
1360 
1361 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1362 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1363 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1364 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1365 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1366 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1367 
1368 	skcipher_unmap(ctx->dev, edesc, req);
1369 
1370 	/*
1371 	 * The crypto API expects us to set the IV (req->iv) to the last
1372 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1373 	 * This is used e.g. by the CTS mode.
1374 	 */
1375 	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
1376 
1377 	qi_cache_free(edesc);
1378 	skcipher_request_complete(req, ecode);
1379 }
1380 
1381 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1382 {
1383 	struct crypto_async_request *areq = cbk_ctx;
1384 	struct skcipher_request *req = skcipher_request_cast(areq);
1385 	struct caam_request *req_ctx = to_caam_req(areq);
1386 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1387 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1388 	struct skcipher_edesc *edesc = req_ctx->edesc;
1389 	int ecode = 0;
1390 	int ivsize = crypto_skcipher_ivsize(skcipher);
1391 
1392 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1393 
1394 	if (unlikely(status)) {
1395 		caam_qi2_strstatus(ctx->dev, status);
1396 		ecode = -EIO;
1397 	}
1398 
1399 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1400 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1401 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1402 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1403 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1404 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1405 
1406 	skcipher_unmap(ctx->dev, edesc, req);
1407 
1408 	/*
1409 	 * The crypto API expects us to set the IV (req->iv) to the last
1410 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1411 	 * This is used e.g. by the CTS mode.
1412 	 */
1413 	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
1414 
1415 	qi_cache_free(edesc);
1416 	skcipher_request_complete(req, ecode);
1417 }
1418 
1419 static int skcipher_encrypt(struct skcipher_request *req)
1420 {
1421 	struct skcipher_edesc *edesc;
1422 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1423 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1424 	struct caam_request *caam_req = skcipher_request_ctx(req);
1425 	int ret;
1426 
1427 	/* allocate extended descriptor */
1428 	edesc = skcipher_edesc_alloc(req);
1429 	if (IS_ERR(edesc))
1430 		return PTR_ERR(edesc);
1431 
1432 	caam_req->flc = &ctx->flc[ENCRYPT];
1433 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1434 	caam_req->cbk = skcipher_encrypt_done;
1435 	caam_req->ctx = &req->base;
1436 	caam_req->edesc = edesc;
1437 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1438 	if (ret != -EINPROGRESS &&
1439 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1440 		skcipher_unmap(ctx->dev, edesc, req);
1441 		qi_cache_free(edesc);
1442 	}
1443 
1444 	return ret;
1445 }
1446 
1447 static int skcipher_decrypt(struct skcipher_request *req)
1448 {
1449 	struct skcipher_edesc *edesc;
1450 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1451 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1452 	struct caam_request *caam_req = skcipher_request_ctx(req);
1453 	int ret;
1454 
1455 	/* allocate extended descriptor */
1456 	edesc = skcipher_edesc_alloc(req);
1457 	if (IS_ERR(edesc))
1458 		return PTR_ERR(edesc);
1459 
1460 	caam_req->flc = &ctx->flc[DECRYPT];
1461 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1462 	caam_req->cbk = skcipher_decrypt_done;
1463 	caam_req->ctx = &req->base;
1464 	caam_req->edesc = edesc;
1465 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1466 	if (ret != -EINPROGRESS &&
1467 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1468 		skcipher_unmap(ctx->dev, edesc, req);
1469 		qi_cache_free(edesc);
1470 	}
1471 
1472 	return ret;
1473 }
1474 
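/*
 * caam_cra_init - common transform init: record the algorithm types, DMA-map
 * the per-session flow contexts and key buffer as one contiguous region and
 * derive the individual DMA addresses from it.
 */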
1475 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1476 			 bool uses_dkp)
1477 {
1478 	dma_addr_t dma_addr;
1479 	int i;
1480 
1481 	/* copy descriptor header template value */
1482 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1483 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1484 
1485 	ctx->dev = caam->dev;
1486 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1487 
1488 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1489 					offsetof(struct caam_ctx, flc_dma),
1490 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1491 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1492 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1493 		return -ENOMEM;
1494 	}
1495 
1496 	for (i = 0; i < NUM_OP; i++)
1497 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1498 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1499 
1500 	return 0;
1501 }
1502 
1503 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1504 {
1505 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1506 	struct caam_skcipher_alg *caam_alg =
1507 		container_of(alg, typeof(*caam_alg), skcipher);
1508 
1509 	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1510 	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1511 }
1512 
1513 static int caam_cra_init_aead(struct crypto_aead *tfm)
1514 {
1515 	struct aead_alg *alg = crypto_aead_alg(tfm);
1516 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1517 						      aead);
1518 
1519 	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1520 	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1521 			     !caam_alg->caam.nodkp);
1522 }
1523 
1524 static void caam_exit_common(struct caam_ctx *ctx)
1525 {
1526 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1527 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1528 			       DMA_ATTR_SKIP_CPU_SYNC);
1529 }
1530 
1531 static void caam_cra_exit(struct crypto_skcipher *tfm)
1532 {
1533 	caam_exit_common(crypto_skcipher_ctx(tfm));
1534 }
1535 
1536 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1537 {
1538 	caam_exit_common(crypto_aead_ctx(tfm));
1539 }
1540 
1541 static struct caam_skcipher_alg driver_algs[] = {
1542 	{
1543 		.skcipher = {
1544 			.base = {
1545 				.cra_name = "cbc(aes)",
1546 				.cra_driver_name = "cbc-aes-caam-qi2",
1547 				.cra_blocksize = AES_BLOCK_SIZE,
1548 			},
1549 			.setkey = skcipher_setkey,
1550 			.encrypt = skcipher_encrypt,
1551 			.decrypt = skcipher_decrypt,
1552 			.min_keysize = AES_MIN_KEY_SIZE,
1553 			.max_keysize = AES_MAX_KEY_SIZE,
1554 			.ivsize = AES_BLOCK_SIZE,
1555 		},
1556 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1557 	},
1558 	{
1559 		.skcipher = {
1560 			.base = {
1561 				.cra_name = "cbc(des3_ede)",
1562 				.cra_driver_name = "cbc-3des-caam-qi2",
1563 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1564 			},
1565 			.setkey = des3_skcipher_setkey,
1566 			.encrypt = skcipher_encrypt,
1567 			.decrypt = skcipher_decrypt,
1568 			.min_keysize = DES3_EDE_KEY_SIZE,
1569 			.max_keysize = DES3_EDE_KEY_SIZE,
1570 			.ivsize = DES3_EDE_BLOCK_SIZE,
1571 		},
1572 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1573 	},
1574 	{
1575 		.skcipher = {
1576 			.base = {
1577 				.cra_name = "cbc(des)",
1578 				.cra_driver_name = "cbc-des-caam-qi2",
1579 				.cra_blocksize = DES_BLOCK_SIZE,
1580 			},
1581 			.setkey = skcipher_setkey,
1582 			.encrypt = skcipher_encrypt,
1583 			.decrypt = skcipher_decrypt,
1584 			.min_keysize = DES_KEY_SIZE,
1585 			.max_keysize = DES_KEY_SIZE,
1586 			.ivsize = DES_BLOCK_SIZE,
1587 		},
1588 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1589 	},
1590 	{
1591 		.skcipher = {
1592 			.base = {
1593 				.cra_name = "ctr(aes)",
1594 				.cra_driver_name = "ctr-aes-caam-qi2",
1595 				.cra_blocksize = 1,
1596 			},
1597 			.setkey = skcipher_setkey,
1598 			.encrypt = skcipher_encrypt,
1599 			.decrypt = skcipher_decrypt,
1600 			.min_keysize = AES_MIN_KEY_SIZE,
1601 			.max_keysize = AES_MAX_KEY_SIZE,
1602 			.ivsize = AES_BLOCK_SIZE,
1603 			.chunksize = AES_BLOCK_SIZE,
1604 		},
1605 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1606 					OP_ALG_AAI_CTR_MOD128,
1607 	},
1608 	{
1609 		.skcipher = {
1610 			.base = {
1611 				.cra_name = "rfc3686(ctr(aes))",
1612 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1613 				.cra_blocksize = 1,
1614 			},
1615 			.setkey = skcipher_setkey,
1616 			.encrypt = skcipher_encrypt,
1617 			.decrypt = skcipher_decrypt,
1618 			.min_keysize = AES_MIN_KEY_SIZE +
1619 				       CTR_RFC3686_NONCE_SIZE,
1620 			.max_keysize = AES_MAX_KEY_SIZE +
1621 				       CTR_RFC3686_NONCE_SIZE,
1622 			.ivsize = CTR_RFC3686_IV_SIZE,
1623 			.chunksize = AES_BLOCK_SIZE,
1624 		},
1625 		.caam = {
1626 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1627 					   OP_ALG_AAI_CTR_MOD128,
1628 			.rfc3686 = true,
1629 		},
1630 	},
1631 	{
1632 		.skcipher = {
1633 			.base = {
1634 				.cra_name = "xts(aes)",
1635 				.cra_driver_name = "xts-aes-caam-qi2",
1636 				.cra_blocksize = AES_BLOCK_SIZE,
1637 			},
1638 			.setkey = xts_skcipher_setkey,
1639 			.encrypt = skcipher_encrypt,
1640 			.decrypt = skcipher_decrypt,
1641 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1642 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1643 			.ivsize = AES_BLOCK_SIZE,
1644 		},
1645 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1646 	},
1647 	{
1648 		.skcipher = {
1649 			.base = {
1650 				.cra_name = "chacha20",
1651 				.cra_driver_name = "chacha20-caam-qi2",
1652 				.cra_blocksize = 1,
1653 			},
1654 			.setkey = skcipher_setkey,
1655 			.encrypt = skcipher_encrypt,
1656 			.decrypt = skcipher_decrypt,
1657 			.min_keysize = CHACHA_KEY_SIZE,
1658 			.max_keysize = CHACHA_KEY_SIZE,
1659 			.ivsize = CHACHA_IV_SIZE,
1660 		},
1661 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1662 	},
1663 };
1664 
1665 static struct caam_aead_alg driver_aeads[] = {
1666 	{
1667 		.aead = {
1668 			.base = {
1669 				.cra_name = "rfc4106(gcm(aes))",
1670 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1671 				.cra_blocksize = 1,
1672 			},
1673 			.setkey = rfc4106_setkey,
1674 			.setauthsize = rfc4106_setauthsize,
1675 			.encrypt = ipsec_gcm_encrypt,
1676 			.decrypt = ipsec_gcm_decrypt,
1677 			.ivsize = 8,
1678 			.maxauthsize = AES_BLOCK_SIZE,
1679 		},
1680 		.caam = {
1681 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1682 			.nodkp = true,
1683 		},
1684 	},
1685 	{
1686 		.aead = {
1687 			.base = {
1688 				.cra_name = "rfc4543(gcm(aes))",
1689 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1690 				.cra_blocksize = 1,
1691 			},
1692 			.setkey = rfc4543_setkey,
1693 			.setauthsize = rfc4543_setauthsize,
1694 			.encrypt = ipsec_gcm_encrypt,
1695 			.decrypt = ipsec_gcm_decrypt,
1696 			.ivsize = 8,
1697 			.maxauthsize = AES_BLOCK_SIZE,
1698 		},
1699 		.caam = {
1700 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1701 			.nodkp = true,
1702 		},
1703 	},
1704 	/* Galois Counter Mode */
1705 	{
1706 		.aead = {
1707 			.base = {
1708 				.cra_name = "gcm(aes)",
1709 				.cra_driver_name = "gcm-aes-caam-qi2",
1710 				.cra_blocksize = 1,
1711 			},
1712 			.setkey = gcm_setkey,
1713 			.setauthsize = gcm_setauthsize,
1714 			.encrypt = aead_encrypt,
1715 			.decrypt = aead_decrypt,
1716 			.ivsize = 12,
1717 			.maxauthsize = AES_BLOCK_SIZE,
1718 		},
1719 		.caam = {
1720 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1721 			.nodkp = true,
1722 		}
1723 	},
1724 	/* single-pass ipsec_esp descriptor */
1725 	{
1726 		.aead = {
1727 			.base = {
1728 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1729 				.cra_driver_name = "authenc-hmac-md5-"
1730 						   "cbc-aes-caam-qi2",
1731 				.cra_blocksize = AES_BLOCK_SIZE,
1732 			},
1733 			.setkey = aead_setkey,
1734 			.setauthsize = aead_setauthsize,
1735 			.encrypt = aead_encrypt,
1736 			.decrypt = aead_decrypt,
1737 			.ivsize = AES_BLOCK_SIZE,
1738 			.maxauthsize = MD5_DIGEST_SIZE,
1739 		},
1740 		.caam = {
1741 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1742 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1743 					   OP_ALG_AAI_HMAC_PRECOMP,
1744 		}
1745 	},
1746 	{
1747 		.aead = {
1748 			.base = {
1749 				.cra_name = "echainiv(authenc(hmac(md5),"
1750 					    "cbc(aes)))",
1751 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1752 						   "cbc-aes-caam-qi2",
1753 				.cra_blocksize = AES_BLOCK_SIZE,
1754 			},
1755 			.setkey = aead_setkey,
1756 			.setauthsize = aead_setauthsize,
1757 			.encrypt = aead_encrypt,
1758 			.decrypt = aead_decrypt,
1759 			.ivsize = AES_BLOCK_SIZE,
1760 			.maxauthsize = MD5_DIGEST_SIZE,
1761 		},
1762 		.caam = {
1763 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1764 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1765 					   OP_ALG_AAI_HMAC_PRECOMP,
1766 			.geniv = true,
1767 		}
1768 	},
1769 	{
1770 		.aead = {
1771 			.base = {
1772 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1773 				.cra_driver_name = "authenc-hmac-sha1-"
1774 						   "cbc-aes-caam-qi2",
1775 				.cra_blocksize = AES_BLOCK_SIZE,
1776 			},
1777 			.setkey = aead_setkey,
1778 			.setauthsize = aead_setauthsize,
1779 			.encrypt = aead_encrypt,
1780 			.decrypt = aead_decrypt,
1781 			.ivsize = AES_BLOCK_SIZE,
1782 			.maxauthsize = SHA1_DIGEST_SIZE,
1783 		},
1784 		.caam = {
1785 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1786 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1787 					   OP_ALG_AAI_HMAC_PRECOMP,
1788 		}
1789 	},
1790 	{
1791 		.aead = {
1792 			.base = {
1793 				.cra_name = "echainiv(authenc(hmac(sha1),"
1794 					    "cbc(aes)))",
1795 				.cra_driver_name = "echainiv-authenc-"
1796 						   "hmac-sha1-cbc-aes-caam-qi2",
1797 				.cra_blocksize = AES_BLOCK_SIZE,
1798 			},
1799 			.setkey = aead_setkey,
1800 			.setauthsize = aead_setauthsize,
1801 			.encrypt = aead_encrypt,
1802 			.decrypt = aead_decrypt,
1803 			.ivsize = AES_BLOCK_SIZE,
1804 			.maxauthsize = SHA1_DIGEST_SIZE,
1805 		},
1806 		.caam = {
1807 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1808 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1809 					   OP_ALG_AAI_HMAC_PRECOMP,
1810 			.geniv = true,
1811 		},
1812 	},
1813 	{
1814 		.aead = {
1815 			.base = {
1816 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1817 				.cra_driver_name = "authenc-hmac-sha224-"
1818 						   "cbc-aes-caam-qi2",
1819 				.cra_blocksize = AES_BLOCK_SIZE,
1820 			},
1821 			.setkey = aead_setkey,
1822 			.setauthsize = aead_setauthsize,
1823 			.encrypt = aead_encrypt,
1824 			.decrypt = aead_decrypt,
1825 			.ivsize = AES_BLOCK_SIZE,
1826 			.maxauthsize = SHA224_DIGEST_SIZE,
1827 		},
1828 		.caam = {
1829 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1830 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1831 					   OP_ALG_AAI_HMAC_PRECOMP,
1832 		}
1833 	},
1834 	{
1835 		.aead = {
1836 			.base = {
1837 				.cra_name = "echainiv(authenc(hmac(sha224),"
1838 					    "cbc(aes)))",
1839 				.cra_driver_name = "echainiv-authenc-"
1840 						   "hmac-sha224-cbc-aes-caam-qi2",
1841 				.cra_blocksize = AES_BLOCK_SIZE,
1842 			},
1843 			.setkey = aead_setkey,
1844 			.setauthsize = aead_setauthsize,
1845 			.encrypt = aead_encrypt,
1846 			.decrypt = aead_decrypt,
1847 			.ivsize = AES_BLOCK_SIZE,
1848 			.maxauthsize = SHA224_DIGEST_SIZE,
1849 		},
1850 		.caam = {
1851 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1852 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1853 					   OP_ALG_AAI_HMAC_PRECOMP,
1854 			.geniv = true,
1855 		}
1856 	},
1857 	{
1858 		.aead = {
1859 			.base = {
1860 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1861 				.cra_driver_name = "authenc-hmac-sha256-"
1862 						   "cbc-aes-caam-qi2",
1863 				.cra_blocksize = AES_BLOCK_SIZE,
1864 			},
1865 			.setkey = aead_setkey,
1866 			.setauthsize = aead_setauthsize,
1867 			.encrypt = aead_encrypt,
1868 			.decrypt = aead_decrypt,
1869 			.ivsize = AES_BLOCK_SIZE,
1870 			.maxauthsize = SHA256_DIGEST_SIZE,
1871 		},
1872 		.caam = {
1873 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1874 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1875 					   OP_ALG_AAI_HMAC_PRECOMP,
1876 		}
1877 	},
1878 	{
1879 		.aead = {
1880 			.base = {
1881 				.cra_name = "echainiv(authenc(hmac(sha256),"
1882 					    "cbc(aes)))",
1883 				.cra_driver_name = "echainiv-authenc-"
1884 						   "hmac-sha256-cbc-aes-"
1885 						   "caam-qi2",
1886 				.cra_blocksize = AES_BLOCK_SIZE,
1887 			},
1888 			.setkey = aead_setkey,
1889 			.setauthsize = aead_setauthsize,
1890 			.encrypt = aead_encrypt,
1891 			.decrypt = aead_decrypt,
1892 			.ivsize = AES_BLOCK_SIZE,
1893 			.maxauthsize = SHA256_DIGEST_SIZE,
1894 		},
1895 		.caam = {
1896 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1897 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1898 					   OP_ALG_AAI_HMAC_PRECOMP,
1899 			.geniv = true,
1900 		}
1901 	},
1902 	{
1903 		.aead = {
1904 			.base = {
1905 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
1906 				.cra_driver_name = "authenc-hmac-sha384-"
1907 						   "cbc-aes-caam-qi2",
1908 				.cra_blocksize = AES_BLOCK_SIZE,
1909 			},
1910 			.setkey = aead_setkey,
1911 			.setauthsize = aead_setauthsize,
1912 			.encrypt = aead_encrypt,
1913 			.decrypt = aead_decrypt,
1914 			.ivsize = AES_BLOCK_SIZE,
1915 			.maxauthsize = SHA384_DIGEST_SIZE,
1916 		},
1917 		.caam = {
1918 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1919 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1920 					   OP_ALG_AAI_HMAC_PRECOMP,
1921 		}
1922 	},
1923 	{
1924 		.aead = {
1925 			.base = {
1926 				.cra_name = "echainiv(authenc(hmac(sha384),"
1927 					    "cbc(aes)))",
1928 				.cra_driver_name = "echainiv-authenc-"
1929 						   "hmac-sha384-cbc-aes-"
1930 						   "caam-qi2",
1931 				.cra_blocksize = AES_BLOCK_SIZE,
1932 			},
1933 			.setkey = aead_setkey,
1934 			.setauthsize = aead_setauthsize,
1935 			.encrypt = aead_encrypt,
1936 			.decrypt = aead_decrypt,
1937 			.ivsize = AES_BLOCK_SIZE,
1938 			.maxauthsize = SHA384_DIGEST_SIZE,
1939 		},
1940 		.caam = {
1941 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1942 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1943 					   OP_ALG_AAI_HMAC_PRECOMP,
1944 			.geniv = true,
1945 		}
1946 	},
1947 	{
1948 		.aead = {
1949 			.base = {
1950 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
1951 				.cra_driver_name = "authenc-hmac-sha512-"
1952 						   "cbc-aes-caam-qi2",
1953 				.cra_blocksize = AES_BLOCK_SIZE,
1954 			},
1955 			.setkey = aead_setkey,
1956 			.setauthsize = aead_setauthsize,
1957 			.encrypt = aead_encrypt,
1958 			.decrypt = aead_decrypt,
1959 			.ivsize = AES_BLOCK_SIZE,
1960 			.maxauthsize = SHA512_DIGEST_SIZE,
1961 		},
1962 		.caam = {
1963 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1964 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1965 					   OP_ALG_AAI_HMAC_PRECOMP,
1966 		}
1967 	},
1968 	{
1969 		.aead = {
1970 			.base = {
1971 				.cra_name = "echainiv(authenc(hmac(sha512),"
1972 					    "cbc(aes)))",
1973 				.cra_driver_name = "echainiv-authenc-"
1974 						   "hmac-sha512-cbc-aes-"
1975 						   "caam-qi2",
1976 				.cra_blocksize = AES_BLOCK_SIZE,
1977 			},
1978 			.setkey = aead_setkey,
1979 			.setauthsize = aead_setauthsize,
1980 			.encrypt = aead_encrypt,
1981 			.decrypt = aead_decrypt,
1982 			.ivsize = AES_BLOCK_SIZE,
1983 			.maxauthsize = SHA512_DIGEST_SIZE,
1984 		},
1985 		.caam = {
1986 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1987 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1988 					   OP_ALG_AAI_HMAC_PRECOMP,
1989 			.geniv = true,
1990 		}
1991 	},
1992 	{
1993 		.aead = {
1994 			.base = {
1995 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1996 				.cra_driver_name = "authenc-hmac-md5-"
1997 						   "cbc-des3_ede-caam-qi2",
1998 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1999 			},
2000 			.setkey = des3_aead_setkey,
2001 			.setauthsize = aead_setauthsize,
2002 			.encrypt = aead_encrypt,
2003 			.decrypt = aead_decrypt,
2004 			.ivsize = DES3_EDE_BLOCK_SIZE,
2005 			.maxauthsize = MD5_DIGEST_SIZE,
2006 		},
2007 		.caam = {
2008 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2009 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2010 					   OP_ALG_AAI_HMAC_PRECOMP,
2011 		}
2012 	},
2013 	{
2014 		.aead = {
2015 			.base = {
2016 				.cra_name = "echainiv(authenc(hmac(md5),"
2017 					    "cbc(des3_ede)))",
2018 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2019 						   "cbc-des3_ede-caam-qi2",
2020 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2021 			},
2022 			.setkey = des3_aead_setkey,
2023 			.setauthsize = aead_setauthsize,
2024 			.encrypt = aead_encrypt,
2025 			.decrypt = aead_decrypt,
2026 			.ivsize = DES3_EDE_BLOCK_SIZE,
2027 			.maxauthsize = MD5_DIGEST_SIZE,
2028 		},
2029 		.caam = {
2030 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2031 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2032 					   OP_ALG_AAI_HMAC_PRECOMP,
2033 			.geniv = true,
2034 		}
2035 	},
2036 	{
2037 		.aead = {
2038 			.base = {
2039 				.cra_name = "authenc(hmac(sha1),"
2040 					    "cbc(des3_ede))",
2041 				.cra_driver_name = "authenc-hmac-sha1-"
2042 						   "cbc-des3_ede-caam-qi2",
2043 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2044 			},
2045 			.setkey = des3_aead_setkey,
2046 			.setauthsize = aead_setauthsize,
2047 			.encrypt = aead_encrypt,
2048 			.decrypt = aead_decrypt,
2049 			.ivsize = DES3_EDE_BLOCK_SIZE,
2050 			.maxauthsize = SHA1_DIGEST_SIZE,
2051 		},
2052 		.caam = {
2053 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2054 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2055 					   OP_ALG_AAI_HMAC_PRECOMP,
2056 		},
2057 	},
2058 	{
2059 		.aead = {
2060 			.base = {
2061 				.cra_name = "echainiv(authenc(hmac(sha1),"
2062 					    "cbc(des3_ede)))",
2063 				.cra_driver_name = "echainiv-authenc-"
2064 						   "hmac-sha1-"
2065 						   "cbc-des3_ede-caam-qi2",
2066 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2067 			},
2068 			.setkey = des3_aead_setkey,
2069 			.setauthsize = aead_setauthsize,
2070 			.encrypt = aead_encrypt,
2071 			.decrypt = aead_decrypt,
2072 			.ivsize = DES3_EDE_BLOCK_SIZE,
2073 			.maxauthsize = SHA1_DIGEST_SIZE,
2074 		},
2075 		.caam = {
2076 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2077 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2078 					   OP_ALG_AAI_HMAC_PRECOMP,
2079 			.geniv = true,
2080 		}
2081 	},
2082 	{
2083 		.aead = {
2084 			.base = {
2085 				.cra_name = "authenc(hmac(sha224),"
2086 					    "cbc(des3_ede))",
2087 				.cra_driver_name = "authenc-hmac-sha224-"
2088 						   "cbc-des3_ede-caam-qi2",
2089 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2090 			},
2091 			.setkey = des3_aead_setkey,
2092 			.setauthsize = aead_setauthsize,
2093 			.encrypt = aead_encrypt,
2094 			.decrypt = aead_decrypt,
2095 			.ivsize = DES3_EDE_BLOCK_SIZE,
2096 			.maxauthsize = SHA224_DIGEST_SIZE,
2097 		},
2098 		.caam = {
2099 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2100 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2101 					   OP_ALG_AAI_HMAC_PRECOMP,
2102 		},
2103 	},
2104 	{
2105 		.aead = {
2106 			.base = {
2107 				.cra_name = "echainiv(authenc(hmac(sha224),"
2108 					    "cbc(des3_ede)))",
2109 				.cra_driver_name = "echainiv-authenc-"
2110 						   "hmac-sha224-"
2111 						   "cbc-des3_ede-caam-qi2",
2112 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2113 			},
2114 			.setkey = des3_aead_setkey,
2115 			.setauthsize = aead_setauthsize,
2116 			.encrypt = aead_encrypt,
2117 			.decrypt = aead_decrypt,
2118 			.ivsize = DES3_EDE_BLOCK_SIZE,
2119 			.maxauthsize = SHA224_DIGEST_SIZE,
2120 		},
2121 		.caam = {
2122 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2123 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2124 					   OP_ALG_AAI_HMAC_PRECOMP,
2125 			.geniv = true,
2126 		}
2127 	},
2128 	{
2129 		.aead = {
2130 			.base = {
2131 				.cra_name = "authenc(hmac(sha256),"
2132 					    "cbc(des3_ede))",
2133 				.cra_driver_name = "authenc-hmac-sha256-"
2134 						   "cbc-des3_ede-caam-qi2",
2135 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2136 			},
2137 			.setkey = des3_aead_setkey,
2138 			.setauthsize = aead_setauthsize,
2139 			.encrypt = aead_encrypt,
2140 			.decrypt = aead_decrypt,
2141 			.ivsize = DES3_EDE_BLOCK_SIZE,
2142 			.maxauthsize = SHA256_DIGEST_SIZE,
2143 		},
2144 		.caam = {
2145 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2146 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2147 					   OP_ALG_AAI_HMAC_PRECOMP,
2148 		},
2149 	},
2150 	{
2151 		.aead = {
2152 			.base = {
2153 				.cra_name = "echainiv(authenc(hmac(sha256),"
2154 					    "cbc(des3_ede)))",
2155 				.cra_driver_name = "echainiv-authenc-"
2156 						   "hmac-sha256-"
2157 						   "cbc-des3_ede-caam-qi2",
2158 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2159 			},
2160 			.setkey = des3_aead_setkey,
2161 			.setauthsize = aead_setauthsize,
2162 			.encrypt = aead_encrypt,
2163 			.decrypt = aead_decrypt,
2164 			.ivsize = DES3_EDE_BLOCK_SIZE,
2165 			.maxauthsize = SHA256_DIGEST_SIZE,
2166 		},
2167 		.caam = {
2168 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2169 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2170 					   OP_ALG_AAI_HMAC_PRECOMP,
2171 			.geniv = true,
2172 		}
2173 	},
2174 	{
2175 		.aead = {
2176 			.base = {
2177 				.cra_name = "authenc(hmac(sha384),"
2178 					    "cbc(des3_ede))",
2179 				.cra_driver_name = "authenc-hmac-sha384-"
2180 						   "cbc-des3_ede-caam-qi2",
2181 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2182 			},
2183 			.setkey = des3_aead_setkey,
2184 			.setauthsize = aead_setauthsize,
2185 			.encrypt = aead_encrypt,
2186 			.decrypt = aead_decrypt,
2187 			.ivsize = DES3_EDE_BLOCK_SIZE,
2188 			.maxauthsize = SHA384_DIGEST_SIZE,
2189 		},
2190 		.caam = {
2191 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2192 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2193 					   OP_ALG_AAI_HMAC_PRECOMP,
2194 		},
2195 	},
2196 	{
2197 		.aead = {
2198 			.base = {
2199 				.cra_name = "echainiv(authenc(hmac(sha384),"
2200 					    "cbc(des3_ede)))",
2201 				.cra_driver_name = "echainiv-authenc-"
2202 						   "hmac-sha384-"
2203 						   "cbc-des3_ede-caam-qi2",
2204 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2205 			},
2206 			.setkey = des3_aead_setkey,
2207 			.setauthsize = aead_setauthsize,
2208 			.encrypt = aead_encrypt,
2209 			.decrypt = aead_decrypt,
2210 			.ivsize = DES3_EDE_BLOCK_SIZE,
2211 			.maxauthsize = SHA384_DIGEST_SIZE,
2212 		},
2213 		.caam = {
2214 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2215 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2216 					   OP_ALG_AAI_HMAC_PRECOMP,
2217 			.geniv = true,
2218 		}
2219 	},
2220 	{
2221 		.aead = {
2222 			.base = {
2223 				.cra_name = "authenc(hmac(sha512),"
2224 					    "cbc(des3_ede))",
2225 				.cra_driver_name = "authenc-hmac-sha512-"
2226 						   "cbc-des3_ede-caam-qi2",
2227 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2228 			},
2229 			.setkey = des3_aead_setkey,
2230 			.setauthsize = aead_setauthsize,
2231 			.encrypt = aead_encrypt,
2232 			.decrypt = aead_decrypt,
2233 			.ivsize = DES3_EDE_BLOCK_SIZE,
2234 			.maxauthsize = SHA512_DIGEST_SIZE,
2235 		},
2236 		.caam = {
2237 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2238 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2239 					   OP_ALG_AAI_HMAC_PRECOMP,
2240 		},
2241 	},
2242 	{
2243 		.aead = {
2244 			.base = {
2245 				.cra_name = "echainiv(authenc(hmac(sha512),"
2246 					    "cbc(des3_ede)))",
2247 				.cra_driver_name = "echainiv-authenc-"
2248 						   "hmac-sha512-"
2249 						   "cbc-des3_ede-caam-qi2",
2250 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2251 			},
2252 			.setkey = des3_aead_setkey,
2253 			.setauthsize = aead_setauthsize,
2254 			.encrypt = aead_encrypt,
2255 			.decrypt = aead_decrypt,
2256 			.ivsize = DES3_EDE_BLOCK_SIZE,
2257 			.maxauthsize = SHA512_DIGEST_SIZE,
2258 		},
2259 		.caam = {
2260 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2261 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2262 					   OP_ALG_AAI_HMAC_PRECOMP,
2263 			.geniv = true,
2264 		}
2265 	},
2266 	{
2267 		.aead = {
2268 			.base = {
2269 				.cra_name = "authenc(hmac(md5),cbc(des))",
2270 				.cra_driver_name = "authenc-hmac-md5-"
2271 						   "cbc-des-caam-qi2",
2272 				.cra_blocksize = DES_BLOCK_SIZE,
2273 			},
2274 			.setkey = aead_setkey,
2275 			.setauthsize = aead_setauthsize,
2276 			.encrypt = aead_encrypt,
2277 			.decrypt = aead_decrypt,
2278 			.ivsize = DES_BLOCK_SIZE,
2279 			.maxauthsize = MD5_DIGEST_SIZE,
2280 		},
2281 		.caam = {
2282 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2283 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2284 					   OP_ALG_AAI_HMAC_PRECOMP,
2285 		},
2286 	},
2287 	{
2288 		.aead = {
2289 			.base = {
2290 				.cra_name = "echainiv(authenc(hmac(md5),"
2291 					    "cbc(des)))",
2292 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2293 						   "cbc-des-caam-qi2",
2294 				.cra_blocksize = DES_BLOCK_SIZE,
2295 			},
2296 			.setkey = aead_setkey,
2297 			.setauthsize = aead_setauthsize,
2298 			.encrypt = aead_encrypt,
2299 			.decrypt = aead_decrypt,
2300 			.ivsize = DES_BLOCK_SIZE,
2301 			.maxauthsize = MD5_DIGEST_SIZE,
2302 		},
2303 		.caam = {
2304 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2305 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2306 					   OP_ALG_AAI_HMAC_PRECOMP,
2307 			.geniv = true,
2308 		}
2309 	},
2310 	{
2311 		.aead = {
2312 			.base = {
2313 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2314 				.cra_driver_name = "authenc-hmac-sha1-"
2315 						   "cbc-des-caam-qi2",
2316 				.cra_blocksize = DES_BLOCK_SIZE,
2317 			},
2318 			.setkey = aead_setkey,
2319 			.setauthsize = aead_setauthsize,
2320 			.encrypt = aead_encrypt,
2321 			.decrypt = aead_decrypt,
2322 			.ivsize = DES_BLOCK_SIZE,
2323 			.maxauthsize = SHA1_DIGEST_SIZE,
2324 		},
2325 		.caam = {
2326 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2327 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2328 					   OP_ALG_AAI_HMAC_PRECOMP,
2329 		},
2330 	},
2331 	{
2332 		.aead = {
2333 			.base = {
2334 				.cra_name = "echainiv(authenc(hmac(sha1),"
2335 					    "cbc(des)))",
2336 				.cra_driver_name = "echainiv-authenc-"
2337 						   "hmac-sha1-cbc-des-caam-qi2",
2338 				.cra_blocksize = DES_BLOCK_SIZE,
2339 			},
2340 			.setkey = aead_setkey,
2341 			.setauthsize = aead_setauthsize,
2342 			.encrypt = aead_encrypt,
2343 			.decrypt = aead_decrypt,
2344 			.ivsize = DES_BLOCK_SIZE,
2345 			.maxauthsize = SHA1_DIGEST_SIZE,
2346 		},
2347 		.caam = {
2348 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2349 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2350 					   OP_ALG_AAI_HMAC_PRECOMP,
2351 			.geniv = true,
2352 		}
2353 	},
2354 	{
2355 		.aead = {
2356 			.base = {
2357 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2358 				.cra_driver_name = "authenc-hmac-sha224-"
2359 						   "cbc-des-caam-qi2",
2360 				.cra_blocksize = DES_BLOCK_SIZE,
2361 			},
2362 			.setkey = aead_setkey,
2363 			.setauthsize = aead_setauthsize,
2364 			.encrypt = aead_encrypt,
2365 			.decrypt = aead_decrypt,
2366 			.ivsize = DES_BLOCK_SIZE,
2367 			.maxauthsize = SHA224_DIGEST_SIZE,
2368 		},
2369 		.caam = {
2370 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2371 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2372 					   OP_ALG_AAI_HMAC_PRECOMP,
2373 		},
2374 	},
2375 	{
2376 		.aead = {
2377 			.base = {
2378 				.cra_name = "echainiv(authenc(hmac(sha224),"
2379 					    "cbc(des)))",
2380 				.cra_driver_name = "echainiv-authenc-"
2381 						   "hmac-sha224-cbc-des-"
2382 						   "caam-qi2",
2383 				.cra_blocksize = DES_BLOCK_SIZE,
2384 			},
2385 			.setkey = aead_setkey,
2386 			.setauthsize = aead_setauthsize,
2387 			.encrypt = aead_encrypt,
2388 			.decrypt = aead_decrypt,
2389 			.ivsize = DES_BLOCK_SIZE,
2390 			.maxauthsize = SHA224_DIGEST_SIZE,
2391 		},
2392 		.caam = {
2393 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2394 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2395 					   OP_ALG_AAI_HMAC_PRECOMP,
2396 			.geniv = true,
2397 		}
2398 	},
2399 	{
2400 		.aead = {
2401 			.base = {
2402 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2403 				.cra_driver_name = "authenc-hmac-sha256-"
2404 						   "cbc-des-caam-qi2",
2405 				.cra_blocksize = DES_BLOCK_SIZE,
2406 			},
2407 			.setkey = aead_setkey,
2408 			.setauthsize = aead_setauthsize,
2409 			.encrypt = aead_encrypt,
2410 			.decrypt = aead_decrypt,
2411 			.ivsize = DES_BLOCK_SIZE,
2412 			.maxauthsize = SHA256_DIGEST_SIZE,
2413 		},
2414 		.caam = {
2415 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2416 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2417 					   OP_ALG_AAI_HMAC_PRECOMP,
2418 		},
2419 	},
2420 	{
2421 		.aead = {
2422 			.base = {
2423 				.cra_name = "echainiv(authenc(hmac(sha256),"
2424 					    "cbc(des)))",
2425 				.cra_driver_name = "echainiv-authenc-"
2426 						   "hmac-sha256-cbc-des-"
2427 						   "caam-qi2",
2428 				.cra_blocksize = DES_BLOCK_SIZE,
2429 			},
2430 			.setkey = aead_setkey,
2431 			.setauthsize = aead_setauthsize,
2432 			.encrypt = aead_encrypt,
2433 			.decrypt = aead_decrypt,
2434 			.ivsize = DES_BLOCK_SIZE,
2435 			.maxauthsize = SHA256_DIGEST_SIZE,
2436 		},
2437 		.caam = {
2438 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2439 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2440 					   OP_ALG_AAI_HMAC_PRECOMP,
2441 			.geniv = true,
2442 		},
2443 	},
2444 	{
2445 		.aead = {
2446 			.base = {
2447 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2448 				.cra_driver_name = "authenc-hmac-sha384-"
2449 						   "cbc-des-caam-qi2",
2450 				.cra_blocksize = DES_BLOCK_SIZE,
2451 			},
2452 			.setkey = aead_setkey,
2453 			.setauthsize = aead_setauthsize,
2454 			.encrypt = aead_encrypt,
2455 			.decrypt = aead_decrypt,
2456 			.ivsize = DES_BLOCK_SIZE,
2457 			.maxauthsize = SHA384_DIGEST_SIZE,
2458 		},
2459 		.caam = {
2460 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2461 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2462 					   OP_ALG_AAI_HMAC_PRECOMP,
2463 		},
2464 	},
2465 	{
2466 		.aead = {
2467 			.base = {
2468 				.cra_name = "echainiv(authenc(hmac(sha384),"
2469 					    "cbc(des)))",
2470 				.cra_driver_name = "echainiv-authenc-"
2471 						   "hmac-sha384-cbc-des-"
2472 						   "caam-qi2",
2473 				.cra_blocksize = DES_BLOCK_SIZE,
2474 			},
2475 			.setkey = aead_setkey,
2476 			.setauthsize = aead_setauthsize,
2477 			.encrypt = aead_encrypt,
2478 			.decrypt = aead_decrypt,
2479 			.ivsize = DES_BLOCK_SIZE,
2480 			.maxauthsize = SHA384_DIGEST_SIZE,
2481 		},
2482 		.caam = {
2483 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2484 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2485 					   OP_ALG_AAI_HMAC_PRECOMP,
2486 			.geniv = true,
2487 		}
2488 	},
2489 	{
2490 		.aead = {
2491 			.base = {
2492 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2493 				.cra_driver_name = "authenc-hmac-sha512-"
2494 						   "cbc-des-caam-qi2",
2495 				.cra_blocksize = DES_BLOCK_SIZE,
2496 			},
2497 			.setkey = aead_setkey,
2498 			.setauthsize = aead_setauthsize,
2499 			.encrypt = aead_encrypt,
2500 			.decrypt = aead_decrypt,
2501 			.ivsize = DES_BLOCK_SIZE,
2502 			.maxauthsize = SHA512_DIGEST_SIZE,
2503 		},
2504 		.caam = {
2505 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2506 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2507 					   OP_ALG_AAI_HMAC_PRECOMP,
2508 		}
2509 	},
2510 	{
2511 		.aead = {
2512 			.base = {
2513 				.cra_name = "echainiv(authenc(hmac(sha512),"
2514 					    "cbc(des)))",
2515 				.cra_driver_name = "echainiv-authenc-"
2516 						   "hmac-sha512-cbc-des-"
2517 						   "caam-qi2",
2518 				.cra_blocksize = DES_BLOCK_SIZE,
2519 			},
2520 			.setkey = aead_setkey,
2521 			.setauthsize = aead_setauthsize,
2522 			.encrypt = aead_encrypt,
2523 			.decrypt = aead_decrypt,
2524 			.ivsize = DES_BLOCK_SIZE,
2525 			.maxauthsize = SHA512_DIGEST_SIZE,
2526 		},
2527 		.caam = {
2528 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2529 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2530 					   OP_ALG_AAI_HMAC_PRECOMP,
2531 			.geniv = true,
2532 		}
2533 	},
2534 	{
2535 		.aead = {
2536 			.base = {
2537 				.cra_name = "authenc(hmac(md5),"
2538 					    "rfc3686(ctr(aes)))",
2539 				.cra_driver_name = "authenc-hmac-md5-"
2540 						   "rfc3686-ctr-aes-caam-qi2",
2541 				.cra_blocksize = 1,
2542 			},
2543 			.setkey = aead_setkey,
2544 			.setauthsize = aead_setauthsize,
2545 			.encrypt = aead_encrypt,
2546 			.decrypt = aead_decrypt,
2547 			.ivsize = CTR_RFC3686_IV_SIZE,
2548 			.maxauthsize = MD5_DIGEST_SIZE,
2549 		},
2550 		.caam = {
2551 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2552 					   OP_ALG_AAI_CTR_MOD128,
2553 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2554 					   OP_ALG_AAI_HMAC_PRECOMP,
2555 			.rfc3686 = true,
2556 		},
2557 	},
2558 	{
2559 		.aead = {
2560 			.base = {
2561 				.cra_name = "seqiv(authenc("
2562 					    "hmac(md5),rfc3686(ctr(aes))))",
2563 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2564 						   "rfc3686-ctr-aes-caam-qi2",
2565 				.cra_blocksize = 1,
2566 			},
2567 			.setkey = aead_setkey,
2568 			.setauthsize = aead_setauthsize,
2569 			.encrypt = aead_encrypt,
2570 			.decrypt = aead_decrypt,
2571 			.ivsize = CTR_RFC3686_IV_SIZE,
2572 			.maxauthsize = MD5_DIGEST_SIZE,
2573 		},
2574 		.caam = {
2575 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2576 					   OP_ALG_AAI_CTR_MOD128,
2577 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2578 					   OP_ALG_AAI_HMAC_PRECOMP,
2579 			.rfc3686 = true,
2580 			.geniv = true,
2581 		},
2582 	},
2583 	{
2584 		.aead = {
2585 			.base = {
2586 				.cra_name = "authenc(hmac(sha1),"
2587 					    "rfc3686(ctr(aes)))",
2588 				.cra_driver_name = "authenc-hmac-sha1-"
2589 						   "rfc3686-ctr-aes-caam-qi2",
2590 				.cra_blocksize = 1,
2591 			},
2592 			.setkey = aead_setkey,
2593 			.setauthsize = aead_setauthsize,
2594 			.encrypt = aead_encrypt,
2595 			.decrypt = aead_decrypt,
2596 			.ivsize = CTR_RFC3686_IV_SIZE,
2597 			.maxauthsize = SHA1_DIGEST_SIZE,
2598 		},
2599 		.caam = {
2600 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2601 					   OP_ALG_AAI_CTR_MOD128,
2602 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2603 					   OP_ALG_AAI_HMAC_PRECOMP,
2604 			.rfc3686 = true,
2605 		},
2606 	},
2607 	{
2608 		.aead = {
2609 			.base = {
2610 				.cra_name = "seqiv(authenc("
2611 					    "hmac(sha1),rfc3686(ctr(aes))))",
2612 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2613 						   "rfc3686-ctr-aes-caam-qi2",
2614 				.cra_blocksize = 1,
2615 			},
2616 			.setkey = aead_setkey,
2617 			.setauthsize = aead_setauthsize,
2618 			.encrypt = aead_encrypt,
2619 			.decrypt = aead_decrypt,
2620 			.ivsize = CTR_RFC3686_IV_SIZE,
2621 			.maxauthsize = SHA1_DIGEST_SIZE,
2622 		},
2623 		.caam = {
2624 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2625 					   OP_ALG_AAI_CTR_MOD128,
2626 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2627 					   OP_ALG_AAI_HMAC_PRECOMP,
2628 			.rfc3686 = true,
2629 			.geniv = true,
2630 		},
2631 	},
2632 	{
2633 		.aead = {
2634 			.base = {
2635 				.cra_name = "authenc(hmac(sha224),"
2636 					    "rfc3686(ctr(aes)))",
2637 				.cra_driver_name = "authenc-hmac-sha224-"
2638 						   "rfc3686-ctr-aes-caam-qi2",
2639 				.cra_blocksize = 1,
2640 			},
2641 			.setkey = aead_setkey,
2642 			.setauthsize = aead_setauthsize,
2643 			.encrypt = aead_encrypt,
2644 			.decrypt = aead_decrypt,
2645 			.ivsize = CTR_RFC3686_IV_SIZE,
2646 			.maxauthsize = SHA224_DIGEST_SIZE,
2647 		},
2648 		.caam = {
2649 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2650 					   OP_ALG_AAI_CTR_MOD128,
2651 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2652 					   OP_ALG_AAI_HMAC_PRECOMP,
2653 			.rfc3686 = true,
2654 		},
2655 	},
2656 	{
2657 		.aead = {
2658 			.base = {
2659 				.cra_name = "seqiv(authenc("
2660 					    "hmac(sha224),rfc3686(ctr(aes))))",
2661 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2662 						   "rfc3686-ctr-aes-caam-qi2",
2663 				.cra_blocksize = 1,
2664 			},
2665 			.setkey = aead_setkey,
2666 			.setauthsize = aead_setauthsize,
2667 			.encrypt = aead_encrypt,
2668 			.decrypt = aead_decrypt,
2669 			.ivsize = CTR_RFC3686_IV_SIZE,
2670 			.maxauthsize = SHA224_DIGEST_SIZE,
2671 		},
2672 		.caam = {
2673 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2674 					   OP_ALG_AAI_CTR_MOD128,
2675 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2676 					   OP_ALG_AAI_HMAC_PRECOMP,
2677 			.rfc3686 = true,
2678 			.geniv = true,
2679 		},
2680 	},
2681 	{
2682 		.aead = {
2683 			.base = {
2684 				.cra_name = "authenc(hmac(sha256),"
2685 					    "rfc3686(ctr(aes)))",
2686 				.cra_driver_name = "authenc-hmac-sha256-"
2687 						   "rfc3686-ctr-aes-caam-qi2",
2688 				.cra_blocksize = 1,
2689 			},
2690 			.setkey = aead_setkey,
2691 			.setauthsize = aead_setauthsize,
2692 			.encrypt = aead_encrypt,
2693 			.decrypt = aead_decrypt,
2694 			.ivsize = CTR_RFC3686_IV_SIZE,
2695 			.maxauthsize = SHA256_DIGEST_SIZE,
2696 		},
2697 		.caam = {
2698 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2699 					   OP_ALG_AAI_CTR_MOD128,
2700 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2701 					   OP_ALG_AAI_HMAC_PRECOMP,
2702 			.rfc3686 = true,
2703 		},
2704 	},
2705 	{
2706 		.aead = {
2707 			.base = {
2708 				.cra_name = "seqiv(authenc(hmac(sha256),"
2709 					    "rfc3686(ctr(aes))))",
2710 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2711 						   "rfc3686-ctr-aes-caam-qi2",
2712 				.cra_blocksize = 1,
2713 			},
2714 			.setkey = aead_setkey,
2715 			.setauthsize = aead_setauthsize,
2716 			.encrypt = aead_encrypt,
2717 			.decrypt = aead_decrypt,
2718 			.ivsize = CTR_RFC3686_IV_SIZE,
2719 			.maxauthsize = SHA256_DIGEST_SIZE,
2720 		},
2721 		.caam = {
2722 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2723 					   OP_ALG_AAI_CTR_MOD128,
2724 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2725 					   OP_ALG_AAI_HMAC_PRECOMP,
2726 			.rfc3686 = true,
2727 			.geniv = true,
2728 		},
2729 	},
2730 	{
2731 		.aead = {
2732 			.base = {
2733 				.cra_name = "authenc(hmac(sha384),"
2734 					    "rfc3686(ctr(aes)))",
2735 				.cra_driver_name = "authenc-hmac-sha384-"
2736 						   "rfc3686-ctr-aes-caam-qi2",
2737 				.cra_blocksize = 1,
2738 			},
2739 			.setkey = aead_setkey,
2740 			.setauthsize = aead_setauthsize,
2741 			.encrypt = aead_encrypt,
2742 			.decrypt = aead_decrypt,
2743 			.ivsize = CTR_RFC3686_IV_SIZE,
2744 			.maxauthsize = SHA384_DIGEST_SIZE,
2745 		},
2746 		.caam = {
2747 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2748 					   OP_ALG_AAI_CTR_MOD128,
2749 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2750 					   OP_ALG_AAI_HMAC_PRECOMP,
2751 			.rfc3686 = true,
2752 		},
2753 	},
2754 	{
2755 		.aead = {
2756 			.base = {
2757 				.cra_name = "seqiv(authenc(hmac(sha384),"
2758 					    "rfc3686(ctr(aes))))",
2759 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2760 						   "rfc3686-ctr-aes-caam-qi2",
2761 				.cra_blocksize = 1,
2762 			},
2763 			.setkey = aead_setkey,
2764 			.setauthsize = aead_setauthsize,
2765 			.encrypt = aead_encrypt,
2766 			.decrypt = aead_decrypt,
2767 			.ivsize = CTR_RFC3686_IV_SIZE,
2768 			.maxauthsize = SHA384_DIGEST_SIZE,
2769 		},
2770 		.caam = {
2771 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2772 					   OP_ALG_AAI_CTR_MOD128,
2773 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2774 					   OP_ALG_AAI_HMAC_PRECOMP,
2775 			.rfc3686 = true,
2776 			.geniv = true,
2777 		},
2778 	},
2779 	{
2780 		.aead = {
2781 			.base = {
2782 				.cra_name = "rfc7539(chacha20,poly1305)",
2783 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2784 						   "caam-qi2",
2785 				.cra_blocksize = 1,
2786 			},
2787 			.setkey = chachapoly_setkey,
2788 			.setauthsize = chachapoly_setauthsize,
2789 			.encrypt = aead_encrypt,
2790 			.decrypt = aead_decrypt,
2791 			.ivsize = CHACHAPOLY_IV_SIZE,
2792 			.maxauthsize = POLY1305_DIGEST_SIZE,
2793 		},
2794 		.caam = {
2795 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2796 					   OP_ALG_AAI_AEAD,
2797 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2798 					   OP_ALG_AAI_AEAD,
2799 			.nodkp = true,
2800 		},
2801 	},
2802 	{
2803 		.aead = {
2804 			.base = {
2805 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2806 				.cra_driver_name = "rfc7539esp-chacha20-"
2807 						   "poly1305-caam-qi2",
2808 				.cra_blocksize = 1,
2809 			},
2810 			.setkey = chachapoly_setkey,
2811 			.setauthsize = chachapoly_setauthsize,
2812 			.encrypt = aead_encrypt,
2813 			.decrypt = aead_decrypt,
2814 			.ivsize = 8,
2815 			.maxauthsize = POLY1305_DIGEST_SIZE,
2816 		},
2817 		.caam = {
2818 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2819 					   OP_ALG_AAI_AEAD,
2820 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2821 					   OP_ALG_AAI_AEAD,
2822 			.nodkp = true,
2823 		},
2824 	},
2825 	{
2826 		.aead = {
2827 			.base = {
2828 				.cra_name = "authenc(hmac(sha512),"
2829 					    "rfc3686(ctr(aes)))",
2830 				.cra_driver_name = "authenc-hmac-sha512-"
2831 						   "rfc3686-ctr-aes-caam-qi2",
2832 				.cra_blocksize = 1,
2833 			},
2834 			.setkey = aead_setkey,
2835 			.setauthsize = aead_setauthsize,
2836 			.encrypt = aead_encrypt,
2837 			.decrypt = aead_decrypt,
2838 			.ivsize = CTR_RFC3686_IV_SIZE,
2839 			.maxauthsize = SHA512_DIGEST_SIZE,
2840 		},
2841 		.caam = {
2842 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2843 					   OP_ALG_AAI_CTR_MOD128,
2844 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2845 					   OP_ALG_AAI_HMAC_PRECOMP,
2846 			.rfc3686 = true,
2847 		},
2848 	},
2849 	{
2850 		.aead = {
2851 			.base = {
2852 				.cra_name = "seqiv(authenc(hmac(sha512),"
2853 					    "rfc3686(ctr(aes))))",
2854 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2855 						   "rfc3686-ctr-aes-caam-qi2",
2856 				.cra_blocksize = 1,
2857 			},
2858 			.setkey = aead_setkey,
2859 			.setauthsize = aead_setauthsize,
2860 			.encrypt = aead_encrypt,
2861 			.decrypt = aead_decrypt,
2862 			.ivsize = CTR_RFC3686_IV_SIZE,
2863 			.maxauthsize = SHA512_DIGEST_SIZE,
2864 		},
2865 		.caam = {
2866 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2867 					   OP_ALG_AAI_CTR_MOD128,
2868 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2869 					   OP_ALG_AAI_HMAC_PRECOMP,
2870 			.rfc3686 = true,
2871 			.geniv = true,
2872 		},
2873 	},
2874 };
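
/*
 * Illustrative sketch, not part of the driver: an in-kernel user of one of
 * the AEAD templates above, e.g. "gcm(aes)", which resolves to
 * "gcm-aes-caam-qi2" when this driver has the highest priority. The caller
 * lays out associated data followed by plaintext in the source scatterlist;
 * all parameter names here are invented for the example.
 */
static int __maybe_unused caam_qi2_aead_example(const u8 *key, unsigned int keylen,
						struct scatterlist *src,
						struct scatterlist *dst,
						unsigned int assoclen,
						unsigned int cryptlen, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	/* wait for the asynchronous hardware completion */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return ret;
}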
2875 
2876 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2877 {
2878 	struct skcipher_alg *alg = &t_alg->skcipher;
2879 
2880 	alg->base.cra_module = THIS_MODULE;
2881 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2882 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2883 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2884 
2885 	alg->init = caam_cra_init_skcipher;
2886 	alg->exit = caam_cra_exit;
2887 }
2888 
2889 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2890 {
2891 	struct aead_alg *alg = &t_alg->aead;
2892 
2893 	alg->base.cra_module = THIS_MODULE;
2894 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
2895 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2896 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2897 
2898 	alg->init = caam_cra_init_aead;
2899 	alg->exit = caam_cra_exit_aead;
2900 }
2901 
2902 /* max hash key is max split key size */
2903 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
2904 
2905 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
2906 
2907 /* caam context sizes for hashes: running digest + 8 */
2908 #define HASH_MSG_LEN			8
2909 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2910 
2911 enum hash_optype {
2912 	UPDATE = 0,
2913 	UPDATE_FIRST,
2914 	FINALIZE,
2915 	DIGEST,
2916 	HASH_NUM_OP
2917 };
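
/*
 * Each hash_optype selects one of the four shared descriptors built in
 * ahash_set_sh_desc(): UPDATE continues a hash from a previously saved
 * running context, UPDATE_FIRST starts a new one, FINALIZE turns a running
 * context into the final digest, and DIGEST hashes a complete message in a
 * single pass.
 */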
2918 
2919 /**
2920  * caam_hash_ctx - ahash per-session context
2921  * @flc: Flow Contexts array
2922  * @flc_dma: I/O virtual addresses of the Flow Contexts
2923  * @dev: dpseci device
2924  * @ctx_len: size of Context Register
2925  * @adata: hashing algorithm details
2926  */
2927 struct caam_hash_ctx {
2928 	struct caam_flc flc[HASH_NUM_OP];
2929 	dma_addr_t flc_dma[HASH_NUM_OP];
2930 	struct device *dev;
2931 	int ctx_len;
2932 	struct alginfo adata;
2933 };
2934 
2935 /* ahash state */
2936 struct caam_hash_state {
2937 	struct caam_request caam_req;
2938 	dma_addr_t buf_dma;
2939 	dma_addr_t ctx_dma;
2940 	int ctx_dma_len;
2941 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2942 	int buflen_0;
2943 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2944 	int buflen_1;
2945 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2946 	int (*update)(struct ahash_request *req);
2947 	int (*final)(struct ahash_request *req);
2948 	int (*finup)(struct ahash_request *req);
2949 	int current_buf;
2950 };
2951 
2952 struct caam_export_state {
2953 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2954 	u8 caam_ctx[MAX_CTX_LEN];
2955 	int buflen;
2956 	int (*update)(struct ahash_request *req);
2957 	int (*final)(struct ahash_request *req);
2958 	int (*finup)(struct ahash_request *req);
2959 };
2960 
2961 static inline void switch_buf(struct caam_hash_state *state)
2962 {
2963 	state->current_buf ^= 1;
2964 }
2965 
2966 static inline u8 *current_buf(struct caam_hash_state *state)
2967 {
2968 	return state->current_buf ? state->buf_1 : state->buf_0;
2969 }
2970 
2971 static inline u8 *alt_buf(struct caam_hash_state *state)
2972 {
2973 	return state->current_buf ? state->buf_0 : state->buf_1;
2974 }
2975 
2976 static inline int *current_buflen(struct caam_hash_state *state)
2977 {
2978 	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2979 }
2980 
2981 static inline int *alt_buflen(struct caam_hash_state *state)
2982 {
2983 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
2984 }
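
/*
 * The ahash state keeps two bounce buffers (buf_0/buf_1) so that a partial
 * block left over from one .update call can be carried into the next one:
 * the "current" buffer is DMA-mapped for the in-flight job while the
 * "alternate" buffer collects the new tail, and the completion callback
 * flips the two via switch_buf().
 */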
2985 
2986 /* Map current buffer in state (if length > 0) and put it in link table */
2987 static inline int buf_map_to_qm_sg(struct device *dev,
2988 				   struct dpaa2_sg_entry *qm_sg,
2989 				   struct caam_hash_state *state)
2990 {
2991 	int buflen = *current_buflen(state);
2992 
2993 	if (!buflen)
2994 		return 0;
2995 
2996 	state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2997 					DMA_TO_DEVICE);
2998 	if (dma_mapping_error(dev, state->buf_dma)) {
2999 		dev_err(dev, "unable to map buf\n");
3000 		state->buf_dma = 0;
3001 		return -ENOMEM;
3002 	}
3003 
3004 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3005 
3006 	return 0;
3007 }
3008 
3009 /* Map state->caam_ctx, and add it to link table */
3010 static inline int ctx_map_to_qm_sg(struct device *dev,
3011 				   struct caam_hash_state *state, int ctx_len,
3012 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
3013 {
3014 	state->ctx_dma_len = ctx_len;
3015 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3016 	if (dma_mapping_error(dev, state->ctx_dma)) {
3017 		dev_err(dev, "unable to map ctx\n");
3018 		state->ctx_dma = 0;
3019 		return -ENOMEM;
3020 	}
3021 
3022 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3023 
3024 	return 0;
3025 }
3026 
3027 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3028 {
3029 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3030 	int digestsize = crypto_ahash_digestsize(ahash);
3031 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3032 	struct caam_flc *flc;
3033 	u32 *desc;
3034 
3035 	/* ahash_update shared descriptor */
3036 	flc = &ctx->flc[UPDATE];
3037 	desc = flc->sh_desc;
3038 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3039 			  ctx->ctx_len, true, priv->sec_attr.era);
3040 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3041 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3042 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3043 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3044 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3045 			     1);
3046 
3047 	/* ahash_update_first shared descriptor */
3048 	flc = &ctx->flc[UPDATE_FIRST];
3049 	desc = flc->sh_desc;
3050 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3051 			  ctx->ctx_len, false, priv->sec_attr.era);
3052 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3053 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3054 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3055 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3056 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3057 			     1);
3058 
3059 	/* ahash_final shared descriptor */
3060 	flc = &ctx->flc[FINALIZE];
3061 	desc = flc->sh_desc;
3062 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3063 			  ctx->ctx_len, true, priv->sec_attr.era);
3064 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3065 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3066 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3067 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3068 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3069 			     1);
3070 
3071 	/* ahash_digest shared descriptor */
3072 	flc = &ctx->flc[DIGEST];
3073 	desc = flc->sh_desc;
3074 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3075 			  ctx->ctx_len, false, priv->sec_attr.era);
3076 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3077 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3078 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3079 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3080 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3081 			     1);
3082 
3083 	return 0;
3084 }
3085 
3086 struct split_key_sh_result {
3087 	struct completion completion;
3088 	int err;
3089 	struct device *dev;
3090 };
3091 
3092 static void split_key_sh_done(void *cbk_ctx, u32 err)
3093 {
3094 	struct split_key_sh_result *res = cbk_ctx;
3095 
3096 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3097 
3098 	if (err)
3099 		caam_qi2_strstatus(res->dev, err);
3100 
3101 	res->err = err;
3102 	complete(&res->completion);
3103 }
3104 
3105 /* Digest the key if it is longer than the algorithm's block size */
3106 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3107 			   u32 digestsize)
3108 {
3109 	struct caam_request *req_ctx;
3110 	u32 *desc;
3111 	struct split_key_sh_result result;
3112 	dma_addr_t key_dma;
3113 	struct caam_flc *flc;
3114 	dma_addr_t flc_dma;
3115 	int ret = -ENOMEM;
3116 	struct dpaa2_fl_entry *in_fle, *out_fle;
3117 
3118 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3119 	if (!req_ctx)
3120 		return -ENOMEM;
3121 
3122 	in_fle = &req_ctx->fd_flt[1];
3123 	out_fle = &req_ctx->fd_flt[0];
3124 
3125 	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3126 	if (!flc)
3127 		goto err_flc;
3128 
3129 	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3130 	if (dma_mapping_error(ctx->dev, key_dma)) {
3131 		dev_err(ctx->dev, "unable to map key memory\n");
3132 		goto err_key_dma;
3133 	}
3134 
3135 	desc = flc->sh_desc;
3136 
3137 	init_sh_desc(desc, 0);
3138 
3139 	/* descriptor to perform unkeyed hash on key_in */
3140 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3141 			 OP_ALG_AS_INITFINAL);
3142 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3143 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3144 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3145 			 LDST_SRCDST_BYTE_CONTEXT);
3146 
3147 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3148 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3149 				 desc_bytes(desc), DMA_TO_DEVICE);
3150 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3151 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3152 		goto err_flc_dma;
3153 	}
3154 
3155 	dpaa2_fl_set_final(in_fle, true);
3156 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3157 	dpaa2_fl_set_addr(in_fle, key_dma);
3158 	dpaa2_fl_set_len(in_fle, *keylen);
3159 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3160 	dpaa2_fl_set_addr(out_fle, key_dma);
3161 	dpaa2_fl_set_len(out_fle, digestsize);
3162 
3163 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3164 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3165 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3166 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3167 			     1);
3168 
3169 	result.err = 0;
3170 	init_completion(&result.completion);
3171 	result.dev = ctx->dev;
3172 
3173 	req_ctx->flc = flc;
3174 	req_ctx->flc_dma = flc_dma;
3175 	req_ctx->cbk = split_key_sh_done;
3176 	req_ctx->ctx = &result;
3177 
3178 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3179 	if (ret == -EINPROGRESS) {
3180 		/* in progress */
3181 		wait_for_completion(&result.completion);
3182 		ret = result.err;
3183 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3184 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
3185 				     digestsize, 1);
3186 	}
3187 
3188 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3189 			 DMA_TO_DEVICE);
3190 err_flc_dma:
3191 	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3192 err_key_dma:
3193 	kfree(flc);
3194 err_flc:
3195 	kfree(req_ctx);
3196 
3197 	*keylen = digestsize;
3198 
3199 	return ret;
3200 }
3201 
3202 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3203 			unsigned int keylen)
3204 {
3205 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3206 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3207 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3208 	int ret;
3209 	u8 *hashed_key = NULL;
3210 
3211 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3212 
3213 	if (keylen > blocksize) {
3214 		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3215 		if (!hashed_key)
3216 			return -ENOMEM;
3217 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3218 		if (ret)
3219 			goto bad_free_key;
3220 		key = hashed_key;
3221 	}
3222 
3223 	ctx->adata.keylen = keylen;
3224 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3225 					      OP_ALG_ALGSEL_MASK);
3226 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3227 		goto bad_free_key;
3228 
3229 	ctx->adata.key_virt = key;
3230 	ctx->adata.key_inline = true;
3231 
3232 	ret = ahash_set_sh_desc(ahash);
3233 	kfree(hashed_key);
3234 	return ret;
3235 bad_free_key:
3236 	kfree(hashed_key);
3237 	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3238 	return -EINVAL;
3239 }
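
/*
 * Illustrative sketch, not part of the driver: how a keyed hash registered
 * by this file, e.g. "hmac(sha256)" backed by "hmac-sha256-caam-qi2", is
 * consumed through the generic ahash API. The oversized-key handling above
 * is transparent to the caller: a key longer than the block size is first
 * digested via hash_digest_key(). Parameter names are invented; @out must
 * hold crypto_ahash_digestsize() bytes.
 */
static int __maybe_unused caam_qi2_hmac_example(const u8 *key, unsigned int keylen,
						struct scatterlist *src,
						unsigned int len, u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int ret;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, src, out, len);

	/* one-shot digest; the DIGEST flow context is used under the hood */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}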
3240 
3241 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3242 			       struct ahash_request *req)
3243 {
3244 	struct caam_hash_state *state = ahash_request_ctx(req);
3245 
3246 	if (edesc->src_nents)
3247 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3248 
3249 	if (edesc->qm_sg_bytes)
3250 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3251 				 DMA_TO_DEVICE);
3252 
3253 	if (state->buf_dma) {
3254 		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3255 				 DMA_TO_DEVICE);
3256 		state->buf_dma = 0;
3257 	}
3258 }
3259 
3260 static inline void ahash_unmap_ctx(struct device *dev,
3261 				   struct ahash_edesc *edesc,
3262 				   struct ahash_request *req, u32 flag)
3263 {
3264 	struct caam_hash_state *state = ahash_request_ctx(req);
3265 
3266 	if (state->ctx_dma) {
3267 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3268 		state->ctx_dma = 0;
3269 	}
3270 	ahash_unmap(dev, edesc, req);
3271 }
3272 
3273 static void ahash_done(void *cbk_ctx, u32 status)
3274 {
3275 	struct crypto_async_request *areq = cbk_ctx;
3276 	struct ahash_request *req = ahash_request_cast(areq);
3277 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3278 	struct caam_hash_state *state = ahash_request_ctx(req);
3279 	struct ahash_edesc *edesc = state->caam_req.edesc;
3280 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3281 	int digestsize = crypto_ahash_digestsize(ahash);
3282 	int ecode = 0;
3283 
3284 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3285 
3286 	if (unlikely(status)) {
3287 		caam_qi2_strstatus(ctx->dev, status);
3288 		ecode = -EIO;
3289 	}
3290 
3291 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3292 	memcpy(req->result, state->caam_ctx, digestsize);
3293 	qi_cache_free(edesc);
3294 
3295 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3296 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3297 			     ctx->ctx_len, 1);
3298 
3299 	req->base.complete(&req->base, ecode);
3300 }
3301 
3302 static void ahash_done_bi(void *cbk_ctx, u32 status)
3303 {
3304 	struct crypto_async_request *areq = cbk_ctx;
3305 	struct ahash_request *req = ahash_request_cast(areq);
3306 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3307 	struct caam_hash_state *state = ahash_request_ctx(req);
3308 	struct ahash_edesc *edesc = state->caam_req.edesc;
3309 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3310 	int ecode = 0;
3311 
3312 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3313 
3314 	if (unlikely(status)) {
3315 		caam_qi2_strstatus(ctx->dev, status);
3316 		ecode = -EIO;
3317 	}
3318 
3319 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3320 	switch_buf(state);
3321 	qi_cache_free(edesc);
3322 
3323 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3324 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3325 			     ctx->ctx_len, 1);
3326 	if (req->result)
3327 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3328 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3329 				     crypto_ahash_digestsize(ahash), 1);
3330 
3331 	req->base.complete(&req->base, ecode);
3332 }
3333 
3334 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3335 {
3336 	struct crypto_async_request *areq = cbk_ctx;
3337 	struct ahash_request *req = ahash_request_cast(areq);
3338 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3339 	struct caam_hash_state *state = ahash_request_ctx(req);
3340 	struct ahash_edesc *edesc = state->caam_req.edesc;
3341 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3342 	int digestsize = crypto_ahash_digestsize(ahash);
3343 	int ecode = 0;
3344 
3345 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3346 
3347 	if (unlikely(status)) {
3348 		caam_qi2_strstatus(ctx->dev, status);
3349 		ecode = -EIO;
3350 	}
3351 
3352 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3353 	memcpy(req->result, state->caam_ctx, digestsize);
3354 	qi_cache_free(edesc);
3355 
3356 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3357 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3358 			     ctx->ctx_len, 1);
3359 
3360 	req->base.complete(&req->base, ecode);
3361 }
3362 
3363 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3364 {
3365 	struct crypto_async_request *areq = cbk_ctx;
3366 	struct ahash_request *req = ahash_request_cast(areq);
3367 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3368 	struct caam_hash_state *state = ahash_request_ctx(req);
3369 	struct ahash_edesc *edesc = state->caam_req.edesc;
3370 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3371 	int ecode = 0;
3372 
3373 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3374 
3375 	if (unlikely(status)) {
3376 		caam_qi2_strstatus(ctx->dev, status);
3377 		ecode = -EIO;
3378 	}
3379 
3380 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3381 	switch_buf(state);
3382 	qi_cache_free(edesc);
3383 
3384 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3385 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3386 			     ctx->ctx_len, 1);
3387 	if (req->result)
3388 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3389 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3390 				     crypto_ahash_digestsize(ahash), 1);
3391 
3392 	req->base.complete(&req->base, ecode);
3393 }
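
/*
 * The four completion callbacks above differ only in the DMA direction used
 * to unmap the Context Register, in whether the digest is copied back to
 * req->result, and in whether the ping-pong buffer is switched; each
 * enqueueing path below installs the matching one in req_ctx->cbk.
 */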
3394 
3395 static int ahash_update_ctx(struct ahash_request *req)
3396 {
3397 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3398 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3399 	struct caam_hash_state *state = ahash_request_ctx(req);
3400 	struct caam_request *req_ctx = &state->caam_req;
3401 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3402 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3403 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3404 		      GFP_KERNEL : GFP_ATOMIC;
3405 	u8 *buf = current_buf(state);
3406 	int *buflen = current_buflen(state);
3407 	u8 *next_buf = alt_buf(state);
3408 	int *next_buflen = alt_buflen(state), last_buflen;
3409 	int in_len = *buflen + req->nbytes, to_hash;
3410 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3411 	struct ahash_edesc *edesc;
3412 	int ret = 0;
3413 
3414 	last_buflen = *next_buflen;
3415 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3416 	to_hash = in_len - *next_buflen;
3417 
3418 	if (to_hash) {
3419 		struct dpaa2_sg_entry *sg_table;
3420 		int src_len = req->nbytes - *next_buflen;
3421 
3422 		src_nents = sg_nents_for_len(req->src, src_len);
3423 		if (src_nents < 0) {
3424 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3425 			return src_nents;
3426 		}
3427 
3428 		if (src_nents) {
3429 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3430 						  DMA_TO_DEVICE);
3431 			if (!mapped_nents) {
3432 				dev_err(ctx->dev, "unable to DMA map source\n");
3433 				return -ENOMEM;
3434 			}
3435 		} else {
3436 			mapped_nents = 0;
3437 		}
3438 
3439 		/* allocate space for base edesc and link tables */
3440 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3441 		if (!edesc) {
3442 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3443 				     DMA_TO_DEVICE);
3444 			return -ENOMEM;
3445 		}
3446 
3447 		edesc->src_nents = src_nents;
3448 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3449 		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3450 			      sizeof(*sg_table);
3451 		sg_table = &edesc->sgt[0];
3452 
3453 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3454 				       DMA_BIDIRECTIONAL);
3455 		if (ret)
3456 			goto unmap_ctx;
3457 
3458 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3459 		if (ret)
3460 			goto unmap_ctx;
3461 
3462 		if (mapped_nents) {
3463 			sg_to_qm_sg_last(req->src, src_len,
3464 					 sg_table + qm_sg_src_index, 0);
3465 			if (*next_buflen)
3466 				scatterwalk_map_and_copy(next_buf, req->src,
3467 							 to_hash - *buflen,
3468 							 *next_buflen, 0);
3469 		} else {
3470 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3471 					   true);
3472 		}
3473 
3474 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3475 						  qm_sg_bytes, DMA_TO_DEVICE);
3476 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3477 			dev_err(ctx->dev, "unable to map S/G table\n");
3478 			ret = -ENOMEM;
3479 			goto unmap_ctx;
3480 		}
3481 		edesc->qm_sg_bytes = qm_sg_bytes;
3482 
3483 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3484 		dpaa2_fl_set_final(in_fle, true);
3485 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3486 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3487 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3488 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3489 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3490 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3491 
3492 		req_ctx->flc = &ctx->flc[UPDATE];
3493 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3494 		req_ctx->cbk = ahash_done_bi;
3495 		req_ctx->ctx = &req->base;
3496 		req_ctx->edesc = edesc;
3497 
3498 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3499 		if (ret != -EINPROGRESS &&
3500 		    !(ret == -EBUSY &&
3501 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3502 			goto unmap_ctx;
3503 	} else if (*next_buflen) {
3504 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3505 					 req->nbytes, 0);
3506 		*buflen = *next_buflen;
3507 		*next_buflen = last_buflen;
3508 	}
3509 
3510 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3511 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3512 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3513 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3514 			     1);
3515 
3516 	return ret;
3517 unmap_ctx:
3518 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3519 	qi_cache_free(edesc);
3520 	return ret;
3521 }
3522 
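/*
 * ahash_final_ctx - finalize a request that already has a running context:
 * the context and any buffered partial block are chained into a QM S/G table
 * and a FINALIZE job writes the digest back over the context buffer.
 */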
3523 static int ahash_final_ctx(struct ahash_request *req)
3524 {
3525 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3526 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3527 	struct caam_hash_state *state = ahash_request_ctx(req);
3528 	struct caam_request *req_ctx = &state->caam_req;
3529 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3530 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3531 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3532 		      GFP_KERNEL : GFP_ATOMIC;
3533 	int buflen = *current_buflen(state);
3534 	int qm_sg_bytes;
3535 	int digestsize = crypto_ahash_digestsize(ahash);
3536 	struct ahash_edesc *edesc;
3537 	struct dpaa2_sg_entry *sg_table;
3538 	int ret;
3539 
3540 	/* allocate space for base edesc and link tables */
3541 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3542 	if (!edesc)
3543 		return -ENOMEM;
3544 
3545 	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3546 	sg_table = &edesc->sgt[0];
3547 
3548 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3549 			       DMA_BIDIRECTIONAL);
3550 	if (ret)
3551 		goto unmap_ctx;
3552 
3553 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3554 	if (ret)
3555 		goto unmap_ctx;
3556 
3557 	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3558 
3559 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3560 					  DMA_TO_DEVICE);
3561 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3562 		dev_err(ctx->dev, "unable to map S/G table\n");
3563 		ret = -ENOMEM;
3564 		goto unmap_ctx;
3565 	}
3566 	edesc->qm_sg_bytes = qm_sg_bytes;
3567 
3568 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3569 	dpaa2_fl_set_final(in_fle, true);
3570 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3571 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3572 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3573 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3574 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3575 	dpaa2_fl_set_len(out_fle, digestsize);
3576 
3577 	req_ctx->flc = &ctx->flc[FINALIZE];
3578 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3579 	req_ctx->cbk = ahash_done_ctx_src;
3580 	req_ctx->ctx = &req->base;
3581 	req_ctx->edesc = edesc;
3582 
3583 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3584 	if (ret == -EINPROGRESS ||
3585 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3586 		return ret;
3587 
3588 unmap_ctx:
3589 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3590 	qi_cache_free(edesc);
3591 	return ret;
3592 }
3593 
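/*
 * ahash_finup_ctx - like ahash_final_ctx(), but the remaining req->src data
 * is appended to the S/G table so buffered data and new data are hashed in
 * the same FINALIZE job.
 */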
3594 static int ahash_finup_ctx(struct ahash_request *req)
3595 {
3596 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3597 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3598 	struct caam_hash_state *state = ahash_request_ctx(req);
3599 	struct caam_request *req_ctx = &state->caam_req;
3600 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3601 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3602 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3603 		      GFP_KERNEL : GFP_ATOMIC;
3604 	int buflen = *current_buflen(state);
3605 	int qm_sg_bytes, qm_sg_src_index;
3606 	int src_nents, mapped_nents;
3607 	int digestsize = crypto_ahash_digestsize(ahash);
3608 	struct ahash_edesc *edesc;
3609 	struct dpaa2_sg_entry *sg_table;
3610 	int ret;
3611 
3612 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3613 	if (src_nents < 0) {
3614 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3615 		return src_nents;
3616 	}
3617 
3618 	if (src_nents) {
3619 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3620 					  DMA_TO_DEVICE);
3621 		if (!mapped_nents) {
3622 			dev_err(ctx->dev, "unable to DMA map source\n");
3623 			return -ENOMEM;
3624 		}
3625 	} else {
3626 		mapped_nents = 0;
3627 	}
3628 
3629 	/* allocate space for base edesc and link tables */
3630 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3631 	if (!edesc) {
3632 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3633 		return -ENOMEM;
3634 	}
3635 
3636 	edesc->src_nents = src_nents;
3637 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3638 	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3639 		      sizeof(*sg_table);
3640 	sg_table = &edesc->sgt[0];
3641 
3642 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3643 			       DMA_BIDIRECTIONAL);
3644 	if (ret)
3645 		goto unmap_ctx;
3646 
3647 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3648 	if (ret)
3649 		goto unmap_ctx;
3650 
3651 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3652 
3653 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3654 					  DMA_TO_DEVICE);
3655 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3656 		dev_err(ctx->dev, "unable to map S/G table\n");
3657 		ret = -ENOMEM;
3658 		goto unmap_ctx;
3659 	}
3660 	edesc->qm_sg_bytes = qm_sg_bytes;
3661 
3662 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3663 	dpaa2_fl_set_final(in_fle, true);
3664 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3665 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3666 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3667 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3668 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3669 	dpaa2_fl_set_len(out_fle, digestsize);
3670 
3671 	req_ctx->flc = &ctx->flc[FINALIZE];
3672 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3673 	req_ctx->cbk = ahash_done_ctx_src;
3674 	req_ctx->ctx = &req->base;
3675 	req_ctx->edesc = edesc;
3676 
3677 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3678 	if (ret == -EINPROGRESS ||
3679 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3680 		return ret;
3681 
3682 unmap_ctx:
3683 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3684 	qi_cache_free(edesc);
3685 	return ret;
3686 }
3687 
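/*
 * ahash_digest - one-shot digest of req->src using the DIGEST flow context;
 * no running context is kept, and the result area (state->caam_ctx) is
 * mapped DMA_FROM_DEVICE for the duration of the job.
 */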
3688 static int ahash_digest(struct ahash_request *req)
3689 {
3690 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3691 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3692 	struct caam_hash_state *state = ahash_request_ctx(req);
3693 	struct caam_request *req_ctx = &state->caam_req;
3694 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3695 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3696 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3697 		      GFP_KERNEL : GFP_ATOMIC;
3698 	int digestsize = crypto_ahash_digestsize(ahash);
3699 	int src_nents, mapped_nents;
3700 	struct ahash_edesc *edesc;
3701 	int ret = -ENOMEM;
3702 
3703 	state->buf_dma = 0;
3704 
3705 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3706 	if (src_nents < 0) {
3707 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3708 		return src_nents;
3709 	}
3710 
3711 	if (src_nents) {
3712 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3713 					  DMA_TO_DEVICE);
3714 		if (!mapped_nents) {
3715 			dev_err(ctx->dev, "unable to map source for DMA\n");
3716 			return ret;
3717 		}
3718 	} else {
3719 		mapped_nents = 0;
3720 	}
3721 
3722 	/* allocate space for base edesc and link tables */
3723 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3724 	if (!edesc) {
3725 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3726 		return ret;
3727 	}
3728 
3729 	edesc->src_nents = src_nents;
3730 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3731 
3732 	if (mapped_nents > 1) {
3733 		int qm_sg_bytes;
3734 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3735 
3736 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3737 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3738 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3739 						  qm_sg_bytes, DMA_TO_DEVICE);
3740 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3741 			dev_err(ctx->dev, "unable to map S/G table\n");
3742 			goto unmap;
3743 		}
3744 		edesc->qm_sg_bytes = qm_sg_bytes;
3745 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3746 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3747 	} else {
3748 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3749 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3750 	}
3751 
3752 	state->ctx_dma_len = digestsize;
3753 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3754 					DMA_FROM_DEVICE);
3755 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3756 		dev_err(ctx->dev, "unable to map ctx\n");
3757 		state->ctx_dma = 0;
3758 		goto unmap;
3759 	}
3760 
3761 	dpaa2_fl_set_final(in_fle, true);
3762 	dpaa2_fl_set_len(in_fle, req->nbytes);
3763 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3764 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3765 	dpaa2_fl_set_len(out_fle, digestsize);
3766 
3767 	req_ctx->flc = &ctx->flc[DIGEST];
3768 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3769 	req_ctx->cbk = ahash_done;
3770 	req_ctx->ctx = &req->base;
3771 	req_ctx->edesc = edesc;
3772 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3773 	if (ret == -EINPROGRESS ||
3774 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3775 		return ret;
3776 
3777 unmap:
3778 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3779 	qi_cache_free(edesc);
3780 	return ret;
3781 }
3782 
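/*
 * ahash_final_no_ctx - final with no running context: only the buffered data
 * (possibly zero-length) is hashed, via the DIGEST flow context.
 */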
3783 static int ahash_final_no_ctx(struct ahash_request *req)
3784 {
3785 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3786 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3787 	struct caam_hash_state *state = ahash_request_ctx(req);
3788 	struct caam_request *req_ctx = &state->caam_req;
3789 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3790 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3791 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3792 		      GFP_KERNEL : GFP_ATOMIC;
3793 	u8 *buf = current_buf(state);
3794 	int buflen = *current_buflen(state);
3795 	int digestsize = crypto_ahash_digestsize(ahash);
3796 	struct ahash_edesc *edesc;
3797 	int ret = -ENOMEM;
3798 
3799 	/* allocate space for base edesc and link tables */
3800 	edesc = qi_cache_zalloc(GFP_DMA | flags);
3801 	if (!edesc)
3802 		return ret;
3803 
3804 	if (buflen) {
3805 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3806 						DMA_TO_DEVICE);
3807 		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3808 			dev_err(ctx->dev, "unable to map src\n");
3809 			goto unmap;
3810 		}
3811 	}
3812 
3813 	state->ctx_dma_len = digestsize;
3814 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3815 					DMA_FROM_DEVICE);
3816 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3817 		dev_err(ctx->dev, "unable to map ctx\n");
3818 		state->ctx_dma = 0;
3819 		goto unmap;
3820 	}
3821 
3822 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3823 	dpaa2_fl_set_final(in_fle, true);
3824 	/*
3825 	 * The crypto engine requires the input entry to be present when a
3826 	 * "frame list" FD is used.
3827 	 * Since the engine does not support FMT=2'b11 (unused entry type),
3828 	 * leaving in_fle zeroized (except for the "Final" flag) is the best option.
3829 	 */
3830 	if (buflen) {
3831 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3832 		dpaa2_fl_set_addr(in_fle, state->buf_dma);
3833 		dpaa2_fl_set_len(in_fle, buflen);
3834 	}
3835 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3836 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3837 	dpaa2_fl_set_len(out_fle, digestsize);
3838 
3839 	req_ctx->flc = &ctx->flc[DIGEST];
3840 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3841 	req_ctx->cbk = ahash_done;
3842 	req_ctx->ctx = &req->base;
3843 	req_ctx->edesc = edesc;
3844 
3845 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3846 	if (ret == -EINPROGRESS ||
3847 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3848 		return ret;
3849 
3850 unmap:
3851 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3852 	qi_cache_free(edesc);
3853 	return ret;
3854 }
3855 
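/*
 * ahash_update_no_ctx - update path used before any running context exists:
 * once at least one full block is available it is hashed with UPDATE_FIRST
 * and the state handlers are switched to the *_ctx variants.
 */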
3856 static int ahash_update_no_ctx(struct ahash_request *req)
3857 {
3858 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3859 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3860 	struct caam_hash_state *state = ahash_request_ctx(req);
3861 	struct caam_request *req_ctx = &state->caam_req;
3862 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3863 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3864 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3865 		      GFP_KERNEL : GFP_ATOMIC;
3866 	u8 *buf = current_buf(state);
3867 	int *buflen = current_buflen(state);
3868 	u8 *next_buf = alt_buf(state);
3869 	int *next_buflen = alt_buflen(state);
3870 	int in_len = *buflen + req->nbytes, to_hash;
3871 	int qm_sg_bytes, src_nents, mapped_nents;
3872 	struct ahash_edesc *edesc;
3873 	int ret = 0;
3874 
3875 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3876 	to_hash = in_len - *next_buflen;
3877 
3878 	if (to_hash) {
3879 		struct dpaa2_sg_entry *sg_table;
3880 		int src_len = req->nbytes - *next_buflen;
3881 
3882 		src_nents = sg_nents_for_len(req->src, src_len);
3883 		if (src_nents < 0) {
3884 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3885 			return src_nents;
3886 		}
3887 
3888 		if (src_nents) {
3889 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3890 						  DMA_TO_DEVICE);
3891 			if (!mapped_nents) {
3892 				dev_err(ctx->dev, "unable to DMA map source\n");
3893 				return -ENOMEM;
3894 			}
3895 		} else {
3896 			mapped_nents = 0;
3897 		}
3898 
3899 		/* allocate space for base edesc and link tables */
3900 		edesc = qi_cache_zalloc(GFP_DMA | flags);
3901 		if (!edesc) {
3902 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3903 				     DMA_TO_DEVICE);
3904 			return -ENOMEM;
3905 		}
3906 
3907 		edesc->src_nents = src_nents;
3908 		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
3909 			      sizeof(*sg_table);
3910 		sg_table = &edesc->sgt[0];
3911 
3912 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3913 		if (ret)
3914 			goto unmap_ctx;
3915 
3916 		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
3917 
3918 		if (*next_buflen)
3919 			scatterwalk_map_and_copy(next_buf, req->src,
3920 						 to_hash - *buflen,
3921 						 *next_buflen, 0);
3922 
3923 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3924 						  qm_sg_bytes, DMA_TO_DEVICE);
3925 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3926 			dev_err(ctx->dev, "unable to map S/G table\n");
3927 			ret = -ENOMEM;
3928 			goto unmap_ctx;
3929 		}
3930 		edesc->qm_sg_bytes = qm_sg_bytes;
3931 
3932 		state->ctx_dma_len = ctx->ctx_len;
3933 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3934 						ctx->ctx_len, DMA_FROM_DEVICE);
3935 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3936 			dev_err(ctx->dev, "unable to map ctx\n");
3937 			state->ctx_dma = 0;
3938 			ret = -ENOMEM;
3939 			goto unmap_ctx;
3940 		}
3941 
3942 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3943 		dpaa2_fl_set_final(in_fle, true);
3944 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3945 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3946 		dpaa2_fl_set_len(in_fle, to_hash);
3947 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3948 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3949 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3950 
3951 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3952 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3953 		req_ctx->cbk = ahash_done_ctx_dst;
3954 		req_ctx->ctx = &req->base;
3955 		req_ctx->edesc = edesc;
3956 
3957 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3958 		if (ret != -EINPROGRESS &&
3959 		    !(ret == -EBUSY &&
3960 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3961 			goto unmap_ctx;
3962 
3963 		state->update = ahash_update_ctx;
3964 		state->finup = ahash_finup_ctx;
3965 		state->final = ahash_final_ctx;
3966 	} else if (*next_buflen) {
3967 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3968 					 req->nbytes, 0);
3969 		*buflen = *next_buflen;
3970 		*next_buflen = 0;
3971 	}
3972 
3973 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3974 			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3975 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3976 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3977 			     1);
3978 
3979 	return ret;
3980 unmap_ctx:
3981 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3982 	qi_cache_free(edesc);
3983 	return ret;
3984 }
3985 
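/*
 * ahash_finup_no_ctx - finup with no running context: buffered data and
 * req->src are hashed together in a single DIGEST job.
 */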
3986 static int ahash_finup_no_ctx(struct ahash_request *req)
3987 {
3988 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3989 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3990 	struct caam_hash_state *state = ahash_request_ctx(req);
3991 	struct caam_request *req_ctx = &state->caam_req;
3992 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3993 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3994 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3995 		      GFP_KERNEL : GFP_ATOMIC;
3996 	int buflen = *current_buflen(state);
3997 	int qm_sg_bytes, src_nents, mapped_nents;
3998 	int digestsize = crypto_ahash_digestsize(ahash);
3999 	struct ahash_edesc *edesc;
4000 	struct dpaa2_sg_entry *sg_table;
4001 	int ret;
4002 
4003 	src_nents = sg_nents_for_len(req->src, req->nbytes);
4004 	if (src_nents < 0) {
4005 		dev_err(ctx->dev, "Invalid number of src SG.\n");
4006 		return src_nents;
4007 	}
4008 
4009 	if (src_nents) {
4010 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4011 					  DMA_TO_DEVICE);
4012 		if (!mapped_nents) {
4013 			dev_err(ctx->dev, "unable to DMA map source\n");
4014 			return -ENOMEM;
4015 		}
4016 	} else {
4017 		mapped_nents = 0;
4018 	}
4019 
4020 	/* allocate space for base edesc and link tables */
4021 	edesc = qi_cache_zalloc(GFP_DMA | flags);
4022 	if (!edesc) {
4023 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4024 		return -ENOMEM;
4025 	}
4026 
4027 	edesc->src_nents = src_nents;
4028 	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4029 	sg_table = &edesc->sgt[0];
4030 
4031 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4032 	if (ret)
4033 		goto unmap;
4034 
4035 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4036 
4037 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4038 					  DMA_TO_DEVICE);
4039 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4040 		dev_err(ctx->dev, "unable to map S/G table\n");
4041 		ret = -ENOMEM;
4042 		goto unmap;
4043 	}
4044 	edesc->qm_sg_bytes = qm_sg_bytes;
4045 
4046 	state->ctx_dma_len = digestsize;
4047 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4048 					DMA_FROM_DEVICE);
4049 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4050 		dev_err(ctx->dev, "unable to map ctx\n");
4051 		state->ctx_dma = 0;
4052 		ret = -ENOMEM;
4053 		goto unmap;
4054 	}
4055 
4056 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4057 	dpaa2_fl_set_final(in_fle, true);
4058 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4059 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4060 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4061 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4062 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4063 	dpaa2_fl_set_len(out_fle, digestsize);
4064 
4065 	req_ctx->flc = &ctx->flc[DIGEST];
4066 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4067 	req_ctx->cbk = ahash_done;
4068 	req_ctx->ctx = &req->base;
4069 	req_ctx->edesc = edesc;
4070 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4071 	if (ret != -EINPROGRESS &&
4072 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4073 		goto unmap;
4074 
4075 	return ret;
4076 unmap:
4077 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4078 	qi_cache_free(edesc);
4079 	return ret;
4080 }
4081 
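/*
 * ahash_update_first - very first update of a request: full blocks are sent
 * to the engine with UPDATE_FIRST, while any trailing partial block is
 * stashed in the alternate buffer for a later call.
 */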
4082 static int ahash_update_first(struct ahash_request *req)
4083 {
4084 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4085 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4086 	struct caam_hash_state *state = ahash_request_ctx(req);
4087 	struct caam_request *req_ctx = &state->caam_req;
4088 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4089 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4090 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4091 		      GFP_KERNEL : GFP_ATOMIC;
4092 	u8 *next_buf = alt_buf(state);
4093 	int *next_buflen = alt_buflen(state);
4094 	int to_hash;
4095 	int src_nents, mapped_nents;
4096 	struct ahash_edesc *edesc;
4097 	int ret = 0;
4098 
4099 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4100 				      1);
4101 	to_hash = req->nbytes - *next_buflen;
4102 
4103 	if (to_hash) {
4104 		struct dpaa2_sg_entry *sg_table;
4105 		int src_len = req->nbytes - *next_buflen;
4106 
4107 		src_nents = sg_nents_for_len(req->src, src_len);
4108 		if (src_nents < 0) {
4109 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4110 			return src_nents;
4111 		}
4112 
4113 		if (src_nents) {
4114 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4115 						  DMA_TO_DEVICE);
4116 			if (!mapped_nents) {
4117 				dev_err(ctx->dev, "unable to map source for DMA\n");
4118 				return -ENOMEM;
4119 			}
4120 		} else {
4121 			mapped_nents = 0;
4122 		}
4123 
4124 		/* allocate space for base edesc and link tables */
4125 		edesc = qi_cache_zalloc(GFP_DMA | flags);
4126 		if (!edesc) {
4127 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4128 				     DMA_TO_DEVICE);
4129 			return -ENOMEM;
4130 		}
4131 
4132 		edesc->src_nents = src_nents;
4133 		sg_table = &edesc->sgt[0];
4134 
4135 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4136 		dpaa2_fl_set_final(in_fle, true);
4137 		dpaa2_fl_set_len(in_fle, to_hash);
4138 
4139 		if (mapped_nents > 1) {
4140 			int qm_sg_bytes;
4141 
4142 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4143 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
4144 				      sizeof(*sg_table);
4145 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4146 							  qm_sg_bytes,
4147 							  DMA_TO_DEVICE);
4148 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4149 				dev_err(ctx->dev, "unable to map S/G table\n");
4150 				ret = -ENOMEM;
4151 				goto unmap_ctx;
4152 			}
4153 			edesc->qm_sg_bytes = qm_sg_bytes;
4154 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4155 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4156 		} else {
4157 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4158 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4159 		}
4160 
4161 		if (*next_buflen)
4162 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4163 						 *next_buflen, 0);
4164 
4165 		state->ctx_dma_len = ctx->ctx_len;
4166 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4167 						ctx->ctx_len, DMA_FROM_DEVICE);
4168 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4169 			dev_err(ctx->dev, "unable to map ctx\n");
4170 			state->ctx_dma = 0;
4171 			ret = -ENOMEM;
4172 			goto unmap_ctx;
4173 		}
4174 
4175 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4176 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4177 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4178 
4179 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4180 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4181 		req_ctx->cbk = ahash_done_ctx_dst;
4182 		req_ctx->ctx = &req->base;
4183 		req_ctx->edesc = edesc;
4184 
4185 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4186 		if (ret != -EINPROGRESS &&
4187 		    !(ret == -EBUSY && req->base.flags &
4188 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4189 			goto unmap_ctx;
4190 
4191 		state->update = ahash_update_ctx;
4192 		state->finup = ahash_finup_ctx;
4193 		state->final = ahash_final_ctx;
4194 	} else if (*next_buflen) {
4195 		state->update = ahash_update_no_ctx;
4196 		state->finup = ahash_finup_no_ctx;
4197 		state->final = ahash_final_no_ctx;
4198 		scatterwalk_map_and_copy(next_buf, req->src, 0,
4199 					 req->nbytes, 0);
4200 		switch_buf(state);
4201 	}
4202 
4203 	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4204 			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4205 			     1);
4206 
4207 	return ret;
4208 unmap_ctx:
4209 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4210 	qi_cache_free(edesc);
4211 	return ret;
4212 }
4213 
4214 static int ahash_finup_first(struct ahash_request *req)
4215 {
4216 	return ahash_digest(req);
4217 }
4218 
4219 static int ahash_init(struct ahash_request *req)
4220 {
4221 	struct caam_hash_state *state = ahash_request_ctx(req);
4222 
4223 	state->update = ahash_update_first;
4224 	state->finup = ahash_finup_first;
4225 	state->final = ahash_final_no_ctx;
4226 
4227 	state->ctx_dma = 0;
4228 	state->ctx_dma_len = 0;
4229 	state->current_buf = 0;
4230 	state->buf_dma = 0;
4231 	state->buflen_0 = 0;
4232 	state->buflen_1 = 0;
4233 
4234 	return 0;
4235 }
4236 
4237 static int ahash_update(struct ahash_request *req)
4238 {
4239 	struct caam_hash_state *state = ahash_request_ctx(req);
4240 
4241 	return state->update(req);
4242 }
4243 
4244 static int ahash_finup(struct ahash_request *req)
4245 {
4246 	struct caam_hash_state *state = ahash_request_ctx(req);
4247 
4248 	return state->finup(req);
4249 }
4250 
4251 static int ahash_final(struct ahash_request *req)
4252 {
4253 	struct caam_hash_state *state = ahash_request_ctx(req);
4254 
4255 	return state->final(req);
4256 }
4257 
4258 static int ahash_export(struct ahash_request *req, void *out)
4259 {
4260 	struct caam_hash_state *state = ahash_request_ctx(req);
4261 	struct caam_export_state *export = out;
4262 	int len;
4263 	u8 *buf;
4264 
4265 	if (state->current_buf) {
4266 		buf = state->buf_1;
4267 		len = state->buflen_1;
4268 	} else {
4269 		buf = state->buf_0;
4270 		len = state->buflen_0;
4271 	}
4272 
4273 	memcpy(export->buf, buf, len);
4274 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4275 	export->buflen = len;
4276 	export->update = state->update;
4277 	export->final = state->final;
4278 	export->finup = state->finup;
4279 
4280 	return 0;
4281 }
4282 
4283 static int ahash_import(struct ahash_request *req, const void *in)
4284 {
4285 	struct caam_hash_state *state = ahash_request_ctx(req);
4286 	const struct caam_export_state *export = in;
4287 
4288 	memset(state, 0, sizeof(*state));
4289 	memcpy(state->buf_0, export->buf, export->buflen);
4290 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4291 	state->buflen_0 = export->buflen;
4292 	state->update = export->update;
4293 	state->final = export->final;
4294 	state->finup = export->finup;
4295 
4296 	return 0;
4297 }
4298 
4299 struct caam_hash_template {
4300 	char name[CRYPTO_MAX_ALG_NAME];
4301 	char driver_name[CRYPTO_MAX_ALG_NAME];
4302 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4303 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4304 	unsigned int blocksize;
4305 	struct ahash_alg template_ahash;
4306 	u32 alg_type;
4307 };
4308 
4309 /* ahash algorithm templates */
4310 static struct caam_hash_template driver_hash[] = {
4311 	{
4312 		.name = "sha1",
4313 		.driver_name = "sha1-caam-qi2",
4314 		.hmac_name = "hmac(sha1)",
4315 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4316 		.blocksize = SHA1_BLOCK_SIZE,
4317 		.template_ahash = {
4318 			.init = ahash_init,
4319 			.update = ahash_update,
4320 			.final = ahash_final,
4321 			.finup = ahash_finup,
4322 			.digest = ahash_digest,
4323 			.export = ahash_export,
4324 			.import = ahash_import,
4325 			.setkey = ahash_setkey,
4326 			.halg = {
4327 				.digestsize = SHA1_DIGEST_SIZE,
4328 				.statesize = sizeof(struct caam_export_state),
4329 			},
4330 		},
4331 		.alg_type = OP_ALG_ALGSEL_SHA1,
4332 	}, {
4333 		.name = "sha224",
4334 		.driver_name = "sha224-caam-qi2",
4335 		.hmac_name = "hmac(sha224)",
4336 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4337 		.blocksize = SHA224_BLOCK_SIZE,
4338 		.template_ahash = {
4339 			.init = ahash_init,
4340 			.update = ahash_update,
4341 			.final = ahash_final,
4342 			.finup = ahash_finup,
4343 			.digest = ahash_digest,
4344 			.export = ahash_export,
4345 			.import = ahash_import,
4346 			.setkey = ahash_setkey,
4347 			.halg = {
4348 				.digestsize = SHA224_DIGEST_SIZE,
4349 				.statesize = sizeof(struct caam_export_state),
4350 			},
4351 		},
4352 		.alg_type = OP_ALG_ALGSEL_SHA224,
4353 	}, {
4354 		.name = "sha256",
4355 		.driver_name = "sha256-caam-qi2",
4356 		.hmac_name = "hmac(sha256)",
4357 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4358 		.blocksize = SHA256_BLOCK_SIZE,
4359 		.template_ahash = {
4360 			.init = ahash_init,
4361 			.update = ahash_update,
4362 			.final = ahash_final,
4363 			.finup = ahash_finup,
4364 			.digest = ahash_digest,
4365 			.export = ahash_export,
4366 			.import = ahash_import,
4367 			.setkey = ahash_setkey,
4368 			.halg = {
4369 				.digestsize = SHA256_DIGEST_SIZE,
4370 				.statesize = sizeof(struct caam_export_state),
4371 			},
4372 		},
4373 		.alg_type = OP_ALG_ALGSEL_SHA256,
4374 	}, {
4375 		.name = "sha384",
4376 		.driver_name = "sha384-caam-qi2",
4377 		.hmac_name = "hmac(sha384)",
4378 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4379 		.blocksize = SHA384_BLOCK_SIZE,
4380 		.template_ahash = {
4381 			.init = ahash_init,
4382 			.update = ahash_update,
4383 			.final = ahash_final,
4384 			.finup = ahash_finup,
4385 			.digest = ahash_digest,
4386 			.export = ahash_export,
4387 			.import = ahash_import,
4388 			.setkey = ahash_setkey,
4389 			.halg = {
4390 				.digestsize = SHA384_DIGEST_SIZE,
4391 				.statesize = sizeof(struct caam_export_state),
4392 			},
4393 		},
4394 		.alg_type = OP_ALG_ALGSEL_SHA384,
4395 	}, {
4396 		.name = "sha512",
4397 		.driver_name = "sha512-caam-qi2",
4398 		.hmac_name = "hmac(sha512)",
4399 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4400 		.blocksize = SHA512_BLOCK_SIZE,
4401 		.template_ahash = {
4402 			.init = ahash_init,
4403 			.update = ahash_update,
4404 			.final = ahash_final,
4405 			.finup = ahash_finup,
4406 			.digest = ahash_digest,
4407 			.export = ahash_export,
4408 			.import = ahash_import,
4409 			.setkey = ahash_setkey,
4410 			.halg = {
4411 				.digestsize = SHA512_DIGEST_SIZE,
4412 				.statesize = sizeof(struct caam_export_state),
4413 			},
4414 		},
4415 		.alg_type = OP_ALG_ALGSEL_SHA512,
4416 	}, {
4417 		.name = "md5",
4418 		.driver_name = "md5-caam-qi2",
4419 		.hmac_name = "hmac(md5)",
4420 		.hmac_driver_name = "hmac-md5-caam-qi2",
4421 		.blocksize = MD5_BLOCK_WORDS * 4,
4422 		.template_ahash = {
4423 			.init = ahash_init,
4424 			.update = ahash_update,
4425 			.final = ahash_final,
4426 			.finup = ahash_finup,
4427 			.digest = ahash_digest,
4428 			.export = ahash_export,
4429 			.import = ahash_import,
4430 			.setkey = ahash_setkey,
4431 			.halg = {
4432 				.digestsize = MD5_DIGEST_SIZE,
4433 				.statesize = sizeof(struct caam_export_state),
4434 			},
4435 		},
4436 		.alg_type = OP_ALG_ALGSEL_MD5,
4437 	}
4438 };
4439 
4440 struct caam_hash_alg {
4441 	struct list_head entry;
4442 	struct device *dev;
4443 	int alg_type;
4444 	struct ahash_alg ahash_alg;
4445 };
4446 
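/*
 * Per-tfm init: DMA-map the per-operation flow contexts and derive the
 * running-digest length (ctx_len) from the MDHA algorithm bound to this tfm.
 */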
4447 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4448 {
4449 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4450 	struct crypto_alg *base = tfm->__crt_alg;
4451 	struct hash_alg_common *halg =
4452 		 container_of(base, struct hash_alg_common, base);
4453 	struct ahash_alg *alg =
4454 		 container_of(halg, struct ahash_alg, halg);
4455 	struct caam_hash_alg *caam_hash =
4456 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4457 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4458 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4459 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4460 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4461 					 HASH_MSG_LEN + 32,
4462 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4463 					 HASH_MSG_LEN + 64,
4464 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4465 	dma_addr_t dma_addr;
4466 	int i;
4467 
4468 	ctx->dev = caam_hash->dev;
4469 
4470 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4471 					DMA_BIDIRECTIONAL,
4472 					DMA_ATTR_SKIP_CPU_SYNC);
4473 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4474 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4475 		return -ENOMEM;
4476 	}
4477 
4478 	for (i = 0; i < HASH_NUM_OP; i++)
4479 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4480 
4481 	/* copy descriptor header template value */
4482 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4483 
4484 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4485 				   OP_ALG_ALGSEL_SUBMASK) >>
4486 				  OP_ALG_ALGSEL_SHIFT];
4487 
4488 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4489 				 sizeof(struct caam_hash_state));
4490 
4491 	return ahash_set_sh_desc(ahash);
4492 }
4493 
4494 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4495 {
4496 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4497 
4498 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4499 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4500 }
4501 
4502 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4503 	struct caam_hash_template *template, bool keyed)
4504 {
4505 	struct caam_hash_alg *t_alg;
4506 	struct ahash_alg *halg;
4507 	struct crypto_alg *alg;
4508 
4509 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4510 	if (!t_alg)
4511 		return ERR_PTR(-ENOMEM);
4512 
4513 	t_alg->ahash_alg = template->template_ahash;
4514 	halg = &t_alg->ahash_alg;
4515 	alg = &halg->halg.base;
4516 
4517 	if (keyed) {
4518 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4519 			 template->hmac_name);
4520 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4521 			 template->hmac_driver_name);
4522 	} else {
4523 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4524 			 template->name);
4525 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4526 			 template->driver_name);
4527 		t_alg->ahash_alg.setkey = NULL;
4528 	}
4529 	alg->cra_module = THIS_MODULE;
4530 	alg->cra_init = caam_hash_cra_init;
4531 	alg->cra_exit = caam_hash_cra_exit;
4532 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4533 	alg->cra_priority = CAAM_CRA_PRIORITY;
4534 	alg->cra_blocksize = template->blocksize;
4535 	alg->cra_alignmask = 0;
4536 	alg->cra_flags = CRYPTO_ALG_ASYNC;
4537 
4538 	t_alg->alg_type = template->alg_type;
4539 	t_alg->dev = dev;
4540 
4541 	return t_alg;
4542 }
4543 
4544 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4545 {
4546 	struct dpaa2_caam_priv_per_cpu *ppriv;
4547 
4548 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4549 	napi_schedule_irqoff(&ppriv->napi);
4550 }
4551 
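/*
 * For each Rx/Tx queue pair (bounded by the number of online CPUs), register
 * a FQDAN notification callback on an affine DPIO and create a dequeue store
 * for pulling responses.
 */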
4552 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4553 {
4554 	struct device *dev = priv->dev;
4555 	struct dpaa2_io_notification_ctx *nctx;
4556 	struct dpaa2_caam_priv_per_cpu *ppriv;
4557 	int err, i = 0, cpu;
4558 
4559 	for_each_online_cpu(cpu) {
4560 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4561 		ppriv->priv = priv;
4562 		nctx = &ppriv->nctx;
4563 		nctx->is_cdan = 0;
4564 		nctx->id = ppriv->rsp_fqid;
4565 		nctx->desired_cpu = cpu;
4566 		nctx->cb = dpaa2_caam_fqdan_cb;
4567 
4568 		/* Register notification callbacks */
4569 		ppriv->dpio = dpaa2_io_service_select(cpu);
4570 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4571 		if (unlikely(err)) {
4572 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4573 			nctx->cb = NULL;
4574 			/*
4575 			 * If no affine DPIO for this core, there's probably
4576 			 * none available for next cores either. Signal we want
4577 			 * to retry later, in case the DPIO devices weren't
4578 			 * probed yet.
4579 			 */
4580 			err = -EPROBE_DEFER;
4581 			goto err;
4582 		}
4583 
4584 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4585 						     dev);
4586 		if (unlikely(!ppriv->store)) {
4587 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4588 			err = -ENOMEM;
4589 			goto err;
4590 		}
4591 
4592 		if (++i == priv->num_pairs)
4593 			break;
4594 	}
4595 
4596 	return 0;
4597 
4598 err:
4599 	for_each_online_cpu(cpu) {
4600 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4601 		if (!ppriv->nctx.cb)
4602 			break;
4603 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4604 	}
4605 
4606 	for_each_online_cpu(cpu) {
4607 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4608 		if (!ppriv->store)
4609 			break;
4610 		dpaa2_io_store_destroy(ppriv->store);
4611 	}
4612 
4613 	return err;
4614 }
4615 
4616 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4617 {
4618 	struct dpaa2_caam_priv_per_cpu *ppriv;
4619 	int i = 0, cpu;
4620 
4621 	for_each_online_cpu(cpu) {
4622 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4623 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4624 					    priv->dev);
4625 		dpaa2_io_store_destroy(ppriv->store);
4626 
4627 		if (++i == priv->num_pairs)
4628 			return;
4629 	}
4630 }
4631 
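/*
 * Point each DPSECI Rx queue at the DPIO affine to its CPU, so response
 * frame queue notifications are delivered to the right core.
 */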
4632 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4633 {
4634 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4635 	struct device *dev = priv->dev;
4636 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4637 	struct dpaa2_caam_priv_per_cpu *ppriv;
4638 	int err = 0, i = 0, cpu;
4639 
4640 	/* Configure Rx queues */
4641 	for_each_online_cpu(cpu) {
4642 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4643 
4644 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4645 				       DPSECI_QUEUE_OPT_USER_CTX;
4646 		rx_queue_cfg.order_preservation_en = 0;
4647 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4648 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4649 		/*
4650 		 * Rx priority (WQ) doesn't really matter, since we use
4651 		 * pull mode, i.e. volatile dequeues from specific FQs
4652 		 */
4653 		rx_queue_cfg.dest_cfg.priority = 0;
4654 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4655 
4656 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4657 					  &rx_queue_cfg);
4658 		if (err) {
4659 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4660 				err);
4661 			return err;
4662 		}
4663 
4664 		if (++i == priv->num_pairs)
4665 			break;
4666 	}
4667 
4668 	return err;
4669 }
4670 
4671 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4672 {
4673 	struct device *dev = priv->dev;
4674 
4675 	if (!priv->cscn_mem)
4676 		return;
4677 
4678 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4679 	kfree(priv->cscn_mem);
4680 }
4681 
4682 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4683 {
4684 	struct device *dev = priv->dev;
4685 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4686 
4687 	dpaa2_dpseci_congestion_free(priv);
4688 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4689 }
4690 
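/*
 * Consume one response FD: recover the originating caam_request from
 * FD[ADDR], unmap its frame list and invoke the request callback with the
 * CAAM status carried in FD[FRC].
 */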
4691 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4692 				  const struct dpaa2_fd *fd)
4693 {
4694 	struct caam_request *req;
4695 	u32 fd_err;
4696 
4697 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4698 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4699 		return;
4700 	}
4701 
4702 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4703 	if (unlikely(fd_err))
4704 		dev_err(priv->dev, "FD error: %08x\n", fd_err);
4705 
4706 	/*
4707 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4708 	 * in FD[ERR] or FD[FRC].
4709 	 */
4710 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4711 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4712 			 DMA_BIDIRECTIONAL);
4713 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4714 }
4715 
4716 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4717 {
4718 	int err;
4719 
4720 	/* Retry while portal is busy */
4721 	do {
4722 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4723 					       ppriv->store);
4724 	} while (err == -EBUSY);
4725 
4726 	if (unlikely(err))
4727 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4728 
4729 	return err;
4730 }
4731 
4732 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4733 {
4734 	struct dpaa2_dq *dq;
4735 	int cleaned = 0, is_last;
4736 
4737 	do {
4738 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4739 		if (unlikely(!dq)) {
4740 			if (unlikely(!is_last)) {
4741 				dev_dbg(ppriv->priv->dev,
4742 					"FQ %d returned no valid frames\n",
4743 					ppriv->rsp_fqid);
4744 				/*
4745 				 * MUST retry until we get some sort of
4746 				 * valid response token (be it "empty dequeue"
4747 				 * or a valid frame).
4748 				 */
4749 				continue;
4750 			}
4751 			break;
4752 		}
4753 
4754 		/* Process FD */
4755 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4756 		cleaned++;
4757 	} while (!is_last);
4758 
4759 	return cleaned;
4760 }
4761 
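/*
 * NAPI poll callback: issue volatile dequeues on the response FQ and process
 * the stored frames until the store comes back empty or the budget is nearly
 * consumed, then re-arm the FQDAN notification.
 */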
4762 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4763 {
4764 	struct dpaa2_caam_priv_per_cpu *ppriv;
4765 	struct dpaa2_caam_priv *priv;
4766 	int err, cleaned = 0, store_cleaned;
4767 
4768 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4769 	priv = ppriv->priv;
4770 
4771 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4772 		return 0;
4773 
4774 	do {
4775 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4776 		cleaned += store_cleaned;
4777 
4778 		if (store_cleaned == 0 ||
4779 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4780 			break;
4781 
4782 		/* Try to dequeue some more */
4783 		err = dpaa2_caam_pull_fq(ppriv);
4784 		if (unlikely(err))
4785 			break;
4786 	} while (1);
4787 
4788 	if (cleaned < budget) {
4789 		napi_complete_done(napi, cleaned);
4790 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4791 		if (unlikely(err))
4792 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4793 				err);
4794 	}
4795 
4796 	return cleaned;
4797 }
4798 
4799 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4800 					 u16 token)
4801 {
4802 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4803 	struct device *dev = priv->dev;
4804 	int err;
4805 
4806 	/*
4807 	 * The congestion group feature is supported starting with DPSECI API
4808 	 * v5.1 and only when the object has been created with this capability.
4809 	 */
4810 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4811 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4812 		return 0;
4813 
4814 	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4815 				 GFP_KERNEL | GFP_DMA);
4816 	if (!priv->cscn_mem)
4817 		return -ENOMEM;
4818 
4819 	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4820 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4821 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4822 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4823 		dev_err(dev, "Error mapping CSCN memory area\n");
4824 		err = -ENOMEM;
4825 		goto err_dma_map;
4826 	}
4827 
4828 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4829 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4830 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4831 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4832 	cong_notif_cfg.message_iova = priv->cscn_dma;
4833 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4834 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4835 					DPSECI_CGN_MODE_COHERENT_WRITE;
4836 
4837 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4838 						 &cong_notif_cfg);
4839 	if (err) {
4840 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4841 		goto err_set_cong;
4842 	}
4843 
4844 	return 0;
4845 
4846 err_set_cong:
4847 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4848 err_dma_map:
4849 	kfree(priv->cscn_mem);
4850 
4851 	return err;
4852 }
4853 
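/*
 * Open the DPSECI object, read its attributes and queue configuration, and
 * distribute the Rx/Tx queue pairs across the online CPUs, adding a NAPI
 * instance per pair.
 */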
4854 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4855 {
4856 	struct device *dev = &ls_dev->dev;
4857 	struct dpaa2_caam_priv *priv;
4858 	struct dpaa2_caam_priv_per_cpu *ppriv;
4859 	int err, cpu;
4860 	u8 i;
4861 
4862 	priv = dev_get_drvdata(dev);
4863 
4864 	priv->dev = dev;
4865 	priv->dpsec_id = ls_dev->obj_desc.id;
4866 
4867 	/* Get a handle for the DPSECI this interface is associated with */
4868 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4869 	if (err) {
4870 		dev_err(dev, "dpseci_open() failed: %d\n", err);
4871 		goto err_open;
4872 	}
4873 
4874 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4875 				     &priv->minor_ver);
4876 	if (err) {
4877 		dev_err(dev, "dpseci_get_api_version() failed\n");
4878 		goto err_get_vers;
4879 	}
4880 
4881 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4882 
4883 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4884 				    &priv->dpseci_attr);
4885 	if (err) {
4886 		dev_err(dev, "dpseci_get_attributes() failed\n");
4887 		goto err_get_vers;
4888 	}
4889 
4890 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4891 				  &priv->sec_attr);
4892 	if (err) {
4893 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
4894 		goto err_get_vers;
4895 	}
4896 
4897 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4898 	if (err) {
4899 		dev_err(dev, "setup_congestion() failed\n");
4900 		goto err_get_vers;
4901 	}
4902 
4903 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4904 			      priv->dpseci_attr.num_tx_queues);
4905 	if (priv->num_pairs > num_online_cpus()) {
4906 		dev_warn(dev, "%d queues won't be used\n",
4907 			 priv->num_pairs - num_online_cpus());
4908 		priv->num_pairs = num_online_cpus();
4909 	}
4910 
4911 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4912 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4913 					  &priv->rx_queue_attr[i]);
4914 		if (err) {
4915 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
4916 			goto err_get_rx_queue;
4917 		}
4918 	}
4919 
4920 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4921 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4922 					  &priv->tx_queue_attr[i]);
4923 		if (err) {
4924 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
4925 			goto err_get_rx_queue;
4926 		}
4927 	}
4928 
4929 	i = 0;
4930 	for_each_online_cpu(cpu) {
4931 		u8 j;
4932 
4933 		j = i % priv->num_pairs;
4934 
4935 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4936 		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
4937 
4938 		/*
4939 		 * Allow all cores to enqueue, while only some of them
4940 		 * will take part in dequeuing.
4941 		 */
4942 		if (++i > priv->num_pairs)
4943 			continue;
4944 
4945 		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
4946 		ppriv->prio = j;
4947 
4948 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
4949 			priv->rx_queue_attr[j].fqid,
4950 			priv->tx_queue_attr[j].fqid);
4951 
4952 		ppriv->net_dev.dev = *dev;
4953 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4954 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4955 			       DPAA2_CAAM_NAPI_WEIGHT);
4956 	}
4957 
4958 	return 0;
4959 
4960 err_get_rx_queue:
4961 	dpaa2_dpseci_congestion_free(priv);
4962 err_get_vers:
4963 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4964 err_open:
4965 	return err;
4966 }
4967 
4968 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4969 {
4970 	struct device *dev = priv->dev;
4971 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4972 	struct dpaa2_caam_priv_per_cpu *ppriv;
4973 	int i;
4974 
4975 	for (i = 0; i < priv->num_pairs; i++) {
4976 		ppriv = per_cpu_ptr(priv->ppriv, i);
4977 		napi_enable(&ppriv->napi);
4978 	}
4979 
4980 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4981 }
4982 
4983 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4984 {
4985 	struct device *dev = priv->dev;
4986 	struct dpaa2_caam_priv_per_cpu *ppriv;
4987 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4988 	int i, err = 0, enabled;
4989 
4990 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4991 	if (err) {
4992 		dev_err(dev, "dpseci_disable() failed\n");
4993 		return err;
4994 	}
4995 
4996 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4997 	if (err) {
4998 		dev_err(dev, "dpseci_is_enabled() failed\n");
4999 		return err;
5000 	}
5001 
5002 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5003 
5004 	for (i = 0; i < priv->num_pairs; i++) {
5005 		ppriv = per_cpu_ptr(priv->ppriv, i);
5006 		napi_disable(&ppriv->napi);
5007 		netif_napi_del(&ppriv->napi);
5008 	}
5009 
5010 	return 0;
5011 }
5012 
5013 static struct list_head hash_list;
5014 
5015 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5016 {
5017 	struct device *dev;
5018 	struct dpaa2_caam_priv *priv;
5019 	int i, err = 0;
5020 	bool registered = false;
5021 
5022 	/*
5023 	 * There is no way to get CAAM endianness - there is no direct register
5024 	 * space access and MC f/w does not provide this attribute.
5025 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5026 	 * property.
5027 	 */
5028 	caam_little_end = true;
5029 
5030 	caam_imx = false;
5031 
5032 	dev = &dpseci_dev->dev;
5033 
5034 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5035 	if (!priv)
5036 		return -ENOMEM;
5037 
5038 	dev_set_drvdata(dev, priv);
5039 
5040 	priv->domain = iommu_get_domain_for_dev(dev);
5041 
5042 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5043 				     0, SLAB_CACHE_DMA, NULL);
5044 	if (!qi_cache) {
5045 		dev_err(dev, "Can't allocate SEC cache\n");
5046 		return -ENOMEM;
5047 	}
5048 
5049 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5050 	if (err) {
5051 		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5052 		goto err_dma_mask;
5053 	}
5054 
5055 	/* Obtain a MC portal */
5056 	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5057 	if (err) {
5058 		if (err == -ENXIO)
5059 			err = -EPROBE_DEFER;
5060 		else
5061 			dev_err(dev, "MC portal allocation failed\n");
5062 
5063 		goto err_dma_mask;
5064 	}
5065 
5066 	priv->ppriv = alloc_percpu(*priv->ppriv);
5067 	if (!priv->ppriv) {
5068 		dev_err(dev, "alloc_percpu() failed\n");
5069 		err = -ENOMEM;
5070 		goto err_alloc_ppriv;
5071 	}
5072 
5073 	/* DPSECI initialization */
5074 	err = dpaa2_dpseci_setup(dpseci_dev);
5075 	if (err) {
5076 		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5077 		goto err_dpseci_setup;
5078 	}
5079 
5080 	/* DPIO */
5081 	err = dpaa2_dpseci_dpio_setup(priv);
5082 	if (err) {
5083 		if (err != -EPROBE_DEFER)
5084 			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5085 		goto err_dpio_setup;
5086 	}
5087 
5088 	/* DPSECI binding to DPIO */
5089 	err = dpaa2_dpseci_bind(priv);
5090 	if (err) {
5091 		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5092 		goto err_bind;
5093 	}
5094 
5095 	/* DPSECI enable */
5096 	err = dpaa2_dpseci_enable(priv);
5097 	if (err) {
5098 		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5099 		goto err_bind;
5100 	}
5101 
5102 	dpaa2_dpseci_debugfs_init(priv);
5103 
5104 	/* register crypto algorithms the device supports */
5105 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5106 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5107 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5108 
5109 		/* Skip DES algorithms if not supported by device */
5110 		if (!priv->sec_attr.des_acc_num &&
5111 		    (alg_sel == OP_ALG_ALGSEL_3DES ||
5112 		     alg_sel == OP_ALG_ALGSEL_DES))
5113 			continue;
5114 
5115 		/* Skip AES algorithms if not supported by device */
5116 		if (!priv->sec_attr.aes_acc_num &&
5117 		    alg_sel == OP_ALG_ALGSEL_AES)
5118 			continue;
5119 
5120 		/* Skip CHACHA20 algorithms if not supported by device */
5121 		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5122 		    !priv->sec_attr.ccha_acc_num)
5123 			continue;
5124 
5125 		t_alg->caam.dev = dev;
5126 		caam_skcipher_alg_init(t_alg);
5127 
5128 		err = crypto_register_skcipher(&t_alg->skcipher);
5129 		if (err) {
5130 			dev_warn(dev, "%s alg registration failed: %d\n",
5131 				 t_alg->skcipher.base.cra_driver_name, err);
5132 			continue;
5133 		}
5134 
5135 		t_alg->registered = true;
5136 		registered = true;
5137 	}
5138 
5139 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5140 		struct caam_aead_alg *t_alg = driver_aeads + i;
5141 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5142 				 OP_ALG_ALGSEL_MASK;
5143 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5144 				 OP_ALG_ALGSEL_MASK;
5145 
5146 		/* Skip DES algorithms if not supported by device */
5147 		if (!priv->sec_attr.des_acc_num &&
5148 		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5149 		     c1_alg_sel == OP_ALG_ALGSEL_DES))
5150 			continue;
5151 
5152 		/* Skip AES algorithms if not supported by device */
5153 		if (!priv->sec_attr.aes_acc_num &&
5154 		    c1_alg_sel == OP_ALG_ALGSEL_AES)
5155 			continue;
5156 
5157 		/* Skip CHACHA20 algorithms if not supported by device */
5158 		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5159 		    !priv->sec_attr.ccha_acc_num)
5160 			continue;
5161 
5162 		/* Skip POLY1305 algorithms if not supported by device */
5163 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5164 		    !priv->sec_attr.ptha_acc_num)
5165 			continue;
5166 
5167 		/*
5168 		 * Skip algorithms requiring message digests
5169 		 * if MD not supported by device.
5170 		 */
5171 		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5172 		    !priv->sec_attr.md_acc_num)
5173 			continue;
5174 
5175 		t_alg->caam.dev = dev;
5176 		caam_aead_alg_init(t_alg);
5177 
5178 		err = crypto_register_aead(&t_alg->aead);
5179 		if (err) {
5180 			dev_warn(dev, "%s alg registration failed: %d\n",
5181 				 t_alg->aead.base.cra_driver_name, err);
5182 			continue;
5183 		}
5184 
5185 		t_alg->registered = true;
5186 		registered = true;
5187 	}
5188 	if (registered)
5189 		dev_info(dev, "algorithms registered in /proc/crypto\n");
5190 
5191 	/* register hash algorithms the device supports */
5192 	INIT_LIST_HEAD(&hash_list);
5193 
5194 	/*
5195 	 * Skip registration of any hashing algorithms if MD block
5196 	 * is not present.
5197 	 */
5198 	if (!priv->sec_attr.md_acc_num)
5199 		return 0;
5200 
5201 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5202 		struct caam_hash_alg *t_alg;
5203 		struct caam_hash_template *alg = driver_hash + i;
5204 
5205 		/* register hmac version */
5206 		t_alg = caam_hash_alloc(dev, alg, true);
5207 		if (IS_ERR(t_alg)) {
5208 			err = PTR_ERR(t_alg);
5209 			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5210 				 alg->driver_name, err);
5211 			continue;
5212 		}
5213 
5214 		err = crypto_register_ahash(&t_alg->ahash_alg);
5215 		if (err) {
5216 			dev_warn(dev, "%s alg registration failed: %d\n",
5217 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5218 				 err);
5219 			kfree(t_alg);
5220 		} else {
5221 			list_add_tail(&t_alg->entry, &hash_list);
5222 		}
5223 
5224 		/* register unkeyed version */
5225 		t_alg = caam_hash_alloc(dev, alg, false);
5226 		if (IS_ERR(t_alg)) {
5227 			err = PTR_ERR(t_alg);
5228 			dev_warn(dev, "%s alg allocation failed: %d\n",
5229 				 alg->driver_name, err);
5230 			continue;
5231 		}
5232 
5233 		err = crypto_register_ahash(&t_alg->ahash_alg);
5234 		if (err) {
5235 			dev_warn(dev, "%s alg registration failed: %d\n",
5236 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5237 				 err);
5238 			kfree(t_alg);
5239 		} else {
5240 			list_add_tail(&t_alg->entry, &hash_list);
5241 		}
5242 	}
5243 	if (!list_empty(&hash_list))
5244 		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5245 
5246 	return err;
5247 
5248 err_bind:
5249 	dpaa2_dpseci_dpio_free(priv);
5250 err_dpio_setup:
5251 	dpaa2_dpseci_free(priv);
5252 err_dpseci_setup:
5253 	free_percpu(priv->ppriv);
5254 err_alloc_ppriv:
5255 	fsl_mc_portal_free(priv->mc_io);
5256 err_dma_mask:
5257 	kmem_cache_destroy(qi_cache);
5258 
5259 	return err;
5260 }
5261 
5262 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5263 {
5264 	struct device *dev;
5265 	struct dpaa2_caam_priv *priv;
5266 	int i;
5267 
5268 	dev = &ls_dev->dev;
5269 	priv = dev_get_drvdata(dev);
5270 
5271 	dpaa2_dpseci_debugfs_exit(priv);
5272 
5273 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5274 		struct caam_aead_alg *t_alg = driver_aeads + i;
5275 
5276 		if (t_alg->registered)
5277 			crypto_unregister_aead(&t_alg->aead);
5278 	}
5279 
5280 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5281 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5282 
5283 		if (t_alg->registered)
5284 			crypto_unregister_skcipher(&t_alg->skcipher);
5285 	}
5286 
5287 	if (hash_list.next) {
5288 		struct caam_hash_alg *t_hash_alg, *p;
5289 
5290 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5291 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5292 			list_del(&t_hash_alg->entry);
5293 			kfree(t_hash_alg);
5294 		}
5295 	}
5296 
5297 	dpaa2_dpseci_disable(priv);
5298 	dpaa2_dpseci_dpio_free(priv);
5299 	dpaa2_dpseci_free(priv);
5300 	free_percpu(priv->ppriv);
5301 	fsl_mc_portal_free(priv->mc_io);
5302 	kmem_cache_destroy(qi_cache);
5303 
5304 	return 0;
5305 }
5306 
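/*
 * Submission helper used by all algorithms in this file. A typical caller
 * (sketch, mirroring e.g. ahash_digest() above; src_dma/src_len stand in for
 * whatever the caller mapped) fills both frame list entries and the dispatch
 * fields before enqueuing:
 *
 *	dpaa2_fl_set_format(&req_ctx->fd_flt[1], dpaa2_fl_single);
 *	dpaa2_fl_set_addr(&req_ctx->fd_flt[1], src_dma);
 *	dpaa2_fl_set_len(&req_ctx->fd_flt[1], src_len);
 *	...
 *	req_ctx->flc = &ctx->flc[DIGEST];
 *	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
 *	req_ctx->cbk = ahash_done;
 *	req_ctx->ctx = &req->base;
 *	req_ctx->edesc = edesc;
 *	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
 *
 * and propagates -EINPROGRESS (or -EBUSY when CRYPTO_TFM_REQ_MAY_BACKLOG is
 * set) back to the crypto API as a successful submission.
 */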
5307 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5308 {
5309 	struct dpaa2_fd fd;
5310 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5311 	struct dpaa2_caam_priv_per_cpu *ppriv;
5312 	int err = 0, i;
5313 
5314 	if (IS_ERR(req))
5315 		return PTR_ERR(req);
5316 
5317 	if (priv->cscn_mem) {
5318 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5319 					DPAA2_CSCN_SIZE,
5320 					DMA_FROM_DEVICE);
5321 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5322 			dev_dbg_ratelimited(dev, "Dropping request\n");
5323 			return -EBUSY;
5324 		}
5325 	}
5326 
5327 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5328 
5329 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5330 					 DMA_BIDIRECTIONAL);
5331 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5332 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5333 		return -EIO;	/* nothing mapped yet, skip the unmap in err_out */
5334 	}
5335 
5336 	memset(&fd, 0, sizeof(fd));
5337 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5338 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5339 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5340 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5341 
5342 	ppriv = this_cpu_ptr(priv->ppriv);
5343 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5344 		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5345 						  &fd);
5346 		if (err != -EBUSY)
5347 			break;
5348 
5349 		cpu_relax();
5350 	}
5351 
5352 	if (unlikely(err)) {
5353 		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5354 		goto err_out;
5355 	}
5356 
5357 	return -EINPROGRESS;
5358 
5359 err_out:
5360 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5361 			 DMA_BIDIRECTIONAL);
5362 	return -EIO;
5363 }
5364 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5365 
5366 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5367 	{
5368 		.vendor = FSL_MC_VENDOR_FREESCALE,
5369 		.obj_type = "dpseci",
5370 	},
5371 	{ .vendor = 0x0 }
5372 };
5373 
5374 static struct fsl_mc_driver dpaa2_caam_driver = {
5375 	.driver = {
5376 		.name		= KBUILD_MODNAME,
5377 		.owner		= THIS_MODULE,
5378 	},
5379 	.probe		= dpaa2_caam_probe,
5380 	.remove		= dpaa2_caam_remove,
5381 	.match_id_table = dpaa2_caam_match_id_table
5382 };
5383 
5384 MODULE_LICENSE("Dual BSD/GPL");
5385 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5386 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5387 
5388 module_fsl_mc_driver(dpaa2_caam_driver);
5389