1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * Copyright 2015-2016 Freescale Semiconductor Inc.
4  * Copyright 2017-2019 NXP
5  */
6 
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include "dpseci-debugfs.h"
19 #include <linux/dma-mapping.h>
20 #include <linux/fsl/mc.h>
21 #include <linux/kernel.h>
22 #include <soc/fsl/dpaa2-io.h>
23 #include <soc/fsl/dpaa2-fd.h>
24 #include <crypto/xts.h>
25 #include <linux/unaligned.h>
26 
27 #define CAAM_CRA_PRIORITY	2000
28 
29 /* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
30 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
31 				 SHA512_DIGEST_SIZE * 2)
32 
33 /*
34  * This is a cache of buffers, from which the users of CAAM QI driver
35  * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
36  * NOTE: A more elegant solution would be to have some headroom in the frames
37  *       being processed. This can be added by the dpaa2-eth driver. This would
38  *       pose a problem for userspace application processing which cannot
39  *       know of this limitation. So for now, this will work.
40  * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
41  */
42 static struct kmem_cache *qi_cache;
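/*
 * Illustrative sketch (not part of this file): the cache is created during
 * driver probe and destroyed on removal. Assuming no special alignment or
 * constructor requirements, the pairing looks roughly like:
 *
 *	qi_cache = kmem_cache_create("dpaa2_caamqicache",
 *				     CAAM_QI_MEMCACHE_SIZE, 0, 0, NULL);
 *	if (!qi_cache)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_destroy(qi_cache);
 */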
43 
44 struct caam_alg_entry {
45 	struct device *dev;
46 	int class1_alg_type;
47 	int class2_alg_type;
48 	bool rfc3686;
49 	bool geniv;
50 	bool nodkp;
51 };
52 
53 struct caam_aead_alg {
54 	struct aead_alg aead;
55 	struct caam_alg_entry caam;
56 	bool registered;
57 };
58 
59 struct caam_skcipher_alg {
60 	struct skcipher_alg skcipher;
61 	struct caam_alg_entry caam;
62 	bool registered;
63 };
64 
65 /**
66  * struct caam_ctx - per-session context
67  * @flc: Flow Contexts array
68  * @key:  [authentication key], encryption key
69  * @flc_dma: I/O virtual addresses of the Flow Contexts
70  * @key_dma: I/O virtual address of the key
71  * @dir: DMA direction for mapping key and Flow Contexts
72  * @dev: dpseci device
73  * @adata: authentication algorithm details
74  * @cdata: encryption algorithm details
75  * @authsize: authentication tag (a.k.a. ICV / MAC) size
76  * @xts_key_fallback: true if fallback tfm needs to be used due
77  *		      to unsupported xts key lengths
78  * @fallback: xts fallback tfm
79  */
80 struct caam_ctx {
81 	struct caam_flc flc[NUM_OP];
82 	u8 key[CAAM_MAX_KEY_SIZE];
83 	dma_addr_t flc_dma[NUM_OP];
84 	dma_addr_t key_dma;
85 	enum dma_data_direction dir;
86 	struct device *dev;
87 	struct alginfo adata;
88 	struct alginfo cdata;
89 	unsigned int authsize;
90 	bool xts_key_fallback;
91 	struct crypto_skcipher *fallback;
92 };
93 
94 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
95 				     dma_addr_t iova_addr)
96 {
97 	phys_addr_t phys_addr;
98 
99 	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
100 				   iova_addr;
101 
102 	return phys_to_virt(phys_addr);
103 }
104 
105 /*
106  * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
107  *
108  * Allocate data on the hotpath. Instead of using kzalloc, one can use the
109  * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
110  * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
111  * hosting 16 SG entries.
112  *
113  * @flags - flags that would be used for the equivalent kmalloc(..) call
114  *
115  * Returns a pointer to a retrieved buffer on success or NULL on failure.
116  */
117 static inline void *qi_cache_zalloc(gfp_t flags)
118 {
119 	return kmem_cache_zalloc(qi_cache, flags);
120 }
121 
122 /*
123  * qi_cache_free - Frees buffers allocated from CAAM-QI cache
124  *
125  * @obj - buffer previously allocated by qi_cache_zalloc
126  *
127  * No checking is being done, the call is a passthrough call to
128  * kmem_cache_free(...)
129  */
130 static inline void qi_cache_free(void *obj)
131 {
132 	kmem_cache_free(qi_cache, obj);
133 }
134 
135 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
136 {
137 	switch (crypto_tfm_alg_type(areq->tfm)) {
138 	case CRYPTO_ALG_TYPE_SKCIPHER:
139 		return skcipher_request_ctx_dma(skcipher_request_cast(areq));
140 	case CRYPTO_ALG_TYPE_AEAD:
141 		return aead_request_ctx_dma(
142 			container_of(areq, struct aead_request, base));
143 	case CRYPTO_ALG_TYPE_AHASH:
144 		return ahash_request_ctx_dma(ahash_request_cast(areq));
145 	default:
146 		return ERR_PTR(-EINVAL);
147 	}
148 }
149 
150 static void caam_unmap(struct device *dev, struct scatterlist *src,
151 		       struct scatterlist *dst, int src_nents,
152 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
153 		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
154 		       int qm_sg_bytes)
155 {
156 	if (dst != src) {
157 		if (src_nents)
158 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
159 		if (dst_nents)
160 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
161 	} else {
162 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
163 	}
164 
165 	if (iv_dma)
166 		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
167 
168 	if (qm_sg_bytes)
169 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
170 }
171 
172 static int aead_set_sh_desc(struct crypto_aead *aead)
173 {
174 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
175 						 typeof(*alg), aead);
176 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
177 	unsigned int ivsize = crypto_aead_ivsize(aead);
178 	struct device *dev = ctx->dev;
179 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
180 	struct caam_flc *flc;
181 	u32 *desc;
182 	u32 ctx1_iv_off = 0;
183 	u32 *nonce = NULL;
184 	unsigned int data_len[2];
185 	u32 inl_mask;
186 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
187 			       OP_ALG_AAI_CTR_MOD128);
188 	const bool is_rfc3686 = alg->caam.rfc3686;
189 
190 	if (!ctx->cdata.keylen || !ctx->authsize)
191 		return 0;
192 
193 	/*
194 	 * AES-CTR needs to load IV in CONTEXT1 reg
195 	 * at an offset of 128 bits (16 bytes)
196 	 * CONTEXT1[255:128] = IV
197 	 */
198 	if (ctr_mode)
199 		ctx1_iv_off = 16;
200 
201 	/*
202 	 * RFC3686 specific:
203 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
204 	 */
205 	if (is_rfc3686) {
206 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
207 		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
208 				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
209 	}
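	/*
	 * Worked example of the resulting ctx->key layout, assuming an
	 * authenc(hmac(sha256),rfc3686(ctr(aes))) transform with AES-128:
	 *
	 *	[ split auth key, padded to adata.keylen_pad bytes ]
	 *	[ 16-byte AES key | 4-byte nonce ]	(cdata.keylen = 20)
	 *
	 * so the nonce pointer computed above lands on the last
	 * CTR_RFC3686_NONCE_SIZE bytes of the encryption key blob.
	 */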
210 
211 	/*
212 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
213 	 * in invalid opcodes (last bytes of user key) in the resulting
214 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
215 	 * addresses are needed.
216 	 */
217 	ctx->adata.key_virt = ctx->key;
218 	ctx->adata.key_dma = ctx->key_dma;
219 
220 	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
221 	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
222 
223 	data_len[0] = ctx->adata.keylen_pad;
224 	data_len[1] = ctx->cdata.keylen;
225 
226 	/* aead_encrypt shared descriptor */
227 	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
228 						 DESC_QI_AEAD_ENC_LEN) +
229 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
230 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
231 			      ARRAY_SIZE(data_len)) < 0)
232 		return -EINVAL;
233 
234 	ctx->adata.key_inline = !!(inl_mask & 1);
235 	ctx->cdata.key_inline = !!(inl_mask & 2);
236 
237 	flc = &ctx->flc[ENCRYPT];
238 	desc = flc->sh_desc;
239 
240 	if (alg->caam.geniv)
241 		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
242 					  ivsize, ctx->authsize, is_rfc3686,
243 					  nonce, ctx1_iv_off, true,
244 					  priv->sec_attr.era);
245 	else
246 		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
247 				       ivsize, ctx->authsize, is_rfc3686, nonce,
248 				       ctx1_iv_off, true, priv->sec_attr.era);
249 
250 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
251 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
252 				   sizeof(flc->flc) + desc_bytes(desc),
253 				   ctx->dir);
254 
255 	/* aead_decrypt shared descriptor */
256 	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
257 			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
258 			      DESC_JOB_IO_LEN, data_len, &inl_mask,
259 			      ARRAY_SIZE(data_len)) < 0)
260 		return -EINVAL;
261 
262 	ctx->adata.key_inline = !!(inl_mask & 1);
263 	ctx->cdata.key_inline = !!(inl_mask & 2);
264 
265 	flc = &ctx->flc[DECRYPT];
266 	desc = flc->sh_desc;
267 	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
268 			       ivsize, ctx->authsize, alg->caam.geniv,
269 			       is_rfc3686, nonce, ctx1_iv_off, true,
270 			       priv->sec_attr.era);
271 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
272 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
273 				   sizeof(flc->flc) + desc_bytes(desc),
274 				   ctx->dir);
275 
276 	return 0;
277 }
278 
279 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
280 {
281 	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
282 
283 	ctx->authsize = authsize;
284 	aead_set_sh_desc(authenc);
285 
286 	return 0;
287 }
288 
289 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
290 		       unsigned int keylen)
291 {
292 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
293 	struct device *dev = ctx->dev;
294 	struct crypto_authenc_keys keys;
295 
296 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
297 		goto badkey;
298 
299 	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
300 		keys.authkeylen + keys.enckeylen, keys.enckeylen,
301 		keys.authkeylen);
302 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
303 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
304 
305 	ctx->adata.keylen = keys.authkeylen;
306 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
307 					      OP_ALG_ALGSEL_MASK);
308 
309 	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
310 		goto badkey;
311 
312 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
313 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
314 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
315 				   keys.enckeylen, ctx->dir);
316 	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
317 			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
318 			     ctx->adata.keylen_pad + keys.enckeylen, 1);
319 
320 	ctx->cdata.keylen = keys.enckeylen;
321 
322 	memzero_explicit(&keys, sizeof(keys));
323 	return aead_set_sh_desc(aead);
324 badkey:
325 	memzero_explicit(&keys, sizeof(keys));
326 	return -EINVAL;
327 }
328 
329 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
330 			    unsigned int keylen)
331 {
332 	struct crypto_authenc_keys keys;
333 	int err;
334 
335 	err = crypto_authenc_extractkeys(&keys, key, keylen);
336 	if (unlikely(err))
337 		goto out;
338 
339 	err = -EINVAL;
340 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
341 		goto out;
342 
343 	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
344 	      aead_setkey(aead, key, keylen);
345 
346 out:
347 	memzero_explicit(&keys, sizeof(keys));
348 	return err;
349 }
350 
351 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
352 					   bool encrypt)
353 {
354 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
355 	struct caam_request *req_ctx = aead_request_ctx_dma(req);
356 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
357 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
358 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
359 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
360 						 typeof(*alg), aead);
361 	struct device *dev = ctx->dev;
362 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
363 		      GFP_KERNEL : GFP_ATOMIC;
364 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
365 	int src_len, dst_len = 0;
366 	struct aead_edesc *edesc;
367 	dma_addr_t qm_sg_dma, iv_dma = 0;
368 	int ivsize = 0;
369 	unsigned int authsize = ctx->authsize;
370 	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
371 	int in_len, out_len;
372 	struct dpaa2_sg_entry *sg_table;
373 
374 	/* allocate space for base edesc, link tables and IV */
375 	edesc = qi_cache_zalloc(flags);
376 	if (unlikely(!edesc)) {
377 		dev_err(dev, "could not allocate extended descriptor\n");
378 		return ERR_PTR(-ENOMEM);
379 	}
380 
381 	if (unlikely(req->dst != req->src)) {
382 		src_len = req->assoclen + req->cryptlen;
383 		dst_len = src_len + (encrypt ? authsize : (-authsize));
384 
385 		src_nents = sg_nents_for_len(req->src, src_len);
386 		if (unlikely(src_nents < 0)) {
387 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
388 				src_len);
389 			qi_cache_free(edesc);
390 			return ERR_PTR(src_nents);
391 		}
392 
393 		dst_nents = sg_nents_for_len(req->dst, dst_len);
394 		if (unlikely(dst_nents < 0)) {
395 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
396 				dst_len);
397 			qi_cache_free(edesc);
398 			return ERR_PTR(dst_nents);
399 		}
400 
401 		if (src_nents) {
402 			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
403 						      DMA_TO_DEVICE);
404 			if (unlikely(!mapped_src_nents)) {
405 				dev_err(dev, "unable to map source\n");
406 				qi_cache_free(edesc);
407 				return ERR_PTR(-ENOMEM);
408 			}
409 		} else {
410 			mapped_src_nents = 0;
411 		}
412 
413 		if (dst_nents) {
414 			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
415 						      DMA_FROM_DEVICE);
416 			if (unlikely(!mapped_dst_nents)) {
417 				dev_err(dev, "unable to map destination\n");
418 				dma_unmap_sg(dev, req->src, src_nents,
419 					     DMA_TO_DEVICE);
420 				qi_cache_free(edesc);
421 				return ERR_PTR(-ENOMEM);
422 			}
423 		} else {
424 			mapped_dst_nents = 0;
425 		}
426 	} else {
427 		src_len = req->assoclen + req->cryptlen +
428 			  (encrypt ? authsize : 0);
429 
430 		src_nents = sg_nents_for_len(req->src, src_len);
431 		if (unlikely(src_nents < 0)) {
432 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
433 				src_len);
434 			qi_cache_free(edesc);
435 			return ERR_PTR(src_nents);
436 		}
437 
438 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
439 					      DMA_BIDIRECTIONAL);
440 		if (unlikely(!mapped_src_nents)) {
441 			dev_err(dev, "unable to map source\n");
442 			qi_cache_free(edesc);
443 			return ERR_PTR(-ENOMEM);
444 		}
445 	}
446 
447 	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
448 		ivsize = crypto_aead_ivsize(aead);
449 
450 	/*
451 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
452 	 * Input is not contiguous.
453 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
454 	 * the end of the table by allocating more S/G entries. Logic:
455 	 * if (src != dst && output S/G)
456 	 *      pad output S/G, if needed
457 	 * else if (src == dst && S/G)
458 	 *      overlapping S/Gs; pad one of them
459 	 * else if (input S/G) ...
460 	 *      pad input S/G, if needed
461 	 */
462 	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
463 	if (mapped_dst_nents > 1)
464 		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
465 	else if ((req->src == req->dst) && (mapped_src_nents > 1))
466 		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
467 				  1 + !!ivsize +
468 				  pad_sg_nents(mapped_src_nents));
469 	else
470 		qm_sg_nents = pad_sg_nents(qm_sg_nents);
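	/*
	 * Worked example: pad_sg_nents() rounds the count up to a multiple
	 * of 4, the size of the HW read burst noted above. For src != dst
	 * with mapped_src_nents = 3, mapped_dst_nents = 1 and an IV present:
	 * qm_sg_nents = 1 (assoclen) + 1 (IV) + 3 (src) = 5, padded to 8,
	 * so a 4-entry burst can never read past the end of the table.
	 */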
471 
472 	sg_table = &edesc->sgt[0];
473 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
474 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
475 		     CAAM_QI_MEMCACHE_SIZE)) {
476 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
477 			qm_sg_nents, ivsize);
478 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
479 			   0, DMA_NONE, 0, 0);
480 		qi_cache_free(edesc);
481 		return ERR_PTR(-ENOMEM);
482 	}
483 
484 	if (ivsize) {
485 		u8 *iv = (u8 *)(sg_table + qm_sg_nents);
486 
487 		/* Make sure IV is located in a DMAable area */
488 		memcpy(iv, req->iv, ivsize);
489 
490 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
491 		if (dma_mapping_error(dev, iv_dma)) {
492 			dev_err(dev, "unable to map IV\n");
493 			caam_unmap(dev, req->src, req->dst, src_nents,
494 				   dst_nents, 0, 0, DMA_NONE, 0, 0);
495 			qi_cache_free(edesc);
496 			return ERR_PTR(-ENOMEM);
497 		}
498 	}
499 
500 	edesc->src_nents = src_nents;
501 	edesc->dst_nents = dst_nents;
502 	edesc->iv_dma = iv_dma;
503 
504 	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
505 	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
506 		/*
507 		 * The associated data comes already with the IV but we need
508 		 * to skip it when we authenticate or encrypt...
509 		 */
510 		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
511 	else
512 		edesc->assoclen = cpu_to_caam32(req->assoclen);
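	/*
	 * Worked example: for rfc7539esp(chacha20,poly1305), ivsize = 8
	 * while CHACHAPOLY_IV_SIZE = 12, so with req->assoclen = 16
	 * (SPI + sequence number followed by the 8-byte IV) the engine
	 * is handed only 16 - 8 = 8 bytes of associated data.
	 */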
513 	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
514 					     DMA_TO_DEVICE);
515 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
516 		dev_err(dev, "unable to map assoclen\n");
517 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
518 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
519 		qi_cache_free(edesc);
520 		return ERR_PTR(-ENOMEM);
521 	}
522 
523 	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
524 	qm_sg_index++;
525 	if (ivsize) {
526 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
527 		qm_sg_index++;
528 	}
529 	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
530 	qm_sg_index += mapped_src_nents;
531 
532 	if (mapped_dst_nents > 1)
533 		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
534 
535 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
536 	if (dma_mapping_error(dev, qm_sg_dma)) {
537 		dev_err(dev, "unable to map S/G table\n");
538 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
539 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
540 			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
541 		qi_cache_free(edesc);
542 		return ERR_PTR(-ENOMEM);
543 	}
544 
545 	edesc->qm_sg_dma = qm_sg_dma;
546 	edesc->qm_sg_bytes = qm_sg_bytes;
547 
548 	out_len = req->assoclen + req->cryptlen +
549 		  (encrypt ? ctx->authsize : (-ctx->authsize));
550 	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
551 
552 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
553 	dpaa2_fl_set_final(in_fle, true);
554 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
555 	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
556 	dpaa2_fl_set_len(in_fle, in_len);
557 
558 	if (req->dst == req->src) {
559 		if (mapped_src_nents == 1) {
560 			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
561 			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
562 		} else {
563 			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
564 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
565 					  (1 + !!ivsize) * sizeof(*sg_table));
566 		}
567 	} else if (!mapped_dst_nents) {
568 		/*
569 		 * The crypto engine requires the output entry to be present
570 		 * when a "frame list" FD is used. Since the engine does not
571 		 * support FMT=2'b11 (unused entry type),
572 		 * leaving out_fle zeroized is the best option.
573 		 */
574 		goto skip_out_fle;
575 	} else if (mapped_dst_nents == 1) {
576 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
577 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
578 	} else {
579 		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
580 		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
581 				  sizeof(*sg_table));
582 	}
583 
584 	dpaa2_fl_set_len(out_fle, out_len);
585 
586 skip_out_fle:
587 	return edesc;
588 }
589 
590 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
591 {
592 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
593 	unsigned int ivsize = crypto_aead_ivsize(aead);
594 	struct device *dev = ctx->dev;
595 	struct caam_flc *flc;
596 	u32 *desc;
597 
598 	if (!ctx->cdata.keylen || !ctx->authsize)
599 		return 0;
600 
601 	flc = &ctx->flc[ENCRYPT];
602 	desc = flc->sh_desc;
603 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
604 			       ctx->authsize, true, true);
605 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
606 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
607 				   sizeof(flc->flc) + desc_bytes(desc),
608 				   ctx->dir);
609 
610 	flc = &ctx->flc[DECRYPT];
611 	desc = flc->sh_desc;
612 	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
613 			       ctx->authsize, false, true);
614 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
615 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
616 				   sizeof(flc->flc) + desc_bytes(desc),
617 				   ctx->dir);
618 
619 	return 0;
620 }
621 
622 static int chachapoly_setauthsize(struct crypto_aead *aead,
623 				  unsigned int authsize)
624 {
625 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
626 
627 	if (authsize != POLY1305_DIGEST_SIZE)
628 		return -EINVAL;
629 
630 	ctx->authsize = authsize;
631 	return chachapoly_set_sh_desc(aead);
632 }
633 
634 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
635 			     unsigned int keylen)
636 {
637 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
638 	unsigned int ivsize = crypto_aead_ivsize(aead);
639 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
640 
641 	if (keylen != CHACHA_KEY_SIZE + saltlen)
642 		return -EINVAL;
643 
644 	memcpy(ctx->key, key, keylen);
645 	ctx->cdata.key_virt = ctx->key;
646 	ctx->cdata.keylen = keylen - saltlen;
647 
648 	return chachapoly_set_sh_desc(aead);
649 }
650 
651 static int gcm_set_sh_desc(struct crypto_aead *aead)
652 {
653 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
654 	struct device *dev = ctx->dev;
655 	unsigned int ivsize = crypto_aead_ivsize(aead);
656 	struct caam_flc *flc;
657 	u32 *desc;
658 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
659 			ctx->cdata.keylen;
660 
661 	if (!ctx->cdata.keylen || !ctx->authsize)
662 		return 0;
663 
664 	/*
665 	 * AES GCM encrypt shared descriptor
666 	 * Job Descriptor and Shared Descriptor
667 	 * must fit into the 64-word Descriptor h/w Buffer
668 	 */
669 	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
670 		ctx->cdata.key_inline = true;
671 		ctx->cdata.key_virt = ctx->key;
672 	} else {
673 		ctx->cdata.key_inline = false;
674 		ctx->cdata.key_dma = ctx->key_dma;
675 	}
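	/*
	 * Worked example: CAAM_DESC_BYTES_MAX is 256 (64 words), so with a
	 * 32-byte AES-256 key, rem_bytes = 256 - DESC_JOB_IO_LEN - 32; the
	 * key is inlined only if that still fits DESC_QI_GCM_ENC_LEN,
	 * otherwise the descriptor references it by DMA address.
	 */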
676 
677 	flc = &ctx->flc[ENCRYPT];
678 	desc = flc->sh_desc;
679 	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
680 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
681 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
682 				   sizeof(flc->flc) + desc_bytes(desc),
683 				   ctx->dir);
684 
685 	/*
686 	 * Job Descriptor and Shared Descriptors
687 	 * must all fit into the 64-word Descriptor h/w Buffer
688 	 */
689 	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
690 		ctx->cdata.key_inline = true;
691 		ctx->cdata.key_virt = ctx->key;
692 	} else {
693 		ctx->cdata.key_inline = false;
694 		ctx->cdata.key_dma = ctx->key_dma;
695 	}
696 
697 	flc = &ctx->flc[DECRYPT];
698 	desc = flc->sh_desc;
699 	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
700 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
701 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
702 				   sizeof(flc->flc) + desc_bytes(desc),
703 				   ctx->dir);
704 
705 	return 0;
706 }
707 
708 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
709 {
710 	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
711 	int err;
712 
713 	err = crypto_gcm_check_authsize(authsize);
714 	if (err)
715 		return err;
716 
717 	ctx->authsize = authsize;
718 	gcm_set_sh_desc(authenc);
719 
720 	return 0;
721 }
722 
723 static int gcm_setkey(struct crypto_aead *aead,
724 		      const u8 *key, unsigned int keylen)
725 {
726 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
727 	struct device *dev = ctx->dev;
728 	int ret;
729 
730 	ret = aes_check_keylen(keylen);
731 	if (ret)
732 		return ret;
733 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
734 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
735 
736 	memcpy(ctx->key, key, keylen);
737 	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
738 	ctx->cdata.keylen = keylen;
739 
740 	return gcm_set_sh_desc(aead);
741 }
742 
743 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
744 {
745 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
746 	struct device *dev = ctx->dev;
747 	unsigned int ivsize = crypto_aead_ivsize(aead);
748 	struct caam_flc *flc;
749 	u32 *desc;
750 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
751 			ctx->cdata.keylen;
752 
753 	if (!ctx->cdata.keylen || !ctx->authsize)
754 		return 0;
755 
756 	ctx->cdata.key_virt = ctx->key;
757 
758 	/*
759 	 * RFC4106 encrypt shared descriptor
760 	 * Job Descriptor and Shared Descriptor
761 	 * must fit into the 64-word Descriptor h/w Buffer
762 	 */
763 	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
764 		ctx->cdata.key_inline = true;
765 	} else {
766 		ctx->cdata.key_inline = false;
767 		ctx->cdata.key_dma = ctx->key_dma;
768 	}
769 
770 	flc = &ctx->flc[ENCRYPT];
771 	desc = flc->sh_desc;
772 	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
773 				  true);
774 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
775 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
776 				   sizeof(flc->flc) + desc_bytes(desc),
777 				   ctx->dir);
778 
779 	/*
780 	 * Job Descriptor and Shared Descriptors
781 	 * must all fit into the 64-word Descriptor h/w Buffer
782 	 */
783 	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
784 		ctx->cdata.key_inline = true;
785 	} else {
786 		ctx->cdata.key_inline = false;
787 		ctx->cdata.key_dma = ctx->key_dma;
788 	}
789 
790 	flc = &ctx->flc[DECRYPT];
791 	desc = flc->sh_desc;
792 	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
793 				  true);
794 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
795 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
796 				   sizeof(flc->flc) + desc_bytes(desc),
797 				   ctx->dir);
798 
799 	return 0;
800 }
801 
802 static int rfc4106_setauthsize(struct crypto_aead *authenc,
803 			       unsigned int authsize)
804 {
805 	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
806 	int err;
807 
808 	err = crypto_rfc4106_check_authsize(authsize);
809 	if (err)
810 		return err;
811 
812 	ctx->authsize = authsize;
813 	rfc4106_set_sh_desc(authenc);
814 
815 	return 0;
816 }
817 
818 static int rfc4106_setkey(struct crypto_aead *aead,
819 			  const u8 *key, unsigned int keylen)
820 {
821 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
822 	struct device *dev = ctx->dev;
823 	int ret;
824 
825 	ret = aes_check_keylen(keylen - 4);
826 	if (ret)
827 		return ret;
828 
829 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
830 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
831 
832 	memcpy(ctx->key, key, keylen);
833 	/*
834 	 * The last four bytes of the key material are used as the salt value
835 	 * in the nonce. Update the AES key length.
836 	 */
837 	ctx->cdata.keylen = keylen - 4;
838 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
839 				   ctx->dir);
840 
841 	return rfc4106_set_sh_desc(aead);
842 }
843 
844 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
845 {
846 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
847 	struct device *dev = ctx->dev;
848 	unsigned int ivsize = crypto_aead_ivsize(aead);
849 	struct caam_flc *flc;
850 	u32 *desc;
851 	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
852 			ctx->cdata.keylen;
853 
854 	if (!ctx->cdata.keylen || !ctx->authsize)
855 		return 0;
856 
857 	ctx->cdata.key_virt = ctx->key;
858 
859 	/*
860 	 * RFC4543 encrypt shared descriptor
861 	 * Job Descriptor and Shared Descriptor
862 	 * must fit into the 64-word Descriptor h/w Buffer
863 	 */
864 	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
865 		ctx->cdata.key_inline = true;
866 	} else {
867 		ctx->cdata.key_inline = false;
868 		ctx->cdata.key_dma = ctx->key_dma;
869 	}
870 
871 	flc = &ctx->flc[ENCRYPT];
872 	desc = flc->sh_desc;
873 	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
874 				  true);
875 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
876 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
877 				   sizeof(flc->flc) + desc_bytes(desc),
878 				   ctx->dir);
879 
880 	/*
881 	 * Job Descriptor and Shared Descriptors
882 	 * must all fit into the 64-word Descriptor h/w Buffer
883 	 */
884 	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
885 		ctx->cdata.key_inline = true;
886 	} else {
887 		ctx->cdata.key_inline = false;
888 		ctx->cdata.key_dma = ctx->key_dma;
889 	}
890 
891 	flc = &ctx->flc[DECRYPT];
892 	desc = flc->sh_desc;
893 	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
894 				  true);
895 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
896 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
897 				   sizeof(flc->flc) + desc_bytes(desc),
898 				   ctx->dir);
899 
900 	return 0;
901 }
902 
903 static int rfc4543_setauthsize(struct crypto_aead *authenc,
904 			       unsigned int authsize)
905 {
906 	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
907 
908 	if (authsize != 16)
909 		return -EINVAL;
910 
911 	ctx->authsize = authsize;
912 	rfc4543_set_sh_desc(authenc);
913 
914 	return 0;
915 }
916 
917 static int rfc4543_setkey(struct crypto_aead *aead,
918 			  const u8 *key, unsigned int keylen)
919 {
920 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
921 	struct device *dev = ctx->dev;
922 	int ret;
923 
924 	ret = aes_check_keylen(keylen - 4);
925 	if (ret)
926 		return ret;
927 
928 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
929 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
930 
931 	memcpy(ctx->key, key, keylen);
932 	/*
933 	 * The last four bytes of the key material are used as the salt value
934 	 * in the nonce. Update the AES key length.
935 	 */
936 	ctx->cdata.keylen = keylen - 4;
937 	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
938 				   ctx->dir);
939 
940 	return rfc4543_set_sh_desc(aead);
941 }
942 
943 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
944 			   unsigned int keylen, const u32 ctx1_iv_off)
945 {
946 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
947 	struct caam_skcipher_alg *alg =
948 		container_of(crypto_skcipher_alg(skcipher),
949 			     struct caam_skcipher_alg, skcipher);
950 	struct device *dev = ctx->dev;
951 	struct caam_flc *flc;
952 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
953 	u32 *desc;
954 	const bool is_rfc3686 = alg->caam.rfc3686;
955 
956 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
957 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
958 
959 	ctx->cdata.keylen = keylen;
960 	ctx->cdata.key_virt = key;
961 	ctx->cdata.key_inline = true;
962 
963 	/* skcipher_encrypt shared descriptor */
964 	flc = &ctx->flc[ENCRYPT];
965 	desc = flc->sh_desc;
966 	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
967 				   ctx1_iv_off);
968 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
969 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
970 				   sizeof(flc->flc) + desc_bytes(desc),
971 				   ctx->dir);
972 
973 	/* skcipher_decrypt shared descriptor */
974 	flc = &ctx->flc[DECRYPT];
975 	desc = flc->sh_desc;
976 	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
977 				   ctx1_iv_off);
978 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
979 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
980 				   sizeof(flc->flc) + desc_bytes(desc),
981 				   ctx->dir);
982 
983 	return 0;
984 }
985 
986 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
987 			       const u8 *key, unsigned int keylen)
988 {
989 	int err;
990 
991 	err = aes_check_keylen(keylen);
992 	if (err)
993 		return err;
994 
995 	return skcipher_setkey(skcipher, key, keylen, 0);
996 }
997 
998 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
999 				   const u8 *key, unsigned int keylen)
1000 {
1001 	u32 ctx1_iv_off;
1002 	int err;
1003 
1004 	/*
1005 	 * RFC3686 specific:
1006 	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1007 	 *	| *key = {KEY, NONCE}
1008 	 */
1009 	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1010 	keylen -= CTR_RFC3686_NONCE_SIZE;
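	/*
	 * Worked example: a 20-byte rfc3686(ctr(aes)) user key is laid out
	 * as {16-byte AES-128 key, 4-byte nonce}; the nonce is stripped
	 * here so that aes_check_keylen() below sees only the 16-byte key.
	 */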
1011 
1012 	err = aes_check_keylen(keylen);
1013 	if (err)
1014 		return err;
1015 
1016 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1017 }
1018 
1019 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1020 			       const u8 *key, unsigned int keylen)
1021 {
1022 	u32 ctx1_iv_off;
1023 	int err;
1024 
1025 	/*
1026 	 * AES-CTR needs to load IV in CONTEXT1 reg
1027 	 * at an offset of 128 bits (16 bytes)
1028 	 * CONTEXT1[255:128] = IV
1029 	 */
1030 	ctx1_iv_off = 16;
1031 
1032 	err = aes_check_keylen(keylen);
1033 	if (err)
1034 		return err;
1035 
1036 	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1037 }
1038 
1039 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1040 				    const u8 *key, unsigned int keylen)
1041 {
1042 	if (keylen != CHACHA_KEY_SIZE)
1043 		return -EINVAL;
1044 
1045 	return skcipher_setkey(skcipher, key, keylen, 0);
1046 }
1047 
1048 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1049 			       const u8 *key, unsigned int keylen)
1050 {
1051 	return verify_skcipher_des_key(skcipher, key) ?:
1052 	       skcipher_setkey(skcipher, key, keylen, 0);
1053 }
1054 
1055 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1056 			        const u8 *key, unsigned int keylen)
1057 {
1058 	return verify_skcipher_des3_key(skcipher, key) ?:
1059 	       skcipher_setkey(skcipher, key, keylen, 0);
1060 }
1061 
1062 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1063 			       unsigned int keylen)
1064 {
1065 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1066 	struct device *dev = ctx->dev;
1067 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
1068 	struct caam_flc *flc;
1069 	u32 *desc;
1070 	int err;
1071 
1072 	err = xts_verify_key(skcipher, key, keylen);
1073 	if (err) {
1074 		dev_dbg(dev, "key size mismatch\n");
1075 		return err;
1076 	}
1077 
1078 	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
1079 		ctx->xts_key_fallback = true;
1080 
1081 	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
1082 		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
1083 		if (err)
1084 			return err;
1085 	}
1086 
1087 	ctx->cdata.keylen = keylen;
1088 	ctx->cdata.key_virt = key;
1089 	ctx->cdata.key_inline = true;
1090 
1091 	/* xts_skcipher_encrypt shared descriptor */
1092 	flc = &ctx->flc[ENCRYPT];
1093 	desc = flc->sh_desc;
1094 	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1095 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1096 	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1097 				   sizeof(flc->flc) + desc_bytes(desc),
1098 				   ctx->dir);
1099 
1100 	/* xts_skcipher_decrypt shared descriptor */
1101 	flc = &ctx->flc[DECRYPT];
1102 	desc = flc->sh_desc;
1103 	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1104 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1105 	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1106 				   sizeof(flc->flc) + desc_bytes(desc),
1107 				   ctx->dir);
1108 
1109 	return 0;
1110 }
1111 
1112 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1113 {
1114 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1115 	struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
1116 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1117 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1118 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1119 	struct device *dev = ctx->dev;
1120 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1121 		       GFP_KERNEL : GFP_ATOMIC;
1122 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1123 	struct skcipher_edesc *edesc;
1124 	dma_addr_t iv_dma;
1125 	u8 *iv;
1126 	int ivsize = crypto_skcipher_ivsize(skcipher);
1127 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1128 	struct dpaa2_sg_entry *sg_table;
1129 
1130 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
1131 	if (unlikely(src_nents < 0)) {
1132 		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1133 			req->cryptlen);
1134 		return ERR_PTR(src_nents);
1135 	}
1136 
1137 	if (unlikely(req->dst != req->src)) {
1138 		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1139 		if (unlikely(dst_nents < 0)) {
1140 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1141 				req->cryptlen);
1142 			return ERR_PTR(dst_nents);
1143 		}
1144 
1145 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1146 					      DMA_TO_DEVICE);
1147 		if (unlikely(!mapped_src_nents)) {
1148 			dev_err(dev, "unable to map source\n");
1149 			return ERR_PTR(-ENOMEM);
1150 		}
1151 
1152 		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1153 					      DMA_FROM_DEVICE);
1154 		if (unlikely(!mapped_dst_nents)) {
1155 			dev_err(dev, "unable to map destination\n");
1156 			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1157 			return ERR_PTR(-ENOMEM);
1158 		}
1159 	} else {
1160 		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1161 					      DMA_BIDIRECTIONAL);
1162 		if (unlikely(!mapped_src_nents)) {
1163 			dev_err(dev, "unable to map source\n");
1164 			return ERR_PTR(-ENOMEM);
1165 		}
1166 	}
1167 
1168 	qm_sg_ents = 1 + mapped_src_nents;
1169 	dst_sg_idx = qm_sg_ents;
1170 
1171 	/*
1172 	 * Input, output HW S/G tables: [IV, src][dst, IV]
1173 	 * IV entries point to the same buffer
1174 	 * If src == dst, S/G entries are reused (S/G tables overlap)
1175 	 *
1176 	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1177 	 * the end of the table by allocating more S/G entries.
1178 	 */
1179 	if (req->src != req->dst)
1180 		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1181 	else
1182 		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
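	/*
	 * Worked example: for src != dst with mapped_src_nents = 2 and
	 * mapped_dst_nents = 2, the input table is [IV, src0, src1]
	 * (dst_sg_idx = 3) and the output table [dst0, dst1, IV] is padded
	 * from 3 to 4 entries, giving qm_sg_ents = 3 + 4 = 7 in total.
	 */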
1183 
1184 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1185 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1186 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1187 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1188 			qm_sg_ents, ivsize);
1189 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1190 			   0, DMA_NONE, 0, 0);
1191 		return ERR_PTR(-ENOMEM);
1192 	}
1193 
1194 	/* allocate space for base edesc, link tables and IV */
1195 	edesc = qi_cache_zalloc(flags);
1196 	if (unlikely(!edesc)) {
1197 		dev_err(dev, "could not allocate extended descriptor\n");
1198 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1199 			   0, DMA_NONE, 0, 0);
1200 		return ERR_PTR(-ENOMEM);
1201 	}
1202 
1203 	/* Make sure IV is located in a DMAable area */
1204 	sg_table = &edesc->sgt[0];
1205 	iv = (u8 *)(sg_table + qm_sg_ents);
1206 	memcpy(iv, req->iv, ivsize);
1207 
1208 	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1209 	if (dma_mapping_error(dev, iv_dma)) {
1210 		dev_err(dev, "unable to map IV\n");
1211 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1212 			   0, DMA_NONE, 0, 0);
1213 		qi_cache_free(edesc);
1214 		return ERR_PTR(-ENOMEM);
1215 	}
1216 
1217 	edesc->src_nents = src_nents;
1218 	edesc->dst_nents = dst_nents;
1219 	edesc->iv_dma = iv_dma;
1220 	edesc->qm_sg_bytes = qm_sg_bytes;
1221 
1222 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1223 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1224 
1225 	if (req->src != req->dst)
1226 		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1227 
1228 	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1229 			 ivsize, 0);
1230 
1231 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1232 					  DMA_TO_DEVICE);
1233 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1234 		dev_err(dev, "unable to map S/G table\n");
1235 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1236 			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1237 		qi_cache_free(edesc);
1238 		return ERR_PTR(-ENOMEM);
1239 	}
1240 
1241 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1242 	dpaa2_fl_set_final(in_fle, true);
1243 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1244 	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1245 
1246 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1247 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1248 
1249 	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1250 
1251 	if (req->src == req->dst)
1252 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1253 				  sizeof(*sg_table));
1254 	else
1255 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1256 				  sizeof(*sg_table));
1257 
1258 	return edesc;
1259 }
1260 
1261 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1262 		       struct aead_request *req)
1263 {
1264 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1265 	int ivsize = crypto_aead_ivsize(aead);
1266 
1267 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1268 		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1269 		   edesc->qm_sg_bytes);
1270 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1271 }
1272 
1273 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1274 			   struct skcipher_request *req)
1275 {
1276 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1277 	int ivsize = crypto_skcipher_ivsize(skcipher);
1278 
1279 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1280 		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1281 		   edesc->qm_sg_bytes);
1282 }
1283 
1284 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1285 {
1286 	struct crypto_async_request *areq = cbk_ctx;
1287 	struct aead_request *req = container_of(areq, struct aead_request,
1288 						base);
1289 	struct caam_request *req_ctx = to_caam_req(areq);
1290 	struct aead_edesc *edesc = req_ctx->edesc;
1291 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1292 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1293 	int ecode = 0;
1294 
1295 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1296 
1297 	if (unlikely(status))
1298 		ecode = caam_qi2_strstatus(ctx->dev, status);
1299 
1300 	aead_unmap(ctx->dev, edesc, req);
1301 	qi_cache_free(edesc);
1302 	aead_request_complete(req, ecode);
1303 }
1304 
1305 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1306 {
1307 	struct crypto_async_request *areq = cbk_ctx;
1308 	struct aead_request *req = container_of(areq, struct aead_request,
1309 						base);
1310 	struct caam_request *req_ctx = to_caam_req(areq);
1311 	struct aead_edesc *edesc = req_ctx->edesc;
1312 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1313 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1314 	int ecode = 0;
1315 
1316 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1317 
1318 	if (unlikely(status))
1319 		ecode = caam_qi2_strstatus(ctx->dev, status);
1320 
1321 	aead_unmap(ctx->dev, edesc, req);
1322 	qi_cache_free(edesc);
1323 	aead_request_complete(req, ecode);
1324 }
1325 
1326 static int aead_encrypt(struct aead_request *req)
1327 {
1328 	struct aead_edesc *edesc;
1329 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1330 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1331 	struct caam_request *caam_req = aead_request_ctx_dma(req);
1332 	int ret;
1333 
1334 	/* allocate extended descriptor */
1335 	edesc = aead_edesc_alloc(req, true);
1336 	if (IS_ERR(edesc))
1337 		return PTR_ERR(edesc);
1338 
1339 	caam_req->flc = &ctx->flc[ENCRYPT];
1340 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1341 	caam_req->cbk = aead_encrypt_done;
1342 	caam_req->ctx = &req->base;
1343 	caam_req->edesc = edesc;
1344 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1345 	if (ret != -EINPROGRESS &&
1346 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1347 		aead_unmap(ctx->dev, edesc, req);
1348 		qi_cache_free(edesc);
1349 	}
1350 
1351 	return ret;
1352 }
1353 
1354 static int aead_decrypt(struct aead_request *req)
1355 {
1356 	struct aead_edesc *edesc;
1357 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1358 	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1359 	struct caam_request *caam_req = aead_request_ctx_dma(req);
1360 	int ret;
1361 
1362 	/* allocate extended descriptor */
1363 	edesc = aead_edesc_alloc(req, false);
1364 	if (IS_ERR(edesc))
1365 		return PTR_ERR(edesc);
1366 
1367 	caam_req->flc = &ctx->flc[DECRYPT];
1368 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1369 	caam_req->cbk = aead_decrypt_done;
1370 	caam_req->ctx = &req->base;
1371 	caam_req->edesc = edesc;
1372 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1373 	if (ret != -EINPROGRESS &&
1374 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1375 		aead_unmap(ctx->dev, edesc, req);
1376 		qi_cache_free(edesc);
1377 	}
1378 
1379 	return ret;
1380 }
1381 
1382 static int ipsec_gcm_encrypt(struct aead_request *req)
1383 {
1384 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1385 }
1386 
1387 static int ipsec_gcm_decrypt(struct aead_request *req)
1388 {
1389 	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1390 }
1391 
1392 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1393 {
1394 	struct crypto_async_request *areq = cbk_ctx;
1395 	struct skcipher_request *req = skcipher_request_cast(areq);
1396 	struct caam_request *req_ctx = to_caam_req(areq);
1397 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1398 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1399 	struct skcipher_edesc *edesc = req_ctx->edesc;
1400 	int ecode = 0;
1401 	int ivsize = crypto_skcipher_ivsize(skcipher);
1402 
1403 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1404 
1405 	if (unlikely(status))
1406 		ecode = caam_qi2_strstatus(ctx->dev, status);
1407 
1408 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1409 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1410 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1411 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1412 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1413 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1414 
1415 	skcipher_unmap(ctx->dev, edesc, req);
1416 
1417 	/*
1418 	 * The crypto API expects us to set the IV (req->iv) to the last
1419 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1420 	 * This is used e.g. by the CTS mode.
1421 	 */
1422 	if (!ecode)
1423 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1424 		       ivsize);
1425 
1426 	qi_cache_free(edesc);
1427 	skcipher_request_complete(req, ecode);
1428 }
1429 
1430 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1431 {
1432 	struct crypto_async_request *areq = cbk_ctx;
1433 	struct skcipher_request *req = skcipher_request_cast(areq);
1434 	struct caam_request *req_ctx = to_caam_req(areq);
1435 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1436 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1437 	struct skcipher_edesc *edesc = req_ctx->edesc;
1438 	int ecode = 0;
1439 	int ivsize = crypto_skcipher_ivsize(skcipher);
1440 
1441 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1442 
1443 	if (unlikely(status))
1444 		ecode = caam_qi2_strstatus(ctx->dev, status);
1445 
1446 	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1447 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1448 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
1449 	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1450 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1451 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1452 
1453 	skcipher_unmap(ctx->dev, edesc, req);
1454 
1455 	/*
1456 	 * The crypto API expects us to set the IV (req->iv) to the last
1457 	 * ciphertext block (CBC mode) or last counter (CTR mode).
1458 	 * This is used e.g. by the CTS mode.
1459 	 */
1460 	if (!ecode)
1461 		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1462 		       ivsize);
1463 
1464 	qi_cache_free(edesc);
1465 	skcipher_request_complete(req, ecode);
1466 }
1467 
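/*
 * Returns true if the second half of the XTS IV (the upper 64 bits of the
 * sector index) is non-zero. Hardware up to era 8 supports only a 64-bit
 * tweak, so such requests are diverted to the software fallback in
 * skcipher_encrypt()/skcipher_decrypt() below.
 */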
1468 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1469 {
1470 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1471 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1472 
1473 	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1474 }
1475 
1476 static int skcipher_encrypt(struct skcipher_request *req)
1477 {
1478 	struct skcipher_edesc *edesc;
1479 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1480 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1481 	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
1482 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1483 	int ret;
1484 
1485 	/*
1486 	 * XTS is expected to return an error even for input length = 0.
1487 	 * Note that the case of input length < block size is caught during
1488 	 * HW offloading and returns an error.
1489 	 */
1490 	if (!req->cryptlen && !ctx->fallback)
1491 		return 0;
1492 
1493 	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1494 			      ctx->xts_key_fallback)) {
1495 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1496 		skcipher_request_set_callback(&caam_req->fallback_req,
1497 					      req->base.flags,
1498 					      req->base.complete,
1499 					      req->base.data);
1500 		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1501 					   req->dst, req->cryptlen, req->iv);
1502 
1503 		return crypto_skcipher_encrypt(&caam_req->fallback_req);
1504 	}
1505 
1506 	/* allocate extended descriptor */
1507 	edesc = skcipher_edesc_alloc(req);
1508 	if (IS_ERR(edesc))
1509 		return PTR_ERR(edesc);
1510 
1511 	caam_req->flc = &ctx->flc[ENCRYPT];
1512 	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1513 	caam_req->cbk = skcipher_encrypt_done;
1514 	caam_req->ctx = &req->base;
1515 	caam_req->edesc = edesc;
1516 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1517 	if (ret != -EINPROGRESS &&
1518 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1519 		skcipher_unmap(ctx->dev, edesc, req);
1520 		qi_cache_free(edesc);
1521 	}
1522 
1523 	return ret;
1524 }
1525 
1526 static int skcipher_decrypt(struct skcipher_request *req)
1527 {
1528 	struct skcipher_edesc *edesc;
1529 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1530 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1531 	struct caam_request *caam_req = skcipher_request_ctx_dma(req);
1532 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1533 	int ret;
1534 
1535 	/*
1536 	 * XTS is expected to return an error even for input length = 0.
1537 	 * Note that the case of input length < block size is caught during
1538 	 * HW offloading and returns an error.
1539 	 */
1540 	if (!req->cryptlen && !ctx->fallback)
1541 		return 0;
1542 
1543 	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1544 			      ctx->xts_key_fallback)) {
1545 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1546 		skcipher_request_set_callback(&caam_req->fallback_req,
1547 					      req->base.flags,
1548 					      req->base.complete,
1549 					      req->base.data);
1550 		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1551 					   req->dst, req->cryptlen, req->iv);
1552 
1553 		return crypto_skcipher_decrypt(&caam_req->fallback_req);
1554 	}
1555 
1556 	/* allocate extended descriptor */
1557 	edesc = skcipher_edesc_alloc(req);
1558 	if (IS_ERR(edesc))
1559 		return PTR_ERR(edesc);
1560 
1561 	caam_req->flc = &ctx->flc[DECRYPT];
1562 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1563 	caam_req->cbk = skcipher_decrypt_done;
1564 	caam_req->ctx = &req->base;
1565 	caam_req->edesc = edesc;
1566 	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1567 	if (ret != -EINPROGRESS &&
1568 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1569 		skcipher_unmap(ctx->dev, edesc, req);
1570 		qi_cache_free(edesc);
1571 	}
1572 
1573 	return ret;
1574 }
1575 
1576 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1577 			 bool uses_dkp)
1578 {
1579 	dma_addr_t dma_addr;
1580 	int i;
1581 
1582 	/* copy descriptor header template value */
1583 	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1584 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1585 
1586 	ctx->dev = caam->dev;
1587 	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1588 
1589 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1590 					offsetof(struct caam_ctx, flc_dma),
1591 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1592 	if (dma_mapping_error(ctx->dev, dma_addr)) {
1593 		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1594 		return -ENOMEM;
1595 	}
1596 
1597 	for (i = 0; i < NUM_OP; i++)
1598 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1599 	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
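	/*
	 * Device-visible layout of the single mapping, assuming NUM_OP == 2
	 * (ENCRYPT, DECRYPT):
	 *
	 *	dma_addr + 0			-> flc[ENCRYPT]
	 *	dma_addr + sizeof(flc[0])	-> flc[DECRYPT]
	 *	dma_addr + 2 * sizeof(flc[0])	-> key[CAAM_MAX_KEY_SIZE]
	 */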
1600 
1601 	return 0;
1602 }
1603 
1604 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1605 {
1606 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1607 	struct caam_skcipher_alg *caam_alg =
1608 		container_of(alg, typeof(*caam_alg), skcipher);
1609 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
1610 	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1611 	int ret = 0;
1612 
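	/*
	 * Only XTS needs a software fallback tfm (same algorithm name),
	 * used for requests the accelerator cannot handle.
	 */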
1613 	if (alg_aai == OP_ALG_AAI_XTS) {
1614 		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1615 		struct crypto_skcipher *fallback;
1616 
1617 		fallback = crypto_alloc_skcipher(tfm_name, 0,
1618 						 CRYPTO_ALG_NEED_FALLBACK);
1619 		if (IS_ERR(fallback)) {
1620 			dev_err(caam_alg->caam.dev,
1621 				"Failed to allocate %s fallback: %ld\n",
1622 				tfm_name, PTR_ERR(fallback));
1623 			return PTR_ERR(fallback);
1624 		}
1625 
1626 		ctx->fallback = fallback;
1627 		crypto_skcipher_set_reqsize_dma(
1628 			tfm, sizeof(struct caam_request) +
1629 			     crypto_skcipher_reqsize(fallback));
1630 	} else {
1631 		crypto_skcipher_set_reqsize_dma(tfm,
1632 						sizeof(struct caam_request));
1633 	}
1634 
1635 	ret = caam_cra_init(ctx, &caam_alg->caam, false);
1636 	if (ret && ctx->fallback)
1637 		crypto_free_skcipher(ctx->fallback);
1638 
1639 	return ret;
1640 }
1641 
1642 static int caam_cra_init_aead(struct crypto_aead *tfm)
1643 {
1644 	struct aead_alg *alg = crypto_aead_alg(tfm);
1645 	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1646 						      aead);
1647 
1648 	crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
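	/*
	 * DKP (Derived Key Protocol) writes the derived key back into the
	 * context, hence the bidirectional mapping unless the algorithm
	 * opted out via nodkp.
	 */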
1649 	return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
1650 			     !caam_alg->caam.nodkp);
1651 }
1652 
1653 static void caam_exit_common(struct caam_ctx *ctx)
1654 {
1655 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1656 			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
1657 			       DMA_ATTR_SKIP_CPU_SYNC);
1658 }
1659 
1660 static void caam_cra_exit(struct crypto_skcipher *tfm)
1661 {
1662 	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
1663 
1664 	if (ctx->fallback)
1665 		crypto_free_skcipher(ctx->fallback);
1666 	caam_exit_common(ctx);
1667 }
1668 
1669 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1670 {
1671 	caam_exit_common(crypto_aead_ctx_dma(tfm));
1672 }
1673 
1674 static struct caam_skcipher_alg driver_algs[] = {
1675 	{
1676 		.skcipher = {
1677 			.base = {
1678 				.cra_name = "cbc(aes)",
1679 				.cra_driver_name = "cbc-aes-caam-qi2",
1680 				.cra_blocksize = AES_BLOCK_SIZE,
1681 			},
1682 			.setkey = aes_skcipher_setkey,
1683 			.encrypt = skcipher_encrypt,
1684 			.decrypt = skcipher_decrypt,
1685 			.min_keysize = AES_MIN_KEY_SIZE,
1686 			.max_keysize = AES_MAX_KEY_SIZE,
1687 			.ivsize = AES_BLOCK_SIZE,
1688 		},
1689 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1690 	},
1691 	{
1692 		.skcipher = {
1693 			.base = {
1694 				.cra_name = "cbc(des3_ede)",
1695 				.cra_driver_name = "cbc-3des-caam-qi2",
1696 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1697 			},
1698 			.setkey = des3_skcipher_setkey,
1699 			.encrypt = skcipher_encrypt,
1700 			.decrypt = skcipher_decrypt,
1701 			.min_keysize = DES3_EDE_KEY_SIZE,
1702 			.max_keysize = DES3_EDE_KEY_SIZE,
1703 			.ivsize = DES3_EDE_BLOCK_SIZE,
1704 		},
1705 		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1706 	},
1707 	{
1708 		.skcipher = {
1709 			.base = {
1710 				.cra_name = "cbc(des)",
1711 				.cra_driver_name = "cbc-des-caam-qi2",
1712 				.cra_blocksize = DES_BLOCK_SIZE,
1713 			},
1714 			.setkey = des_skcipher_setkey,
1715 			.encrypt = skcipher_encrypt,
1716 			.decrypt = skcipher_decrypt,
1717 			.min_keysize = DES_KEY_SIZE,
1718 			.max_keysize = DES_KEY_SIZE,
1719 			.ivsize = DES_BLOCK_SIZE,
1720 		},
1721 		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1722 	},
1723 	{
1724 		.skcipher = {
1725 			.base = {
1726 				.cra_name = "ctr(aes)",
1727 				.cra_driver_name = "ctr-aes-caam-qi2",
1728 				.cra_blocksize = 1,
1729 			},
1730 			.setkey = ctr_skcipher_setkey,
1731 			.encrypt = skcipher_encrypt,
1732 			.decrypt = skcipher_decrypt,
1733 			.min_keysize = AES_MIN_KEY_SIZE,
1734 			.max_keysize = AES_MAX_KEY_SIZE,
1735 			.ivsize = AES_BLOCK_SIZE,
1736 			.chunksize = AES_BLOCK_SIZE,
1737 		},
1738 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1739 					OP_ALG_AAI_CTR_MOD128,
1740 	},
1741 	{
1742 		.skcipher = {
1743 			.base = {
1744 				.cra_name = "rfc3686(ctr(aes))",
1745 				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1746 				.cra_blocksize = 1,
1747 			},
1748 			.setkey = rfc3686_skcipher_setkey,
1749 			.encrypt = skcipher_encrypt,
1750 			.decrypt = skcipher_decrypt,
1751 			.min_keysize = AES_MIN_KEY_SIZE +
1752 				       CTR_RFC3686_NONCE_SIZE,
1753 			.max_keysize = AES_MAX_KEY_SIZE +
1754 				       CTR_RFC3686_NONCE_SIZE,
1755 			.ivsize = CTR_RFC3686_IV_SIZE,
1756 			.chunksize = AES_BLOCK_SIZE,
1757 		},
1758 		.caam = {
1759 			.class1_alg_type = OP_ALG_ALGSEL_AES |
1760 					   OP_ALG_AAI_CTR_MOD128,
1761 			.rfc3686 = true,
1762 		},
1763 	},
1764 	{
1765 		.skcipher = {
1766 			.base = {
1767 				.cra_name = "xts(aes)",
1768 				.cra_driver_name = "xts-aes-caam-qi2",
1769 				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1770 				.cra_blocksize = AES_BLOCK_SIZE,
1771 			},
1772 			.setkey = xts_skcipher_setkey,
1773 			.encrypt = skcipher_encrypt,
1774 			.decrypt = skcipher_decrypt,
1775 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
1776 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
1777 			.ivsize = AES_BLOCK_SIZE,
1778 		},
1779 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1780 	},
1781 	{
1782 		.skcipher = {
1783 			.base = {
1784 				.cra_name = "chacha20",
1785 				.cra_driver_name = "chacha20-caam-qi2",
1786 				.cra_blocksize = 1,
1787 			},
1788 			.setkey = chacha20_skcipher_setkey,
1789 			.encrypt = skcipher_encrypt,
1790 			.decrypt = skcipher_decrypt,
1791 			.min_keysize = CHACHA_KEY_SIZE,
1792 			.max_keysize = CHACHA_KEY_SIZE,
1793 			.ivsize = CHACHA_IV_SIZE,
1794 		},
1795 		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1796 	},
1797 };
1798 
1799 static struct caam_aead_alg driver_aeads[] = {
1800 	{
1801 		.aead = {
1802 			.base = {
1803 				.cra_name = "rfc4106(gcm(aes))",
1804 				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1805 				.cra_blocksize = 1,
1806 			},
1807 			.setkey = rfc4106_setkey,
1808 			.setauthsize = rfc4106_setauthsize,
1809 			.encrypt = ipsec_gcm_encrypt,
1810 			.decrypt = ipsec_gcm_decrypt,
1811 			.ivsize = 8,
1812 			.maxauthsize = AES_BLOCK_SIZE,
1813 		},
1814 		.caam = {
1815 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1816 			.nodkp = true,
1817 		},
1818 	},
1819 	{
1820 		.aead = {
1821 			.base = {
1822 				.cra_name = "rfc4543(gcm(aes))",
1823 				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1824 				.cra_blocksize = 1,
1825 			},
1826 			.setkey = rfc4543_setkey,
1827 			.setauthsize = rfc4543_setauthsize,
1828 			.encrypt = ipsec_gcm_encrypt,
1829 			.decrypt = ipsec_gcm_decrypt,
1830 			.ivsize = 8,
1831 			.maxauthsize = AES_BLOCK_SIZE,
1832 		},
1833 		.caam = {
1834 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1835 			.nodkp = true,
1836 		},
1837 	},
1838 	/* Galois Counter Mode */
1839 	{
1840 		.aead = {
1841 			.base = {
1842 				.cra_name = "gcm(aes)",
1843 				.cra_driver_name = "gcm-aes-caam-qi2",
1844 				.cra_blocksize = 1,
1845 			},
1846 			.setkey = gcm_setkey,
1847 			.setauthsize = gcm_setauthsize,
1848 			.encrypt = aead_encrypt,
1849 			.decrypt = aead_decrypt,
1850 			.ivsize = 12,
1851 			.maxauthsize = AES_BLOCK_SIZE,
1852 		},
1853 		.caam = {
1854 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1855 			.nodkp = true,
1856 		}
1857 	},
1858 	/* single-pass ipsec_esp descriptor */
1859 	{
1860 		.aead = {
1861 			.base = {
1862 				.cra_name = "authenc(hmac(md5),cbc(aes))",
1863 				.cra_driver_name = "authenc-hmac-md5-"
1864 						   "cbc-aes-caam-qi2",
1865 				.cra_blocksize = AES_BLOCK_SIZE,
1866 			},
1867 			.setkey = aead_setkey,
1868 			.setauthsize = aead_setauthsize,
1869 			.encrypt = aead_encrypt,
1870 			.decrypt = aead_decrypt,
1871 			.ivsize = AES_BLOCK_SIZE,
1872 			.maxauthsize = MD5_DIGEST_SIZE,
1873 		},
1874 		.caam = {
1875 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1876 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1877 					   OP_ALG_AAI_HMAC_PRECOMP,
1878 		}
1879 	},
1880 	{
1881 		.aead = {
1882 			.base = {
1883 				.cra_name = "echainiv(authenc(hmac(md5),"
1884 					    "cbc(aes)))",
1885 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
1886 						   "cbc-aes-caam-qi2",
1887 				.cra_blocksize = AES_BLOCK_SIZE,
1888 			},
1889 			.setkey = aead_setkey,
1890 			.setauthsize = aead_setauthsize,
1891 			.encrypt = aead_encrypt,
1892 			.decrypt = aead_decrypt,
1893 			.ivsize = AES_BLOCK_SIZE,
1894 			.maxauthsize = MD5_DIGEST_SIZE,
1895 		},
1896 		.caam = {
1897 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1898 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
1899 					   OP_ALG_AAI_HMAC_PRECOMP,
1900 			.geniv = true,
1901 		}
1902 	},
1903 	{
1904 		.aead = {
1905 			.base = {
1906 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
1907 				.cra_driver_name = "authenc-hmac-sha1-"
1908 						   "cbc-aes-caam-qi2",
1909 				.cra_blocksize = AES_BLOCK_SIZE,
1910 			},
1911 			.setkey = aead_setkey,
1912 			.setauthsize = aead_setauthsize,
1913 			.encrypt = aead_encrypt,
1914 			.decrypt = aead_decrypt,
1915 			.ivsize = AES_BLOCK_SIZE,
1916 			.maxauthsize = SHA1_DIGEST_SIZE,
1917 		},
1918 		.caam = {
1919 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1920 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1921 					   OP_ALG_AAI_HMAC_PRECOMP,
1922 		}
1923 	},
1924 	{
1925 		.aead = {
1926 			.base = {
1927 				.cra_name = "echainiv(authenc(hmac(sha1),"
1928 					    "cbc(aes)))",
1929 				.cra_driver_name = "echainiv-authenc-"
1930 						   "hmac-sha1-cbc-aes-caam-qi2",
1931 				.cra_blocksize = AES_BLOCK_SIZE,
1932 			},
1933 			.setkey = aead_setkey,
1934 			.setauthsize = aead_setauthsize,
1935 			.encrypt = aead_encrypt,
1936 			.decrypt = aead_decrypt,
1937 			.ivsize = AES_BLOCK_SIZE,
1938 			.maxauthsize = SHA1_DIGEST_SIZE,
1939 		},
1940 		.caam = {
1941 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1942 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1943 					   OP_ALG_AAI_HMAC_PRECOMP,
1944 			.geniv = true,
1945 		},
1946 	},
1947 	{
1948 		.aead = {
1949 			.base = {
1950 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
1951 				.cra_driver_name = "authenc-hmac-sha224-"
1952 						   "cbc-aes-caam-qi2",
1953 				.cra_blocksize = AES_BLOCK_SIZE,
1954 			},
1955 			.setkey = aead_setkey,
1956 			.setauthsize = aead_setauthsize,
1957 			.encrypt = aead_encrypt,
1958 			.decrypt = aead_decrypt,
1959 			.ivsize = AES_BLOCK_SIZE,
1960 			.maxauthsize = SHA224_DIGEST_SIZE,
1961 		},
1962 		.caam = {
1963 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1964 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1965 					   OP_ALG_AAI_HMAC_PRECOMP,
1966 		}
1967 	},
1968 	{
1969 		.aead = {
1970 			.base = {
1971 				.cra_name = "echainiv(authenc(hmac(sha224),"
1972 					    "cbc(aes)))",
1973 				.cra_driver_name = "echainiv-authenc-"
1974 						   "hmac-sha224-cbc-aes-caam-qi2",
1975 				.cra_blocksize = AES_BLOCK_SIZE,
1976 			},
1977 			.setkey = aead_setkey,
1978 			.setauthsize = aead_setauthsize,
1979 			.encrypt = aead_encrypt,
1980 			.decrypt = aead_decrypt,
1981 			.ivsize = AES_BLOCK_SIZE,
1982 			.maxauthsize = SHA224_DIGEST_SIZE,
1983 		},
1984 		.caam = {
1985 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1986 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1987 					   OP_ALG_AAI_HMAC_PRECOMP,
1988 			.geniv = true,
1989 		}
1990 	},
1991 	{
1992 		.aead = {
1993 			.base = {
1994 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
1995 				.cra_driver_name = "authenc-hmac-sha256-"
1996 						   "cbc-aes-caam-qi2",
1997 				.cra_blocksize = AES_BLOCK_SIZE,
1998 			},
1999 			.setkey = aead_setkey,
2000 			.setauthsize = aead_setauthsize,
2001 			.encrypt = aead_encrypt,
2002 			.decrypt = aead_decrypt,
2003 			.ivsize = AES_BLOCK_SIZE,
2004 			.maxauthsize = SHA256_DIGEST_SIZE,
2005 		},
2006 		.caam = {
2007 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2008 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2009 					   OP_ALG_AAI_HMAC_PRECOMP,
2010 		}
2011 	},
2012 	{
2013 		.aead = {
2014 			.base = {
2015 				.cra_name = "echainiv(authenc(hmac(sha256),"
2016 					    "cbc(aes)))",
2017 				.cra_driver_name = "echainiv-authenc-"
2018 						   "hmac-sha256-cbc-aes-"
2019 						   "caam-qi2",
2020 				.cra_blocksize = AES_BLOCK_SIZE,
2021 			},
2022 			.setkey = aead_setkey,
2023 			.setauthsize = aead_setauthsize,
2024 			.encrypt = aead_encrypt,
2025 			.decrypt = aead_decrypt,
2026 			.ivsize = AES_BLOCK_SIZE,
2027 			.maxauthsize = SHA256_DIGEST_SIZE,
2028 		},
2029 		.caam = {
2030 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2031 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2032 					   OP_ALG_AAI_HMAC_PRECOMP,
2033 			.geniv = true,
2034 		}
2035 	},
2036 	{
2037 		.aead = {
2038 			.base = {
2039 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2040 				.cra_driver_name = "authenc-hmac-sha384-"
2041 						   "cbc-aes-caam-qi2",
2042 				.cra_blocksize = AES_BLOCK_SIZE,
2043 			},
2044 			.setkey = aead_setkey,
2045 			.setauthsize = aead_setauthsize,
2046 			.encrypt = aead_encrypt,
2047 			.decrypt = aead_decrypt,
2048 			.ivsize = AES_BLOCK_SIZE,
2049 			.maxauthsize = SHA384_DIGEST_SIZE,
2050 		},
2051 		.caam = {
2052 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2053 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2054 					   OP_ALG_AAI_HMAC_PRECOMP,
2055 		}
2056 	},
2057 	{
2058 		.aead = {
2059 			.base = {
2060 				.cra_name = "echainiv(authenc(hmac(sha384),"
2061 					    "cbc(aes)))",
2062 				.cra_driver_name = "echainiv-authenc-"
2063 						   "hmac-sha384-cbc-aes-"
2064 						   "caam-qi2",
2065 				.cra_blocksize = AES_BLOCK_SIZE,
2066 			},
2067 			.setkey = aead_setkey,
2068 			.setauthsize = aead_setauthsize,
2069 			.encrypt = aead_encrypt,
2070 			.decrypt = aead_decrypt,
2071 			.ivsize = AES_BLOCK_SIZE,
2072 			.maxauthsize = SHA384_DIGEST_SIZE,
2073 		},
2074 		.caam = {
2075 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2076 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2077 					   OP_ALG_AAI_HMAC_PRECOMP,
2078 			.geniv = true,
2079 		}
2080 	},
2081 	{
2082 		.aead = {
2083 			.base = {
2084 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2085 				.cra_driver_name = "authenc-hmac-sha512-"
2086 						   "cbc-aes-caam-qi2",
2087 				.cra_blocksize = AES_BLOCK_SIZE,
2088 			},
2089 			.setkey = aead_setkey,
2090 			.setauthsize = aead_setauthsize,
2091 			.encrypt = aead_encrypt,
2092 			.decrypt = aead_decrypt,
2093 			.ivsize = AES_BLOCK_SIZE,
2094 			.maxauthsize = SHA512_DIGEST_SIZE,
2095 		},
2096 		.caam = {
2097 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2098 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2099 					   OP_ALG_AAI_HMAC_PRECOMP,
2100 		}
2101 	},
2102 	{
2103 		.aead = {
2104 			.base = {
2105 				.cra_name = "echainiv(authenc(hmac(sha512),"
2106 					    "cbc(aes)))",
2107 				.cra_driver_name = "echainiv-authenc-"
2108 						   "hmac-sha512-cbc-aes-"
2109 						   "caam-qi2",
2110 				.cra_blocksize = AES_BLOCK_SIZE,
2111 			},
2112 			.setkey = aead_setkey,
2113 			.setauthsize = aead_setauthsize,
2114 			.encrypt = aead_encrypt,
2115 			.decrypt = aead_decrypt,
2116 			.ivsize = AES_BLOCK_SIZE,
2117 			.maxauthsize = SHA512_DIGEST_SIZE,
2118 		},
2119 		.caam = {
2120 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2121 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2122 					   OP_ALG_AAI_HMAC_PRECOMP,
2123 			.geniv = true,
2124 		}
2125 	},
2126 	{
2127 		.aead = {
2128 			.base = {
2129 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2130 				.cra_driver_name = "authenc-hmac-md5-"
2131 						   "cbc-des3_ede-caam-qi2",
2132 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2133 			},
2134 			.setkey = des3_aead_setkey,
2135 			.setauthsize = aead_setauthsize,
2136 			.encrypt = aead_encrypt,
2137 			.decrypt = aead_decrypt,
2138 			.ivsize = DES3_EDE_BLOCK_SIZE,
2139 			.maxauthsize = MD5_DIGEST_SIZE,
2140 		},
2141 		.caam = {
2142 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2143 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2144 					   OP_ALG_AAI_HMAC_PRECOMP,
2145 		}
2146 	},
2147 	{
2148 		.aead = {
2149 			.base = {
2150 				.cra_name = "echainiv(authenc(hmac(md5),"
2151 					    "cbc(des3_ede)))",
2152 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2153 						   "cbc-des3_ede-caam-qi2",
2154 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2155 			},
2156 			.setkey = des3_aead_setkey,
2157 			.setauthsize = aead_setauthsize,
2158 			.encrypt = aead_encrypt,
2159 			.decrypt = aead_decrypt,
2160 			.ivsize = DES3_EDE_BLOCK_SIZE,
2161 			.maxauthsize = MD5_DIGEST_SIZE,
2162 		},
2163 		.caam = {
2164 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2165 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2166 					   OP_ALG_AAI_HMAC_PRECOMP,
2167 			.geniv = true,
2168 		}
2169 	},
2170 	{
2171 		.aead = {
2172 			.base = {
2173 				.cra_name = "authenc(hmac(sha1),"
2174 					    "cbc(des3_ede))",
2175 				.cra_driver_name = "authenc-hmac-sha1-"
2176 						   "cbc-des3_ede-caam-qi2",
2177 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2178 			},
2179 			.setkey = des3_aead_setkey,
2180 			.setauthsize = aead_setauthsize,
2181 			.encrypt = aead_encrypt,
2182 			.decrypt = aead_decrypt,
2183 			.ivsize = DES3_EDE_BLOCK_SIZE,
2184 			.maxauthsize = SHA1_DIGEST_SIZE,
2185 		},
2186 		.caam = {
2187 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2188 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2189 					   OP_ALG_AAI_HMAC_PRECOMP,
2190 		},
2191 	},
2192 	{
2193 		.aead = {
2194 			.base = {
2195 				.cra_name = "echainiv(authenc(hmac(sha1),"
2196 					    "cbc(des3_ede)))",
2197 				.cra_driver_name = "echainiv-authenc-"
2198 						   "hmac-sha1-"
2199 						   "cbc-des3_ede-caam-qi2",
2200 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2201 			},
2202 			.setkey = des3_aead_setkey,
2203 			.setauthsize = aead_setauthsize,
2204 			.encrypt = aead_encrypt,
2205 			.decrypt = aead_decrypt,
2206 			.ivsize = DES3_EDE_BLOCK_SIZE,
2207 			.maxauthsize = SHA1_DIGEST_SIZE,
2208 		},
2209 		.caam = {
2210 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2211 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2212 					   OP_ALG_AAI_HMAC_PRECOMP,
2213 			.geniv = true,
2214 		}
2215 	},
2216 	{
2217 		.aead = {
2218 			.base = {
2219 				.cra_name = "authenc(hmac(sha224),"
2220 					    "cbc(des3_ede))",
2221 				.cra_driver_name = "authenc-hmac-sha224-"
2222 						   "cbc-des3_ede-caam-qi2",
2223 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2224 			},
2225 			.setkey = des3_aead_setkey,
2226 			.setauthsize = aead_setauthsize,
2227 			.encrypt = aead_encrypt,
2228 			.decrypt = aead_decrypt,
2229 			.ivsize = DES3_EDE_BLOCK_SIZE,
2230 			.maxauthsize = SHA224_DIGEST_SIZE,
2231 		},
2232 		.caam = {
2233 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2234 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2235 					   OP_ALG_AAI_HMAC_PRECOMP,
2236 		},
2237 	},
2238 	{
2239 		.aead = {
2240 			.base = {
2241 				.cra_name = "echainiv(authenc(hmac(sha224),"
2242 					    "cbc(des3_ede)))",
2243 				.cra_driver_name = "echainiv-authenc-"
2244 						   "hmac-sha224-"
2245 						   "cbc-des3_ede-caam-qi2",
2246 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2247 			},
2248 			.setkey = des3_aead_setkey,
2249 			.setauthsize = aead_setauthsize,
2250 			.encrypt = aead_encrypt,
2251 			.decrypt = aead_decrypt,
2252 			.ivsize = DES3_EDE_BLOCK_SIZE,
2253 			.maxauthsize = SHA224_DIGEST_SIZE,
2254 		},
2255 		.caam = {
2256 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2257 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2258 					   OP_ALG_AAI_HMAC_PRECOMP,
2259 			.geniv = true,
2260 		}
2261 	},
2262 	{
2263 		.aead = {
2264 			.base = {
2265 				.cra_name = "authenc(hmac(sha256),"
2266 					    "cbc(des3_ede))",
2267 				.cra_driver_name = "authenc-hmac-sha256-"
2268 						   "cbc-des3_ede-caam-qi2",
2269 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2270 			},
2271 			.setkey = des3_aead_setkey,
2272 			.setauthsize = aead_setauthsize,
2273 			.encrypt = aead_encrypt,
2274 			.decrypt = aead_decrypt,
2275 			.ivsize = DES3_EDE_BLOCK_SIZE,
2276 			.maxauthsize = SHA256_DIGEST_SIZE,
2277 		},
2278 		.caam = {
2279 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2280 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2281 					   OP_ALG_AAI_HMAC_PRECOMP,
2282 		},
2283 	},
2284 	{
2285 		.aead = {
2286 			.base = {
2287 				.cra_name = "echainiv(authenc(hmac(sha256),"
2288 					    "cbc(des3_ede)))",
2289 				.cra_driver_name = "echainiv-authenc-"
2290 						   "hmac-sha256-"
2291 						   "cbc-des3_ede-caam-qi2",
2292 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2293 			},
2294 			.setkey = des3_aead_setkey,
2295 			.setauthsize = aead_setauthsize,
2296 			.encrypt = aead_encrypt,
2297 			.decrypt = aead_decrypt,
2298 			.ivsize = DES3_EDE_BLOCK_SIZE,
2299 			.maxauthsize = SHA256_DIGEST_SIZE,
2300 		},
2301 		.caam = {
2302 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2303 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2304 					   OP_ALG_AAI_HMAC_PRECOMP,
2305 			.geniv = true,
2306 		}
2307 	},
2308 	{
2309 		.aead = {
2310 			.base = {
2311 				.cra_name = "authenc(hmac(sha384),"
2312 					    "cbc(des3_ede))",
2313 				.cra_driver_name = "authenc-hmac-sha384-"
2314 						   "cbc-des3_ede-caam-qi2",
2315 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2316 			},
2317 			.setkey = des3_aead_setkey,
2318 			.setauthsize = aead_setauthsize,
2319 			.encrypt = aead_encrypt,
2320 			.decrypt = aead_decrypt,
2321 			.ivsize = DES3_EDE_BLOCK_SIZE,
2322 			.maxauthsize = SHA384_DIGEST_SIZE,
2323 		},
2324 		.caam = {
2325 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2326 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2327 					   OP_ALG_AAI_HMAC_PRECOMP,
2328 		},
2329 	},
2330 	{
2331 		.aead = {
2332 			.base = {
2333 				.cra_name = "echainiv(authenc(hmac(sha384),"
2334 					    "cbc(des3_ede)))",
2335 				.cra_driver_name = "echainiv-authenc-"
2336 						   "hmac-sha384-"
2337 						   "cbc-des3_ede-caam-qi2",
2338 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2339 			},
2340 			.setkey = des3_aead_setkey,
2341 			.setauthsize = aead_setauthsize,
2342 			.encrypt = aead_encrypt,
2343 			.decrypt = aead_decrypt,
2344 			.ivsize = DES3_EDE_BLOCK_SIZE,
2345 			.maxauthsize = SHA384_DIGEST_SIZE,
2346 		},
2347 		.caam = {
2348 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2349 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2350 					   OP_ALG_AAI_HMAC_PRECOMP,
2351 			.geniv = true,
2352 		}
2353 	},
2354 	{
2355 		.aead = {
2356 			.base = {
2357 				.cra_name = "authenc(hmac(sha512),"
2358 					    "cbc(des3_ede))",
2359 				.cra_driver_name = "authenc-hmac-sha512-"
2360 						   "cbc-des3_ede-caam-qi2",
2361 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2362 			},
2363 			.setkey = des3_aead_setkey,
2364 			.setauthsize = aead_setauthsize,
2365 			.encrypt = aead_encrypt,
2366 			.decrypt = aead_decrypt,
2367 			.ivsize = DES3_EDE_BLOCK_SIZE,
2368 			.maxauthsize = SHA512_DIGEST_SIZE,
2369 		},
2370 		.caam = {
2371 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2372 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2373 					   OP_ALG_AAI_HMAC_PRECOMP,
2374 		},
2375 	},
2376 	{
2377 		.aead = {
2378 			.base = {
2379 				.cra_name = "echainiv(authenc(hmac(sha512),"
2380 					    "cbc(des3_ede)))",
2381 				.cra_driver_name = "echainiv-authenc-"
2382 						   "hmac-sha512-"
2383 						   "cbc-des3_ede-caam-qi2",
2384 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2385 			},
2386 			.setkey = des3_aead_setkey,
2387 			.setauthsize = aead_setauthsize,
2388 			.encrypt = aead_encrypt,
2389 			.decrypt = aead_decrypt,
2390 			.ivsize = DES3_EDE_BLOCK_SIZE,
2391 			.maxauthsize = SHA512_DIGEST_SIZE,
2392 		},
2393 		.caam = {
2394 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2395 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2396 					   OP_ALG_AAI_HMAC_PRECOMP,
2397 			.geniv = true,
2398 		}
2399 	},
2400 	{
2401 		.aead = {
2402 			.base = {
2403 				.cra_name = "authenc(hmac(md5),cbc(des))",
2404 				.cra_driver_name = "authenc-hmac-md5-"
2405 						   "cbc-des-caam-qi2",
2406 				.cra_blocksize = DES_BLOCK_SIZE,
2407 			},
2408 			.setkey = aead_setkey,
2409 			.setauthsize = aead_setauthsize,
2410 			.encrypt = aead_encrypt,
2411 			.decrypt = aead_decrypt,
2412 			.ivsize = DES_BLOCK_SIZE,
2413 			.maxauthsize = MD5_DIGEST_SIZE,
2414 		},
2415 		.caam = {
2416 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2417 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2418 					   OP_ALG_AAI_HMAC_PRECOMP,
2419 		},
2420 	},
2421 	{
2422 		.aead = {
2423 			.base = {
2424 				.cra_name = "echainiv(authenc(hmac(md5),"
2425 					    "cbc(des)))",
2426 				.cra_driver_name = "echainiv-authenc-hmac-md5-"
2427 						   "cbc-des-caam-qi2",
2428 				.cra_blocksize = DES_BLOCK_SIZE,
2429 			},
2430 			.setkey = aead_setkey,
2431 			.setauthsize = aead_setauthsize,
2432 			.encrypt = aead_encrypt,
2433 			.decrypt = aead_decrypt,
2434 			.ivsize = DES_BLOCK_SIZE,
2435 			.maxauthsize = MD5_DIGEST_SIZE,
2436 		},
2437 		.caam = {
2438 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2439 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2440 					   OP_ALG_AAI_HMAC_PRECOMP,
2441 			.geniv = true,
2442 		}
2443 	},
2444 	{
2445 		.aead = {
2446 			.base = {
2447 				.cra_name = "authenc(hmac(sha1),cbc(des))",
2448 				.cra_driver_name = "authenc-hmac-sha1-"
2449 						   "cbc-des-caam-qi2",
2450 				.cra_blocksize = DES_BLOCK_SIZE,
2451 			},
2452 			.setkey = aead_setkey,
2453 			.setauthsize = aead_setauthsize,
2454 			.encrypt = aead_encrypt,
2455 			.decrypt = aead_decrypt,
2456 			.ivsize = DES_BLOCK_SIZE,
2457 			.maxauthsize = SHA1_DIGEST_SIZE,
2458 		},
2459 		.caam = {
2460 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2461 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2462 					   OP_ALG_AAI_HMAC_PRECOMP,
2463 		},
2464 	},
2465 	{
2466 		.aead = {
2467 			.base = {
2468 				.cra_name = "echainiv(authenc(hmac(sha1),"
2469 					    "cbc(des)))",
2470 				.cra_driver_name = "echainiv-authenc-"
2471 						   "hmac-sha1-cbc-des-caam-qi2",
2472 				.cra_blocksize = DES_BLOCK_SIZE,
2473 			},
2474 			.setkey = aead_setkey,
2475 			.setauthsize = aead_setauthsize,
2476 			.encrypt = aead_encrypt,
2477 			.decrypt = aead_decrypt,
2478 			.ivsize = DES_BLOCK_SIZE,
2479 			.maxauthsize = SHA1_DIGEST_SIZE,
2480 		},
2481 		.caam = {
2482 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2483 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2484 					   OP_ALG_AAI_HMAC_PRECOMP,
2485 			.geniv = true,
2486 		}
2487 	},
2488 	{
2489 		.aead = {
2490 			.base = {
2491 				.cra_name = "authenc(hmac(sha224),cbc(des))",
2492 				.cra_driver_name = "authenc-hmac-sha224-"
2493 						   "cbc-des-caam-qi2",
2494 				.cra_blocksize = DES_BLOCK_SIZE,
2495 			},
2496 			.setkey = aead_setkey,
2497 			.setauthsize = aead_setauthsize,
2498 			.encrypt = aead_encrypt,
2499 			.decrypt = aead_decrypt,
2500 			.ivsize = DES_BLOCK_SIZE,
2501 			.maxauthsize = SHA224_DIGEST_SIZE,
2502 		},
2503 		.caam = {
2504 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2505 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2506 					   OP_ALG_AAI_HMAC_PRECOMP,
2507 		},
2508 	},
2509 	{
2510 		.aead = {
2511 			.base = {
2512 				.cra_name = "echainiv(authenc(hmac(sha224),"
2513 					    "cbc(des)))",
2514 				.cra_driver_name = "echainiv-authenc-"
2515 						   "hmac-sha224-cbc-des-"
2516 						   "caam-qi2",
2517 				.cra_blocksize = DES_BLOCK_SIZE,
2518 			},
2519 			.setkey = aead_setkey,
2520 			.setauthsize = aead_setauthsize,
2521 			.encrypt = aead_encrypt,
2522 			.decrypt = aead_decrypt,
2523 			.ivsize = DES_BLOCK_SIZE,
2524 			.maxauthsize = SHA224_DIGEST_SIZE,
2525 		},
2526 		.caam = {
2527 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2528 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2529 					   OP_ALG_AAI_HMAC_PRECOMP,
2530 			.geniv = true,
2531 		}
2532 	},
2533 	{
2534 		.aead = {
2535 			.base = {
2536 				.cra_name = "authenc(hmac(sha256),cbc(des))",
2537 				.cra_driver_name = "authenc-hmac-sha256-"
2538 						   "cbc-des-caam-qi2",
2539 				.cra_blocksize = DES_BLOCK_SIZE,
2540 			},
2541 			.setkey = aead_setkey,
2542 			.setauthsize = aead_setauthsize,
2543 			.encrypt = aead_encrypt,
2544 			.decrypt = aead_decrypt,
2545 			.ivsize = DES_BLOCK_SIZE,
2546 			.maxauthsize = SHA256_DIGEST_SIZE,
2547 		},
2548 		.caam = {
2549 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2550 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2551 					   OP_ALG_AAI_HMAC_PRECOMP,
2552 		},
2553 	},
2554 	{
2555 		.aead = {
2556 			.base = {
2557 				.cra_name = "echainiv(authenc(hmac(sha256),"
2558 					    "cbc(des)))",
2559 				.cra_driver_name = "echainiv-authenc-"
2560 						   "hmac-sha256-cbc-des-"
2561 						   "caam-qi2",
2562 				.cra_blocksize = DES_BLOCK_SIZE,
2563 			},
2564 			.setkey = aead_setkey,
2565 			.setauthsize = aead_setauthsize,
2566 			.encrypt = aead_encrypt,
2567 			.decrypt = aead_decrypt,
2568 			.ivsize = DES_BLOCK_SIZE,
2569 			.maxauthsize = SHA256_DIGEST_SIZE,
2570 		},
2571 		.caam = {
2572 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2573 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2574 					   OP_ALG_AAI_HMAC_PRECOMP,
2575 			.geniv = true,
2576 		},
2577 	},
2578 	{
2579 		.aead = {
2580 			.base = {
2581 				.cra_name = "authenc(hmac(sha384),cbc(des))",
2582 				.cra_driver_name = "authenc-hmac-sha384-"
2583 						   "cbc-des-caam-qi2",
2584 				.cra_blocksize = DES_BLOCK_SIZE,
2585 			},
2586 			.setkey = aead_setkey,
2587 			.setauthsize = aead_setauthsize,
2588 			.encrypt = aead_encrypt,
2589 			.decrypt = aead_decrypt,
2590 			.ivsize = DES_BLOCK_SIZE,
2591 			.maxauthsize = SHA384_DIGEST_SIZE,
2592 		},
2593 		.caam = {
2594 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2595 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2596 					   OP_ALG_AAI_HMAC_PRECOMP,
2597 		},
2598 	},
2599 	{
2600 		.aead = {
2601 			.base = {
2602 				.cra_name = "echainiv(authenc(hmac(sha384),"
2603 					    "cbc(des)))",
2604 				.cra_driver_name = "echainiv-authenc-"
2605 						   "hmac-sha384-cbc-des-"
2606 						   "caam-qi2",
2607 				.cra_blocksize = DES_BLOCK_SIZE,
2608 			},
2609 			.setkey = aead_setkey,
2610 			.setauthsize = aead_setauthsize,
2611 			.encrypt = aead_encrypt,
2612 			.decrypt = aead_decrypt,
2613 			.ivsize = DES_BLOCK_SIZE,
2614 			.maxauthsize = SHA384_DIGEST_SIZE,
2615 		},
2616 		.caam = {
2617 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2618 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2619 					   OP_ALG_AAI_HMAC_PRECOMP,
2620 			.geniv = true,
2621 		}
2622 	},
2623 	{
2624 		.aead = {
2625 			.base = {
2626 				.cra_name = "authenc(hmac(sha512),cbc(des))",
2627 				.cra_driver_name = "authenc-hmac-sha512-"
2628 						   "cbc-des-caam-qi2",
2629 				.cra_blocksize = DES_BLOCK_SIZE,
2630 			},
2631 			.setkey = aead_setkey,
2632 			.setauthsize = aead_setauthsize,
2633 			.encrypt = aead_encrypt,
2634 			.decrypt = aead_decrypt,
2635 			.ivsize = DES_BLOCK_SIZE,
2636 			.maxauthsize = SHA512_DIGEST_SIZE,
2637 		},
2638 		.caam = {
2639 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2640 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2641 					   OP_ALG_AAI_HMAC_PRECOMP,
2642 		}
2643 	},
2644 	{
2645 		.aead = {
2646 			.base = {
2647 				.cra_name = "echainiv(authenc(hmac(sha512),"
2648 					    "cbc(des)))",
2649 				.cra_driver_name = "echainiv-authenc-"
2650 						   "hmac-sha512-cbc-des-"
2651 						   "caam-qi2",
2652 				.cra_blocksize = DES_BLOCK_SIZE,
2653 			},
2654 			.setkey = aead_setkey,
2655 			.setauthsize = aead_setauthsize,
2656 			.encrypt = aead_encrypt,
2657 			.decrypt = aead_decrypt,
2658 			.ivsize = DES_BLOCK_SIZE,
2659 			.maxauthsize = SHA512_DIGEST_SIZE,
2660 		},
2661 		.caam = {
2662 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2663 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2664 					   OP_ALG_AAI_HMAC_PRECOMP,
2665 			.geniv = true,
2666 		}
2667 	},
2668 	{
2669 		.aead = {
2670 			.base = {
2671 				.cra_name = "authenc(hmac(md5),"
2672 					    "rfc3686(ctr(aes)))",
2673 				.cra_driver_name = "authenc-hmac-md5-"
2674 						   "rfc3686-ctr-aes-caam-qi2",
2675 				.cra_blocksize = 1,
2676 			},
2677 			.setkey = aead_setkey,
2678 			.setauthsize = aead_setauthsize,
2679 			.encrypt = aead_encrypt,
2680 			.decrypt = aead_decrypt,
2681 			.ivsize = CTR_RFC3686_IV_SIZE,
2682 			.maxauthsize = MD5_DIGEST_SIZE,
2683 		},
2684 		.caam = {
2685 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2686 					   OP_ALG_AAI_CTR_MOD128,
2687 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2688 					   OP_ALG_AAI_HMAC_PRECOMP,
2689 			.rfc3686 = true,
2690 		},
2691 	},
2692 	{
2693 		.aead = {
2694 			.base = {
2695 				.cra_name = "seqiv(authenc("
2696 					    "hmac(md5),rfc3686(ctr(aes))))",
2697 				.cra_driver_name = "seqiv-authenc-hmac-md5-"
2698 						   "rfc3686-ctr-aes-caam-qi2",
2699 				.cra_blocksize = 1,
2700 			},
2701 			.setkey = aead_setkey,
2702 			.setauthsize = aead_setauthsize,
2703 			.encrypt = aead_encrypt,
2704 			.decrypt = aead_decrypt,
2705 			.ivsize = CTR_RFC3686_IV_SIZE,
2706 			.maxauthsize = MD5_DIGEST_SIZE,
2707 		},
2708 		.caam = {
2709 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2710 					   OP_ALG_AAI_CTR_MOD128,
2711 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
2712 					   OP_ALG_AAI_HMAC_PRECOMP,
2713 			.rfc3686 = true,
2714 			.geniv = true,
2715 		},
2716 	},
2717 	{
2718 		.aead = {
2719 			.base = {
2720 				.cra_name = "authenc(hmac(sha1),"
2721 					    "rfc3686(ctr(aes)))",
2722 				.cra_driver_name = "authenc-hmac-sha1-"
2723 						   "rfc3686-ctr-aes-caam-qi2",
2724 				.cra_blocksize = 1,
2725 			},
2726 			.setkey = aead_setkey,
2727 			.setauthsize = aead_setauthsize,
2728 			.encrypt = aead_encrypt,
2729 			.decrypt = aead_decrypt,
2730 			.ivsize = CTR_RFC3686_IV_SIZE,
2731 			.maxauthsize = SHA1_DIGEST_SIZE,
2732 		},
2733 		.caam = {
2734 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2735 					   OP_ALG_AAI_CTR_MOD128,
2736 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2737 					   OP_ALG_AAI_HMAC_PRECOMP,
2738 			.rfc3686 = true,
2739 		},
2740 	},
2741 	{
2742 		.aead = {
2743 			.base = {
2744 				.cra_name = "seqiv(authenc("
2745 					    "hmac(sha1),rfc3686(ctr(aes))))",
2746 				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
2747 						   "rfc3686-ctr-aes-caam-qi2",
2748 				.cra_blocksize = 1,
2749 			},
2750 			.setkey = aead_setkey,
2751 			.setauthsize = aead_setauthsize,
2752 			.encrypt = aead_encrypt,
2753 			.decrypt = aead_decrypt,
2754 			.ivsize = CTR_RFC3686_IV_SIZE,
2755 			.maxauthsize = SHA1_DIGEST_SIZE,
2756 		},
2757 		.caam = {
2758 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2759 					   OP_ALG_AAI_CTR_MOD128,
2760 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2761 					   OP_ALG_AAI_HMAC_PRECOMP,
2762 			.rfc3686 = true,
2763 			.geniv = true,
2764 		},
2765 	},
2766 	{
2767 		.aead = {
2768 			.base = {
2769 				.cra_name = "authenc(hmac(sha224),"
2770 					    "rfc3686(ctr(aes)))",
2771 				.cra_driver_name = "authenc-hmac-sha224-"
2772 						   "rfc3686-ctr-aes-caam-qi2",
2773 				.cra_blocksize = 1,
2774 			},
2775 			.setkey = aead_setkey,
2776 			.setauthsize = aead_setauthsize,
2777 			.encrypt = aead_encrypt,
2778 			.decrypt = aead_decrypt,
2779 			.ivsize = CTR_RFC3686_IV_SIZE,
2780 			.maxauthsize = SHA224_DIGEST_SIZE,
2781 		},
2782 		.caam = {
2783 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2784 					   OP_ALG_AAI_CTR_MOD128,
2785 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2786 					   OP_ALG_AAI_HMAC_PRECOMP,
2787 			.rfc3686 = true,
2788 		},
2789 	},
2790 	{
2791 		.aead = {
2792 			.base = {
2793 				.cra_name = "seqiv(authenc("
2794 					    "hmac(sha224),rfc3686(ctr(aes))))",
2795 				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
2796 						   "rfc3686-ctr-aes-caam-qi2",
2797 				.cra_blocksize = 1,
2798 			},
2799 			.setkey = aead_setkey,
2800 			.setauthsize = aead_setauthsize,
2801 			.encrypt = aead_encrypt,
2802 			.decrypt = aead_decrypt,
2803 			.ivsize = CTR_RFC3686_IV_SIZE,
2804 			.maxauthsize = SHA224_DIGEST_SIZE,
2805 		},
2806 		.caam = {
2807 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2808 					   OP_ALG_AAI_CTR_MOD128,
2809 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2810 					   OP_ALG_AAI_HMAC_PRECOMP,
2811 			.rfc3686 = true,
2812 			.geniv = true,
2813 		},
2814 	},
2815 	{
2816 		.aead = {
2817 			.base = {
2818 				.cra_name = "authenc(hmac(sha256),"
2819 					    "rfc3686(ctr(aes)))",
2820 				.cra_driver_name = "authenc-hmac-sha256-"
2821 						   "rfc3686-ctr-aes-caam-qi2",
2822 				.cra_blocksize = 1,
2823 			},
2824 			.setkey = aead_setkey,
2825 			.setauthsize = aead_setauthsize,
2826 			.encrypt = aead_encrypt,
2827 			.decrypt = aead_decrypt,
2828 			.ivsize = CTR_RFC3686_IV_SIZE,
2829 			.maxauthsize = SHA256_DIGEST_SIZE,
2830 		},
2831 		.caam = {
2832 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2833 					   OP_ALG_AAI_CTR_MOD128,
2834 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2835 					   OP_ALG_AAI_HMAC_PRECOMP,
2836 			.rfc3686 = true,
2837 		},
2838 	},
2839 	{
2840 		.aead = {
2841 			.base = {
2842 				.cra_name = "seqiv(authenc(hmac(sha256),"
2843 					    "rfc3686(ctr(aes))))",
2844 				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
2845 						   "rfc3686-ctr-aes-caam-qi2",
2846 				.cra_blocksize = 1,
2847 			},
2848 			.setkey = aead_setkey,
2849 			.setauthsize = aead_setauthsize,
2850 			.encrypt = aead_encrypt,
2851 			.decrypt = aead_decrypt,
2852 			.ivsize = CTR_RFC3686_IV_SIZE,
2853 			.maxauthsize = SHA256_DIGEST_SIZE,
2854 		},
2855 		.caam = {
2856 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2857 					   OP_ALG_AAI_CTR_MOD128,
2858 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2859 					   OP_ALG_AAI_HMAC_PRECOMP,
2860 			.rfc3686 = true,
2861 			.geniv = true,
2862 		},
2863 	},
2864 	{
2865 		.aead = {
2866 			.base = {
2867 				.cra_name = "authenc(hmac(sha384),"
2868 					    "rfc3686(ctr(aes)))",
2869 				.cra_driver_name = "authenc-hmac-sha384-"
2870 						   "rfc3686-ctr-aes-caam-qi2",
2871 				.cra_blocksize = 1,
2872 			},
2873 			.setkey = aead_setkey,
2874 			.setauthsize = aead_setauthsize,
2875 			.encrypt = aead_encrypt,
2876 			.decrypt = aead_decrypt,
2877 			.ivsize = CTR_RFC3686_IV_SIZE,
2878 			.maxauthsize = SHA384_DIGEST_SIZE,
2879 		},
2880 		.caam = {
2881 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2882 					   OP_ALG_AAI_CTR_MOD128,
2883 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2884 					   OP_ALG_AAI_HMAC_PRECOMP,
2885 			.rfc3686 = true,
2886 		},
2887 	},
2888 	{
2889 		.aead = {
2890 			.base = {
2891 				.cra_name = "seqiv(authenc(hmac(sha384),"
2892 					    "rfc3686(ctr(aes))))",
2893 				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
2894 						   "rfc3686-ctr-aes-caam-qi2",
2895 				.cra_blocksize = 1,
2896 			},
2897 			.setkey = aead_setkey,
2898 			.setauthsize = aead_setauthsize,
2899 			.encrypt = aead_encrypt,
2900 			.decrypt = aead_decrypt,
2901 			.ivsize = CTR_RFC3686_IV_SIZE,
2902 			.maxauthsize = SHA384_DIGEST_SIZE,
2903 		},
2904 		.caam = {
2905 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2906 					   OP_ALG_AAI_CTR_MOD128,
2907 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2908 					   OP_ALG_AAI_HMAC_PRECOMP,
2909 			.rfc3686 = true,
2910 			.geniv = true,
2911 		},
2912 	},
2913 	{
2914 		.aead = {
2915 			.base = {
2916 				.cra_name = "rfc7539(chacha20,poly1305)",
2917 				.cra_driver_name = "rfc7539-chacha20-poly1305-"
2918 						   "caam-qi2",
2919 				.cra_blocksize = 1,
2920 			},
2921 			.setkey = chachapoly_setkey,
2922 			.setauthsize = chachapoly_setauthsize,
2923 			.encrypt = aead_encrypt,
2924 			.decrypt = aead_decrypt,
2925 			.ivsize = CHACHAPOLY_IV_SIZE,
2926 			.maxauthsize = POLY1305_DIGEST_SIZE,
2927 		},
2928 		.caam = {
2929 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2930 					   OP_ALG_AAI_AEAD,
2931 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2932 					   OP_ALG_AAI_AEAD,
2933 			.nodkp = true,
2934 		},
2935 	},
2936 	{
2937 		.aead = {
2938 			.base = {
2939 				.cra_name = "rfc7539esp(chacha20,poly1305)",
2940 				.cra_driver_name = "rfc7539esp-chacha20-"
2941 						   "poly1305-caam-qi2",
2942 				.cra_blocksize = 1,
2943 			},
2944 			.setkey = chachapoly_setkey,
2945 			.setauthsize = chachapoly_setauthsize,
2946 			.encrypt = aead_encrypt,
2947 			.decrypt = aead_decrypt,
2948 			.ivsize = 8,
2949 			.maxauthsize = POLY1305_DIGEST_SIZE,
2950 		},
2951 		.caam = {
2952 			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2953 					   OP_ALG_AAI_AEAD,
2954 			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2955 					   OP_ALG_AAI_AEAD,
2956 			.nodkp = true,
2957 		},
2958 	},
2959 	{
2960 		.aead = {
2961 			.base = {
2962 				.cra_name = "authenc(hmac(sha512),"
2963 					    "rfc3686(ctr(aes)))",
2964 				.cra_driver_name = "authenc-hmac-sha512-"
2965 						   "rfc3686-ctr-aes-caam-qi2",
2966 				.cra_blocksize = 1,
2967 			},
2968 			.setkey = aead_setkey,
2969 			.setauthsize = aead_setauthsize,
2970 			.encrypt = aead_encrypt,
2971 			.decrypt = aead_decrypt,
2972 			.ivsize = CTR_RFC3686_IV_SIZE,
2973 			.maxauthsize = SHA512_DIGEST_SIZE,
2974 		},
2975 		.caam = {
2976 			.class1_alg_type = OP_ALG_ALGSEL_AES |
2977 					   OP_ALG_AAI_CTR_MOD128,
2978 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2979 					   OP_ALG_AAI_HMAC_PRECOMP,
2980 			.rfc3686 = true,
2981 		},
2982 	},
2983 	{
2984 		.aead = {
2985 			.base = {
2986 				.cra_name = "seqiv(authenc(hmac(sha512),"
2987 					    "rfc3686(ctr(aes))))",
2988 				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
2989 						   "rfc3686-ctr-aes-caam-qi2",
2990 				.cra_blocksize = 1,
2991 			},
2992 			.setkey = aead_setkey,
2993 			.setauthsize = aead_setauthsize,
2994 			.encrypt = aead_encrypt,
2995 			.decrypt = aead_decrypt,
2996 			.ivsize = CTR_RFC3686_IV_SIZE,
2997 			.maxauthsize = SHA512_DIGEST_SIZE,
2998 		},
2999 		.caam = {
3000 			.class1_alg_type = OP_ALG_ALGSEL_AES |
3001 					   OP_ALG_AAI_CTR_MOD128,
3002 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3003 					   OP_ALG_AAI_HMAC_PRECOMP,
3004 			.rfc3686 = true,
3005 			.geniv = true,
3006 		},
3007 	},
3008 };
3009 
3010 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3011 {
3012 	struct skcipher_alg *alg = &t_alg->skcipher;
3013 
3014 	alg->base.cra_module = THIS_MODULE;
3015 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3016 	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3017 	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3018 			      CRYPTO_ALG_KERN_DRIVER_ONLY);
3019 
3020 	alg->init = caam_cra_init_skcipher;
3021 	alg->exit = caam_cra_exit;
3022 }
3023 
3024 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3025 {
3026 	struct aead_alg *alg = &t_alg->aead;
3027 
3028 	alg->base.cra_module = THIS_MODULE;
3029 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
3030 	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3031 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3032 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
3033 
3034 	alg->init = caam_cra_init_aead;
3035 	alg->exit = caam_cra_exit_aead;
3036 }
3037 
3038 /* max hash key is max split key size */
3039 #define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
3040 
3041 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
3042 
3043 /* caam context sizes for hashes: running digest + 8-byte message length */
3044 #define HASH_MSG_LEN			8
3045 #define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3046 
3047 enum hash_optype {
3048 	UPDATE = 0,
3049 	UPDATE_FIRST,
3050 	FINALIZE,
3051 	DIGEST,
3052 	HASH_NUM_OP
3053 };
3054 
3055 /**
3056  * struct caam_hash_ctx - ahash per-session context
3057  * @flc: Flow Contexts array
3058  * @key: authentication key
3059  * @flc_dma: I/O virtual addresses of the Flow Contexts
3060  * @dev: dpseci device
3061  * @ctx_len: size of Context Register
3062  * @adata: hashing algorithm details
3063  */
3064 struct caam_hash_ctx {
3065 	struct caam_flc flc[HASH_NUM_OP];
3066 	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3067 	dma_addr_t flc_dma[HASH_NUM_OP];
3068 	struct device *dev;
3069 	int ctx_len;
3070 	struct alginfo adata;
3071 };
3072 
3073 /* ahash state */
3074 struct caam_hash_state {
3075 	struct caam_request caam_req;
3076 	dma_addr_t buf_dma;
3077 	dma_addr_t ctx_dma;
3078 	int ctx_dma_len;
3079 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3080 	int buflen;
3081 	int next_buflen;
3082 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3083 	int (*update)(struct ahash_request *req);
3084 	int (*final)(struct ahash_request *req);
3085 	int (*finup)(struct ahash_request *req);
3086 };
3087 
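/* snapshot of the ahash state used by the export()/import() callbacks */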
3088 struct caam_export_state {
3089 	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3090 	u8 caam_ctx[MAX_CTX_LEN];
3091 	int buflen;
3092 	int (*update)(struct ahash_request *req);
3093 	int (*final)(struct ahash_request *req);
3094 	int (*finup)(struct ahash_request *req);
3095 };
3096 
3097 /* Map current buffer in state (if length > 0) and put it in link table */
3098 static inline int buf_map_to_qm_sg(struct device *dev,
3099 				   struct dpaa2_sg_entry *qm_sg,
3100 				   struct caam_hash_state *state)
3101 {
3102 	int buflen = state->buflen;
3103 
3104 	if (!buflen)
3105 		return 0;
3106 
3107 	state->buf_dma = dma_map_single(dev, state->buf, buflen,
3108 					DMA_TO_DEVICE);
3109 	if (dma_mapping_error(dev, state->buf_dma)) {
3110 		dev_err(dev, "unable to map buf\n");
3111 		state->buf_dma = 0;
3112 		return -ENOMEM;
3113 	}
3114 
3115 	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3116 
3117 	return 0;
3118 }
3119 
3120 /* Map state->caam_ctx, and add it to link table */
3121 static inline int ctx_map_to_qm_sg(struct device *dev,
3122 				   struct caam_hash_state *state, int ctx_len,
3123 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
3124 {
3125 	state->ctx_dma_len = ctx_len;
3126 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3127 	if (dma_mapping_error(dev, state->ctx_dma)) {
3128 		dev_err(dev, "unable to map ctx\n");
3129 		state->ctx_dma = 0;
3130 		return -ENOMEM;
3131 	}
3132 
3133 	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3134 
3135 	return 0;
3136 }
3137 
3138 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3139 {
3140 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3141 	int digestsize = crypto_ahash_digestsize(ahash);
3142 	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3143 	struct caam_flc *flc;
3144 	u32 *desc;
3145 
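	/*
	 * Build one shared descriptor per hash_optype: UPDATE and
	 * UPDATE_FIRST carry the running context forward, while FINALIZE
	 * and DIGEST produce the final digest.
	 */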
3146 	/* ahash_update shared descriptor */
3147 	flc = &ctx->flc[UPDATE];
3148 	desc = flc->sh_desc;
3149 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3150 			  ctx->ctx_len, true, priv->sec_attr.era);
3151 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3152 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3153 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3154 	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3155 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3156 			     1);
3157 
3158 	/* ahash_update_first shared descriptor */
3159 	flc = &ctx->flc[UPDATE_FIRST];
3160 	desc = flc->sh_desc;
3161 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3162 			  ctx->ctx_len, false, priv->sec_attr.era);
3163 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3164 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3165 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3166 	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3167 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3168 			     1);
3169 
3170 	/* ahash_final shared descriptor */
3171 	flc = &ctx->flc[FINALIZE];
3172 	desc = flc->sh_desc;
3173 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3174 			  ctx->ctx_len, true, priv->sec_attr.era);
3175 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3176 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3177 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3178 	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3179 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3180 			     1);
3181 
3182 	/* ahash_digest shared descriptor */
3183 	flc = &ctx->flc[DIGEST];
3184 	desc = flc->sh_desc;
3185 	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3186 			  ctx->ctx_len, false, priv->sec_attr.era);
3187 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3188 	dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3189 				   desc_bytes(desc), DMA_BIDIRECTIONAL);
3190 	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3191 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3192 			     1);
3193 
3194 	return 0;
3195 }
3196 
3197 struct split_key_sh_result {
3198 	struct completion completion;
3199 	int err;
3200 	struct device *dev;
3201 };
3202 
3203 static void split_key_sh_done(void *cbk_ctx, u32 err)
3204 {
3205 	struct split_key_sh_result *res = cbk_ctx;
3206 
3207 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3208 
3209 	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3210 	complete(&res->completion);
3211 }
3212 
3213 /* Digest the key down to digestsize bytes if it is too large */
3214 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3215 			   u32 digestsize)
3216 {
3217 	struct caam_request *req_ctx;
3218 	u32 *desc;
3219 	struct split_key_sh_result result;
3220 	dma_addr_t key_dma;
3221 	struct caam_flc *flc;
3222 	dma_addr_t flc_dma;
3223 	int ret = -ENOMEM;
3224 	struct dpaa2_fl_entry *in_fle, *out_fle;
3225 
3226 	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
3227 	if (!req_ctx)
3228 		return -ENOMEM;
3229 
3230 	in_fle = &req_ctx->fd_flt[1];
3231 	out_fle = &req_ctx->fd_flt[0];
3232 
3233 	flc = kzalloc(sizeof(*flc), GFP_KERNEL);
3234 	if (!flc)
3235 		goto err_flc;
3236 
3237 	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3238 	if (dma_mapping_error(ctx->dev, key_dma)) {
3239 		dev_err(ctx->dev, "unable to map key memory\n");
3240 		goto err_key_dma;
3241 	}
3242 
3243 	desc = flc->sh_desc;
3244 
3245 	init_sh_desc(desc, 0);
3246 
3247 	/* descriptor to perform unkeyed hash on key_in */
3248 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3249 			 OP_ALG_AS_INITFINAL);
3250 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3251 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3252 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3253 			 LDST_SRCDST_BYTE_CONTEXT);
3254 
3255 	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3256 	flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3257 				 desc_bytes(desc), DMA_TO_DEVICE);
3258 	if (dma_mapping_error(ctx->dev, flc_dma)) {
3259 		dev_err(ctx->dev, "unable to map shared descriptor\n");
3260 		goto err_flc_dma;
3261 	}
3262 
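	/*
	 * Input and output frame list entries both point at key_dma, so the
	 * key is digested in place; *keylen is updated to digestsize before
	 * returning.
	 */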
3263 	dpaa2_fl_set_final(in_fle, true);
3264 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3265 	dpaa2_fl_set_addr(in_fle, key_dma);
3266 	dpaa2_fl_set_len(in_fle, *keylen);
3267 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3268 	dpaa2_fl_set_addr(out_fle, key_dma);
3269 	dpaa2_fl_set_len(out_fle, digestsize);
3270 
3271 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3272 			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3273 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3274 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3275 			     1);
3276 
3277 	result.err = 0;
3278 	init_completion(&result.completion);
3279 	result.dev = ctx->dev;
3280 
3281 	req_ctx->flc = flc;
3282 	req_ctx->flc_dma = flc_dma;
3283 	req_ctx->cbk = split_key_sh_done;
3284 	req_ctx->ctx = &result;
3285 
3286 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
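	/* enqueue the job and, if accepted, wait for its completion */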
3287 	if (ret == -EINPROGRESS) {
3288 		/* in progress */
3289 		wait_for_completion(&result.completion);
3290 		ret = result.err;
3291 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3292 				     DUMP_PREFIX_ADDRESS, 16, 4, key,
3293 				     digestsize, 1);
3294 	}
3295 
3296 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3297 			 DMA_TO_DEVICE);
3298 err_flc_dma:
3299 	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3300 err_key_dma:
3301 	kfree(flc);
3302 err_flc:
3303 	kfree(req_ctx);
3304 
3305 	*keylen = digestsize;
3306 
3307 	return ret;
3308 }
3309 
3310 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3311 			unsigned int keylen)
3312 {
3313 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3314 	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3315 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
3316 	int ret;
3317 	u8 *hashed_key = NULL;
3318 
3319 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3320 
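	/*
	 * Per the HMAC convention, a key longer than the block size is
	 * first hashed down to the digest size.
	 */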
3321 	if (keylen > blocksize) {
3322 		unsigned int aligned_len =
3323 			ALIGN(keylen, dma_get_cache_alignment());
3324 
3325 		if (aligned_len < keylen)
3326 			return -EOVERFLOW;
3327 
3328 		hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
3329 		if (!hashed_key)
3330 			return -ENOMEM;
3331 		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3332 		if (ret)
3333 			goto bad_free_key;
3334 		key = hashed_key;
3335 	}
3336 
3337 	ctx->adata.keylen = keylen;
3338 	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3339 					      OP_ALG_ALGSEL_MASK);
3340 	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3341 		goto bad_free_key;
3342 
3343 	ctx->adata.key_virt = key;
3344 	ctx->adata.key_inline = true;
3345 
3346 	/*
3347 	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3348 	 * in invalid opcodes (last bytes of user key) in the resulting
3349 	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3350 	 * addresses are needed.
3351 	 */
3352 	if (keylen > ctx->adata.keylen_pad) {
3353 		memcpy(ctx->key, key, keylen);
3354 		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3355 					   ctx->adata.keylen_pad,
3356 					   DMA_TO_DEVICE);
3357 	}
3358 
3359 	ret = ahash_set_sh_desc(ahash);
3360 	kfree(hashed_key);
3361 	return ret;
3362 bad_free_key:
3363 	kfree(hashed_key);
3364 	return -EINVAL;
3365 }
3366 
3367 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3368 			       struct ahash_request *req)
3369 {
3370 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3371 
3372 	if (edesc->src_nents)
3373 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3374 
3375 	if (edesc->qm_sg_bytes)
3376 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3377 				 DMA_TO_DEVICE);
3378 
3379 	if (state->buf_dma) {
3380 		dma_unmap_single(dev, state->buf_dma, state->buflen,
3381 				 DMA_TO_DEVICE);
3382 		state->buf_dma = 0;
3383 	}
3384 }
3385 
3386 static inline void ahash_unmap_ctx(struct device *dev,
3387 				   struct ahash_edesc *edesc,
3388 				   struct ahash_request *req, u32 flag)
3389 {
3390 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3391 
3392 	if (state->ctx_dma) {
3393 		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3394 		state->ctx_dma = 0;
3395 	}
3396 	ahash_unmap(dev, edesc, req);
3397 }
3398 
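/*
 * Completion callbacks, one per combination of where the result lives and
 * whether more data may follow:
 * - ahash_done:         one-shot digest, result copied from the HW context
 * - ahash_done_bi:      update on a running context (bidirectional DMA),
 *                       also saves the tail bytes for the next update
 * - ahash_done_ctx_src: final/finup consuming the running context
 * - ahash_done_ctx_dst: first update producing a fresh running context,
 *                       also saves the tail bytes for the next update
 * All of them translate the HW status, unmap the request's DMA resources,
 * recycle the extended descriptor and complete the crypto request.
 */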
3399 static void ahash_done(void *cbk_ctx, u32 status)
3400 {
3401 	struct crypto_async_request *areq = cbk_ctx;
3402 	struct ahash_request *req = ahash_request_cast(areq);
3403 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3404 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3405 	struct ahash_edesc *edesc = state->caam_req.edesc;
3406 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3407 	int digestsize = crypto_ahash_digestsize(ahash);
3408 	int ecode = 0;
3409 
3410 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3411 
3412 	if (unlikely(status))
3413 		ecode = caam_qi2_strstatus(ctx->dev, status);
3414 
3415 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3416 	memcpy(req->result, state->caam_ctx, digestsize);
3417 	qi_cache_free(edesc);
3418 
3419 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3420 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3421 			     ctx->ctx_len, 1);
3422 
3423 	ahash_request_complete(req, ecode);
3424 }
3425 
3426 static void ahash_done_bi(void *cbk_ctx, u32 status)
3427 {
3428 	struct crypto_async_request *areq = cbk_ctx;
3429 	struct ahash_request *req = ahash_request_cast(areq);
3430 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3431 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3432 	struct ahash_edesc *edesc = state->caam_req.edesc;
3433 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3434 	int ecode = 0;
3435 
3436 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3437 
3438 	if (unlikely(status))
3439 		ecode = caam_qi2_strstatus(ctx->dev, status);
3440 
3441 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3442 	qi_cache_free(edesc);
3443 
3444 	scatterwalk_map_and_copy(state->buf, req->src,
3445 				 req->nbytes - state->next_buflen,
3446 				 state->next_buflen, 0);
3447 	state->buflen = state->next_buflen;
3448 
3449 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3450 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3451 			     state->buflen, 1);
3452 
3453 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3454 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3455 			     ctx->ctx_len, 1);
3456 	if (req->result)
3457 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3458 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3459 				     crypto_ahash_digestsize(ahash), 1);
3460 
3461 	ahash_request_complete(req, ecode);
3462 }
3463 
3464 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3465 {
3466 	struct crypto_async_request *areq = cbk_ctx;
3467 	struct ahash_request *req = ahash_request_cast(areq);
3468 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3469 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3470 	struct ahash_edesc *edesc = state->caam_req.edesc;
3471 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3472 	int digestsize = crypto_ahash_digestsize(ahash);
3473 	int ecode = 0;
3474 
3475 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3476 
3477 	if (unlikely(status))
3478 		ecode = caam_qi2_strstatus(ctx->dev, status);
3479 
3480 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3481 	memcpy(req->result, state->caam_ctx, digestsize);
3482 	qi_cache_free(edesc);
3483 
3484 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3485 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3486 			     ctx->ctx_len, 1);
3487 
3488 	ahash_request_complete(req, ecode);
3489 }
3490 
3491 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3492 {
3493 	struct crypto_async_request *areq = cbk_ctx;
3494 	struct ahash_request *req = ahash_request_cast(areq);
3495 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3496 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3497 	struct ahash_edesc *edesc = state->caam_req.edesc;
3498 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3499 	int ecode = 0;
3500 
3501 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3502 
3503 	if (unlikely(status))
3504 		ecode = caam_qi2_strstatus(ctx->dev, status);
3505 
3506 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3507 	qi_cache_free(edesc);
3508 
3509 	scatterwalk_map_and_copy(state->buf, req->src,
3510 				 req->nbytes - state->next_buflen,
3511 				 state->next_buflen, 0);
3512 	state->buflen = state->next_buflen;
3513 
3514 	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3515 			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3516 			     state->buflen, 1);
3517 
3518 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3519 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3520 			     ctx->ctx_len, 1);
3521 	if (req->result)
3522 		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3523 				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3524 				     crypto_ahash_digestsize(ahash), 1);
3525 
3526 	ahash_request_complete(req, ecode);
3527 }
3528 
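/*
 * ahash_update_ctx - hash more data on top of a running context
 *
 * Only a block-size multiple (to_hash) is sent to the engine, together
 * with the running context and any bytes buffered by a previous call;
 * the remainder (next_buflen) is carried over in the state buffer.
 */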
3529 static int ahash_update_ctx(struct ahash_request *req)
3530 {
3531 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3532 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3533 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3534 	struct caam_request *req_ctx = &state->caam_req;
3535 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3536 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3537 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3538 		      GFP_KERNEL : GFP_ATOMIC;
3539 	u8 *buf = state->buf;
3540 	int *buflen = &state->buflen;
3541 	int *next_buflen = &state->next_buflen;
3542 	int in_len = *buflen + req->nbytes, to_hash;
3543 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3544 	struct ahash_edesc *edesc;
3545 	int ret = 0;
3546 
3547 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3548 	to_hash = in_len - *next_buflen;
3549 
3550 	if (to_hash) {
3551 		struct dpaa2_sg_entry *sg_table;
3552 		int src_len = req->nbytes - *next_buflen;
3553 
3554 		src_nents = sg_nents_for_len(req->src, src_len);
3555 		if (src_nents < 0) {
3556 			dev_err(ctx->dev, "Invalid number of src SG.\n");
3557 			return src_nents;
3558 		}
3559 
3560 		if (src_nents) {
3561 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3562 						  DMA_TO_DEVICE);
3563 			if (!mapped_nents) {
3564 				dev_err(ctx->dev, "unable to DMA map source\n");
3565 				return -ENOMEM;
3566 			}
3567 		} else {
3568 			mapped_nents = 0;
3569 		}
3570 
3571 		/* allocate space for base edesc and link tables */
3572 		edesc = qi_cache_zalloc(flags);
3573 		if (!edesc) {
3574 			dma_unmap_sg(ctx->dev, req->src, src_nents,
3575 				     DMA_TO_DEVICE);
3576 			return -ENOMEM;
3577 		}
3578 
3579 		edesc->src_nents = src_nents;
3580 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3581 		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3582 			      sizeof(*sg_table);
3583 		sg_table = &edesc->sgt[0];
3584 
3585 		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3586 				       DMA_BIDIRECTIONAL);
3587 		if (ret)
3588 			goto unmap_ctx;
3589 
3590 		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3591 		if (ret)
3592 			goto unmap_ctx;
3593 
3594 		if (mapped_nents) {
3595 			sg_to_qm_sg_last(req->src, src_len,
3596 					 sg_table + qm_sg_src_index, 0);
3597 		} else {
3598 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3599 					   true);
3600 		}
3601 
3602 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3603 						  qm_sg_bytes, DMA_TO_DEVICE);
3604 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3605 			dev_err(ctx->dev, "unable to map S/G table\n");
3606 			ret = -ENOMEM;
3607 			goto unmap_ctx;
3608 		}
3609 		edesc->qm_sg_bytes = qm_sg_bytes;
3610 
3611 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3612 		dpaa2_fl_set_final(in_fle, true);
3613 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3614 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3615 		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3616 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3617 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3618 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3619 
3620 		req_ctx->flc = &ctx->flc[UPDATE];
3621 		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3622 		req_ctx->cbk = ahash_done_bi;
3623 		req_ctx->ctx = &req->base;
3624 		req_ctx->edesc = edesc;
3625 
3626 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3627 		if (ret != -EINPROGRESS &&
3628 		    !(ret == -EBUSY &&
3629 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3630 			goto unmap_ctx;
3631 	} else if (*next_buflen) {
3632 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3633 					 req->nbytes, 0);
3634 		*buflen = *next_buflen;
3635 
3636 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3637 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
3638 				     *buflen, 1);
3639 	}
3640 
3641 	return ret;
3642 unmap_ctx:
3643 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3644 	qi_cache_free(edesc);
3645 	return ret;
3646 }
3647 
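/*
 * ahash_final_ctx - terminate a hash that already has a running context
 *
 * The input S/G table chains the running context and, if present, the
 * buffered tail bytes; the FINALIZE flow context writes the final digest
 * over the running context, from where the callback copies it out.
 */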
3648 static int ahash_final_ctx(struct ahash_request *req)
3649 {
3650 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3651 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3652 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3653 	struct caam_request *req_ctx = &state->caam_req;
3654 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3655 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3656 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3657 		      GFP_KERNEL : GFP_ATOMIC;
3658 	int buflen = state->buflen;
3659 	int qm_sg_bytes;
3660 	int digestsize = crypto_ahash_digestsize(ahash);
3661 	struct ahash_edesc *edesc;
3662 	struct dpaa2_sg_entry *sg_table;
3663 	int ret;
3664 
3665 	/* allocate space for base edesc and link tables */
3666 	edesc = qi_cache_zalloc(flags);
3667 	if (!edesc)
3668 		return -ENOMEM;
3669 
3670 	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3671 	sg_table = &edesc->sgt[0];
3672 
3673 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3674 			       DMA_BIDIRECTIONAL);
3675 	if (ret)
3676 		goto unmap_ctx;
3677 
3678 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3679 	if (ret)
3680 		goto unmap_ctx;
3681 
3682 	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3683 
3684 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3685 					  DMA_TO_DEVICE);
3686 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3687 		dev_err(ctx->dev, "unable to map S/G table\n");
3688 		ret = -ENOMEM;
3689 		goto unmap_ctx;
3690 	}
3691 	edesc->qm_sg_bytes = qm_sg_bytes;
3692 
3693 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3694 	dpaa2_fl_set_final(in_fle, true);
3695 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3696 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3697 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3698 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3699 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3700 	dpaa2_fl_set_len(out_fle, digestsize);
3701 
3702 	req_ctx->flc = &ctx->flc[FINALIZE];
3703 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3704 	req_ctx->cbk = ahash_done_ctx_src;
3705 	req_ctx->ctx = &req->base;
3706 	req_ctx->edesc = edesc;
3707 
3708 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3709 	if (ret == -EINPROGRESS ||
3710 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3711 		return ret;
3712 
3713 unmap_ctx:
3714 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3715 	qi_cache_free(edesc);
3716 	return ret;
3717 }
3718 
3719 static int ahash_finup_ctx(struct ahash_request *req)
3720 {
3721 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3722 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3723 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3724 	struct caam_request *req_ctx = &state->caam_req;
3725 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3726 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3727 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3728 		      GFP_KERNEL : GFP_ATOMIC;
3729 	int buflen = state->buflen;
3730 	int qm_sg_bytes, qm_sg_src_index;
3731 	int src_nents, mapped_nents;
3732 	int digestsize = crypto_ahash_digestsize(ahash);
3733 	struct ahash_edesc *edesc;
3734 	struct dpaa2_sg_entry *sg_table;
3735 	int ret;
3736 
3737 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3738 	if (src_nents < 0) {
3739 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3740 		return src_nents;
3741 	}
3742 
3743 	if (src_nents) {
3744 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3745 					  DMA_TO_DEVICE);
3746 		if (!mapped_nents) {
3747 			dev_err(ctx->dev, "unable to DMA map source\n");
3748 			return -ENOMEM;
3749 		}
3750 	} else {
3751 		mapped_nents = 0;
3752 	}
3753 
3754 	/* allocate space for base edesc and link tables */
3755 	edesc = qi_cache_zalloc(flags);
3756 	if (!edesc) {
3757 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3758 		return -ENOMEM;
3759 	}
3760 
3761 	edesc->src_nents = src_nents;
3762 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
3763 	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3764 		      sizeof(*sg_table);
3765 	sg_table = &edesc->sgt[0];
3766 
3767 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3768 			       DMA_BIDIRECTIONAL);
3769 	if (ret)
3770 		goto unmap_ctx;
3771 
3772 	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3773 	if (ret)
3774 		goto unmap_ctx;
3775 
3776 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3777 
3778 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3779 					  DMA_TO_DEVICE);
3780 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3781 		dev_err(ctx->dev, "unable to map S/G table\n");
3782 		ret = -ENOMEM;
3783 		goto unmap_ctx;
3784 	}
3785 	edesc->qm_sg_bytes = qm_sg_bytes;
3786 
3787 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3788 	dpaa2_fl_set_final(in_fle, true);
3789 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3790 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3791 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3792 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3793 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3794 	dpaa2_fl_set_len(out_fle, digestsize);
3795 
3796 	req_ctx->flc = &ctx->flc[FINALIZE];
3797 	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3798 	req_ctx->cbk = ahash_done_ctx_src;
3799 	req_ctx->ctx = &req->base;
3800 	req_ctx->edesc = edesc;
3801 
3802 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3803 	if (ret == -EINPROGRESS ||
3804 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3805 		return ret;
3806 
3807 unmap_ctx:
3808 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3809 	qi_cache_free(edesc);
3810 	return ret;
3811 }
3812 
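/*
 * ahash_digest - one-shot hash of the whole request
 *
 * There is no running context: the source data (single entry or S/G
 * table) feeds the DIGEST flow context directly and the digest lands in
 * state->caam_ctx, from where ahash_done() copies it to req->result.
 */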
3813 static int ahash_digest(struct ahash_request *req)
3814 {
3815 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3816 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3817 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3818 	struct caam_request *req_ctx = &state->caam_req;
3819 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3820 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3821 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3822 		      GFP_KERNEL : GFP_ATOMIC;
3823 	int digestsize = crypto_ahash_digestsize(ahash);
3824 	int src_nents, mapped_nents;
3825 	struct ahash_edesc *edesc;
3826 	int ret = -ENOMEM;
3827 
3828 	state->buf_dma = 0;
3829 
3830 	src_nents = sg_nents_for_len(req->src, req->nbytes);
3831 	if (src_nents < 0) {
3832 		dev_err(ctx->dev, "Invalid number of src SG.\n");
3833 		return src_nents;
3834 	}
3835 
3836 	if (src_nents) {
3837 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3838 					  DMA_TO_DEVICE);
3839 		if (!mapped_nents) {
3840 			dev_err(ctx->dev, "unable to map source for DMA\n");
3841 			return ret;
3842 		}
3843 	} else {
3844 		mapped_nents = 0;
3845 	}
3846 
3847 	/* allocate space for base edesc and link tables */
3848 	edesc = qi_cache_zalloc(flags);
3849 	if (!edesc) {
3850 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3851 		return ret;
3852 	}
3853 
3854 	edesc->src_nents = src_nents;
3855 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3856 
3857 	if (mapped_nents > 1) {
3858 		int qm_sg_bytes;
3859 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3860 
3861 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3862 		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3863 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3864 						  qm_sg_bytes, DMA_TO_DEVICE);
3865 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3866 			dev_err(ctx->dev, "unable to map S/G table\n");
3867 			goto unmap;
3868 		}
3869 		edesc->qm_sg_bytes = qm_sg_bytes;
3870 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3871 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3872 	} else {
3873 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3874 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3875 	}
3876 
3877 	state->ctx_dma_len = digestsize;
3878 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3879 					DMA_FROM_DEVICE);
3880 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3881 		dev_err(ctx->dev, "unable to map ctx\n");
3882 		state->ctx_dma = 0;
3883 		goto unmap;
3884 	}
3885 
3886 	dpaa2_fl_set_final(in_fle, true);
3887 	dpaa2_fl_set_len(in_fle, req->nbytes);
3888 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3889 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3890 	dpaa2_fl_set_len(out_fle, digestsize);
3891 
3892 	req_ctx->flc = &ctx->flc[DIGEST];
3893 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3894 	req_ctx->cbk = ahash_done;
3895 	req_ctx->ctx = &req->base;
3896 	req_ctx->edesc = edesc;
3897 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3898 	if (ret == -EINPROGRESS ||
3899 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3900 		return ret;
3901 
3902 unmap:
3903 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3904 	qi_cache_free(edesc);
3905 	return ret;
3906 }
3907 
3908 static int ahash_final_no_ctx(struct ahash_request *req)
3909 {
3910 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3911 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3912 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3913 	struct caam_request *req_ctx = &state->caam_req;
3914 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3915 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3916 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3917 		      GFP_KERNEL : GFP_ATOMIC;
3918 	u8 *buf = state->buf;
3919 	int buflen = state->buflen;
3920 	int digestsize = crypto_ahash_digestsize(ahash);
3921 	struct ahash_edesc *edesc;
3922 	int ret = -ENOMEM;
3923 
3924 	/* allocate space for base edesc and link tables */
3925 	edesc = qi_cache_zalloc(flags);
3926 	if (!edesc)
3927 		return ret;
3928 
3929 	if (buflen) {
3930 		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3931 						DMA_TO_DEVICE);
3932 		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3933 			dev_err(ctx->dev, "unable to map src\n");
3934 			goto unmap;
3935 		}
3936 	}
3937 
3938 	state->ctx_dma_len = digestsize;
3939 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3940 					DMA_FROM_DEVICE);
3941 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3942 		dev_err(ctx->dev, "unable to map ctx\n");
3943 		state->ctx_dma = 0;
3944 		goto unmap;
3945 	}
3946 
3947 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3948 	dpaa2_fl_set_final(in_fle, true);
3949 	/*
3950 	 * The crypto engine requires the input entry to be present when the
3951 	 * "frame list" FD format is used.
3952 	 * Since the engine does not support FMT=2'b11 (unused entry type),
3953 	 * leaving in_fle zeroized (except for the "Final" flag) is best.
3954 	 */
3955 	if (buflen) {
3956 		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3957 		dpaa2_fl_set_addr(in_fle, state->buf_dma);
3958 		dpaa2_fl_set_len(in_fle, buflen);
3959 	}
3960 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3961 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3962 	dpaa2_fl_set_len(out_fle, digestsize);
3963 
3964 	req_ctx->flc = &ctx->flc[DIGEST];
3965 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3966 	req_ctx->cbk = ahash_done;
3967 	req_ctx->ctx = &req->base;
3968 	req_ctx->edesc = edesc;
3969 
3970 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3971 	if (ret == -EINPROGRESS ||
3972 	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3973 		return ret;
3974 
3975 unmap:
3976 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3977 	qi_cache_free(edesc);
3978 	return ret;
3979 }
3980 
3981 static int ahash_update_no_ctx(struct ahash_request *req)
3982 {
3983 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3984 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3985 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
3986 	struct caam_request *req_ctx = &state->caam_req;
3987 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3988 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3989 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3990 		      GFP_KERNEL : GFP_ATOMIC;
3991 	u8 *buf = state->buf;
3992 	int *buflen = &state->buflen;
3993 	int *next_buflen = &state->next_buflen;
3994 	int in_len = *buflen + req->nbytes, to_hash;
3995 	int qm_sg_bytes, src_nents, mapped_nents;
3996 	struct ahash_edesc *edesc;
3997 	int ret = 0;
3998 
3999 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
4000 	to_hash = in_len - *next_buflen;
4001 
4002 	if (to_hash) {
4003 		struct dpaa2_sg_entry *sg_table;
4004 		int src_len = req->nbytes - *next_buflen;
4005 
4006 		src_nents = sg_nents_for_len(req->src, src_len);
4007 		if (src_nents < 0) {
4008 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4009 			return src_nents;
4010 		}
4011 
4012 		if (src_nents) {
4013 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4014 						  DMA_TO_DEVICE);
4015 			if (!mapped_nents) {
4016 				dev_err(ctx->dev, "unable to DMA map source\n");
4017 				return -ENOMEM;
4018 			}
4019 		} else {
4020 			mapped_nents = 0;
4021 		}
4022 
4023 		/* allocate space for base edesc and link tables */
4024 		edesc = qi_cache_zalloc(flags);
4025 		if (!edesc) {
4026 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4027 				     DMA_TO_DEVICE);
4028 			return -ENOMEM;
4029 		}
4030 
4031 		edesc->src_nents = src_nents;
4032 		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4033 			      sizeof(*sg_table);
4034 		sg_table = &edesc->sgt[0];
4035 
4036 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4037 		if (ret)
4038 			goto unmap_ctx;
4039 
4040 		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4041 
4042 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4043 						  qm_sg_bytes, DMA_TO_DEVICE);
4044 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4045 			dev_err(ctx->dev, "unable to map S/G table\n");
4046 			ret = -ENOMEM;
4047 			goto unmap_ctx;
4048 		}
4049 		edesc->qm_sg_bytes = qm_sg_bytes;
4050 
4051 		state->ctx_dma_len = ctx->ctx_len;
4052 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4053 						ctx->ctx_len, DMA_FROM_DEVICE);
4054 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4055 			dev_err(ctx->dev, "unable to map ctx\n");
4056 			state->ctx_dma = 0;
4057 			ret = -ENOMEM;
4058 			goto unmap_ctx;
4059 		}
4060 
4061 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4062 		dpaa2_fl_set_final(in_fle, true);
4063 		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4064 		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4065 		dpaa2_fl_set_len(in_fle, to_hash);
4066 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4067 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4068 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4069 
4070 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4071 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4072 		req_ctx->cbk = ahash_done_ctx_dst;
4073 		req_ctx->ctx = &req->base;
4074 		req_ctx->edesc = edesc;
4075 
4076 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4077 		if (ret != -EINPROGRESS &&
4078 		    !(ret == -EBUSY &&
4079 		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4080 			goto unmap_ctx;
4081 
4082 		state->update = ahash_update_ctx;
4083 		state->finup = ahash_finup_ctx;
4084 		state->final = ahash_final_ctx;
4085 	} else if (*next_buflen) {
4086 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4087 					 req->nbytes, 0);
4088 		*buflen = *next_buflen;
4089 
4090 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4091 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4092 				     *buflen, 1);
4093 	}
4094 
4095 	return ret;
4096 unmap_ctx:
4097 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4098 	qi_cache_free(edesc);
4099 	return ret;
4100 }
4101 
4102 static int ahash_finup_no_ctx(struct ahash_request *req)
4103 {
4104 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4105 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
4106 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4107 	struct caam_request *req_ctx = &state->caam_req;
4108 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4109 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4110 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4111 		      GFP_KERNEL : GFP_ATOMIC;
4112 	int buflen = state->buflen;
4113 	int qm_sg_bytes, src_nents, mapped_nents;
4114 	int digestsize = crypto_ahash_digestsize(ahash);
4115 	struct ahash_edesc *edesc;
4116 	struct dpaa2_sg_entry *sg_table;
4117 	int ret = -ENOMEM;
4118 
4119 	src_nents = sg_nents_for_len(req->src, req->nbytes);
4120 	if (src_nents < 0) {
4121 		dev_err(ctx->dev, "Invalid number of src SG.\n");
4122 		return src_nents;
4123 	}
4124 
4125 	if (src_nents) {
4126 		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4127 					  DMA_TO_DEVICE);
4128 		if (!mapped_nents) {
4129 			dev_err(ctx->dev, "unable to DMA map source\n");
4130 			return ret;
4131 		}
4132 	} else {
4133 		mapped_nents = 0;
4134 	}
4135 
4136 	/* allocate space for base edesc and link tables */
4137 	edesc = qi_cache_zalloc(flags);
4138 	if (!edesc) {
4139 		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4140 		return ret;
4141 	}
4142 
4143 	edesc->src_nents = src_nents;
4144 	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4145 	sg_table = &edesc->sgt[0];
4146 
4147 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4148 	if (ret)
4149 		goto unmap;
4150 
4151 	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4152 
4153 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4154 					  DMA_TO_DEVICE);
4155 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4156 		dev_err(ctx->dev, "unable to map S/G table\n");
4157 		ret = -ENOMEM;
4158 		goto unmap;
4159 	}
4160 	edesc->qm_sg_bytes = qm_sg_bytes;
4161 
4162 	state->ctx_dma_len = digestsize;
4163 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4164 					DMA_FROM_DEVICE);
4165 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4166 		dev_err(ctx->dev, "unable to map ctx\n");
4167 		state->ctx_dma = 0;
4168 		ret = -ENOMEM;
4169 		goto unmap;
4170 	}
4171 
4172 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4173 	dpaa2_fl_set_final(in_fle, true);
4174 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4175 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4176 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4177 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4178 	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4179 	dpaa2_fl_set_len(out_fle, digestsize);
4180 
4181 	req_ctx->flc = &ctx->flc[DIGEST];
4182 	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4183 	req_ctx->cbk = ahash_done;
4184 	req_ctx->ctx = &req->base;
4185 	req_ctx->edesc = edesc;
4186 	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4187 	if (ret != -EINPROGRESS &&
4188 	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4189 		goto unmap;
4190 
4191 	return ret;
4192 unmap:
4193 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4194 	qi_cache_free(edesc);
4195 	return ret;
4196 }
4197 
4198 static int ahash_update_first(struct ahash_request *req)
4199 {
4200 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4201 	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
4202 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4203 	struct caam_request *req_ctx = &state->caam_req;
4204 	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4205 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4206 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4207 		      GFP_KERNEL : GFP_ATOMIC;
4208 	u8 *buf = state->buf;
4209 	int *buflen = &state->buflen;
4210 	int *next_buflen = &state->next_buflen;
4211 	int to_hash;
4212 	int src_nents, mapped_nents;
4213 	struct ahash_edesc *edesc;
4214 	int ret = 0;
4215 
4216 	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4217 				      1);
4218 	to_hash = req->nbytes - *next_buflen;
4219 
4220 	if (to_hash) {
4221 		struct dpaa2_sg_entry *sg_table;
4222 		int src_len = req->nbytes - *next_buflen;
4223 
4224 		src_nents = sg_nents_for_len(req->src, src_len);
4225 		if (src_nents < 0) {
4226 			dev_err(ctx->dev, "Invalid number of src SG.\n");
4227 			return src_nents;
4228 		}
4229 
4230 		if (src_nents) {
4231 			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4232 						  DMA_TO_DEVICE);
4233 			if (!mapped_nents) {
4234 				dev_err(ctx->dev, "unable to map source for DMA\n");
4235 				return -ENOMEM;
4236 			}
4237 		} else {
4238 			mapped_nents = 0;
4239 		}
4240 
4241 		/* allocate space for base edesc and link tables */
4242 		edesc = qi_cache_zalloc(flags);
4243 		if (!edesc) {
4244 			dma_unmap_sg(ctx->dev, req->src, src_nents,
4245 				     DMA_TO_DEVICE);
4246 			return -ENOMEM;
4247 		}
4248 
4249 		edesc->src_nents = src_nents;
4250 		sg_table = &edesc->sgt[0];
4251 
4252 		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4253 		dpaa2_fl_set_final(in_fle, true);
4254 		dpaa2_fl_set_len(in_fle, to_hash);
4255 
4256 		if (mapped_nents > 1) {
4257 			int qm_sg_bytes;
4258 
4259 			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4260 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
4261 				      sizeof(*sg_table);
4262 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4263 							  qm_sg_bytes,
4264 							  DMA_TO_DEVICE);
4265 			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4266 				dev_err(ctx->dev, "unable to map S/G table\n");
4267 				ret = -ENOMEM;
4268 				goto unmap_ctx;
4269 			}
4270 			edesc->qm_sg_bytes = qm_sg_bytes;
4271 			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4272 			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4273 		} else {
4274 			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4275 			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4276 		}
4277 
4278 		state->ctx_dma_len = ctx->ctx_len;
4279 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4280 						ctx->ctx_len, DMA_FROM_DEVICE);
4281 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4282 			dev_err(ctx->dev, "unable to map ctx\n");
4283 			state->ctx_dma = 0;
4284 			ret = -ENOMEM;
4285 			goto unmap_ctx;
4286 		}
4287 
4288 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4289 		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4290 		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4291 
4292 		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4293 		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4294 		req_ctx->cbk = ahash_done_ctx_dst;
4295 		req_ctx->ctx = &req->base;
4296 		req_ctx->edesc = edesc;
4297 
4298 		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4299 		if (ret != -EINPROGRESS &&
4300 		    !(ret == -EBUSY && req->base.flags &
4301 		      CRYPTO_TFM_REQ_MAY_BACKLOG))
4302 			goto unmap_ctx;
4303 
4304 		state->update = ahash_update_ctx;
4305 		state->finup = ahash_finup_ctx;
4306 		state->final = ahash_final_ctx;
4307 	} else if (*next_buflen) {
4308 		state->update = ahash_update_no_ctx;
4309 		state->finup = ahash_finup_no_ctx;
4310 		state->final = ahash_final_no_ctx;
4311 		scatterwalk_map_and_copy(buf, req->src, 0,
4312 					 req->nbytes, 0);
4313 		*buflen = *next_buflen;
4314 
4315 		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4316 				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
4317 				     *buflen, 1);
4318 	}
4319 
4320 	return ret;
4321 unmap_ctx:
4322 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4323 	qi_cache_free(edesc);
4324 	return ret;
4325 }
4326 
4327 static int ahash_finup_first(struct ahash_request *req)
4328 {
4329 	return ahash_digest(req);
4330 }
4331 
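/*
 * The update/finup/final hooks in caam_hash_state form a small state
 * machine:
 * - a fresh request starts with the "first" / "no_ctx" handlers;
 * - once at least one full block has been hashed, a running context
 *   exists and the "_ctx" handlers take over;
 * - if only sub-block data has been buffered so far, the "no_ctx"
 *   handlers remain in use.
 */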
4332 static int ahash_init(struct ahash_request *req)
4333 {
4334 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4335 
4336 	state->update = ahash_update_first;
4337 	state->finup = ahash_finup_first;
4338 	state->final = ahash_final_no_ctx;
4339 
4340 	state->ctx_dma = 0;
4341 	state->ctx_dma_len = 0;
4342 	state->buf_dma = 0;
4343 	state->buflen = 0;
4344 	state->next_buflen = 0;
4345 
4346 	return 0;
4347 }
4348 
4349 static int ahash_update(struct ahash_request *req)
4350 {
4351 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4352 
4353 	return state->update(req);
4354 }
4355 
4356 static int ahash_finup(struct ahash_request *req)
4357 {
4358 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4359 
4360 	return state->finup(req);
4361 }
4362 
4363 static int ahash_final(struct ahash_request *req)
4364 {
4365 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4366 
4367 	return state->final(req);
4368 }
4369 
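/*
 * export/import serialize only the software-visible part of the request
 * state (partial-block buffer, running context snapshot and the current
 * state-machine hooks); DMA mappings are per-request and are deliberately
 * not part of the exported state.
 */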
4370 static int ahash_export(struct ahash_request *req, void *out)
4371 {
4372 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4373 	struct caam_export_state *export = out;
4374 	u8 *buf = state->buf;
4375 	int len = state->buflen;
4376 
4377 	memcpy(export->buf, buf, len);
4378 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4379 	export->buflen = len;
4380 	export->update = state->update;
4381 	export->final = state->final;
4382 	export->finup = state->finup;
4383 
4384 	return 0;
4385 }
4386 
4387 static int ahash_import(struct ahash_request *req, const void *in)
4388 {
4389 	struct caam_hash_state *state = ahash_request_ctx_dma(req);
4390 	const struct caam_export_state *export = in;
4391 
4392 	memset(state, 0, sizeof(*state));
4393 	memcpy(state->buf, export->buf, export->buflen);
4394 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4395 	state->buflen = export->buflen;
4396 	state->update = export->update;
4397 	state->final = export->final;
4398 	state->finup = export->finup;
4399 
4400 	return 0;
4401 }
4402 
4403 struct caam_hash_template {
4404 	char name[CRYPTO_MAX_ALG_NAME];
4405 	char driver_name[CRYPTO_MAX_ALG_NAME];
4406 	char hmac_name[CRYPTO_MAX_ALG_NAME];
4407 	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4408 	unsigned int blocksize;
4409 	struct ahash_alg template_ahash;
4410 	u32 alg_type;
4411 };
4412 
4413 /* ahash descriptors */
4414 static struct caam_hash_template driver_hash[] = {
4415 	{
4416 		.name = "sha1",
4417 		.driver_name = "sha1-caam-qi2",
4418 		.hmac_name = "hmac(sha1)",
4419 		.hmac_driver_name = "hmac-sha1-caam-qi2",
4420 		.blocksize = SHA1_BLOCK_SIZE,
4421 		.template_ahash = {
4422 			.init = ahash_init,
4423 			.update = ahash_update,
4424 			.final = ahash_final,
4425 			.finup = ahash_finup,
4426 			.digest = ahash_digest,
4427 			.export = ahash_export,
4428 			.import = ahash_import,
4429 			.setkey = ahash_setkey,
4430 			.halg = {
4431 				.digestsize = SHA1_DIGEST_SIZE,
4432 				.statesize = sizeof(struct caam_export_state),
4433 			},
4434 		},
4435 		.alg_type = OP_ALG_ALGSEL_SHA1,
4436 	}, {
4437 		.name = "sha224",
4438 		.driver_name = "sha224-caam-qi2",
4439 		.hmac_name = "hmac(sha224)",
4440 		.hmac_driver_name = "hmac-sha224-caam-qi2",
4441 		.blocksize = SHA224_BLOCK_SIZE,
4442 		.template_ahash = {
4443 			.init = ahash_init,
4444 			.update = ahash_update,
4445 			.final = ahash_final,
4446 			.finup = ahash_finup,
4447 			.digest = ahash_digest,
4448 			.export = ahash_export,
4449 			.import = ahash_import,
4450 			.setkey = ahash_setkey,
4451 			.halg = {
4452 				.digestsize = SHA224_DIGEST_SIZE,
4453 				.statesize = sizeof(struct caam_export_state),
4454 			},
4455 		},
4456 		.alg_type = OP_ALG_ALGSEL_SHA224,
4457 	}, {
4458 		.name = "sha256",
4459 		.driver_name = "sha256-caam-qi2",
4460 		.hmac_name = "hmac(sha256)",
4461 		.hmac_driver_name = "hmac-sha256-caam-qi2",
4462 		.blocksize = SHA256_BLOCK_SIZE,
4463 		.template_ahash = {
4464 			.init = ahash_init,
4465 			.update = ahash_update,
4466 			.final = ahash_final,
4467 			.finup = ahash_finup,
4468 			.digest = ahash_digest,
4469 			.export = ahash_export,
4470 			.import = ahash_import,
4471 			.setkey = ahash_setkey,
4472 			.halg = {
4473 				.digestsize = SHA256_DIGEST_SIZE,
4474 				.statesize = sizeof(struct caam_export_state),
4475 			},
4476 		},
4477 		.alg_type = OP_ALG_ALGSEL_SHA256,
4478 	}, {
4479 		.name = "sha384",
4480 		.driver_name = "sha384-caam-qi2",
4481 		.hmac_name = "hmac(sha384)",
4482 		.hmac_driver_name = "hmac-sha384-caam-qi2",
4483 		.blocksize = SHA384_BLOCK_SIZE,
4484 		.template_ahash = {
4485 			.init = ahash_init,
4486 			.update = ahash_update,
4487 			.final = ahash_final,
4488 			.finup = ahash_finup,
4489 			.digest = ahash_digest,
4490 			.export = ahash_export,
4491 			.import = ahash_import,
4492 			.setkey = ahash_setkey,
4493 			.halg = {
4494 				.digestsize = SHA384_DIGEST_SIZE,
4495 				.statesize = sizeof(struct caam_export_state),
4496 			},
4497 		},
4498 		.alg_type = OP_ALG_ALGSEL_SHA384,
4499 	}, {
4500 		.name = "sha512",
4501 		.driver_name = "sha512-caam-qi2",
4502 		.hmac_name = "hmac(sha512)",
4503 		.hmac_driver_name = "hmac-sha512-caam-qi2",
4504 		.blocksize = SHA512_BLOCK_SIZE,
4505 		.template_ahash = {
4506 			.init = ahash_init,
4507 			.update = ahash_update,
4508 			.final = ahash_final,
4509 			.finup = ahash_finup,
4510 			.digest = ahash_digest,
4511 			.export = ahash_export,
4512 			.import = ahash_import,
4513 			.setkey = ahash_setkey,
4514 			.halg = {
4515 				.digestsize = SHA512_DIGEST_SIZE,
4516 				.statesize = sizeof(struct caam_export_state),
4517 			},
4518 		},
4519 		.alg_type = OP_ALG_ALGSEL_SHA512,
4520 	}, {
4521 		.name = "md5",
4522 		.driver_name = "md5-caam-qi2",
4523 		.hmac_name = "hmac(md5)",
4524 		.hmac_driver_name = "hmac-md5-caam-qi2",
4525 		.blocksize = MD5_BLOCK_WORDS * 4,
4526 		.template_ahash = {
4527 			.init = ahash_init,
4528 			.update = ahash_update,
4529 			.final = ahash_final,
4530 			.finup = ahash_finup,
4531 			.digest = ahash_digest,
4532 			.export = ahash_export,
4533 			.import = ahash_import,
4534 			.setkey = ahash_setkey,
4535 			.halg = {
4536 				.digestsize = MD5_DIGEST_SIZE,
4537 				.statesize = sizeof(struct caam_export_state),
4538 			},
4539 		},
4540 		.alg_type = OP_ALG_ALGSEL_MD5,
4541 	}
4542 };
4543 
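/*
 * For reference, in-kernel users reach the algorithms above through the
 * generic ahash API rather than by calling this driver directly. A
 * minimal sketch (src_sg, digest, key, keylen, nbytes and done_cb are
 * assumed to be set up by the caller; error handling elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req;
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, NULL);
 *	ahash_request_set_crypt(req, src_sg, digest, nbytes);
 *	crypto_ahash_digest(req);	// returns -EINPROGRESS if queued
 */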
4544 struct caam_hash_alg {
4545 	struct list_head entry;
4546 	struct device *dev;
4547 	int alg_type;
4548 	bool is_hmac;
4549 	struct ahash_alg ahash_alg;
4550 };
4551 
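/*
 * caam_hash_cra_init - per-transform setup
 *
 * Maps the (HMAC-only) key buffer and the flow context array for DMA and
 * derives the running-digest length (ctx_len) from the algorithm
 * selector. DMA_ATTR_SKIP_CPU_SYNC skips the sync at map time; the driver
 * syncs explicitly (dma_sync_single_for_device()) when the contents
 * actually change, e.g. in ahash_setkey().
 */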
4552 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4553 {
4554 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4555 	struct crypto_alg *base = tfm->__crt_alg;
4556 	struct hash_alg_common *halg =
4557 		 container_of(base, struct hash_alg_common, base);
4558 	struct ahash_alg *alg =
4559 		 container_of(halg, struct ahash_alg, halg);
4560 	struct caam_hash_alg *caam_hash =
4561 		 container_of(alg, struct caam_hash_alg, ahash_alg);
4562 	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
4563 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4564 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4565 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4566 					 HASH_MSG_LEN + 32,
4567 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4568 					 HASH_MSG_LEN + 64,
4569 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4570 	dma_addr_t dma_addr;
4571 	int i;
4572 
4573 	ctx->dev = caam_hash->dev;
4574 
4575 	if (caam_hash->is_hmac) {
4576 		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4577 							  ARRAY_SIZE(ctx->key),
4578 							  DMA_TO_DEVICE,
4579 							  DMA_ATTR_SKIP_CPU_SYNC);
4580 		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4581 			dev_err(ctx->dev, "unable to map key\n");
4582 			return -ENOMEM;
4583 		}
4584 	}
4585 
4586 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4587 					DMA_BIDIRECTIONAL,
4588 					DMA_ATTR_SKIP_CPU_SYNC);
4589 	if (dma_mapping_error(ctx->dev, dma_addr)) {
4590 		dev_err(ctx->dev, "unable to map shared descriptors\n");
4591 		if (ctx->adata.key_dma)
4592 			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4593 					       ARRAY_SIZE(ctx->key),
4594 					       DMA_TO_DEVICE,
4595 					       DMA_ATTR_SKIP_CPU_SYNC);
4596 		return -ENOMEM;
4597 	}
4598 
4599 	for (i = 0; i < HASH_NUM_OP; i++)
4600 		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4601 
4602 	/* copy descriptor header template value */
4603 	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4604 
4605 	ctx->ctx_len = runninglen[(ctx->adata.algtype &
4606 				   OP_ALG_ALGSEL_SUBMASK) >>
4607 				  OP_ALG_ALGSEL_SHIFT];
4608 
4609 	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
4610 
4611 	/*
4612 	 * For keyed hash algorithms shared descriptors
4613 	 * will be created later in setkey() callback
4614 	 */
4615 	return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
4616 }
4617 
4618 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4619 {
4620 	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
4621 
4622 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4623 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4624 	if (ctx->adata.key_dma)
4625 		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4626 				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4627 				       DMA_ATTR_SKIP_CPU_SYNC);
4628 }
4629 
4630 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4631 	struct caam_hash_template *template, bool keyed)
4632 {
4633 	struct caam_hash_alg *t_alg;
4634 	struct ahash_alg *halg;
4635 	struct crypto_alg *alg;
4636 
4637 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4638 	if (!t_alg)
4639 		return ERR_PTR(-ENOMEM);
4640 
4641 	t_alg->ahash_alg = template->template_ahash;
4642 	halg = &t_alg->ahash_alg;
4643 	alg = &halg->halg.base;
4644 
4645 	if (keyed) {
4646 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4647 			 template->hmac_name);
4648 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4649 			 template->hmac_driver_name);
4650 		t_alg->is_hmac = true;
4651 	} else {
4652 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4653 			 template->name);
4654 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4655 			 template->driver_name);
4656 		t_alg->ahash_alg.setkey = NULL;
4657 		t_alg->is_hmac = false;
4658 	}
4659 	alg->cra_module = THIS_MODULE;
4660 	alg->cra_init = caam_hash_cra_init;
4661 	alg->cra_exit = caam_hash_cra_exit;
4662 	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
4663 	alg->cra_priority = CAAM_CRA_PRIORITY;
4664 	alg->cra_blocksize = template->blocksize;
4665 	alg->cra_alignmask = 0;
4666 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4667 
4668 	t_alg->alg_type = template->alg_type;
4669 	t_alg->dev = dev;
4670 
4671 	return t_alg;
4672 }
4673 
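/*
 * Response-path plumbing: each online CPU (up to the number of queue
 * pairs) owns a DPIO notification context. The FQDAN callback below only
 * schedules NAPI; the actual frame processing happens in
 * dpaa2_dpseci_poll().
 */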
4674 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4675 {
4676 	struct dpaa2_caam_priv_per_cpu *ppriv;
4677 
4678 	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4679 	napi_schedule_irqoff(&ppriv->napi);
4680 }
4681 
4682 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4683 {
4684 	struct device *dev = priv->dev;
4685 	struct dpaa2_io_notification_ctx *nctx;
4686 	struct dpaa2_caam_priv_per_cpu *ppriv;
4687 	int err, i = 0, cpu;
4688 
4689 	for_each_online_cpu(cpu) {
4690 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4691 		ppriv->priv = priv;
4692 		nctx = &ppriv->nctx;
4693 		nctx->is_cdan = 0;
4694 		nctx->id = ppriv->rsp_fqid;
4695 		nctx->desired_cpu = cpu;
4696 		nctx->cb = dpaa2_caam_fqdan_cb;
4697 
4698 		/* Register notification callbacks */
4699 		ppriv->dpio = dpaa2_io_service_select(cpu);
4700 		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4701 		if (unlikely(err)) {
4702 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4703 			nctx->cb = NULL;
4704 			/*
4705 			 * If there is no affine DPIO for this core, there is
4706 			 * probably none available for the next cores either.
4707 			 * Signal that we want to retry later, in case the DPIO
4708 			 * devices weren't probed yet.
4709 			 */
4710 			err = -EPROBE_DEFER;
4711 			goto err;
4712 		}
4713 
4714 		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4715 						     dev);
4716 		if (unlikely(!ppriv->store)) {
4717 			dev_err(dev, "dpaa2_io_store_create() failed\n");
4718 			err = -ENOMEM;
4719 			goto err;
4720 		}
4721 
4722 		if (++i == priv->num_pairs)
4723 			break;
4724 	}
4725 
4726 	return 0;
4727 
4728 err:
4729 	for_each_online_cpu(cpu) {
4730 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4731 		if (!ppriv->nctx.cb)
4732 			break;
4733 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4734 	}
4735 
4736 	for_each_online_cpu(cpu) {
4737 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4738 		if (!ppriv->store)
4739 			break;
4740 		dpaa2_io_store_destroy(ppriv->store);
4741 	}
4742 
4743 	return err;
4744 }
4745 
4746 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4747 {
4748 	struct dpaa2_caam_priv_per_cpu *ppriv;
4749 	int i = 0, cpu;
4750 
4751 	for_each_online_cpu(cpu) {
4752 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4753 		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4754 					    priv->dev);
4755 		dpaa2_io_store_destroy(ppriv->store);
4756 
4757 		if (++i == priv->num_pairs)
4758 			return;
4759 	}
4760 }
4761 
4762 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4763 {
4764 	struct dpseci_rx_queue_cfg rx_queue_cfg;
4765 	struct device *dev = priv->dev;
4766 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4767 	struct dpaa2_caam_priv_per_cpu *ppriv;
4768 	int err = 0, i = 0, cpu;
4769 
4770 	/* Configure Rx queues */
4771 	for_each_online_cpu(cpu) {
4772 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
4773 
4774 		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4775 				       DPSECI_QUEUE_OPT_USER_CTX;
4776 		rx_queue_cfg.order_preservation_en = 0;
4777 		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4778 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4779 		/*
4780 		 * Rx priority (WQ) doesn't really matter, since we use
4781 		 * pull mode, i.e. volatile dequeues from specific FQs
4782 		 */
4783 		rx_queue_cfg.dest_cfg.priority = 0;
4784 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4785 
4786 		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4787 					  &rx_queue_cfg);
4788 		if (err) {
4789 			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4790 				err);
4791 			return err;
4792 		}
4793 
4794 		if (++i == priv->num_pairs)
4795 			break;
4796 	}
4797 
4798 	return err;
4799 }
4800 
4801 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4802 {
4803 	struct device *dev = priv->dev;
4804 
4805 	if (!priv->cscn_mem)
4806 		return;
4807 
4808 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4809 	kfree(priv->cscn_mem);
4810 }
4811 
4812 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4813 {
4814 	struct device *dev = priv->dev;
4815 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4816 	int err;
4817 
4818 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4819 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4820 		if (err)
4821 			dev_err(dev, "dpseci_reset() failed\n");
4822 	}
4823 
4824 	dpaa2_dpseci_congestion_free(priv);
4825 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4826 }
4827 
4828 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4829 				  const struct dpaa2_fd *fd)
4830 {
4831 	struct caam_request *req;
4832 	u32 fd_err;
4833 
4834 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4835 		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4836 		return;
4837 	}
4838 
4839 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4840 	if (unlikely(fd_err))
4841 		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4842 
4843 	/*
4844 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4845 	 * in FD[ERR] or FD[FRC].
4846 	 */
4847 	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4848 	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4849 			 DMA_BIDIRECTIONAL);
4850 	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4851 }
4852 
4853 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4854 {
4855 	int err;
4856 
4857 	/* Retry while portal is busy */
4858 	do {
4859 		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4860 					       ppriv->store);
4861 	} while (err == -EBUSY);
4862 
4863 	if (unlikely(err))
4864 		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4865 
4866 	return err;
4867 }
4868 
4869 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4870 {
4871 	struct dpaa2_dq *dq;
4872 	int cleaned = 0, is_last;
4873 
4874 	do {
4875 		dq = dpaa2_io_store_next(ppriv->store, &is_last);
4876 		if (unlikely(!dq)) {
4877 			if (unlikely(!is_last)) {
4878 				dev_dbg(ppriv->priv->dev,
4879 					"FQ %d returned no valid frames\n",
4880 					ppriv->rsp_fqid);
4881 				/*
4882 				 * MUST retry until we get some sort of
4883 				 * valid response token (be it "empty dequeue"
4884 				 * or a valid frame).
4885 				 */
4886 				continue;
4887 			}
4888 			break;
4889 		}
4890 
4891 		/* Process FD */
4892 		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4893 		cleaned++;
4894 	} while (!is_last);
4895 
4896 	return cleaned;
4897 }
4898 
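/*
 * NAPI poll: each volatile dequeue pulls at most DPAA2_CAAM_STORE_SIZE
 * frames, so the loop stops once less than a full store's worth of budget
 * remains, which guarantees the budget is never exceeded.
 */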
4899 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4900 {
4901 	struct dpaa2_caam_priv_per_cpu *ppriv;
4902 	struct dpaa2_caam_priv *priv;
4903 	int err, cleaned = 0, store_cleaned;
4904 
4905 	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4906 	priv = ppriv->priv;
4907 
4908 	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4909 		return 0;
4910 
4911 	do {
4912 		store_cleaned = dpaa2_caam_store_consume(ppriv);
4913 		cleaned += store_cleaned;
4914 
4915 		if (store_cleaned == 0 ||
4916 		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4917 			break;
4918 
4919 		/* Try to dequeue some more */
4920 		err = dpaa2_caam_pull_fq(ppriv);
4921 		if (unlikely(err))
4922 			break;
4923 	} while (1);
4924 
4925 	if (cleaned < budget) {
4926 		napi_complete_done(napi, cleaned);
4927 		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4928 		if (unlikely(err))
4929 			dev_err(priv->dev, "Notification rearm failed: %d\n",
4930 				err);
4931 	}
4932 
4933 	return cleaned;
4934 }
4935 
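/*
 * The congestion state change notification (CSCN) area below is written
 * by hardware, so it is sized up to both the DPAA2 CSCN alignment and the
 * cache line size to avoid sharing a cache line with unrelated data.
 */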
4936 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4937 					 u16 token)
4938 {
4939 	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4940 	struct device *dev = priv->dev;
4941 	unsigned int alignmask;
4942 	int err;
4943 
4944 	/*
4945 	 * Congestion group feature supported starting with DPSECI API v5.1
4946 	 * and only when object has been created with this capability.
4947 	 */
4948 	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4949 	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4950 		return 0;
4951 
4952 	alignmask = DPAA2_CSCN_ALIGN - 1;
4953 	alignmask |= dma_get_cache_alignment() - 1;
4954 	priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
4955 				 GFP_KERNEL);
4956 	if (!priv->cscn_mem)
4957 		return -ENOMEM;
4958 
4959 	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
4960 					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4961 	if (dma_mapping_error(dev, priv->cscn_dma)) {
4962 		dev_err(dev, "Error mapping CSCN memory area\n");
4963 		err = -ENOMEM;
4964 		goto err_dma_map;
4965 	}
4966 
4967 	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4968 	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4969 	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4970 	cong_notif_cfg.message_ctx = (uintptr_t)priv;
4971 	cong_notif_cfg.message_iova = priv->cscn_dma;
4972 	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4973 					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4974 					DPSECI_CGN_MODE_COHERENT_WRITE;
4975 
4976 	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4977 						 &cong_notif_cfg);
4978 	if (err) {
4979 		dev_err(dev, "dpseci_set_congestion_notification failed\n");
4980 		goto err_set_cong;
4981 	}
4982 
4983 	return 0;
4984 
4985 err_set_cong:
4986 	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4987 err_dma_map:
4988 	kfree(priv->cscn_mem);
4989 
4990 	return err;
4991 }
4992 
4993 static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus)
4994 {
4995 	struct dpaa2_caam_priv_per_cpu *ppriv;
4996 	int i;
4997 
4998 	for_each_cpu(i, cpus) {
4999 		ppriv = per_cpu_ptr(priv->ppriv, i);
5000 		free_netdev(ppriv->net_dev);
5001 	}
5002 }
5003 
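/*
 * dpaa2_dpseci_setup - one-time DPSECI object bring-up
 *
 * Opens the MC object, resets it where the API version requires it
 * (> 5.3), reads its attributes, sets up congestion notification and
 * distributes the Rx/Tx queue pairs across online CPUs, each dequeuing
 * CPU getting a dummy netdev that carries its NAPI context.
 */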
5004 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
5005 {
5006 	struct device *dev = &ls_dev->dev;
5007 	struct dpaa2_caam_priv *priv;
5008 	struct dpaa2_caam_priv_per_cpu *ppriv;
5009 	cpumask_var_t clean_mask;
5010 	int err, cpu;
5011 	u8 i;
5012 
5013 	err = -ENOMEM;
5014 	if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
5015 		goto err_cpumask;
5016 
5017 	priv = dev_get_drvdata(dev);
5018 
5019 	priv->dev = dev;
5020 	priv->dpsec_id = ls_dev->obj_desc.id;
5021 
5022 	/* Get a handle for the DPSECI this interface is associated with */
5023 	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
5024 	if (err) {
5025 		dev_err(dev, "dpseci_open() failed: %d\n", err);
5026 		goto err_open;
5027 	}
5028 
5029 	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
5030 				     &priv->minor_ver);
5031 	if (err) {
5032 		dev_err(dev, "dpseci_get_api_version() failed\n");
5033 		goto err_get_vers;
5034 	}
5035 
5036 	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5037 
5038 	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5039 		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5040 		if (err) {
5041 			dev_err(dev, "dpseci_reset() failed\n");
5042 			goto err_get_vers;
5043 		}
5044 	}
5045 
5046 	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5047 				    &priv->dpseci_attr);
5048 	if (err) {
5049 		dev_err(dev, "dpseci_get_attributes() failed\n");
5050 		goto err_get_vers;
5051 	}
5052 
5053 	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5054 				  &priv->sec_attr);
5055 	if (err) {
5056 		dev_err(dev, "dpseci_get_sec_attr() failed\n");
5057 		goto err_get_vers;
5058 	}
5059 
5060 	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5061 	if (err) {
5062 		dev_err(dev, "dpaa2_dpseci_congestion_setup() failed\n");
5063 		goto err_get_vers;
5064 	}
5065 
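5066 	/*
5066 	 * Each queue pair (one Rx and one Tx queue) is serviced by one
5066 	 * core, so there is no point in keeping more pairs than there are
5066 	 * online CPUs to dequeue their responses.
5066 	 */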
5066 	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5067 			      priv->dpseci_attr.num_tx_queues);
5068 	if (priv->num_pairs > num_online_cpus()) {
5069 		dev_warn(dev, "%d queues won't be used\n",
5070 			 priv->num_pairs - num_online_cpus());
5071 		priv->num_pairs = num_online_cpus();
5072 	}
5073 
5074 	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
5075 		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5076 					  &priv->rx_queue_attr[i]);
5077 		if (err) {
5078 			dev_err(dev, "dpseci_get_rx_queue() failed\n");
5079 			goto err_get_rx_queue;
5080 		}
5081 	}
5082 
5083 	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5084 		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5085 					  &priv->tx_queue_attr[i]);
5086 		if (err) {
5087 			dev_err(dev, "dpseci_get_tx_queue() failed\n");
5088 			goto err_get_rx_queue;
5089 		}
5090 	}
5091 
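5093 	/*
5093 	 * Distribute the queue pairs round-robin across the online cores:
5093 	 * every core gets a request FQ to enqueue on, but only the first
5093 	 * num_pairs cores also get a response FQ and a NAPI instance for
5093 	 * dequeueing.
5093 	 */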
5092 	i = 0;
5093 	for_each_online_cpu(cpu) {
5094 		u8 j;
5095 
5096 		j = i % priv->num_pairs;
5097 
5098 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
5099 		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5100 
5101 		/*
5102 		 * Allow all cores to enqueue, while only some of them
5103 		 * will take part in dequeuing.
5104 		 */
5105 		if (++i > priv->num_pairs)
5106 			continue;
5107 
5108 		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5109 		ppriv->prio = j;
5110 
5111 		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5112 			priv->rx_queue_attr[j].fqid,
5113 			priv->tx_queue_attr[j].fqid);
5114 
5115 		ppriv->net_dev = alloc_netdev_dummy(0);
5116 		if (!ppriv->net_dev) {
5117 			err = -ENOMEM;
5118 			goto err_alloc_netdev;
5119 		}
5120 		cpumask_set_cpu(cpu, clean_mask);
5121 		ppriv->net_dev->dev = *dev;
5122 
5123 		netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
5124 					 dpaa2_dpseci_poll,
5125 					 DPAA2_CAAM_NAPI_WEIGHT);
5126 	}
5127 
5128 	err = 0;
5129 	goto free_cpumask;
5130 
5131 err_alloc_netdev:
5132 	free_dpaa2_pcpu_netdev(priv, clean_mask);
5133 err_get_rx_queue:
5134 	dpaa2_dpseci_congestion_free(priv);
5135 err_get_vers:
5136 	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5137 err_open:
5138 free_cpumask:
5139 	free_cpumask_var(clean_mask);
5140 err_cpumask:
5141 	return err;
5142 }
5143 
5144 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5145 {
5146 	struct device *dev = priv->dev;
5147 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5148 	struct dpaa2_caam_priv_per_cpu *ppriv;
5149 	int i;
5150 
5151 	for (i = 0; i < priv->num_pairs; i++) {
5152 		ppriv = per_cpu_ptr(priv->ppriv, i);
5153 		napi_enable(&ppriv->napi);
5154 	}
5155 
5156 	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5157 }
5158 
5159 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5160 {
5161 	struct device *dev = priv->dev;
5162 	struct dpaa2_caam_priv_per_cpu *ppriv;
5163 	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5164 	int i, err = 0, enabled;
5165 
5166 	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5167 	if (err) {
5168 		dev_err(dev, "dpseci_disable() failed\n");
5169 		return err;
5170 	}
5171 
5172 	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5173 	if (err) {
5174 		dev_err(dev, "dpseci_is_enabled() failed\n");
5175 		return err;
5176 	}
5177 
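5178 	/* Report whether the disable actually took effect */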
5178 	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5179 
5180 	for (i = 0; i < priv->num_pairs; i++) {
5181 		ppriv = per_cpu_ptr(priv->ppriv, i);
5182 		napi_disable(&ppriv->napi);
5183 		netif_napi_del(&ppriv->napi);
5184 		free_netdev(ppriv->net_dev);
5185 	}
5186 
5187 	return 0;
5188 }
5189 
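5190 /*
5190  * List of ahash algorithms that were successfully registered, so that
5190  * dpaa2_caam_remove() can unregister and free exactly those entries.
5190  */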
5190 static struct list_head hash_list;
5191 
5192 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5193 {
5194 	struct device *dev;
5195 	struct dpaa2_caam_priv *priv;
5196 	int i, err = 0;
5197 	bool registered = false;
5198 
5199 	/*
5200 	 * There is no way to get CAAM endianness - there is no direct register
5201 	 * space access and MC f/w does not provide this attribute.
5202 	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5203 	 * property.
5204 	 */
5205 	caam_little_end = true;
5206 
5207 	caam_imx = false;
5208 
5209 	dev = &dpseci_dev->dev;
5210 
5211 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5212 	if (!priv)
5213 		return -ENOMEM;
5214 
5215 	dev_set_drvdata(dev, priv);
5216 
5217 	priv->domain = iommu_get_domain_for_dev(dev);
5218 
5219 	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5220 				     0, 0, NULL);
5221 	if (!qi_cache) {
5222 		dev_err(dev, "Can't allocate SEC cache\n");
5223 		return -ENOMEM;
5224 	}
5225 
5226 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5227 	if (err) {
5228 		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5229 		goto err_dma_mask;
5230 	}
5231 
5232 	/* Obtain a MC portal */
5233 	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5234 	if (err) {
5235 		if (err == -ENXIO)
5236 			err = -EPROBE_DEFER;
5237 		else
5238 			dev_err(dev, "MC portal allocation failed\n");
5239 
5240 		goto err_dma_mask;
5241 	}
5242 
5243 	priv->ppriv = alloc_percpu(*priv->ppriv);
5244 	if (!priv->ppriv) {
5245 		dev_err(dev, "alloc_percpu() failed\n");
5246 		err = -ENOMEM;
5247 		goto err_alloc_ppriv;
5248 	}
5249 
5250 	/* DPSECI initialization */
5251 	err = dpaa2_dpseci_setup(dpseci_dev);
5252 	if (err) {
5253 		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5254 		goto err_dpseci_setup;
5255 	}
5256 
5257 	/* DPIO */
5258 	err = dpaa2_dpseci_dpio_setup(priv);
5259 	if (err) {
5260 		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
5261 		goto err_dpio_setup;
5262 	}
5263 
5264 	/* DPSECI binding to DPIO */
5265 	err = dpaa2_dpseci_bind(priv);
5266 	if (err) {
5267 		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5268 		goto err_bind;
5269 	}
5270 
5271 	/* DPSECI enable */
5272 	err = dpaa2_dpseci_enable(priv);
5273 	if (err) {
5274 		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5275 		goto err_bind;
5276 	}
5277 
5278 	dpaa2_dpseci_debugfs_init(priv);
5279 
5280 	/* register crypto algorithms the device supports */
5281 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5282 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5283 		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5284 
5285 		/* Skip DES algorithms if not supported by device */
5286 		if (!priv->sec_attr.des_acc_num &&
5287 		    (alg_sel == OP_ALG_ALGSEL_3DES ||
5288 		     alg_sel == OP_ALG_ALGSEL_DES))
5289 			continue;
5290 
5291 		/* Skip AES algorithms if not supported by device */
5292 		if (!priv->sec_attr.aes_acc_num &&
5293 		    alg_sel == OP_ALG_ALGSEL_AES)
5294 			continue;
5295 
5296 		/* Skip CHACHA20 algorithms if not supported by device */
5297 		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5298 		    !priv->sec_attr.ccha_acc_num)
5299 			continue;
5300 
5301 		t_alg->caam.dev = dev;
5302 		caam_skcipher_alg_init(t_alg);
5303 
5304 		err = crypto_register_skcipher(&t_alg->skcipher);
5305 		if (err) {
5306 			dev_warn(dev, "%s alg registration failed: %d\n",
5307 				 t_alg->skcipher.base.cra_driver_name, err);
5308 			continue;
5309 		}
5310 
5311 		t_alg->registered = true;
5312 		registered = true;
5313 	}
5314 
5315 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5316 		struct caam_aead_alg *t_alg = driver_aeads + i;
5317 		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5318 				 OP_ALG_ALGSEL_MASK;
5319 		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5320 				 OP_ALG_ALGSEL_MASK;
5321 
5322 		/* Skip DES algorithms if not supported by device */
5323 		if (!priv->sec_attr.des_acc_num &&
5324 		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5325 		     c1_alg_sel == OP_ALG_ALGSEL_DES))
5326 			continue;
5327 
5328 		/* Skip AES algorithms if not supported by device */
5329 		if (!priv->sec_attr.aes_acc_num &&
5330 		    c1_alg_sel == OP_ALG_ALGSEL_AES)
5331 			continue;
5332 
5333 		/* Skip CHACHA20 algorithms if not supported by device */
5334 		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5335 		    !priv->sec_attr.ccha_acc_num)
5336 			continue;
5337 
5338 		/* Skip POLY1305 algorithms if not supported by device */
5339 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5340 		    !priv->sec_attr.ptha_acc_num)
5341 			continue;
5342 
5343 		/*
5344 		 * Skip algorithms requiring message digests
5345 		 * if MD not supported by device.
5346 		 */
5347 		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5348 		    !priv->sec_attr.md_acc_num)
5349 			continue;
5350 
5351 		t_alg->caam.dev = dev;
5352 		caam_aead_alg_init(t_alg);
5353 
5354 		err = crypto_register_aead(&t_alg->aead);
5355 		if (err) {
5356 			dev_warn(dev, "%s alg registration failed: %d\n",
5357 				 t_alg->aead.base.cra_driver_name, err);
5358 			continue;
5359 		}
5360 
5361 		t_alg->registered = true;
5362 		registered = true;
5363 	}
5364 	if (registered)
5365 		dev_info(dev, "algorithms registered in /proc/crypto\n");
5366 
5367 	/* register hash algorithms the device supports */
5368 	INIT_LIST_HEAD(&hash_list);
5369 
5370 	/*
5371 	 * Skip registration of any hashing algorithms if MD block
5372 	 * is not present.
5373 	 */
5374 	if (!priv->sec_attr.md_acc_num)
5375 		return 0;
5376 
5377 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5378 		struct caam_hash_alg *t_alg;
5379 		struct caam_hash_template *alg = driver_hash + i;
5380 
5381 		/* register hmac version */
5382 		t_alg = caam_hash_alloc(dev, alg, true);
5383 		if (IS_ERR(t_alg)) {
5384 			err = PTR_ERR(t_alg);
5385 			dev_warn(dev, "%s hash alg allocation failed: %d\n",
5386 				 alg->hmac_driver_name, err);
5387 			continue;
5388 		}
5389 
5390 		err = crypto_register_ahash(&t_alg->ahash_alg);
5391 		if (err) {
5392 			dev_warn(dev, "%s alg registration failed: %d\n",
5393 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5394 				 err);
5395 			kfree(t_alg);
5396 		} else {
5397 			list_add_tail(&t_alg->entry, &hash_list);
5398 		}
5399 
5400 		/* register unkeyed version */
5401 		t_alg = caam_hash_alloc(dev, alg, false);
5402 		if (IS_ERR(t_alg)) {
5403 			err = PTR_ERR(t_alg);
5404 			dev_warn(dev, "%s alg allocation failed: %d\n",
5405 				 alg->driver_name, err);
5406 			continue;
5407 		}
5408 
5409 		err = crypto_register_ahash(&t_alg->ahash_alg);
5410 		if (err) {
5411 			dev_warn(dev, "%s alg registration failed: %d\n",
5412 				 t_alg->ahash_alg.halg.base.cra_driver_name,
5413 				 err);
5414 			kfree(t_alg);
5415 		} else {
5416 			list_add_tail(&t_alg->entry, &hash_list);
5417 		}
5418 	}
5419 	if (!list_empty(&hash_list))
5420 		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5421 
5422 	return err;
5423 
5424 err_bind:
5425 	dpaa2_dpseci_dpio_free(priv);
5426 err_dpio_setup:
5427 	dpaa2_dpseci_free(priv);
5428 err_dpseci_setup:
5429 	free_percpu(priv->ppriv);
5430 err_alloc_ppriv:
5431 	fsl_mc_portal_free(priv->mc_io);
5432 err_dma_mask:
5433 	kmem_cache_destroy(qi_cache);
5434 
5435 	return err;
5436 }
5437 
5438 static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5439 {
5440 	struct device *dev;
5441 	struct dpaa2_caam_priv *priv;
5442 	int i;
5443 
5444 	dev = &ls_dev->dev;
5445 	priv = dev_get_drvdata(dev);
5446 
5447 	dpaa2_dpseci_debugfs_exit(priv);
5448 
5449 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5450 		struct caam_aead_alg *t_alg = driver_aeads + i;
5451 
5452 		if (t_alg->registered)
5453 			crypto_unregister_aead(&t_alg->aead);
5454 	}
5455 
5456 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5457 		struct caam_skcipher_alg *t_alg = driver_algs + i;
5458 
5459 		if (t_alg->registered)
5460 			crypto_unregister_skcipher(&t_alg->skcipher);
5461 	}
5462 
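5463 	/*
5463 	 * hash_list is initialized only when the MD block exists, so a
5463 	 * NULL .next means no ahash algorithms were ever registered.
5463 	 */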
5463 	if (hash_list.next) {
5464 		struct caam_hash_alg *t_hash_alg, *p;
5465 
5466 		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5467 			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5468 			list_del(&t_hash_alg->entry);
5469 			kfree(t_hash_alg);
5470 		}
5471 	}
5472 
5473 	dpaa2_dpseci_disable(priv);
5474 	dpaa2_dpseci_dpio_free(priv);
5475 	dpaa2_dpseci_free(priv);
5476 	free_percpu(priv->ppriv);
5477 	fsl_mc_portal_free(priv->mc_io);
5478 	kmem_cache_destroy(qi_cache);
5479 }
5480 
5481 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5482 {
5483 	struct dpaa2_fd fd;
5484 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5485 	struct dpaa2_caam_priv_per_cpu *ppriv;
5486 	int err = 0, i;
5487 
5488 	if (IS_ERR(req))
5489 		return PTR_ERR(req);
5490 
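5491 	/*
5491 	 * If congestion notifications were configured at probe time, check
5491 	 * the CSCN state last written by the MC firmware and drop the
5491 	 * request early while the SEC queues are congested.
5491 	 */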
5491 	if (priv->cscn_mem) {
5492 		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5493 					DPAA2_CSCN_SIZE,
5494 					DMA_FROM_DEVICE);
5495 		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
5496 			dev_dbg_ratelimited(dev, "Dropping request\n");
5497 			return -EBUSY;
5498 		}
5499 	}
5500 
5501 	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5502 
5503 	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5504 					 DMA_BIDIRECTIONAL);
5505 	if (dma_mapping_error(dev, req->fd_flt_dma)) {
5506 		dev_err(dev, "DMA mapping error for QI enqueue request\n");
5507 		/* Nothing mapped yet, so skip the unmap in the error path */
5507 		return -EIO;
5508 	}
5509 
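5510 	/*
5510 	 * Build a frame-list format FD pointing at the two-entry frame
5510 	 * list table (output entry first, input entry second); the FD
5510 	 * takes its length from the input entry and carries the flow
5510 	 * context describing the job to execute.
5510 	 */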
5510 	memset(&fd, 0, sizeof(fd));
5511 	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5512 	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5513 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5514 	dpaa2_fd_set_flc(&fd, req->flc_dma);
5515 
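5516 	/*
5516 	 * Enqueue through this core's DPIO portal, retrying on -EBUSY for
5516 	 * up to twice the number of Tx queues before giving up.
5516 	 */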
5516 	ppriv = raw_cpu_ptr(priv->ppriv);
5517 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5518 		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5519 						  &fd);
5520 		if (err != -EBUSY)
5521 			break;
5522 
5523 		cpu_relax();
5524 	}
5525 
5526 	if (unlikely(err)) {
5527 		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5528 		goto err_out;
5529 	}
5530 
5531 	return -EINPROGRESS;
5532 
5533 err_out:
5534 	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5535 			 DMA_BIDIRECTIONAL);
5536 	return -EIO;
5537 }
5538 EXPORT_SYMBOL(dpaa2_caam_enqueue);
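
/*
 * Usage sketch (illustrative only, modeled on this driver's request paths;
 * the callback name and the skreq/edesc variables are hypothetical): a
 * caller fills in the caam_request embedded in its request context, submits
 * it, and treats -EINPROGRESS as success, e.g.:
 *
 *	req->flc = &ctx->flc[ENCRYPT];
 *	req->flc_dma = ctx->flc_dma[ENCRYPT];
 *	req->cbk = my_done_cbk;
 *	req->ctx = &skreq->base;
 *	req->edesc = edesc;
 *
 *	err = dpaa2_caam_enqueue(ctx->dev, req);
 *	if (err != -EINPROGRESS) {
 *		skcipher_unmap(ctx->dev, edesc, skreq);
 *		qi_cache_free(edesc);
 *	}
 */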
5539 
5540 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5541 	{
5542 		.vendor = FSL_MC_VENDOR_FREESCALE,
5543 		.obj_type = "dpseci",
5544 	},
5545 	{ .vendor = 0x0 }
5546 };
5547 MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5548 
5549 static struct fsl_mc_driver dpaa2_caam_driver = {
5550 	.driver = {
5551 		.name		= KBUILD_MODNAME,
5552 		.owner		= THIS_MODULE,
5553 	},
5554 	.probe		= dpaa2_caam_probe,
5555 	.remove		= dpaa2_caam_remove,
5556 	.match_id_table = dpaa2_caam_match_id_table
5557 };
5558 
5559 MODULE_LICENSE("Dual BSD/GPL");
5560 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5561 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5562 
5563 module_fsl_mc_driver(dpaa2_caam_driver);
5564