// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx2_cptvf.h"
#include "otx2_cptvf_algs.h"
#include "otx2_cpt_reqmgr.h"

/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
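/* Bit 7 of the major opcode selects scatter-gather (SG) DMA mode. */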
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
};

static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();
	/*
	 * On the OcteonTX2 platform a CPT instruction queue is bound to
	 * each local function (LF). LFs can be attached to a PF or a VF,
	 * so we always use the first device. We get maximum performance
	 * when one CPT queue is available per CPU; otherwise CPT queues
	 * have to be shared between CPUs.
	 */
	if (*cpu_num >= se_devices.desc[0].num_queues)
		*cpu_num %= se_devices.desc[0].num_queues;
	*pdev = se_devices.desc[0].dev;

	put_cpu();

	return 0;
}

static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
{
	struct otx2_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}

static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx2_cpt_inst_info *inst_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx2_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	if (inst_info) {
		cpt_req = inst_info->req;
		if (!status) {
			/*
			 * When the selected cipher is NULL we have to
			 * verify manually that the calculated HMAC
			 * matches the HMAC received with the request.
			 */
			if (cpt_req->req_type ==
			    OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
			    !cpt_req->is_enc)
				status = validate_hmac_cipher_null(cpt_req);
		}
		pdev = inst_info->pdev;
		otx2_cpt_info_destroy(pdev, inst_info);
	}
	if (areq)
		areq->complete(areq, status);
}

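/*
 * CBC mode chains requests: the skcipher API requires that on completion
 * req->iv holds the IV for a follow-up request, i.e. the last ciphertext
 * block. For in-place decryption that block is overwritten by the result,
 * so it is saved in iv_out beforehand (see create_ctx_hdr()).
 */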
static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx2_cpt_req_info *req_info;
	struct otx2_cpt_req_ctx *rctx;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx2_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
	    ctx->cipher_type == OTX2_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}

static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx2_cpt_inst_info *inst_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (inst_info) {
			pdev = inst_info->pdev;
			otx2_cpt_info_destroy(pdev, inst_info);
		}
		areq->complete(areq, status);
	}
}

static inline void update_input_data(struct otx2_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}

static inline void update_output_data(struct otx2_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	u32 len, sg_len;
	u8 *ptr;

	req_info->rlen += nbytes;

	while (nbytes) {
		sg_len = outp_sg->length - offset;
		len = (nbytes < sg_len) ? nbytes : sg_len;
		ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}

static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;

	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
	} else {
		req_info->req.opcode.s.minor = 3;
		if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
		    ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
		    req->src == req->dst) {
			req_info->iv_out = kmalloc(ivsize, flags);
			if (!req_info->iv_out)
				return -ENOMEM;

			scatterwalk_map_and_copy(req_info->iv_out, req->src,
						 start, ivsize, 0);
		}
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX2_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	cpu_to_be64s(&fctx->enc.enc_ctrl.u);

	/*
	 * Store the packet data information in the offset control
	 * word (the first 8 bytes of the input).
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}

static inline int create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->in_cnt = argcnt;

	return 0;
}

static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * OUTPUT buffer processing:
	 * the encrypted/decrypted data (req->cryptlen bytes) is
	 * received back in req->dst in the same layout as the input.
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->out_cnt = argcnt;
}

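/*
 * Requests the hardware cannot handle (here: cryptlen larger than
 * OTX2_CPT_MAX_REQ_SIZE, checked in cpt_enc_dec()) are handed to the
 * software fallback tfm allocated in cpt_skcipher_fallback_init().
 */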
static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	int ret;

	if (ctx->fbk_cipher) {
		skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
		skcipher_request_set_callback(&rctx->sk_fbk_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
			       crypto_skcipher_decrypt(&rctx->sk_fbk_req);
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	if (req->cryptlen == 0)
		return 0;

	if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
		return -EINVAL;

	if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
		return skcipher_do_fallback(req, enc);

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.u = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = otx2_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

	/*
	 * We perform an asynchronous send; once the request completes,
	 * the driver notifies us through the registered callback.
	 */
	status = otx2_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}

static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}

static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, u32 keylen)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	ctx->enc_align_len = 1;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX2_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;
	ctx->enc_align_len = 8;

	memcpy(ctx->enc_key, key, keylen);

	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
		ctx->enc_align_len = 16;
	else
		ctx->enc_align_len = 1;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
}

static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
}

static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
}

static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					     const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
}

static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					     const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
}

static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
				      struct crypto_alg *alg)
{
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->fbk_cipher =
				crypto_alloc_skcipher(alg->cra_name, 0,
						      CRYPTO_ALG_ASYNC |
						      CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fbk_cipher)) {
			pr_err("%s() failed to allocate fallback for %s\n",
				__func__, alg->cra_name);
			return PTR_ERR(ctx->fbk_cipher);
		}
	}
	return 0;
}

static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct crypto_alg *alg = tfm->__crt_alg;

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for the skcipher_request is allocated
	 * because the cryptd daemon uses this memory for its
	 * request_ctx information.
	 */
	crypto_skcipher_set_reqsize(stfm, sizeof(struct otx2_cpt_req_ctx) +
					sizeof(struct skcipher_request));

	return cpt_skcipher_fallback_init(ctx, alg);
}

static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->fbk_cipher) {
		crypto_free_skcipher(ctx->fbk_cipher);
		ctx->fbk_cipher = NULL;
	}
}

static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
				  struct crypto_alg *alg)
{
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->fbk_cipher =
			    crypto_alloc_aead(alg->cra_name, 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fbk_cipher)) {
			pr_err("%s() failed to allocate fallback for %s\n",
				__func__, alg->cra_name);
			return PTR_ERR(ctx->fbk_cipher);
		}
	}
	return 0;
}

static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(atfm);
	struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
	struct crypto_alg *alg = tfm->__crt_alg;

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	/*
	 * When the selected cipher is NULL we use the HMAC opcode
	 * instead of the FLEXICRYPTO opcode, so we do not need a HASH
	 * algorithm to calculate ipad and opad.
	 */
	if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
		switch (ctx->mac_type) {
		case OTX2_CPT_SHA1:
			ctx->hashalg = crypto_alloc_shash("sha1", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX2_CPT_SHA256:
			ctx->hashalg = crypto_alloc_shash("sha256", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX2_CPT_SHA384:
			ctx->hashalg = crypto_alloc_shash("sha384", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX2_CPT_SHA512:
			ctx->hashalg = crypto_alloc_shash("sha512", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;
		}
	}
	switch (ctx->cipher_type) {
	case OTX2_CPT_AES_CBC:
	case OTX2_CPT_AES_ECB:
		ctx->enc_align_len = 16;
		break;
	case OTX2_CPT_DES3_CBC:
	case OTX2_CPT_DES3_ECB:
		ctx->enc_align_len = 8;
		break;
	case OTX2_CPT_AES_GCM:
	case OTX2_CPT_CIPHER_NULL:
		ctx->enc_align_len = 1;
		break;
	}
	crypto_aead_set_reqsize(atfm, sizeof(struct otx2_cpt_req_ctx));

	return cpt_aead_fallback_init(ctx, alg);
}

static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
}

static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
}

static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
}

static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
}

static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
}

static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
}

static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
}

static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
}

static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
}

static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	if (ctx->hashalg)
		crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);

	if (ctx->fbk_cipher) {
		crypto_free_aead(ctx->fbk_cipher);
		ctx->fbk_cipher = NULL;
	}
}

static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
					  unsigned int authsize)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (crypto_rfc4106_check_authsize(authsize))
		return -EINVAL;

	tfm->authsize = authsize;
	/* Set authsize for fallback case */
	if (ctx->fbk_cipher)
		ctx->fbk_cipher->authsize = authsize;

	return 0;
}

static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	tfm->authsize = authsize;

	return 0;
}

static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
					   unsigned int authsize)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->is_trunc_hmac = true;
	tfm->authsize = authsize;

	return 0;
}

static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx2_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}

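/*
 * The byte swaps below convert the CPU-native hash state words of
 * struct sha1_state/sha256_state/sha512_state into the big-endian
 * layout in which the CPT microcode consumes the partial digests.
 */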
static inline void swap_data32(void *buf, u32 len)
{
	cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
	u64 *src = buf;
	int i = 0;

	for (i = 0; i < len / 8; i++, src++)
		cpu_to_be64s(src);
}

static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX2_CPT_SHA1:
		sha1 = (struct sha1_state *) in_pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX2_CPT_SHA256:
		sha256 = (struct sha256_state *) in_pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX2_CPT_SHA384:
	case OTX2_CPT_SHA512:
		sha512 = (struct sha512_state *) in_pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

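/*
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), with ipad = 0x36..36,
 * opad = 0x5c..5c and K zero-padded to one hash block. aead_hmac_init()
 * precomputes the hash state after absorbing the single (K ^ ipad) and
 * (K ^ opad) blocks, so the hardware can resume the inner and outer
 * hashes from these partial digests instead of rehashing the key.
 */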
static int aead_hmac_init(struct crypto_aead *cipher)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	int state_size = crypto_shash_statesize(ctx->hashalg);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = ctx->auth_key_len;
	u8 *ipad = NULL, *opad = NULL;
	int ret = 0, icount = 0;

	ctx->sdesc = alloc_sdesc(ctx->hashalg);
	if (!ctx->sdesc)
		return -ENOMEM;

	ctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ipad = kzalloc(state_size, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	opad = kzalloc(state_size, GFP_KERNEL);
	if (!opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
					  authkeylen, ipad);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ipad, ctx->key, authkeylen);
	}

	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * The partial hash calculated by the software algorithm is
	 * retrieved (exported) for IPAD and OPAD.
	 */

	/* IPAD calculation */
	ret = crypto_shash_init(&ctx->sdesc->shash) ?:
	      crypto_shash_update(&ctx->sdesc->shash, ipad, bs) ?:
	      crypto_shash_export(&ctx->sdesc->shash, ipad);
	if (ret)
		goto calc_fail;
	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD calculation */
	ret = crypto_shash_init(&ctx->sdesc->shash) ?:
	      crypto_shash_update(&ctx->sdesc->shash, opad, bs) ?:
	      crypto_shash_export(&ctx->sdesc->shash, opad);
	if (ret)
		goto calc_fail;
	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
	if (ret)
		goto calc_fail;

	kfree(ipad);
	kfree(opad);

	return 0;

calc_fail:
	kfree(ctx->ipad);
	ctx->ipad = NULL;
	kfree(ctx->opad);
	ctx->opad = NULL;
	kfree(ipad);
	kfree(opad);
	kfree(ctx->sdesc);
	ctx->sdesc = NULL;

	return ret;
}

static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	struct crypto_authenc_key_param *param;
	int enckeylen = 0, authkeylen = 0;
	struct rtattr *rta = (void *)key;

	if (!RTA_OK(rta, keylen))
		return -EINVAL;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		return -EINVAL;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		return -EINVAL;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (keylen < enckeylen)
		return -EINVAL;

	if (keylen > OTX2_CPT_MAX_KEY_SIZE)
		return -EINVAL;

	authkeylen = keylen - enckeylen;
	memcpy(ctx->key, key, keylen);

	switch (enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		return -EINVAL;
	}

	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = authkeylen;

	return aead_hmac_init(cipher);
}

static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					     const unsigned char *key,
					     unsigned int keylen)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)key;
	int enckeylen = 0;

	if (!RTA_OK(rta, keylen))
		return -EINVAL;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		return -EINVAL;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		return -EINVAL;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (enckeylen != 0)
		return -EINVAL;

	if (keylen > OTX2_CPT_MAX_KEY_SIZE)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = keylen;

	return 0;
}

static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
					const unsigned char *key,
					unsigned int keylen)
{
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);

	/*
	 * For AES-GCM we expect the encryption key (16, 24 or 32 bytes)
	 * concatenated with a 4-byte salt.
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX2_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
}

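/*
 * For rfc4106(gcm(aes)) the 12-byte GCM nonce is the 4-byte salt taken
 * from the key material followed by the 8-byte per-request IV, which
 * IPsec passes at the end of the associated data; create_aead_ctx_hdr()
 * points the hardware at it via ctrl_word.e.iv_offset.
 */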
static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX2_CPT_AES_CBC:
		if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
			return -EINVAL;

		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX2_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX2_CPT_AES_GCM:
		if (crypto_ipsec_check_assoclen(req->assoclen))
			return -EINVAL;

		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	cpu_to_be64s(&rctx->ctrl_word.flags);

	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;
	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	cpu_to_be64s(&fctx->enc.enc_ctrl.u);

	/*
	 * Store the packet data information in the offset control
	 * word (the first 8 bytes of the input).
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}

static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				      u32 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req_info->ctrl.s.se_req = 1;
	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);
}

static inline int create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen = req->cryptlen + req->assoclen;
	u32 argcnt = 0;
	int status;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->in_cnt = argcnt;

	return 0;
}

static inline void create_aead_output_list(struct aead_request *req, u32 enc,
					   u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen + req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->out_cnt = argcnt;
}

static inline void create_aead_null_input_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->in_cnt = argcnt;
}

static inline int create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different,
	 * copy the payload to the destination first.
	 */
	if (req->src != req->dst) {
		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr)
			return -ENOMEM;

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * For encryption the HMAC has to be
		 * appended after the payload.
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst)
				return -ENOENT;
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * For decryption the HMAC calculated over the received
		 * payload has to be compared with the HMAC received
		 * with the request.
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len)
			return -EINVAL;

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->out_cnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
	return status;
}

static int aead_do_fallback(struct aead_request *req, bool is_enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (ctx->fbk_cipher) {
		/* Store the cipher tfm and then use the fallback tfm */
		aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
		aead_request_set_callback(&rctx->fbk_req, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(&rctx->fbk_req, req->src,
				       req->dst, req->cryptlen, req->iv);
		aead_request_set_ad(&rctx->fbk_req, req->assoclen);
		ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
			       crypto_aead_decrypt(&rctx->fbk_req);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.u = 0;

	req_info->callback = otx2_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	switch (reg_type) {
	case OTX2_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
		break;

	case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
		create_aead_null_input_list(req, enc,
					    crypto_aead_authsize(tfm));
		status = create_aead_null_output_list(req, enc,
						crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}
	if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
		return -EINVAL;

	if (!req_info->req.param2 ||
	    (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
	    (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
		return aead_do_fallback(req, enc);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);

	/*
	 * We perform an asynchronous send; once the request completes,
	 * the driver notifies us through the registered callback.
	 */
	return otx2_cpt_do_request(pdev, req_info, cpu_num);
}

static int otx2_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx2_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx2_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_xts_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.ivsize = 0,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx2_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx2_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx2_cpt_enc_dec_init,
	.exit = otx2_cpt_skcipher_exit,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = 0,
	.setkey = otx2_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx2_cpt_skcipher_encrypt,
	.decrypt = otx2_cpt_skcipher_decrypt,
} };

static struct aead_alg otx2_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha1_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha256_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha384_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_cbc_aes_sha512_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx2_cpt_aead_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha1_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha256_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha384_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_ecb_null_sha512_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx2_cpt_aead_null_set_authsize,
	.encrypt = otx2_cpt_aead_null_encrypt,
	.decrypt = otx2_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx2_cpt_aead_gcm_aes_init,
	.exit = otx2_cpt_aead_exit,
	.setkey = otx2_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx2_cpt_aead_gcm_set_authsize,
	.encrypt = otx2_cpt_aead_encrypt,
	.decrypt = otx2_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };

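/*
 * These algorithm structs are static, so a previous unregistration may
 * have left CRYPTO_ALG_DEAD set; clear it before registering again
 * (e.g. after the last device was removed and later re-added).
 */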
static inline int cpt_register_algs(void)
{
	int i, err = 0;

	for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
		otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_skciphers(otx2_cpt_skciphers,
					ARRAY_SIZE(otx2_cpt_skciphers));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
		otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx2_cpt_aeads,
				    ARRAY_SIZE(otx2_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx2_cpt_skciphers,
					    ARRAY_SIZE(otx2_cpt_skciphers));
		return err;
	}

	return 0;
}

static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx2_cpt_skciphers,
				    ARRAY_SIZE(otx2_cpt_skciphers));
	crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
	const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;
	return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
	struct cpt_device_desc *ldesc = lptr;
	struct cpt_device_desc *rdesc = rptr;

	swap(*ldesc, *rdesc);
}

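/*
 * Crypto algorithms are registered only once the expected number of
 * devices has been probed; the device table is kept sorted by PCI devfn
 * so that get_se_device() selects devices deterministically.
 */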
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			 int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	count = atomic_read(&se_devices.count);
	if (count >= OTX2_CPT_MAX_LFS_NUM) {
		dev_err(&pdev->dev, "No space to add a new device\n");
		ret = -ENOSPC;
		goto unlock;
	}
	se_devices.desc[count].num_queues = num_queues;
	se_devices.desc[count++].dev = pdev;
	atomic_inc(&se_devices.count);

	if (atomic_read(&se_devices.count) == num_devices &&
	    !is_crypto_registered) {
		if (cpt_register_algs()) {
			dev_err(&pdev->dev,
				"Error in registering crypto algorithms\n");
			ret = -EINVAL;
			goto unlock;
		}
		try_module_get(mod);
		is_crypto_registered = true;
	}
	sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
	     compare_func, swap_func);

unlock:
	mutex_unlock(&mutex);
	return ret;
}

void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++) {
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count - 1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j + 1];
			dev_found = true;
			break;
		}
	}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found\n", __func__);
		goto unlock;
	}
	if (atomic_dec_and_test(&se_devices.count)) {
		cpt_unregister_algs();
		module_put(mod);
		is_crypto_registered = false;
	}

unlock:
	mutex_unlock(&mutex);
}