xref: /linux/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c (revision 746680ec6696585e30db3e18c93a63df9cbec39c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
3 
4 #include <crypto/aes.h>
5 #include <crypto/authenc.h>
6 #include <crypto/cryptd.h>
7 #include <crypto/des.h>
8 #include <crypto/internal/aead.h>
9 #include <crypto/sha1.h>
10 #include <crypto/sha2.h>
11 #include <crypto/xts.h>
12 #include <crypto/gcm.h>
13 #include <crypto/scatterwalk.h>
14 #include <linux/sort.h>
15 #include <linux/module.h>
16 #include "otx2_cptvf.h"
17 #include "otx2_cptvf_algs.h"
18 #include "otx2_cpt_reqmgr.h"
19 #include "cn10k_cpt.h"
20 
21 /* Size of salt in AES GCM mode */
22 #define AES_GCM_SALT_SIZE 4
23 /* Size of IV in AES GCM mode */
24 #define AES_GCM_IV_SIZE 8
25 /* Size of ICV (Integrity Check Value) in AES GCM mode */
26 #define AES_GCM_ICV_SIZE 16
27 /* Offset of IV in AES GCM mode */
28 #define AES_GCM_IV_OFFSET 8
29 #define CONTROL_WORD_LEN 8
30 #define KEY2_OFFSET 48
31 #define DMA_MODE_FLAG(dma_mode) \
32 	(((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
33 
34 /* Truncated SHA digest size */
35 #define SHA1_TRUNC_DIGEST_SIZE 12
36 #define SHA256_TRUNC_DIGEST_SIZE 16
37 #define SHA384_TRUNC_DIGEST_SIZE 24
38 #define SHA512_TRUNC_DIGEST_SIZE 32
39 
40 static DEFINE_MUTEX(mutex);
41 static int is_crypto_registered;
42 
43 struct cpt_device_desc {
44 	struct pci_dev *dev;
45 	int num_queues;
46 };
47 
48 struct cpt_device_table {
49 	atomic_t count;
50 	struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
51 };
52 
53 static struct cpt_device_table se_devices = {
54 	.count = ATOMIC_INIT(0)
55 };
56 
57 static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
58 
59 static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
60 {
61 	int count;
62 
63 	count = atomic_read(&se_devices.count);
64 	if (count < 1)
65 		return -ENODEV;
66 
67 	*cpu_num = get_cpu();
68 	/*
69 	 * On the OcteonTX2 platform a CPT instruction queue is bound to
70 	 * each local function (LF); LFs in turn can be attached to a PF
71 	 * or a VF, therefore we always use the first device. We get the
72 	 * best performance if one CPT queue is available per CPU,
73 	 * otherwise CPT queues have to be shared between CPUs.
74 	 */
75 	if (*cpu_num >= se_devices.desc[0].num_queues)
76 		*cpu_num %= se_devices.desc[0].num_queues;
77 	*pdev = se_devices.desc[0].dev;
78 
79 	put_cpu();
80 
81 	return 0;
82 }
83 
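/*
 * For the NULL-cipher AEAD algorithms the engine only computes the HMAC;
 * on decryption the driver itself compares the HMAC it calculated with
 * the one received in the request and reports a mismatch as -EBADMSG.
 */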
84 static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
85 {
86 	struct otx2_cpt_req_ctx *rctx;
87 	struct aead_request *req;
88 	struct crypto_aead *tfm;
89 
90 	req = container_of(cpt_req->areq, struct aead_request, base);
91 	tfm = crypto_aead_reqtfm(req);
92 	rctx = aead_request_ctx_dma(req);
93 	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
94 		   rctx->fctx.hmac.s.hmac_recv,
95 		   crypto_aead_authsize(tfm)) != 0)
96 		return -EBADMSG;
97 
98 	return 0;
99 }
100 
101 static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
102 {
103 	struct otx2_cpt_inst_info *inst_info = arg2;
104 	struct crypto_async_request *areq = arg1;
105 	struct otx2_cpt_req_info *cpt_req;
106 	struct pci_dev *pdev;
107 
108 	if (inst_info) {
109 		cpt_req = inst_info->req;
110 		if (!status) {
111 			/*
112 			 * When the selected cipher is NULL we have to manually
113 			 * verify that the calculated HMAC value matches the
114 			 * received HMAC value.
115 			 */
116 			if (cpt_req->req_type ==
117 			    OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
118 			    !cpt_req->is_enc)
119 				status = validate_hmac_cipher_null(cpt_req);
120 		}
121 		pdev = inst_info->pdev;
122 		otx2_cpt_info_destroy(pdev, inst_info);
123 	}
124 	if (areq)
125 		crypto_request_complete(areq, status);
126 }
127 
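/*
 * For CBC modes the skcipher API expects req->iv to hold the last
 * ciphertext block when the request completes, so that chained requests
 * can continue from it.  Copy it back from the destination/source
 * scatterlist or, for in-place decryption, from the copy that
 * create_ctx_hdr() saved in iv_out.
 */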
128 static void output_iv_copyback(struct crypto_async_request *areq)
129 {
130 	struct otx2_cpt_req_info *req_info;
131 	struct otx2_cpt_req_ctx *rctx;
132 	struct skcipher_request *sreq;
133 	struct crypto_skcipher *stfm;
134 	struct otx2_cpt_enc_ctx *ctx;
135 	u32 start, ivsize;
136 
137 	sreq = container_of(areq, struct skcipher_request, base);
138 	stfm = crypto_skcipher_reqtfm(sreq);
139 	ctx = crypto_skcipher_ctx(stfm);
140 	if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
141 	    ctx->cipher_type == OTX2_CPT_DES3_CBC) {
142 		rctx = skcipher_request_ctx_dma(sreq);
143 		req_info = &rctx->cpt_req;
144 		ivsize = crypto_skcipher_ivsize(stfm);
145 		start = sreq->cryptlen - ivsize;
146 
147 		if (req_info->is_enc) {
148 			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
149 						 ivsize, 0);
150 		} else {
151 			if (sreq->src != sreq->dst) {
152 				scatterwalk_map_and_copy(sreq->iv, sreq->src,
153 							 start, ivsize, 0);
154 			} else {
155 				memcpy(sreq->iv, req_info->iv_out, ivsize);
156 				kfree(req_info->iv_out);
157 			}
158 		}
159 	}
160 }
161 
162 static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
163 {
164 	struct otx2_cpt_inst_info *inst_info = arg2;
165 	struct crypto_async_request *areq = arg1;
166 	struct pci_dev *pdev;
167 
168 	if (areq) {
169 		if (!status)
170 			output_iv_copyback(areq);
171 		if (inst_info) {
172 			pdev = inst_info->pdev;
173 			otx2_cpt_info_destroy(pdev, inst_info);
174 		}
175 		crypto_request_complete(areq, status);
176 	}
177 }
178 
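/*
 * Walk the source scatterlist and add one gather entry per SG element
 * until nbytes of input have been described; the request dlen and
 * *argcnt are updated as entries are added.
 */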
179 static inline void update_input_data(struct otx2_cpt_req_info *req_info,
180 				     struct scatterlist *inp_sg,
181 				     u32 nbytes, u32 *argcnt)
182 {
183 	req_info->req.dlen += nbytes;
184 
185 	while (nbytes) {
186 		u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
187 		u8 *ptr = sg_virt(inp_sg);
188 
189 		req_info->in[*argcnt].vptr = (void *)ptr;
190 		req_info->in[*argcnt].size = len;
191 		nbytes -= len;
192 		++(*argcnt);
193 		inp_sg = sg_next(inp_sg);
194 	}
195 }
196 
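/*
 * Same as update_input_data() but for the scatter (output) list; the
 * first entry may start at a byte offset inside its SG element.
 */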
197 static inline void update_output_data(struct otx2_cpt_req_info *req_info,
198 				      struct scatterlist *outp_sg,
199 				      u32 offset, u32 nbytes, u32 *argcnt)
200 {
201 	u32 len, sg_len;
202 	u8 *ptr;
203 
204 	req_info->rlen += nbytes;
205 
206 	while (nbytes) {
207 		sg_len = outp_sg->length - offset;
208 		len = (nbytes < sg_len) ? nbytes : sg_len;
209 		ptr = sg_virt(outp_sg);
210 
211 		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
212 		req_info->out[*argcnt].size = len;
213 		nbytes -= len;
214 		++(*argcnt);
215 		offset = 0;
216 		outp_sg = sg_next(outp_sg);
217 	}
218 }
219 
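/*
 * Build the FLEXICRYPTO request header for an skcipher operation: set
 * the opcode and data lengths, fill the otx2_cpt_fc_ctx with cipher
 * type, key and IV, and queue the control word plus the fc_ctx as the
 * first two gather entries.
 */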
220 static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
221 				 u32 *argcnt)
222 {
223 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
224 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
225 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
226 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
227 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
228 	int ivsize = crypto_skcipher_ivsize(stfm);
229 	u32 start = req->cryptlen - ivsize;
230 	gfp_t flags;
231 
232 	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
233 			GFP_KERNEL : GFP_ATOMIC;
234 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
235 	req_info->ctrl.s.se_req = 1;
236 
237 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
238 				DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
239 	if (enc) {
240 		req_info->req.opcode.s.minor = 2;
241 	} else {
242 		req_info->req.opcode.s.minor = 3;
243 		if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
244 		    ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
245 		    req->src == req->dst) {
246 			req_info->iv_out = kmalloc(ivsize, flags);
247 			if (!req_info->iv_out)
248 				return -ENOMEM;
249 
250 			scatterwalk_map_and_copy(req_info->iv_out, req->src,
251 						 start, ivsize, 0);
252 		}
253 	}
254 	/* Encryption data length */
255 	req_info->req.param1 = req->cryptlen;
256 	/* Authentication data length */
257 	req_info->req.param2 = 0;
258 
259 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
260 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
261 	fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
262 
263 	if (ctx->cipher_type == OTX2_CPT_AES_XTS)
264 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
265 	else
266 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
267 
268 	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
269 
270 	cpu_to_be64s(&fctx->enc.enc_ctrl.u);
271 
272 	/*
273 	 * Store the packet data information in the offset control
274 	 * word (first 8 bytes).
275 	 */
276 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
277 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
278 	req_info->req.dlen += CONTROL_WORD_LEN;
279 	++(*argcnt);
280 
281 	req_info->in[*argcnt].vptr = (u8 *)fctx;
282 	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
283 	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
284 
285 	++(*argcnt);
286 
287 	return 0;
288 }
289 
290 static inline int create_input_list(struct skcipher_request *req, u32 enc,
291 				    u32 enc_iv_len)
292 {
293 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
294 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
295 	u32 argcnt = 0;
296 	int ret;
297 
298 	ret = create_ctx_hdr(req, enc, &argcnt);
299 	if (ret)
300 		return ret;
301 
302 	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
303 	req_info->in_cnt = argcnt;
304 
305 	return 0;
306 }
307 
308 static inline void create_output_list(struct skcipher_request *req,
309 				      u32 enc_iv_len)
310 {
311 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
312 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
313 	u32 argcnt = 0;
314 
315 	/*
316 	 * OUTPUT buffer processing:
317 	 * The AES encryption/decryption output is received in the
318 	 * following format:
319 	 *
320 	 * |------ IV ------|------ ENCRYPTED/DECRYPTED DATA ------|
321 	 * |    16 bytes    |     requested enc/dec data length    |
322 	 */
323 	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
324 	req_info->out_cnt = argcnt;
325 }
326 
327 static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
328 {
329 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
330 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
331 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
332 	int ret;
333 
334 	if (ctx->fbk_cipher) {
335 		skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
336 		skcipher_request_set_callback(&rctx->sk_fbk_req,
337 					      req->base.flags,
338 					      req->base.complete,
339 					      req->base.data);
340 		skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
341 					   req->dst, req->cryptlen, req->iv);
342 		ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
343 			       crypto_skcipher_decrypt(&rctx->sk_fbk_req);
344 	} else {
345 		ret = -EINVAL;
346 	}
347 	return ret;
348 }
349 
350 static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
351 {
352 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
353 	struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
354 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
355 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
356 	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
357 	struct pci_dev *pdev;
358 	int status, cpu_num;
359 
360 	if (req->cryptlen == 0)
361 		return 0;
362 
363 	if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
364 		return -EINVAL;
365 
366 	if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
367 		return skcipher_do_fallback(req, enc);
368 
369 	/* Clear control words */
370 	rctx->ctrl_word.flags = 0;
371 	rctx->fctx.enc.enc_ctrl.u = 0;
372 
373 	status = create_input_list(req, enc, enc_iv_len);
374 	if (status)
375 		return status;
376 	create_output_list(req, enc_iv_len);
377 
378 	status = get_se_device(&pdev, &cpu_num);
379 	if (status)
380 		return status;
381 
382 	req_info->callback = otx2_cpt_skcipher_callback;
383 	req_info->areq = &req->base;
384 	req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
385 	req_info->is_enc = enc;
386 	req_info->is_trunc_hmac = false;
387 	req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
388 							OTX2_CPT_SE_TYPES);
389 
390 	req_info->req.cptr = ctx->er_ctx.hw_ctx;
391 	req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
392 
393 	/*
394 	 * We perform an asynchronous send; once the request is
395 	 * completed the driver notifies the caller through the
396 	 * registered callback function.
397 	 */
398 	status = otx2_cpt_do_request(pdev, req_info, cpu_num);
399 
400 	return status;
401 }
402 
403 static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
404 {
405 	return cpt_enc_dec(req, true);
406 }
407 
408 static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
409 {
410 	return cpt_enc_dec(req, false);
411 }
412 
413 static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
414 				       const u8 *key, u32 keylen)
415 {
416 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
417 	const u8 *key2 = key + (keylen / 2);
418 	const u8 *key1 = key;
419 	int ret;
420 
421 	ret = xts_verify_key(tfm, key, keylen);
422 	if (ret)
423 		return ret;
424 	ctx->key_len = keylen;
425 	ctx->enc_align_len = 1;
426 	memcpy(ctx->enc_key, key1, keylen / 2);
427 	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
428 	ctx->cipher_type = OTX2_CPT_AES_XTS;
429 	switch (ctx->key_len) {
430 	case 2 * AES_KEYSIZE_128:
431 		ctx->key_type = OTX2_CPT_AES_128_BIT;
432 		break;
433 	case 2 * AES_KEYSIZE_192:
434 		ctx->key_type = OTX2_CPT_AES_192_BIT;
435 		break;
436 	case 2 * AES_KEYSIZE_256:
437 		ctx->key_type = OTX2_CPT_AES_256_BIT;
438 		break;
439 	default:
440 		return -EINVAL;
441 	}
442 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
443 }
444 
445 static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
446 			  u32 keylen, u8 cipher_type)
447 {
448 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
449 
450 	if (keylen != DES3_EDE_KEY_SIZE)
451 		return -EINVAL;
452 
453 	ctx->key_len = keylen;
454 	ctx->cipher_type = cipher_type;
455 	ctx->enc_align_len = 8;
456 
457 	memcpy(ctx->enc_key, key, keylen);
458 
459 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
460 }
461 
462 static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
463 			  u32 keylen, u8 cipher_type)
464 {
465 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
466 
467 	switch (keylen) {
468 	case AES_KEYSIZE_128:
469 		ctx->key_type = OTX2_CPT_AES_128_BIT;
470 		break;
471 	case AES_KEYSIZE_192:
472 		ctx->key_type = OTX2_CPT_AES_192_BIT;
473 		break;
474 	case AES_KEYSIZE_256:
475 		ctx->key_type = OTX2_CPT_AES_256_BIT;
476 		break;
477 	default:
478 		return -EINVAL;
479 	}
480 	if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
481 		ctx->enc_align_len = 16;
482 	else
483 		ctx->enc_align_len = 1;
484 
485 	ctx->key_len = keylen;
486 	ctx->cipher_type = cipher_type;
487 
488 	memcpy(ctx->enc_key, key, keylen);
489 
490 	return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
491 }
492 
493 static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
494 					    const u8 *key, u32 keylen)
495 {
496 	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
497 }
498 
499 static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
500 					    const u8 *key, u32 keylen)
501 {
502 	return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
503 }
504 
505 static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
506 					     const u8 *key, u32 keylen)
507 {
508 	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
509 }
510 
511 static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
512 					     const u8 *key, u32 keylen)
513 {
514 	return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
515 }
516 
517 static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
518 				      struct crypto_alg *alg)
519 {
520 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
521 		ctx->fbk_cipher =
522 				crypto_alloc_skcipher(alg->cra_name, 0,
523 						      CRYPTO_ALG_ASYNC |
524 						      CRYPTO_ALG_NEED_FALLBACK);
525 		if (IS_ERR(ctx->fbk_cipher)) {
526 			pr_err("%s() failed to allocate fallback for %s\n",
527 				__func__, alg->cra_name);
528 			return PTR_ERR(ctx->fbk_cipher);
529 		}
530 	}
531 	return 0;
532 }
533 
534 static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
535 {
536 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
537 	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
538 	struct crypto_alg *alg = tfm->__crt_alg;
539 	struct pci_dev *pdev;
540 	int ret, cpu_num;
541 
542 	memset(ctx, 0, sizeof(*ctx));
543 	/*
544 	 * Additional memory for the skcipher_request is allocated
545 	 * since the cryptd daemon uses this memory for its
546 	 * request_ctx information.
547 	 */
548 	crypto_skcipher_set_reqsize_dma(
549 		stfm, sizeof(struct otx2_cpt_req_ctx) +
550 		      sizeof(struct skcipher_request));
551 
552 	ret = get_se_device(&pdev, &cpu_num);
553 	if (ret)
554 		return ret;
555 
556 	ctx->pdev = pdev;
557 	ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
558 	if (ret)
559 		return ret;
560 
561 	return cpt_skcipher_fallback_init(ctx, alg);
562 }
563 
564 static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
565 {
566 	struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
567 
568 	if (ctx->fbk_cipher) {
569 		crypto_free_skcipher(ctx->fbk_cipher);
570 		ctx->fbk_cipher = NULL;
571 	}
572 	cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
573 }
574 
575 static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
576 				  struct crypto_alg *alg)
577 {
578 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
579 		ctx->fbk_cipher =
580 			    crypto_alloc_aead(alg->cra_name, 0,
581 					      CRYPTO_ALG_ASYNC |
582 					      CRYPTO_ALG_NEED_FALLBACK);
583 		if (IS_ERR(ctx->fbk_cipher)) {
584 			pr_err("%s() failed to allocate fallback for %s\n",
585 				__func__, alg->cra_name);
586 			return PTR_ERR(ctx->fbk_cipher);
587 		}
588 	}
589 	return 0;
590 }
591 
592 static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
593 {
594 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
595 	struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
596 	struct crypto_alg *alg = tfm->__crt_alg;
597 	struct pci_dev *pdev;
598 	int ret, cpu_num;
599 
600 	ctx->cipher_type = cipher_type;
601 	ctx->mac_type = mac_type;
602 
603 	switch (ctx->mac_type) {
604 	case OTX2_CPT_SHA1:
605 		ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
606 		break;
607 
608 	case OTX2_CPT_SHA256:
609 		ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
610 		break;
611 
612 	case OTX2_CPT_SHA384:
613 		ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
614 		break;
615 
616 	case OTX2_CPT_SHA512:
617 		ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
618 		break;
619 	}
620 
621 	if (IS_ERR(ctx->hashalg))
622 		return PTR_ERR(ctx->hashalg);
623 
624 	if (ctx->hashalg) {
625 		ctx->sdesc = alloc_sdesc(ctx->hashalg);
626 		if (!ctx->sdesc) {
627 			crypto_free_shash(ctx->hashalg);
628 			return -ENOMEM;
629 		}
630 	}
631 
632 	/*
633 	 * When the selected cipher is NULL we use the HMAC opcode instead of
634 	 * the FLEXICRYPTO opcode, therefore we do not need the HASH algorithm
635 	 * to calculate ipad and opad.
636 	 */
637 	if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
638 		int ss = crypto_shash_statesize(ctx->hashalg);
639 
640 		ctx->ipad = kzalloc(ss, GFP_KERNEL);
641 		if (!ctx->ipad) {
642 			kfree(ctx->sdesc);
643 			crypto_free_shash(ctx->hashalg);
644 			return -ENOMEM;
645 		}
646 
647 		ctx->opad = kzalloc(ss, GFP_KERNEL);
648 		if (!ctx->opad) {
649 			kfree(ctx->ipad);
650 			kfree(ctx->sdesc);
651 			crypto_free_shash(ctx->hashalg);
652 			return -ENOMEM;
653 		}
654 	}
655 	switch (ctx->cipher_type) {
656 	case OTX2_CPT_AES_CBC:
657 	case OTX2_CPT_AES_ECB:
658 		ctx->enc_align_len = 16;
659 		break;
660 	case OTX2_CPT_DES3_CBC:
661 	case OTX2_CPT_DES3_ECB:
662 		ctx->enc_align_len = 8;
663 		break;
664 	case OTX2_CPT_AES_GCM:
665 	case OTX2_CPT_CIPHER_NULL:
666 		ctx->enc_align_len = 1;
667 		break;
668 	}
669 	crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
670 
671 	ret = get_se_device(&pdev, &cpu_num);
672 	if (ret)
673 		return ret;
674 
675 	ctx->pdev = pdev;
676 	ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
677 	if (ret)
678 		return ret;
679 
680 	return cpt_aead_fallback_init(ctx, alg);
681 }
682 
683 static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
684 {
685 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
686 }
687 
688 static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
689 {
690 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
691 }
692 
693 static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
694 {
695 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
696 }
697 
698 static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
699 {
700 	return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
701 }
702 
703 static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
704 {
705 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
706 }
707 
708 static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
709 {
710 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
711 }
712 
713 static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
714 {
715 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
716 }
717 
718 static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
719 {
720 	return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
721 }
722 
723 static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
724 {
725 	return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
726 }
727 
728 static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
729 {
730 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
731 
732 	kfree(ctx->ipad);
733 	kfree(ctx->opad);
734 	crypto_free_shash(ctx->hashalg);
735 	kfree(ctx->sdesc);
736 
737 	if (ctx->fbk_cipher) {
738 		crypto_free_aead(ctx->fbk_cipher);
739 		ctx->fbk_cipher = NULL;
740 	}
741 	cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
742 }
743 
744 static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
745 					  unsigned int authsize)
746 {
747 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
748 
749 	if (crypto_rfc4106_check_authsize(authsize))
750 		return -EINVAL;
751 
752 	tfm->authsize = authsize;
753 	/* Set authsize for fallback case */
754 	if (ctx->fbk_cipher)
755 		ctx->fbk_cipher->authsize = authsize;
756 
757 	return 0;
758 }
759 
760 static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
761 				      unsigned int authsize)
762 {
763 	tfm->authsize = authsize;
764 
765 	return 0;
766 }
767 
768 static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
769 					   unsigned int authsize)
770 {
771 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
772 
773 	ctx->is_trunc_hmac = true;
774 	tfm->authsize = authsize;
775 
776 	return 0;
777 }
778 
779 static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
780 {
781 	struct otx2_cpt_sdesc *sdesc;
782 	int size;
783 
784 	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
785 	sdesc = kmalloc(size, GFP_KERNEL);
786 	if (!sdesc)
787 		return NULL;
788 
789 	sdesc->shash.tfm = alg;
790 
791 	return sdesc;
792 }
793 
794 static inline void swap_data32(void *buf, u32 len)
795 {
796 	cpu_to_be32_array(buf, buf, len / 4);
797 }
798 
799 static inline void swap_data64(void *buf, u32 len)
800 {
801 	u64 *src = buf;
802 	int i = 0;
803 
804 	for (i = 0 ; i < len / 8; i++, src++)
805 		cpu_to_be64s(src);
806 }
807 
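/*
 * The state exported by the software shash is in CPU byte order;
 * byte-swap it here, presumably because the CPT engine consumes the
 * partial-hash state words in big-endian form.
 */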
808 static int swap_pad(u8 mac_type, u8 *pad)
809 {
810 	struct sha512_state *sha512;
811 	struct sha256_state *sha256;
812 	struct sha1_state *sha1;
813 
814 	switch (mac_type) {
815 	case OTX2_CPT_SHA1:
816 		sha1 = (struct sha1_state *)pad;
817 		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
818 		break;
819 
820 	case OTX2_CPT_SHA256:
821 		sha256 = (struct sha256_state *)pad;
822 		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
823 		break;
824 
825 	case OTX2_CPT_SHA384:
826 	case OTX2_CPT_SHA512:
827 		sha512 = (struct sha512_state *)pad;
828 		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
829 		break;
830 
831 	default:
832 		return -EINVAL;
833 	}
834 
835 	return 0;
836 }
837 
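/*
 * Precompute the HMAC inner and outer pads: hash the authentication key
 * if it is longer than one block, XOR the zero-padded key with
 * 0x36/0x5c, run one block through the software hash and export the
 * partial state into ctx->ipad/ctx->opad for the engine to resume from.
 */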
838 static int aead_hmac_init(struct crypto_aead *cipher,
839 			  struct crypto_authenc_keys *keys)
840 {
841 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
842 	int ds = crypto_shash_digestsize(ctx->hashalg);
843 	int bs = crypto_shash_blocksize(ctx->hashalg);
844 	int authkeylen = keys->authkeylen;
845 	u8 *ipad = NULL, *opad = NULL;
846 	int icount = 0;
847 	int ret;
848 
849 	if (authkeylen > bs) {
850 		ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
851 					  authkeylen, ctx->key);
852 		if (ret)
853 			goto calc_fail;
854 
855 		authkeylen = ds;
856 	} else
857 		memcpy(ctx->key, keys->authkey, authkeylen);
858 
859 	ctx->enc_key_len = keys->enckeylen;
860 	ctx->auth_key_len = authkeylen;
861 
862 	if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL)
863 		return keys->enckeylen ? -EINVAL : 0;
864 
865 	switch (keys->enckeylen) {
866 	case AES_KEYSIZE_128:
867 		ctx->key_type = OTX2_CPT_AES_128_BIT;
868 		break;
869 	case AES_KEYSIZE_192:
870 		ctx->key_type = OTX2_CPT_AES_192_BIT;
871 		break;
872 	case AES_KEYSIZE_256:
873 		ctx->key_type = OTX2_CPT_AES_256_BIT;
874 		break;
875 	default:
876 		/* Invalid key length */
877 		return -EINVAL;
878 	}
879 
880 	memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
881 
882 	ipad = ctx->ipad;
883 	opad = ctx->opad;
884 
885 	memcpy(ipad, ctx->key, authkeylen);
886 	memset(ipad + authkeylen, 0, bs - authkeylen);
887 	memcpy(opad, ipad, bs);
888 
889 	for (icount = 0; icount < bs; icount++) {
890 		ipad[icount] ^= 0x36;
891 		opad[icount] ^= 0x5c;
892 	}
893 
894 	/*
895 	 * A partial hash is calculated with the software algorithm
896 	 * and its exported state is retrieved for IPAD & OPAD.
897 	 */
898 
899 	/* IPAD Calculation */
900 	crypto_shash_init(&ctx->sdesc->shash);
901 	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
902 	crypto_shash_export(&ctx->sdesc->shash, ipad);
903 	ret = swap_pad(ctx->mac_type, ipad);
904 	if (ret)
905 		goto calc_fail;
906 
907 	/* OPAD Calculation */
908 	crypto_shash_init(&ctx->sdesc->shash);
909 	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
910 	crypto_shash_export(&ctx->sdesc->shash, opad);
911 	ret = swap_pad(ctx->mac_type, opad);
912 
913 calc_fail:
914 	return ret;
915 }
916 
917 static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
918 					    const unsigned char *key,
919 					    unsigned int keylen)
920 {
921 	struct crypto_authenc_keys authenc_keys;
922 
923 	return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?:
924 	       aead_hmac_init(cipher, &authenc_keys);
925 }
926 
927 static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
928 					     const unsigned char *key,
929 					     unsigned int keylen)
930 {
931 	return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
932 }
933 
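/*
 * rfc4106(gcm(aes)) key material is the AES key immediately followed by
 * a 4-byte salt, e.g. for AES-128:
 *
 *	key[0..15]	AES encryption key
 *	key[16..19]	salt (the fixed part of the nonce)
 */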
934 static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
935 					const unsigned char *key,
936 					unsigned int keylen)
937 {
938 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
939 
940 	/*
941 	 * For AES GCM we expect to get the encryption key (16, 24 or 32
942 	 * bytes) followed by the salt (4 bytes).
943 	 */
944 	switch (keylen) {
945 	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
946 		ctx->key_type = OTX2_CPT_AES_128_BIT;
947 		ctx->enc_key_len = AES_KEYSIZE_128;
948 		break;
949 	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
950 		ctx->key_type = OTX2_CPT_AES_192_BIT;
951 		ctx->enc_key_len = AES_KEYSIZE_192;
952 		break;
953 	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
954 		ctx->key_type = OTX2_CPT_AES_256_BIT;
955 		ctx->enc_key_len = AES_KEYSIZE_256;
956 		break;
957 	default:
958 		/* Invalid key and salt length */
959 		return -EINVAL;
960 	}
961 
962 	/* Store encryption key and salt */
963 	memcpy(ctx->key, key, keylen);
964 
965 	return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
966 }
967 
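/*
 * Build the FLEXICRYPTO request header for an AEAD operation.  CBC+HMAC
 * takes the IV from the context and uses the precomputed ipad/opad,
 * while GCM takes the IV from DPTR and the 4-byte salt stored right
 * after the AES key.
 */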
968 static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
969 				      u32 *argcnt)
970 {
971 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
972 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
973 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
974 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
975 	struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
976 	int mac_len = crypto_aead_authsize(tfm);
977 	int ds;
978 
979 	rctx->ctrl_word.e.enc_data_offset = req->assoclen;
980 
981 	switch (ctx->cipher_type) {
982 	case OTX2_CPT_AES_CBC:
983 		if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
984 			return -EINVAL;
985 
986 		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
987 		/* Copy encryption key to context */
988 		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
989 		       ctx->enc_key_len);
990 		/* Copy IV to context */
991 		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
992 
993 		ds = crypto_shash_digestsize(ctx->hashalg);
994 		if (ctx->mac_type == OTX2_CPT_SHA384)
995 			ds = SHA512_DIGEST_SIZE;
996 		if (ctx->ipad)
997 			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
998 		if (ctx->opad)
999 			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
1000 		break;
1001 
1002 	case OTX2_CPT_AES_GCM:
1003 		if (crypto_ipsec_check_assoclen(req->assoclen))
1004 			return -EINVAL;
1005 
1006 		fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
1007 		/* Copy encryption key to context */
1008 		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
1009 		/* Copy salt to context */
1010 		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
1011 		       AES_GCM_SALT_SIZE);
1012 
1013 		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
1014 		break;
1015 
1016 	default:
1017 		/* Unknown cipher type */
1018 		return -EINVAL;
1019 	}
1020 	cpu_to_be64s(&rctx->ctrl_word.flags);
1021 
1022 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
1023 	req_info->ctrl.s.se_req = 1;
1024 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
1025 				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
1026 	if (enc) {
1027 		req_info->req.opcode.s.minor = 2;
1028 		req_info->req.param1 = req->cryptlen;
1029 		req_info->req.param2 = req->cryptlen + req->assoclen;
1030 	} else {
1031 		req_info->req.opcode.s.minor = 3;
1032 		req_info->req.param1 = req->cryptlen - mac_len;
1033 		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
1034 	}
1035 
1036 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
1037 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
1038 	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
1039 	fctx->enc.enc_ctrl.e.mac_len = mac_len;
1040 	cpu_to_be64s(&fctx->enc.enc_ctrl.u);
1041 
1042 	/*
1043 	 * Store the packet data information in the offset control
1044 	 * word (first 8 bytes).
1045 	 */
1046 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
1047 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
1048 	req_info->req.dlen += CONTROL_WORD_LEN;
1049 	++(*argcnt);
1050 
1051 	req_info->in[*argcnt].vptr = (u8 *)fctx;
1052 	req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
1053 	req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
1054 	++(*argcnt);
1055 
1056 	return 0;
1057 }
1058 
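/*
 * Build the request header for the NULL-cipher (HMAC-only) case: the
 * HMAC major opcode is used and the raw authentication key is passed as
 * the first gather entry, rounded up to an 8-byte multiple.
 */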
1059 static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
1060 				      u32 enc)
1061 {
1062 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1063 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1064 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1065 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1066 
1067 	req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
1068 	req_info->ctrl.s.se_req = 1;
1069 	req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
1070 				 DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
1071 	req_info->is_trunc_hmac = ctx->is_trunc_hmac;
1072 
1073 	req_info->req.opcode.s.minor = 0;
1074 	req_info->req.param1 = ctx->auth_key_len;
1075 	req_info->req.param2 = ctx->mac_type << 8;
1076 
1077 	/* Add authentication key */
1078 	req_info->in[*argcnt].vptr = ctx->key;
1079 	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
1080 	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
1081 	++(*argcnt);
1082 }
1083 
1084 static inline int create_aead_input_list(struct aead_request *req, u32 enc)
1085 {
1086 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1087 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1088 	u32 inputlen = req->cryptlen + req->assoclen;
1089 	u32 status, argcnt = 0;
1090 
1091 	status = create_aead_ctx_hdr(req, enc, &argcnt);
1092 	if (status)
1093 		return status;
1094 	update_input_data(req_info, req->src, inputlen, &argcnt);
1095 	req_info->in_cnt = argcnt;
1096 
1097 	return 0;
1098 }
1099 
1100 static inline void create_aead_output_list(struct aead_request *req, u32 enc,
1101 					   u32 mac_len)
1102 {
1103 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1104 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1105 	u32 argcnt = 0, outputlen = 0;
1106 
1107 	if (enc)
1108 		outputlen = req->cryptlen + req->assoclen + mac_len;
1109 	else
1110 		outputlen = req->cryptlen + req->assoclen - mac_len;
1111 
1112 	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
1113 	req_info->out_cnt = argcnt;
1114 }
1115 
1116 static inline void create_aead_null_input_list(struct aead_request *req,
1117 					       u32 enc, u32 mac_len)
1118 {
1119 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1120 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1121 	u32 inputlen, argcnt = 0;
1122 
1123 	if (enc)
1124 		inputlen = req->cryptlen + req->assoclen;
1125 	else
1126 		inputlen = req->cryptlen + req->assoclen - mac_len;
1127 
1128 	create_hmac_ctx_hdr(req, &argcnt, enc);
1129 	update_input_data(req_info, req->src, inputlen, &argcnt);
1130 	req_info->in_cnt = argcnt;
1131 }
1132 
1133 static inline int create_aead_null_output_list(struct aead_request *req,
1134 					       u32 enc, u32 mac_len)
1135 {
1136 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1137 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1138 	struct scatterlist *dst;
1139 	u8 *ptr = NULL;
1140 	int argcnt = 0, status, offset;
1141 	u32 inputlen;
1142 
1143 	if (enc)
1144 		inputlen = req->cryptlen + req->assoclen;
1145 	else
1146 		inputlen = req->cryptlen + req->assoclen - mac_len;
1147 
1148 	/*
1149 	 * If the source and destination are different then copy
1150 	 * the payload to the destination.
1151 	 */
1152 	if (req->src != req->dst) {
1153 
1154 		ptr = kmalloc(inputlen, (req_info->areq->flags &
1155 					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
1156 					 GFP_KERNEL : GFP_ATOMIC);
1157 		if (!ptr)
1158 			return -ENOMEM;
1159 
1160 		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
1161 					   inputlen);
1162 		if (status != inputlen) {
1163 			status = -EINVAL;
1164 			goto error_free;
1165 		}
1166 		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
1167 					     inputlen);
1168 		if (status != inputlen) {
1169 			status = -EINVAL;
1170 			goto error_free;
1171 		}
1172 		kfree(ptr);
1173 	}
1174 
1175 	if (enc) {
1176 		/*
1177 		 * In the encryption scenario the HMAC has to be
1178 		 * appended after the payload.
1179 		 */
1180 		dst = req->dst;
1181 		offset = inputlen;
1182 		while (offset >= dst->length) {
1183 			offset -= dst->length;
1184 			dst = sg_next(dst);
1185 			if (!dst)
1186 				return -ENOENT;
1187 		}
1188 
1189 		update_output_data(req_info, dst, offset, mac_len, &argcnt);
1190 	} else {
1191 		/*
1192 		 * In the decryption scenario the HMAC calculated over the
1193 		 * received payload has to be compared with the received HMAC.
1194 		 */
1195 		status = sg_copy_buffer(req->src, sg_nents(req->src),
1196 					rctx->fctx.hmac.s.hmac_recv, mac_len,
1197 					inputlen, true);
1198 		if (status != mac_len)
1199 			return -EINVAL;
1200 
1201 		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
1202 		req_info->out[argcnt].size = mac_len;
1203 		argcnt++;
1204 	}
1205 
1206 	req_info->out_cnt = argcnt;
1207 	return 0;
1208 
1209 error_free:
1210 	kfree(ptr);
1211 	return status;
1212 }
1213 
1214 static int aead_do_fallback(struct aead_request *req, bool is_enc)
1215 {
1216 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1217 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1218 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(aead);
1219 	int ret;
1220 
1221 	if (ctx->fbk_cipher) {
1222 		/* Store the cipher tfm and then use the fallback tfm */
1223 		aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
1224 		aead_request_set_callback(&rctx->fbk_req, req->base.flags,
1225 					  req->base.complete, req->base.data);
1226 		aead_request_set_crypt(&rctx->fbk_req, req->src,
1227 				       req->dst, req->cryptlen, req->iv);
1228 		aead_request_set_ad(&rctx->fbk_req, req->assoclen);
1229 		ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
1230 			       crypto_aead_decrypt(&rctx->fbk_req);
1231 	} else {
1232 		ret = -EINVAL;
1233 	}
1234 
1235 	return ret;
1236 }
1237 
1238 static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
1239 {
1240 	struct otx2_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1241 	struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
1242 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1243 	struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
1244 	struct pci_dev *pdev;
1245 	int status, cpu_num;
1246 
1247 	/* Clear control words */
1248 	rctx->ctrl_word.flags = 0;
1249 	rctx->fctx.enc.enc_ctrl.u = 0;
1250 
1251 	req_info->callback = otx2_cpt_aead_callback;
1252 	req_info->areq = &req->base;
1253 	req_info->req_type = reg_type;
1254 	req_info->is_enc = enc;
1255 	req_info->is_trunc_hmac = false;
1256 
1257 	req_info->req.cptr = ctx->er_ctx.hw_ctx;
1258 	req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
1259 
1260 	switch (reg_type) {
1261 	case OTX2_CPT_AEAD_ENC_DEC_REQ:
1262 		status = create_aead_input_list(req, enc);
1263 		if (status)
1264 			return status;
1265 		create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
1266 		break;
1267 
1268 	case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
1269 		create_aead_null_input_list(req, enc,
1270 					    crypto_aead_authsize(tfm));
1271 		status = create_aead_null_output_list(req, enc,
1272 						crypto_aead_authsize(tfm));
1273 		if (status)
1274 			return status;
1275 		break;
1276 
1277 	default:
1278 		return -EINVAL;
1279 	}
1280 	if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
1281 		return -EINVAL;
1282 
1283 	if (!req_info->req.param2 ||
1284 	    (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
1285 	    (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
1286 		return aead_do_fallback(req, enc);
1287 
1288 	status = get_se_device(&pdev, &cpu_num);
1289 	if (status)
1290 		return status;
1291 
1292 	req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
1293 							OTX2_CPT_SE_TYPES);
1294 
1295 	/*
1296 	 * We perform an asynchronous send; once the request is
1297 	 * completed the driver notifies the caller through the
1298 	 * registered callback function.
1299 	 */
1300 	return otx2_cpt_do_request(pdev, req_info, cpu_num);
1301 }
1302 
1303 static int otx2_cpt_aead_encrypt(struct aead_request *req)
1304 {
1305 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
1306 }
1307 
1308 static int otx2_cpt_aead_decrypt(struct aead_request *req)
1309 {
1310 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
1311 }
1312 
1313 static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
1314 {
1315 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
1316 }
1317 
1318 static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
1319 {
1320 	return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
1321 }
1322 
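/*
 * skcipher algorithms offloaded to the CPT engine.  All of them are
 * registered with CRYPTO_ALG_NEED_FALLBACK so a software implementation
 * can take over requests the hardware cannot handle, e.g. ones larger
 * than OTX2_CPT_MAX_REQ_SIZE.
 */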
1323 static struct skcipher_alg otx2_cpt_skciphers[] = { {
1324 	.base.cra_name = "xts(aes)",
1325 	.base.cra_driver_name = "cpt_xts_aes",
1326 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1327 	.base.cra_blocksize = AES_BLOCK_SIZE,
1328 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1329 	.base.cra_alignmask = 7,
1330 	.base.cra_priority = 4001,
1331 	.base.cra_module = THIS_MODULE,
1332 
1333 	.init = otx2_cpt_enc_dec_init,
1334 	.exit = otx2_cpt_skcipher_exit,
1335 	.ivsize = AES_BLOCK_SIZE,
1336 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
1337 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
1338 	.setkey = otx2_cpt_skcipher_xts_setkey,
1339 	.encrypt = otx2_cpt_skcipher_encrypt,
1340 	.decrypt = otx2_cpt_skcipher_decrypt,
1341 }, {
1342 	.base.cra_name = "cbc(aes)",
1343 	.base.cra_driver_name = "cpt_cbc_aes",
1344 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1345 	.base.cra_blocksize = AES_BLOCK_SIZE,
1346 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1347 	.base.cra_alignmask = 7,
1348 	.base.cra_priority = 4001,
1349 	.base.cra_module = THIS_MODULE,
1350 
1351 	.init = otx2_cpt_enc_dec_init,
1352 	.exit = otx2_cpt_skcipher_exit,
1353 	.ivsize = AES_BLOCK_SIZE,
1354 	.min_keysize = AES_MIN_KEY_SIZE,
1355 	.max_keysize = AES_MAX_KEY_SIZE,
1356 	.setkey = otx2_cpt_skcipher_cbc_aes_setkey,
1357 	.encrypt = otx2_cpt_skcipher_encrypt,
1358 	.decrypt = otx2_cpt_skcipher_decrypt,
1359 }, {
1360 	.base.cra_name = "ecb(aes)",
1361 	.base.cra_driver_name = "cpt_ecb_aes",
1362 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1363 	.base.cra_blocksize = AES_BLOCK_SIZE,
1364 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1365 	.base.cra_alignmask = 7,
1366 	.base.cra_priority = 4001,
1367 	.base.cra_module = THIS_MODULE,
1368 
1369 	.init = otx2_cpt_enc_dec_init,
1370 	.exit = otx2_cpt_skcipher_exit,
1371 	.ivsize = 0,
1372 	.min_keysize = AES_MIN_KEY_SIZE,
1373 	.max_keysize = AES_MAX_KEY_SIZE,
1374 	.setkey = otx2_cpt_skcipher_ecb_aes_setkey,
1375 	.encrypt = otx2_cpt_skcipher_encrypt,
1376 	.decrypt = otx2_cpt_skcipher_decrypt,
1377 }, {
1378 	.base.cra_name = "cbc(des3_ede)",
1379 	.base.cra_driver_name = "cpt_cbc_des3_ede",
1380 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1381 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1382 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1383 	.base.cra_alignmask = 7,
1384 	.base.cra_priority = 4001,
1385 	.base.cra_module = THIS_MODULE,
1386 
1387 	.init = otx2_cpt_enc_dec_init,
1388 	.exit = otx2_cpt_skcipher_exit,
1389 	.min_keysize = DES3_EDE_KEY_SIZE,
1390 	.max_keysize = DES3_EDE_KEY_SIZE,
1391 	.ivsize = DES_BLOCK_SIZE,
1392 	.setkey = otx2_cpt_skcipher_cbc_des3_setkey,
1393 	.encrypt = otx2_cpt_skcipher_encrypt,
1394 	.decrypt = otx2_cpt_skcipher_decrypt,
1395 }, {
1396 	.base.cra_name = "ecb(des3_ede)",
1397 	.base.cra_driver_name = "cpt_ecb_des3_ede",
1398 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1399 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1400 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
1401 	.base.cra_alignmask = 7,
1402 	.base.cra_priority = 4001,
1403 	.base.cra_module = THIS_MODULE,
1404 
1405 	.init = otx2_cpt_enc_dec_init,
1406 	.exit = otx2_cpt_skcipher_exit,
1407 	.min_keysize = DES3_EDE_KEY_SIZE,
1408 	.max_keysize = DES3_EDE_KEY_SIZE,
1409 	.ivsize = 0,
1410 	.setkey = otx2_cpt_skcipher_ecb_des3_setkey,
1411 	.encrypt = otx2_cpt_skcipher_encrypt,
1412 	.decrypt = otx2_cpt_skcipher_decrypt,
1413 } };
1414 
1415 static struct aead_alg otx2_cpt_aeads[] = { {
1416 	.base = {
1417 		.cra_name = "authenc(hmac(sha1),cbc(aes))",
1418 		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
1419 		.cra_blocksize = AES_BLOCK_SIZE,
1420 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1421 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1422 		.cra_priority = 4001,
1423 		.cra_alignmask = 0,
1424 		.cra_module = THIS_MODULE,
1425 	},
1426 	.init = otx2_cpt_aead_cbc_aes_sha1_init,
1427 	.exit = otx2_cpt_aead_exit,
1428 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1429 	.setauthsize = otx2_cpt_aead_set_authsize,
1430 	.encrypt = otx2_cpt_aead_encrypt,
1431 	.decrypt = otx2_cpt_aead_decrypt,
1432 	.ivsize = AES_BLOCK_SIZE,
1433 	.maxauthsize = SHA1_DIGEST_SIZE,
1434 }, {
1435 	.base = {
1436 		.cra_name = "authenc(hmac(sha256),cbc(aes))",
1437 		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
1438 		.cra_blocksize = AES_BLOCK_SIZE,
1439 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1440 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1441 		.cra_priority = 4001,
1442 		.cra_alignmask = 0,
1443 		.cra_module = THIS_MODULE,
1444 	},
1445 	.init = otx2_cpt_aead_cbc_aes_sha256_init,
1446 	.exit = otx2_cpt_aead_exit,
1447 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1448 	.setauthsize = otx2_cpt_aead_set_authsize,
1449 	.encrypt = otx2_cpt_aead_encrypt,
1450 	.decrypt = otx2_cpt_aead_decrypt,
1451 	.ivsize = AES_BLOCK_SIZE,
1452 	.maxauthsize = SHA256_DIGEST_SIZE,
1453 }, {
1454 	.base = {
1455 		.cra_name = "authenc(hmac(sha384),cbc(aes))",
1456 		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
1457 		.cra_blocksize = AES_BLOCK_SIZE,
1458 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1459 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1460 		.cra_priority = 4001,
1461 		.cra_alignmask = 0,
1462 		.cra_module = THIS_MODULE,
1463 	},
1464 	.init = otx2_cpt_aead_cbc_aes_sha384_init,
1465 	.exit = otx2_cpt_aead_exit,
1466 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1467 	.setauthsize = otx2_cpt_aead_set_authsize,
1468 	.encrypt = otx2_cpt_aead_encrypt,
1469 	.decrypt = otx2_cpt_aead_decrypt,
1470 	.ivsize = AES_BLOCK_SIZE,
1471 	.maxauthsize = SHA384_DIGEST_SIZE,
1472 }, {
1473 	.base = {
1474 		.cra_name = "authenc(hmac(sha512),cbc(aes))",
1475 		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
1476 		.cra_blocksize = AES_BLOCK_SIZE,
1477 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1478 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1479 		.cra_priority = 4001,
1480 		.cra_alignmask = 0,
1481 		.cra_module = THIS_MODULE,
1482 	},
1483 	.init = otx2_cpt_aead_cbc_aes_sha512_init,
1484 	.exit = otx2_cpt_aead_exit,
1485 	.setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
1486 	.setauthsize = otx2_cpt_aead_set_authsize,
1487 	.encrypt = otx2_cpt_aead_encrypt,
1488 	.decrypt = otx2_cpt_aead_decrypt,
1489 	.ivsize = AES_BLOCK_SIZE,
1490 	.maxauthsize = SHA512_DIGEST_SIZE,
1491 }, {
1492 	.base = {
1493 		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
1494 		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
1495 		.cra_blocksize = 1,
1496 		.cra_flags = CRYPTO_ALG_ASYNC,
1497 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1498 		.cra_priority = 4001,
1499 		.cra_alignmask = 0,
1500 		.cra_module = THIS_MODULE,
1501 	},
1502 	.init = otx2_cpt_aead_ecb_null_sha1_init,
1503 	.exit = otx2_cpt_aead_exit,
1504 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1505 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1506 	.encrypt = otx2_cpt_aead_null_encrypt,
1507 	.decrypt = otx2_cpt_aead_null_decrypt,
1508 	.ivsize = 0,
1509 	.maxauthsize = SHA1_DIGEST_SIZE,
1510 }, {
1511 	.base = {
1512 		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
1513 		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
1514 		.cra_blocksize = 1,
1515 		.cra_flags = CRYPTO_ALG_ASYNC,
1516 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1517 		.cra_priority = 4001,
1518 		.cra_alignmask = 0,
1519 		.cra_module = THIS_MODULE,
1520 	},
1521 	.init = otx2_cpt_aead_ecb_null_sha256_init,
1522 	.exit = otx2_cpt_aead_exit,
1523 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1524 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1525 	.encrypt = otx2_cpt_aead_null_encrypt,
1526 	.decrypt = otx2_cpt_aead_null_decrypt,
1527 	.ivsize = 0,
1528 	.maxauthsize = SHA256_DIGEST_SIZE,
1529 }, {
1530 	.base = {
1531 		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
1532 		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
1533 		.cra_blocksize = 1,
1534 		.cra_flags = CRYPTO_ALG_ASYNC,
1535 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1536 		.cra_priority = 4001,
1537 		.cra_alignmask = 0,
1538 		.cra_module = THIS_MODULE,
1539 	},
1540 	.init = otx2_cpt_aead_ecb_null_sha384_init,
1541 	.exit = otx2_cpt_aead_exit,
1542 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1543 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1544 	.encrypt = otx2_cpt_aead_null_encrypt,
1545 	.decrypt = otx2_cpt_aead_null_decrypt,
1546 	.ivsize = 0,
1547 	.maxauthsize = SHA384_DIGEST_SIZE,
1548 }, {
1549 	.base = {
1550 		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
1551 		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
1552 		.cra_blocksize = 1,
1553 		.cra_flags = CRYPTO_ALG_ASYNC,
1554 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1555 		.cra_priority = 4001,
1556 		.cra_alignmask = 0,
1557 		.cra_module = THIS_MODULE,
1558 	},
1559 	.init = otx2_cpt_aead_ecb_null_sha512_init,
1560 	.exit = otx2_cpt_aead_exit,
1561 	.setkey = otx2_cpt_aead_ecb_null_sha_setkey,
1562 	.setauthsize = otx2_cpt_aead_null_set_authsize,
1563 	.encrypt = otx2_cpt_aead_null_encrypt,
1564 	.decrypt = otx2_cpt_aead_null_decrypt,
1565 	.ivsize = 0,
1566 	.maxauthsize = SHA512_DIGEST_SIZE,
1567 }, {
1568 	.base = {
1569 		.cra_name = "rfc4106(gcm(aes))",
1570 		.cra_driver_name = "cpt_rfc4106_gcm_aes",
1571 		.cra_blocksize = 1,
1572 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1573 		.cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1574 		.cra_priority = 4001,
1575 		.cra_alignmask = 0,
1576 		.cra_module = THIS_MODULE,
1577 	},
1578 	.init = otx2_cpt_aead_gcm_aes_init,
1579 	.exit = otx2_cpt_aead_exit,
1580 	.setkey = otx2_cpt_aead_gcm_aes_setkey,
1581 	.setauthsize = otx2_cpt_aead_gcm_set_authsize,
1582 	.encrypt = otx2_cpt_aead_encrypt,
1583 	.decrypt = otx2_cpt_aead_decrypt,
1584 	.ivsize = AES_GCM_IV_SIZE,
1585 	.maxauthsize = AES_GCM_ICV_SIZE,
1586 } };
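
/*
 * A minimal usage sketch (assumed consumer code, not part of this
 * driver): a kernel user allocates the transform through the generic
 * crypto API and this implementation is picked because of its high
 * cra_priority, e.g.:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	// key = AES key followed by the 4-byte salt
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	...
 *	crypto_free_aead(tfm);
 */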
1587 
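/*
 * Register all algorithms with the crypto API.  CRYPTO_ALG_DEAD is
 * cleared first since it may still be set in the static templates from
 * an earlier unregistration.
 */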
1588 static inline int cpt_register_algs(void)
1589 {
1590 	int i, err = 0;
1591 
1592 	for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
1593 		otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1594 
1595 	err = crypto_register_skciphers(otx2_cpt_skciphers,
1596 					ARRAY_SIZE(otx2_cpt_skciphers));
1597 	if (err)
1598 		return err;
1599 
1600 	for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
1601 		otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1602 
1603 	err = crypto_register_aeads(otx2_cpt_aeads,
1604 				    ARRAY_SIZE(otx2_cpt_aeads));
1605 	if (err) {
1606 		crypto_unregister_skciphers(otx2_cpt_skciphers,
1607 					    ARRAY_SIZE(otx2_cpt_skciphers));
1608 		return err;
1609 	}
1610 
1611 	return 0;
1612 }
1613 
1614 static inline void cpt_unregister_algs(void)
1615 {
1616 	crypto_unregister_skciphers(otx2_cpt_skciphers,
1617 				    ARRAY_SIZE(otx2_cpt_skciphers));
1618 	crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
1619 }
1620 
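/* Sort helper: order CPT device descriptors by their PCI devfn. */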
1621 static int compare_func(const void *lptr, const void *rptr)
1622 {
1623 	const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
1624 	const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
1625 
1626 	if (ldesc->dev->devfn < rdesc->dev->devfn)
1627 		return -1;
1628 	if (ldesc->dev->devfn > rdesc->dev->devfn)
1629 		return 1;
1630 	return 0;
1631 }
1632 
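/*
 * Called for each CPT device that provides SE queues.  The device is
 * added to se_devices and, once the expected number of devices has been
 * probed, the crypto algorithms are registered exactly once.
 */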
1633 int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
1634 			 int num_queues, int num_devices)
1635 {
1636 	int ret = 0;
1637 	int count;
1638 
1639 	mutex_lock(&mutex);
1640 	count = atomic_read(&se_devices.count);
1641 	if (count >= OTX2_CPT_MAX_LFS_NUM) {
1642 		dev_err(&pdev->dev, "No space to add a new device\n");
1643 		ret = -ENOSPC;
1644 		goto unlock;
1645 	}
1646 	se_devices.desc[count].num_queues = num_queues;
1647 	se_devices.desc[count++].dev = pdev;
1648 	atomic_inc(&se_devices.count);
1649 
1650 	if (atomic_read(&se_devices.count) == num_devices &&
1651 	    is_crypto_registered == false) {
1652 		if (cpt_register_algs()) {
1653 			dev_err(&pdev->dev,
1654 				"Error in registering crypto algorithms\n");
1655 			ret = -EINVAL;
1656 			goto unlock;
1657 		}
1658 		try_module_get(mod);
1659 		is_crypto_registered = true;
1660 	}
1661 	sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
1662 	     compare_func, NULL);
1663 
1664 unlock:
1665 	mutex_unlock(&mutex);
1666 	return ret;
1667 }
1668 
1669 void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
1670 {
1671 	struct cpt_device_table *dev_tbl;
1672 	bool dev_found = false;
1673 	int i, j, count;
1674 
1675 	mutex_lock(&mutex);
1676 
1677 	dev_tbl = &se_devices;
1678 	count = atomic_read(&dev_tbl->count);
1679 	for (i = 0; i < count; i++) {
1680 		if (pdev == dev_tbl->desc[i].dev) {
1681 			for (j = i; j < count-1; j++)
1682 				dev_tbl->desc[j] = dev_tbl->desc[j+1];
1683 			dev_found = true;
1684 			break;
1685 		}
1686 	}
1687 
1688 	if (!dev_found) {
1689 		dev_err(&pdev->dev, "%s device not found\n", __func__);
1690 		goto unlock;
1691 	}
1692 	if (atomic_dec_and_test(&se_devices.count)) {
1693 		cpt_unregister_algs();
1694 		module_put(mod);
1695 		is_crypto_registered = false;
1696 	}
1697 
1698 unlock:
1699 	mutex_unlock(&mutex);
1700 }
1701