1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTX CPT driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <crypto/aes.h>
12 #include <crypto/authenc.h>
13 #include <crypto/cryptd.h>
14 #include <crypto/des.h>
15 #include <crypto/internal/aead.h>
16 #include <crypto/sha1.h>
17 #include <crypto/sha2.h>
18 #include <crypto/xts.h>
19 #include <crypto/scatterwalk.h>
20 #include <linux/sort.h>
21 #include <linux/module.h>
22 #include "otx_cptvf.h"
23 #include "otx_cptvf_algs.h"
24 #include "otx_cptvf_reqmgr.h"
25 
26 #define CPT_MAX_VF_NUM	64
27 /* Size of salt in AES GCM mode */
28 #define AES_GCM_SALT_SIZE	4
29 /* Size of IV in AES GCM mode */
30 #define AES_GCM_IV_SIZE		8
31 /* Size of ICV (Integrity Check Value) in AES GCM mode */
32 #define AES_GCM_ICV_SIZE	16
33 /* Offset of IV in AES GCM mode */
34 #define AES_GCM_IV_OFFSET	8
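/*
 * rfc4106 layout behind the defines above: the 12-byte GCM nonce is the
 * 4-byte salt (taken from the key blob at setkey time) followed by the
 * 8-byte explicit IV, which trails the associated data in the request.
 */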
35 #define CONTROL_WORD_LEN	8
36 #define KEY2_OFFSET		48
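/* OR-ed into the opcode major field: bit 7 selects scatter/gather DMA */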
37 #define DMA_MODE_FLAG(dma_mode) \
38 	(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)
39 
40 /* Truncated SHA digest size */
41 #define SHA1_TRUNC_DIGEST_SIZE		12
42 #define SHA256_TRUNC_DIGEST_SIZE	16
43 #define SHA384_TRUNC_DIGEST_SIZE	24
44 #define SHA512_TRUNC_DIGEST_SIZE	32
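/*
 * These match the truncated HMAC lengths used by IPsec: 96-bit
 * hmac(sha1) (RFC 2404) and the RFC 4868 half-length truncations
 * for SHA-256/384/512.
 */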
45 
46 static DEFINE_MUTEX(mutex);
47 static int is_crypto_registered;
48 
49 struct cpt_device_desc {
50 	enum otx_cptpf_type pf_type;
51 	struct pci_dev *dev;
52 	int num_queues;
53 };
54 
55 struct cpt_device_table {
56 	atomic_t count;
57 	struct cpt_device_desc desc[CPT_MAX_VF_NUM];
58 };
59 
60 static struct cpt_device_table se_devices = {
61 	.count = ATOMIC_INIT(0)
62 };
63 
64 static struct cpt_device_table ae_devices = {
65 	.count = ATOMIC_INIT(0)
66 };
67 
68 static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
69 
70 static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
71 {
72 	int count, ret = 0;
73 
74 	count = atomic_read(&se_devices.count);
75 	if (count < 1)
76 		return -ENODEV;
77 
78 	*cpu_num = get_cpu();
79 
80 	if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
81 		/*
82 		 * On the OcteonTX platform there is one CPT instruction queue
83 		 * bound to each VF. We get maximum performance if one CPT
84 		 * queue is available per CPU; otherwise CPT queues have to be
85 		 * shared between CPUs.
86 		 */
87 		if (*cpu_num >= count)
88 			*cpu_num %= count;
89 		*pdev = se_devices.desc[*cpu_num].dev;
90 	} else {
91 		pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
92 		ret = -EINVAL;
93 	}
94 	put_cpu();
95 
96 	return ret;
97 }
98 
99 static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
100 {
101 	struct otx_cpt_req_ctx *rctx;
102 	struct aead_request *req;
103 	struct crypto_aead *tfm;
104 
105 	req = container_of(cpt_req->areq, struct aead_request, base);
106 	tfm = crypto_aead_reqtfm(req);
107 	rctx = aead_request_ctx_dma(req);
108 	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
109 		   rctx->fctx.hmac.s.hmac_recv,
110 		   crypto_aead_authsize(tfm)) != 0)
111 		return -EBADMSG;
112 
113 	return 0;
114 }
115 
116 static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
117 {
118 	struct otx_cpt_info_buffer *cpt_info = arg2;
119 	struct crypto_async_request *areq = arg1;
120 	struct otx_cpt_req_info *cpt_req;
121 	struct pci_dev *pdev;
122 
123 	if (!cpt_info)
124 		goto complete;
125 
126 	cpt_req = cpt_info->req;
127 	if (!status) {
128 		/*
129 		 * When the selected cipher is NULL we need to manually
130 		 * verify whether the calculated HMAC value matches the
131 		 * received HMAC value
132 		 */
133 		if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
134 		    !cpt_req->is_enc)
135 			status = validate_hmac_cipher_null(cpt_req);
136 	}
137 	pdev = cpt_info->pdev;
138 	do_request_cleanup(pdev, cpt_info);
139 
140 complete:
141 	if (areq)
142 		crypto_request_complete(areq, status);
143 }
144 
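/*
 * On completion the skcipher API expects req->iv to contain the output
 * IV, which for CBC is the last ciphertext block; copy it back here.
 */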
145 static void output_iv_copyback(struct crypto_async_request *areq)
146 {
147 	struct otx_cpt_req_info *req_info;
148 	struct skcipher_request *sreq;
149 	struct crypto_skcipher *stfm;
150 	struct otx_cpt_req_ctx *rctx;
151 	struct otx_cpt_enc_ctx *ctx;
152 	u32 start, ivsize;
153 
154 	sreq = container_of(areq, struct skcipher_request, base);
155 	stfm = crypto_skcipher_reqtfm(sreq);
156 	ctx = crypto_skcipher_ctx(stfm);
157 	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
158 	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
159 		rctx = skcipher_request_ctx_dma(sreq);
160 		req_info = &rctx->cpt_req;
161 		ivsize = crypto_skcipher_ivsize(stfm);
162 		start = sreq->cryptlen - ivsize;
163 
164 		if (req_info->is_enc) {
165 			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
166 						 ivsize, 0);
167 		} else {
168 			if (sreq->src != sreq->dst) {
169 				scatterwalk_map_and_copy(sreq->iv, sreq->src,
170 							 start, ivsize, 0);
171 			} else {
172 				memcpy(sreq->iv, req_info->iv_out, ivsize);
173 				kfree(req_info->iv_out);
174 			}
175 		}
176 	}
177 }
178 
179 static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
180 {
181 	struct otx_cpt_info_buffer *cpt_info = arg2;
182 	struct crypto_async_request *areq = arg1;
183 	struct pci_dev *pdev;
184 
185 	if (areq) {
186 		if (!status)
187 			output_iv_copyback(areq);
188 		if (cpt_info) {
189 			pdev = cpt_info->pdev;
190 			do_request_cleanup(pdev, cpt_info);
191 		}
192 		crypto_request_complete(areq, status);
193 	}
194 }
195 
196 static inline void update_input_data(struct otx_cpt_req_info *req_info,
197 				     struct scatterlist *inp_sg,
198 				     u32 nbytes, u32 *argcnt)
199 {
200 	req_info->req.dlen += nbytes;
201 
202 	while (nbytes) {
203 		u32 len = min(nbytes, inp_sg->length);
204 		u8 *ptr = sg_virt(inp_sg);
205 
206 		req_info->in[*argcnt].vptr = (void *)ptr;
207 		req_info->in[*argcnt].size = len;
208 		nbytes -= len;
209 		++(*argcnt);
210 		inp_sg = sg_next(inp_sg);
211 	}
212 }
213 
214 static inline void update_output_data(struct otx_cpt_req_info *req_info,
215 				      struct scatterlist *outp_sg,
216 				      u32 offset, u32 nbytes, u32 *argcnt)
217 {
218 	req_info->rlen += nbytes;
219 
220 	while (nbytes) {
221 		u32 len = min(nbytes, outp_sg->length - offset);
222 		u8 *ptr = sg_virt(outp_sg);
223 
224 		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
225 		req_info->out[*argcnt].size = len;
226 		nbytes -= len;
227 		++(*argcnt);
228 		offset = 0;
229 		outp_sg = sg_next(outp_sg);
230 	}
231 }
232 
233 static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
234 				 u32 *argcnt)
235 {
236 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
237 	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
238 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
239 	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
240 	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
241 	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
242 	int ivsize = crypto_skcipher_ivsize(stfm);
243 	u32 start = req->cryptlen - ivsize;
244 	gfp_t flags;
245 
246 	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
247 			GFP_KERNEL : GFP_ATOMIC;
248 	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
249 	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
250 
251 	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
252 				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
253 	if (enc) {
254 		req_info->req.opcode.s.minor = 2;
255 	} else {
256 		req_info->req.opcode.s.minor = 3;
257 		if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
258 		    ctx->cipher_type == OTX_CPT_DES3_CBC) &&
259 		    req->src == req->dst) {
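			/*
			 * An in-place CBC decrypt overwrites the last
			 * ciphertext block, which is the output IV, so
			 * stash it now for output_iv_copyback()
			 */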
260 			req_info->iv_out = kmalloc(ivsize, flags);
261 			if (!req_info->iv_out)
262 				return -ENOMEM;
263 
264 			scatterwalk_map_and_copy(req_info->iv_out, req->src,
265 						 start, ivsize, 0);
266 		}
267 	}
268 	/* Encryption data length */
269 	req_info->req.param1 = req->cryptlen;
270 	/* Authentication data length */
271 	req_info->req.param2 = 0;
272 
273 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
274 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
275 	fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
276 
277 	if (ctx->cipher_type == OTX_CPT_AES_XTS)
278 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
279 	else
280 		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
281 
282 	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
283 
284 	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
285 
286 	/*
287 	 * Store packet data information in the offset control word,
288 	 * the first 8 bytes of the input
289 	 */
290 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
291 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
292 	req_info->req.dlen += CONTROL_WORD_LEN;
293 	++(*argcnt);
294 
295 	req_info->in[*argcnt].vptr = (u8 *)fctx;
296 	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
297 	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
298 
299 	++(*argcnt);
300 
301 	return 0;
302 }
303 
304 static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
305 				    u32 enc_iv_len)
306 {
307 	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
308 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
309 	u32 argcnt =  0;
310 	int ret;
311 
312 	ret = create_ctx_hdr(req, enc, &argcnt);
313 	if (ret)
314 		return ret;
315 
316 	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
317 	req_info->incnt = argcnt;
318 
319 	return 0;
320 }
321 
322 static inline void create_output_list(struct skcipher_request *req,
323 				      u32 enc_iv_len)
324 {
325 	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
326 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
327 	u32 argcnt = 0;
328 
329 	/*
330 	 * Output buffer processing:
331 	 * AES encryption/decryption output is received in the
332 	 * following format
333 	 *
334 	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
335 	 * [   16 bytes   ][  request enc/dec data length (AES CBC)  ]
336 	 */
337 	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
338 	req_info->outcnt = argcnt;
339 }
340 
341 static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
342 {
343 	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
344 	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
345 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
346 	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
347 	struct pci_dev *pdev;
348 	int status, cpu_num;
349 
350 	/* Validate that request doesn't exceed maximum CPT supported size */
351 	if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
352 		return -E2BIG;
353 
354 	/* Clear control words */
355 	rctx->ctrl_word.flags = 0;
356 	rctx->fctx.enc.enc_ctrl.flags = 0;
357 
358 	status = create_input_list(req, enc, enc_iv_len);
359 	if (status)
360 		return status;
361 	create_output_list(req, enc_iv_len);
362 
363 	status = get_se_device(&pdev, &cpu_num);
364 	if (status)
365 		return status;
366 
367 	req_info->callback = (void *)otx_cpt_skcipher_callback;
368 	req_info->areq = &req->base;
369 	req_info->req_type = OTX_CPT_ENC_DEC_REQ;
370 	req_info->is_enc = enc;
371 	req_info->is_trunc_hmac = false;
372 	req_info->ctrl.s.grp = 0;
373 
374 	/*
375 	 * We perform an asynchronous send; once the request
376 	 * completes, the driver notifies the caller through the
377 	 * registered callback functions
378 	 */
379 	status = otx_cpt_do_request(pdev, req_info, cpu_num);
380 
381 	return status;
382 }
383 
384 static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
385 {
386 	return cpt_enc_dec(req, true);
387 }
388 
389 static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
390 {
391 	return cpt_enc_dec(req, false);
392 }
393 
394 static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
395 				       const u8 *key, u32 keylen)
396 {
397 	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
398 	const u8 *key2 = key + (keylen / 2);
399 	const u8 *key1 = key;
400 	int ret;
401 
402 	ret = xts_verify_key(tfm, key, keylen);
403 	if (ret)
404 		return ret;
405 	ctx->key_len = keylen;
406 	memcpy(ctx->enc_key, key1, keylen / 2);
407 	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
408 	ctx->cipher_type = OTX_CPT_AES_XTS;
409 	switch (ctx->key_len) {
410 	case 2 * AES_KEYSIZE_128:
411 		ctx->key_type = OTX_CPT_AES_128_BIT;
412 		break;
413 	case 2 * AES_KEYSIZE_256:
414 		ctx->key_type = OTX_CPT_AES_256_BIT;
415 		break;
416 	default:
417 		return -EINVAL;
418 	}
419 
420 	return 0;
421 }
422 
423 static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
424 			  u32 keylen, u8 cipher_type)
425 {
426 	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
427 
428 	if (keylen != DES3_EDE_KEY_SIZE)
429 		return -EINVAL;
430 
431 	ctx->key_len = keylen;
432 	ctx->cipher_type = cipher_type;
433 
434 	memcpy(ctx->enc_key, key, keylen);
435 
436 	return 0;
437 }
438 
439 static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
440 			  u32 keylen, u8 cipher_type)
441 {
442 	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
443 
444 	switch (keylen) {
445 	case AES_KEYSIZE_128:
446 		ctx->key_type = OTX_CPT_AES_128_BIT;
447 		break;
448 	case AES_KEYSIZE_192:
449 		ctx->key_type = OTX_CPT_AES_192_BIT;
450 		break;
451 	case AES_KEYSIZE_256:
452 		ctx->key_type = OTX_CPT_AES_256_BIT;
453 		break;
454 	default:
455 		return -EINVAL;
456 	}
457 	ctx->key_len = keylen;
458 	ctx->cipher_type = cipher_type;
459 
460 	memcpy(ctx->enc_key, key, keylen);
461 
462 	return 0;
463 }
464 
465 static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
466 					   const u8 *key, u32 keylen)
467 {
468 	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
469 }
470 
471 static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
472 					   const u8 *key, u32 keylen)
473 {
474 	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
475 }
476 
477 static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
478 					    const u8 *key, u32 keylen)
479 {
480 	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
481 }
482 
483 static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
484 					    const u8 *key, u32 keylen)
485 {
486 	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
487 }
488 
489 static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
490 {
491 	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
492 
493 	memset(ctx, 0, sizeof(*ctx));
494 	/*
495 	 * Additional memory for the skcipher_request is allocated
496 	 * since the cryptd daemon uses this memory for its
497 	 * request_ctx information
498 	 */
499 	crypto_skcipher_set_reqsize_dma(
500 		tfm, sizeof(struct otx_cpt_req_ctx) +
501 		     sizeof(struct skcipher_request));
502 
503 	return 0;
504 }
505 
506 static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
507 {
508 	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
509 
510 	ctx->cipher_type = cipher_type;
511 	ctx->mac_type = mac_type;
512 
513 	switch (ctx->mac_type) {
514 	case OTX_CPT_SHA1:
515 		ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
516 		break;
517 
518 	case OTX_CPT_SHA256:
519 		ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
520 		break;
521 
522 	case OTX_CPT_SHA384:
523 		ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
524 		break;
525 
526 	case OTX_CPT_SHA512:
527 		ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
528 		break;
529 	}
530 
531 	if (IS_ERR(ctx->hashalg))
532 		return PTR_ERR(ctx->hashalg);
533 
534 	crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
535 
536 	if (!ctx->hashalg)
537 		return 0;
538 
539 	/*
540 	 * When the selected cipher is NULL we use the HMAC opcode instead
541 	 * of the FLEXICRYPTO opcode, therefore we don't need the HASH
542 	 * algorithms for calculating ipad and opad
543 	 */
544 	if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
545 		int ss = crypto_shash_statesize(ctx->hashalg);
546 
547 		ctx->ipad = kzalloc(ss, GFP_KERNEL);
548 		if (!ctx->ipad) {
549 			crypto_free_shash(ctx->hashalg);
550 			return -ENOMEM;
551 		}
552 
553 		ctx->opad = kzalloc(ss, GFP_KERNEL);
554 		if (!ctx->opad) {
555 			kfree(ctx->ipad);
556 			crypto_free_shash(ctx->hashalg);
557 			return -ENOMEM;
558 		}
559 	}
560 
561 	ctx->sdesc = alloc_sdesc(ctx->hashalg);
562 	if (!ctx->sdesc) {
563 		kfree(ctx->opad);
564 		kfree(ctx->ipad);
565 		crypto_free_shash(ctx->hashalg);
566 		return -ENOMEM;
567 	}
568 
569 	return 0;
570 }
571 
572 static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
573 {
574 	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
575 }
576 
577 static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
578 {
579 	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
580 }
581 
582 static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
583 {
584 	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
585 }
586 
587 static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
588 {
589 	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
590 }
591 
592 static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
593 {
594 	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
595 }
596 
597 static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
598 {
599 	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
600 }
601 
602 static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
603 {
604 	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
605 }
606 
607 static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
608 {
609 	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
610 }
611 
612 static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
613 {
614 	return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
615 }
616 
617 static void otx_cpt_aead_exit(struct crypto_aead *tfm)
618 {
619 	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
620 
621 	kfree(ctx->ipad);
622 	kfree(ctx->opad);
623 	crypto_free_shash(ctx->hashalg);
624 	kfree(ctx->sdesc);
625 }
626 
627 /*
628  * Validate the Integrity Check Value size, i.e. the requested
629  * authentication tag length
630  */
631 static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
632 				     unsigned int authsize)
633 {
634 	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
635 
636 	switch (ctx->mac_type) {
637 	case OTX_CPT_SHA1:
638 		if (authsize != SHA1_DIGEST_SIZE &&
639 		    authsize != SHA1_TRUNC_DIGEST_SIZE)
640 			return -EINVAL;
641 
642 		if (authsize == SHA1_TRUNC_DIGEST_SIZE)
643 			ctx->is_trunc_hmac = true;
644 		break;
645 
646 	case OTX_CPT_SHA256:
647 		if (authsize != SHA256_DIGEST_SIZE &&
648 		    authsize != SHA256_TRUNC_DIGEST_SIZE)
649 			return -EINVAL;
650 
651 		if (authsize == SHA256_TRUNC_DIGEST_SIZE)
652 			ctx->is_trunc_hmac = true;
653 		break;
654 
655 	case OTX_CPT_SHA384:
656 		if (authsize != SHA384_DIGEST_SIZE &&
657 		    authsize != SHA384_TRUNC_DIGEST_SIZE)
658 			return -EINVAL;
659 
660 		if (authsize == SHA384_TRUNC_DIGEST_SIZE)
661 			ctx->is_trunc_hmac = true;
662 		break;
663 
664 	case OTX_CPT_SHA512:
665 		if (authsize != SHA512_DIGEST_SIZE &&
666 		    authsize != SHA512_TRUNC_DIGEST_SIZE)
667 			return -EINVAL;
668 
669 		if (authsize == SHA512_TRUNC_DIGEST_SIZE)
670 			ctx->is_trunc_hmac = true;
671 		break;
672 
673 	case OTX_CPT_MAC_NULL:
674 		if (ctx->cipher_type == OTX_CPT_AES_GCM) {
675 			if (authsize != AES_GCM_ICV_SIZE)
676 				return -EINVAL;
677 		} else
678 			return -EINVAL;
679 		break;
680 
681 	default:
682 		return -EINVAL;
683 	}
684 
685 	tfm->authsize = authsize;
686 	return 0;
687 }
688 
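/* Allocate a one-off shash descriptor sized for @alg's descsize */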
689 static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
690 {
691 	struct otx_cpt_sdesc *sdesc;
692 	int size;
693 
694 	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
695 	sdesc = kmalloc(size, GFP_KERNEL);
696 	if (!sdesc)
697 		return NULL;
698 
699 	sdesc->shash.tfm = alg;
700 
701 	return sdesc;
702 }
703 
704 static inline void swap_data32(void *buf, u32 len)
705 {
706 	cpu_to_be32_array(buf, buf, len / 4);
707 }
708 
709 static inline void swap_data64(void *buf, u32 len)
710 {
711 	__be64 *dst = buf;
712 	u64 *src = buf;
713 	int i = 0;
714 
715 	for (i = 0 ; i < len / 8; i++, src++, dst++)
716 		*dst = cpu_to_be64p(src);
717 }
718 
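/*
 * crypto_shash_export() writes the partial hash state in CPU-native
 * word order, while the CPT hardware expects it big-endian, so the
 * exported ipad/opad state is byte-swapped in place.
 */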
719 static int swap_pad(u8 mac_type, u8 *pad)
720 {
721 	struct sha512_state *sha512;
722 	struct sha256_state *sha256;
723 	struct sha1_state *sha1;
724 
725 	switch (mac_type) {
726 	case OTX_CPT_SHA1:
727 		sha1 = (struct sha1_state *)pad;
728 		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
729 		break;
730 
731 	case OTX_CPT_SHA256:
732 		sha256 = (struct sha256_state *)pad;
733 		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
734 		break;
735 
736 	case OTX_CPT_SHA384:
737 	case OTX_CPT_SHA512:
738 		sha512 = (struct sha512_state *)pad;
739 		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
740 		break;
741 
742 	default:
743 		return -EINVAL;
744 	}
745 
746 	return 0;
747 }
748 
749 static int aead_hmac_init(struct crypto_aead *cipher,
750 			  struct crypto_authenc_keys *keys)
751 {
752 	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
753 	int ds = crypto_shash_digestsize(ctx->hashalg);
754 	int bs = crypto_shash_blocksize(ctx->hashalg);
755 	int authkeylen = keys->authkeylen;
756 	u8 *ipad = NULL, *opad = NULL;
757 	int icount = 0;
758 	int ret;
759 
760 	if (authkeylen > bs) {
761 		ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
762 					  authkeylen, ctx->key);
763 		if (ret)
764 			return ret;
765 		authkeylen = ds;
766 	} else
767 		memcpy(ctx->key, keys->authkey, authkeylen);
768 
769 	ctx->enc_key_len = keys->enckeylen;
770 	ctx->auth_key_len = authkeylen;
771 
772 	if (ctx->cipher_type == OTX_CPT_CIPHER_NULL)
773 		return keys->enckeylen ? -EINVAL : 0;
774 
775 	switch (keys->enckeylen) {
776 	case AES_KEYSIZE_128:
777 		ctx->key_type = OTX_CPT_AES_128_BIT;
778 		break;
779 	case AES_KEYSIZE_192:
780 		ctx->key_type = OTX_CPT_AES_192_BIT;
781 		break;
782 	case AES_KEYSIZE_256:
783 		ctx->key_type = OTX_CPT_AES_256_BIT;
784 		break;
785 	default:
786 		/* Invalid key length */
787 		return -EINVAL;
788 	}
789 
790 	memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
791 
792 	ipad = ctx->ipad;
793 	opad = ctx->opad;
794 
795 	memcpy(ipad, ctx->key, authkeylen);
796 	memset(ipad + authkeylen, 0, bs - authkeylen);
797 	memcpy(opad, ipad, bs);
798 
799 	for (icount = 0; icount < bs; icount++) {
800 		ipad[icount] ^= 0x36;
801 		opad[icount] ^= 0x5c;
802 	}
803 
804 	/*
805 	 * The partial hash state for ipad and opad is computed with
806 	 * the software algorithm and exported for use by the hardware
807 	 */
808 
809 	/* IPAD Calculation */
810 	crypto_shash_init(&ctx->sdesc->shash);
811 	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
812 	crypto_shash_export(&ctx->sdesc->shash, ipad);
813 	ret = swap_pad(ctx->mac_type, ipad);
814 	if (ret)
815 		goto calc_fail;
816 
817 	/* OPAD Calculation */
818 	crypto_shash_init(&ctx->sdesc->shash);
819 	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
820 	crypto_shash_export(&ctx->sdesc->shash, opad);
821 	ret = swap_pad(ctx->mac_type, opad);
822 
823 calc_fail:
824 	return ret;
825 }
826 
827 static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
828 					   const unsigned char *key,
829 					   unsigned int keylen)
830 {
831 	struct crypto_authenc_keys authenc_keys;
832 	int status;
833 
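	/* The authenc() key blob carries both keys; split it first */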
834 	status = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
835 	if (status)
836 		goto badkey;
837 
838 	status = aead_hmac_init(cipher, &authenc_keys);
839 
840 badkey:
841 	return status;
842 }
843 
844 static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
845 					    const unsigned char *key,
846 					    unsigned int keylen)
847 {
848 	return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
849 }
850 
851 static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
852 				       const unsigned char *key,
853 				       unsigned int keylen)
854 {
855 	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
856 
857 	/*
858 	 * For AES GCM we expect to get the encryption key (16, 24 or
859 	 * 32 bytes) followed by the salt (4 bytes)
860 	 */
861 	switch (keylen) {
862 	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
863 		ctx->key_type = OTX_CPT_AES_128_BIT;
864 		ctx->enc_key_len = AES_KEYSIZE_128;
865 		break;
866 	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
867 		ctx->key_type = OTX_CPT_AES_192_BIT;
868 		ctx->enc_key_len = AES_KEYSIZE_192;
869 		break;
870 	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
871 		ctx->key_type = OTX_CPT_AES_256_BIT;
872 		ctx->enc_key_len = AES_KEYSIZE_256;
873 		break;
874 	default:
875 		/* Invalid key and salt length */
876 		return -EINVAL;
877 	}
878 
879 	/* Store encryption key and salt */
880 	memcpy(ctx->key, key, keylen);
881 
882 	return 0;
883 }
884 
885 static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
886 				      u32 *argcnt)
887 {
888 	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
889 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
890 	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
891 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
892 	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
893 	int mac_len = crypto_aead_authsize(tfm);
894 	int ds;
895 
896 	rctx->ctrl_word.e.enc_data_offset = req->assoclen;
897 
898 	switch (ctx->cipher_type) {
899 	case OTX_CPT_AES_CBC:
900 		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
901 		/* Copy encryption key to context */
902 		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
903 		       ctx->enc_key_len);
904 		/* Copy IV to context */
905 		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
906 
907 		ds = crypto_shash_digestsize(ctx->hashalg);
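		/*
		 * SHA-384 exports a full sha512_state, so copy the
		 * SHA-512 sized partial state rather than the 48-byte
		 * digest size
		 */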
908 		if (ctx->mac_type == OTX_CPT_SHA384)
909 			ds = SHA512_DIGEST_SIZE;
910 		if (ctx->ipad)
911 			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
912 		if (ctx->opad)
913 			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
914 		break;
915 
916 	case OTX_CPT_AES_GCM:
917 		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
918 		/* Copy encryption key to context */
919 		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
920 		/* Copy salt to context */
921 		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
922 		       AES_GCM_SALT_SIZE);
923 
924 		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
925 		break;
926 
927 	default:
928 		/* Unknown cipher type */
929 		return -EINVAL;
930 	}
931 	rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);
932 
933 	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
934 	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
935 	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
936 				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
937 	if (enc) {
938 		req_info->req.opcode.s.minor = 2;
939 		req_info->req.param1 = req->cryptlen;
940 		req_info->req.param2 = req->cryptlen + req->assoclen;
941 	} else {
942 		req_info->req.opcode.s.minor = 3;
943 		req_info->req.param1 = req->cryptlen - mac_len;
944 		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
945 	}
946 
947 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
948 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
949 	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
950 	fctx->enc.enc_ctrl.e.mac_len = mac_len;
951 	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
952 
953 	/*
954 	 * Store packet data information in the offset control word,
955 	 * the first 8 bytes of the input
956 	 */
957 	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
958 	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
959 	req_info->req.dlen += CONTROL_WORD_LEN;
960 	++(*argcnt);
961 
962 	req_info->in[*argcnt].vptr = (u8 *)fctx;
963 	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
964 	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
965 	++(*argcnt);
966 
967 	return 0;
968 }
969 
970 static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
971 				      u32 enc)
972 {
973 	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
974 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
975 	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
976 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
977 
978 	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
979 	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
980 	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
981 				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
982 	req_info->is_trunc_hmac = ctx->is_trunc_hmac;
983 
984 	req_info->req.opcode.s.minor = 0;
985 	req_info->req.param1 = ctx->auth_key_len;
986 	req_info->req.param2 = ctx->mac_type << 8;
987 
988 	/* Add authentication key */
989 	req_info->in[*argcnt].vptr = ctx->key;
990 	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
991 	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
992 	++(*argcnt);
993 
994 	return 0;
995 }
996 
997 static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
998 {
999 	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1000 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1001 	u32 inputlen =  req->cryptlen + req->assoclen;
1002 	u32 status, argcnt = 0;
1003 
1004 	status = create_aead_ctx_hdr(req, enc, &argcnt);
1005 	if (status)
1006 		return status;
1007 	update_input_data(req_info, req->src, inputlen, &argcnt);
1008 	req_info->incnt = argcnt;
1009 
1010 	return 0;
1011 }
1012 
1013 static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
1014 					  u32 mac_len)
1015 {
1016 	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1017 	struct otx_cpt_req_info *req_info =  &rctx->cpt_req;
1018 	u32 argcnt = 0, outputlen = 0;
1019 
1020 	if (enc)
1021 		outputlen = req->cryptlen +  req->assoclen + mac_len;
1022 	else
1023 		outputlen = req->cryptlen + req->assoclen - mac_len;
1024 
1025 	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
1026 	req_info->outcnt = argcnt;
1027 
1028 	return 0;
1029 }
1030 
1031 static inline u32 create_aead_null_input_list(struct aead_request *req,
1032 					      u32 enc, u32 mac_len)
1033 {
1034 	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1035 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1036 	u32 inputlen, argcnt = 0;
1037 
1038 	if (enc)
1039 		inputlen =  req->cryptlen + req->assoclen;
1040 	else
1041 		inputlen =  req->cryptlen + req->assoclen - mac_len;
1042 
1043 	create_hmac_ctx_hdr(req, &argcnt, enc);
1044 	update_input_data(req_info, req->src, inputlen, &argcnt);
1045 	req_info->incnt = argcnt;
1046 
1047 	return 0;
1048 }
1049 
1050 static inline u32 create_aead_null_output_list(struct aead_request *req,
1051 					       u32 enc, u32 mac_len)
1052 {
1053 	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1054 	struct otx_cpt_req_info *req_info =  &rctx->cpt_req;
1055 	struct scatterlist *dst;
1056 	u8 *ptr = NULL;
1057 	int argcnt = 0, status, offset;
1058 	u32 inputlen;
1059 
1060 	if (enc)
1061 		inputlen =  req->cryptlen + req->assoclen;
1062 	else
1063 		inputlen =  req->cryptlen + req->assoclen - mac_len;
1064 
1065 	/*
1066 	 * If source and destination are different
1067 	 * then copy payload to destination
1068 	 */
1069 	if (req->src != req->dst) {
1070 
1071 		ptr = kmalloc(inputlen, (req_info->areq->flags &
1072 					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
1073 					 GFP_KERNEL : GFP_ATOMIC);
1074 		if (!ptr) {
1075 			status = -ENOMEM;
1076 			goto error;
1077 		}
1078 
1079 		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
1080 					   inputlen);
1081 		if (status != inputlen) {
1082 			status = -EINVAL;
1083 			goto error_free;
1084 		}
1085 		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
1086 					     inputlen);
1087 		if (status != inputlen) {
1088 			status = -EINVAL;
1089 			goto error_free;
1090 		}
1091 		kfree(ptr);
1092 	}
1093 
1094 	if (enc) {
1095 		/*
1096 		 * In an encryption scenario the HMAC needs to be
1097 		 * appended after the payload
1098 		 */
1099 		dst = req->dst;
1100 		offset = inputlen;
1101 		while (offset >= dst->length) {
1102 			offset -= dst->length;
1103 			dst = sg_next(dst);
1104 			if (!dst) {
1105 				status = -ENOENT;
1106 				goto error;
1107 			}
1108 		}
1109 
1110 		update_output_data(req_info, dst, offset, mac_len, &argcnt);
1111 	} else {
1112 		/*
1113 		 * In a decryption scenario the HMAC calculated over the
1114 		 * received payload needs to be compared with the received HMAC
1115 		 */
1116 		status = sg_copy_buffer(req->src, sg_nents(req->src),
1117 					rctx->fctx.hmac.s.hmac_recv, mac_len,
1118 					inputlen, true);
1119 		if (status != mac_len) {
1120 			status = -EINVAL;
1121 			goto error;
1122 		}
1123 
1124 		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
1125 		req_info->out[argcnt].size = mac_len;
1126 		argcnt++;
1127 	}
1128 
1129 	req_info->outcnt = argcnt;
1130 	return 0;
1131 
1132 error_free:
1133 	kfree(ptr);
1134 error:
1135 	return status;
1136 }
1137 
1138 static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
1139 {
1140 	struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
1141 	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1142 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1143 	struct pci_dev *pdev;
1144 	u32 status, cpu_num;
1145 
1146 	/* Clear control words */
1147 	rctx->ctrl_word.flags = 0;
1148 	rctx->fctx.enc.enc_ctrl.flags = 0;
1149 
1150 	req_info->callback = otx_cpt_aead_callback;
1151 	req_info->areq = &req->base;
1152 	req_info->req_type = reg_type;
1153 	req_info->is_enc = enc;
1154 	req_info->is_trunc_hmac = false;
1155 
1156 	switch (reg_type) {
1157 	case OTX_CPT_AEAD_ENC_DEC_REQ:
1158 		status = create_aead_input_list(req, enc);
1159 		if (status)
1160 			return status;
1161 		status = create_aead_output_list(req, enc,
1162 						 crypto_aead_authsize(tfm));
1163 		if (status)
1164 			return status;
1165 		break;
1166 
1167 	case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
1168 		status = create_aead_null_input_list(req, enc,
1169 						     crypto_aead_authsize(tfm));
1170 		if (status)
1171 			return status;
1172 		status = create_aead_null_output_list(req, enc,
1173 						crypto_aead_authsize(tfm));
1174 		if (status)
1175 			return status;
1176 		break;
1177 
1178 	default:
1179 		return -EINVAL;
1180 	}
1181 
1182 	/* Validate that request doesn't exceed maximum CPT supported size */
1183 	if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
1184 	    req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
1185 		return -E2BIG;
1186 
1187 	status = get_se_device(&pdev, &cpu_num);
1188 	if (status)
1189 		return status;
1190 
1191 	req_info->ctrl.s.grp = 0;
1192 
1193 	status = otx_cpt_do_request(pdev, req_info, cpu_num);
1194 	/*
1195 	 * We perform an asynchronous send; once the request
1196 	 * completes, the driver notifies the caller through the
1197 	 * registered callback functions
1198 	 */
1199 	return status;
1200 }
1201 
1202 static int otx_cpt_aead_encrypt(struct aead_request *req)
1203 {
1204 	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
1205 }
1206 
1207 static int otx_cpt_aead_decrypt(struct aead_request *req)
1208 {
1209 	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
1210 }
1211 
1212 static int otx_cpt_aead_null_encrypt(struct aead_request *req)
1213 {
1214 	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
1215 }
1216 
1217 static int otx_cpt_aead_null_decrypt(struct aead_request *req)
1218 {
1219 	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
1220 }
1221 
1222 static struct skcipher_alg otx_cpt_skciphers[] = { {
1223 	.base.cra_name = "xts(aes)",
1224 	.base.cra_driver_name = "cpt_xts_aes",
1225 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1226 	.base.cra_blocksize = AES_BLOCK_SIZE,
1227 	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
1228 	.base.cra_alignmask = 7,
1229 	.base.cra_priority = 4001,
1230 	.base.cra_module = THIS_MODULE,
1231 
1232 	.init = otx_cpt_enc_dec_init,
1233 	.ivsize = AES_BLOCK_SIZE,
1234 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
1235 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
1236 	.setkey = otx_cpt_skcipher_xts_setkey,
1237 	.encrypt = otx_cpt_skcipher_encrypt,
1238 	.decrypt = otx_cpt_skcipher_decrypt,
1239 }, {
1240 	.base.cra_name = "cbc(aes)",
1241 	.base.cra_driver_name = "cpt_cbc_aes",
1242 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1243 	.base.cra_blocksize = AES_BLOCK_SIZE,
1244 	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
1245 	.base.cra_alignmask = 7,
1246 	.base.cra_priority = 4001,
1247 	.base.cra_module = THIS_MODULE,
1248 
1249 	.init = otx_cpt_enc_dec_init,
1250 	.ivsize = AES_BLOCK_SIZE,
1251 	.min_keysize = AES_MIN_KEY_SIZE,
1252 	.max_keysize = AES_MAX_KEY_SIZE,
1253 	.setkey = otx_cpt_skcipher_cbc_aes_setkey,
1254 	.encrypt = otx_cpt_skcipher_encrypt,
1255 	.decrypt = otx_cpt_skcipher_decrypt,
1256 }, {
1257 	.base.cra_name = "ecb(aes)",
1258 	.base.cra_driver_name = "cpt_ecb_aes",
1259 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1260 	.base.cra_blocksize = AES_BLOCK_SIZE,
1261 	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
1262 	.base.cra_alignmask = 7,
1263 	.base.cra_priority = 4001,
1264 	.base.cra_module = THIS_MODULE,
1265 
1266 	.init = otx_cpt_enc_dec_init,
1267 	.ivsize = 0,
1268 	.min_keysize = AES_MIN_KEY_SIZE,
1269 	.max_keysize = AES_MAX_KEY_SIZE,
1270 	.setkey = otx_cpt_skcipher_ecb_aes_setkey,
1271 	.encrypt = otx_cpt_skcipher_encrypt,
1272 	.decrypt = otx_cpt_skcipher_decrypt,
1273 }, {
1274 	.base.cra_name = "cbc(des3_ede)",
1275 	.base.cra_driver_name = "cpt_cbc_des3_ede",
1276 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1277 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1278 	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
1279 	.base.cra_alignmask = 7,
1280 	.base.cra_priority = 4001,
1281 	.base.cra_module = THIS_MODULE,
1282 
1283 	.init = otx_cpt_enc_dec_init,
1284 	.min_keysize = DES3_EDE_KEY_SIZE,
1285 	.max_keysize = DES3_EDE_KEY_SIZE,
1286 	.ivsize = DES_BLOCK_SIZE,
1287 	.setkey = otx_cpt_skcipher_cbc_des3_setkey,
1288 	.encrypt = otx_cpt_skcipher_encrypt,
1289 	.decrypt = otx_cpt_skcipher_decrypt,
1290 }, {
1291 	.base.cra_name = "ecb(des3_ede)",
1292 	.base.cra_driver_name = "cpt_ecb_des3_ede",
1293 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1294 	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1295 	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
1296 	.base.cra_alignmask = 7,
1297 	.base.cra_priority = 4001,
1298 	.base.cra_module = THIS_MODULE,
1299 
1300 	.init = otx_cpt_enc_dec_init,
1301 	.min_keysize = DES3_EDE_KEY_SIZE,
1302 	.max_keysize = DES3_EDE_KEY_SIZE,
1303 	.ivsize = 0,
1304 	.setkey = otx_cpt_skcipher_ecb_des3_setkey,
1305 	.encrypt = otx_cpt_skcipher_encrypt,
1306 	.decrypt = otx_cpt_skcipher_decrypt,
1307 } };
1308 
1309 static struct aead_alg otx_cpt_aeads[] = { {
1310 	.base = {
1311 		.cra_name = "authenc(hmac(sha1),cbc(aes))",
1312 		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
1313 		.cra_blocksize = AES_BLOCK_SIZE,
1314 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1315 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1316 		.cra_priority = 4001,
1317 		.cra_alignmask = 0,
1318 		.cra_module = THIS_MODULE,
1319 	},
1320 	.init = otx_cpt_aead_cbc_aes_sha1_init,
1321 	.exit = otx_cpt_aead_exit,
1322 	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
1323 	.setauthsize = otx_cpt_aead_set_authsize,
1324 	.encrypt = otx_cpt_aead_encrypt,
1325 	.decrypt = otx_cpt_aead_decrypt,
1326 	.ivsize = AES_BLOCK_SIZE,
1327 	.maxauthsize = SHA1_DIGEST_SIZE,
1328 }, {
1329 	.base = {
1330 		.cra_name = "authenc(hmac(sha256),cbc(aes))",
1331 		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
1332 		.cra_blocksize = AES_BLOCK_SIZE,
1333 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1334 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1335 		.cra_priority = 4001,
1336 		.cra_alignmask = 0,
1337 		.cra_module = THIS_MODULE,
1338 	},
1339 	.init = otx_cpt_aead_cbc_aes_sha256_init,
1340 	.exit = otx_cpt_aead_exit,
1341 	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
1342 	.setauthsize = otx_cpt_aead_set_authsize,
1343 	.encrypt = otx_cpt_aead_encrypt,
1344 	.decrypt = otx_cpt_aead_decrypt,
1345 	.ivsize = AES_BLOCK_SIZE,
1346 	.maxauthsize = SHA256_DIGEST_SIZE,
1347 }, {
1348 	.base = {
1349 		.cra_name = "authenc(hmac(sha384),cbc(aes))",
1350 		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
1351 		.cra_blocksize = AES_BLOCK_SIZE,
1352 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1353 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1354 		.cra_priority = 4001,
1355 		.cra_alignmask = 0,
1356 		.cra_module = THIS_MODULE,
1357 	},
1358 	.init = otx_cpt_aead_cbc_aes_sha384_init,
1359 	.exit = otx_cpt_aead_exit,
1360 	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
1361 	.setauthsize = otx_cpt_aead_set_authsize,
1362 	.encrypt = otx_cpt_aead_encrypt,
1363 	.decrypt = otx_cpt_aead_decrypt,
1364 	.ivsize = AES_BLOCK_SIZE,
1365 	.maxauthsize = SHA384_DIGEST_SIZE,
1366 }, {
1367 	.base = {
1368 		.cra_name = "authenc(hmac(sha512),cbc(aes))",
1369 		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
1370 		.cra_blocksize = AES_BLOCK_SIZE,
1371 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1372 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1373 		.cra_priority = 4001,
1374 		.cra_alignmask = 0,
1375 		.cra_module = THIS_MODULE,
1376 	},
1377 	.init = otx_cpt_aead_cbc_aes_sha512_init,
1378 	.exit = otx_cpt_aead_exit,
1379 	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
1380 	.setauthsize = otx_cpt_aead_set_authsize,
1381 	.encrypt = otx_cpt_aead_encrypt,
1382 	.decrypt = otx_cpt_aead_decrypt,
1383 	.ivsize = AES_BLOCK_SIZE,
1384 	.maxauthsize = SHA512_DIGEST_SIZE,
1385 }, {
1386 	.base = {
1387 		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
1388 		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
1389 		.cra_blocksize = 1,
1390 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1391 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1392 		.cra_priority = 4001,
1393 		.cra_alignmask = 0,
1394 		.cra_module = THIS_MODULE,
1395 	},
1396 	.init = otx_cpt_aead_ecb_null_sha1_init,
1397 	.exit = otx_cpt_aead_exit,
1398 	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
1399 	.setauthsize = otx_cpt_aead_set_authsize,
1400 	.encrypt = otx_cpt_aead_null_encrypt,
1401 	.decrypt = otx_cpt_aead_null_decrypt,
1402 	.ivsize = 0,
1403 	.maxauthsize = SHA1_DIGEST_SIZE,
1404 }, {
1405 	.base = {
1406 		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
1407 		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
1408 		.cra_blocksize = 1,
1409 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1410 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1411 		.cra_priority = 4001,
1412 		.cra_alignmask = 0,
1413 		.cra_module = THIS_MODULE,
1414 	},
1415 	.init = otx_cpt_aead_ecb_null_sha256_init,
1416 	.exit = otx_cpt_aead_exit,
1417 	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
1418 	.setauthsize = otx_cpt_aead_set_authsize,
1419 	.encrypt = otx_cpt_aead_null_encrypt,
1420 	.decrypt = otx_cpt_aead_null_decrypt,
1421 	.ivsize = 0,
1422 	.maxauthsize = SHA256_DIGEST_SIZE,
1423 }, {
1424 	.base = {
1425 		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
1426 		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
1427 		.cra_blocksize = 1,
1428 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1429 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1430 		.cra_priority = 4001,
1431 		.cra_alignmask = 0,
1432 		.cra_module = THIS_MODULE,
1433 	},
1434 	.init = otx_cpt_aead_ecb_null_sha384_init,
1435 	.exit = otx_cpt_aead_exit,
1436 	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
1437 	.setauthsize = otx_cpt_aead_set_authsize,
1438 	.encrypt = otx_cpt_aead_null_encrypt,
1439 	.decrypt = otx_cpt_aead_null_decrypt,
1440 	.ivsize = 0,
1441 	.maxauthsize = SHA384_DIGEST_SIZE,
1442 }, {
1443 	.base = {
1444 		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
1445 		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
1446 		.cra_blocksize = 1,
1447 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1448 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1449 		.cra_priority = 4001,
1450 		.cra_alignmask = 0,
1451 		.cra_module = THIS_MODULE,
1452 	},
1453 	.init = otx_cpt_aead_ecb_null_sha512_init,
1454 	.exit = otx_cpt_aead_exit,
1455 	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
1456 	.setauthsize = otx_cpt_aead_set_authsize,
1457 	.encrypt = otx_cpt_aead_null_encrypt,
1458 	.decrypt = otx_cpt_aead_null_decrypt,
1459 	.ivsize = 0,
1460 	.maxauthsize = SHA512_DIGEST_SIZE,
1461 }, {
1462 	.base = {
1463 		.cra_name = "rfc4106(gcm(aes))",
1464 		.cra_driver_name = "cpt_rfc4106_gcm_aes",
1465 		.cra_blocksize = 1,
1466 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1467 		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
1468 		.cra_priority = 4001,
1469 		.cra_alignmask = 0,
1470 		.cra_module = THIS_MODULE,
1471 	},
1472 	.init = otx_cpt_aead_gcm_aes_init,
1473 	.exit = otx_cpt_aead_exit,
1474 	.setkey = otx_cpt_aead_gcm_aes_setkey,
1475 	.setauthsize = otx_cpt_aead_set_authsize,
1476 	.encrypt = otx_cpt_aead_encrypt,
1477 	.decrypt = otx_cpt_aead_decrypt,
1478 	.ivsize = AES_GCM_IV_SIZE,
1479 	.maxauthsize = AES_GCM_ICV_SIZE,
1480 } };
1481 
1482 static inline int is_any_alg_used(void)
1483 {
1484 	int i;
1485 
1486 	for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
1487 		if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
1488 			return true;
1489 	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
1490 		if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
1491 			return true;
1492 	return false;
1493 }
1494 
1495 static inline int cpt_register_algs(void)
1496 {
1497 	int i, err = 0;
1498 
1499 	if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
1500 		for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
1501 			otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1502 
1503 		err = crypto_register_skciphers(otx_cpt_skciphers,
1504 						ARRAY_SIZE(otx_cpt_skciphers));
1505 		if (err)
1506 			return err;
1507 	}
1508 
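	/*
	 * These alg structs are static; a previous unregister may have
	 * left CRYPTO_ALG_DEAD set, so clear it before registering again
	 */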
1509 	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
1510 		otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1511 
1512 	err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
1513 	if (err) {
1514 		crypto_unregister_skciphers(otx_cpt_skciphers,
1515 					    ARRAY_SIZE(otx_cpt_skciphers));
1516 		return err;
1517 	}
1518 
1519 	return 0;
1520 }
1521 
1522 static inline void cpt_unregister_algs(void)
1523 {
1524 	crypto_unregister_skciphers(otx_cpt_skciphers,
1525 				    ARRAY_SIZE(otx_cpt_skciphers));
1526 	crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
1527 }
1528 
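/*
 * Sort device table entries by PCI devfn so that the CPU-to-queue
 * mapping in get_se_device() is deterministic regardless of probe order
 */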
1529 static int compare_func(const void *lptr, const void *rptr)
1530 {
1531 	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
1532 	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
1533 
1534 	if (ldesc->dev->devfn < rdesc->dev->devfn)
1535 		return -1;
1536 	if (ldesc->dev->devfn > rdesc->dev->devfn)
1537 		return 1;
1538 	return 0;
1539 }
1540 
1541 int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
1542 			enum otx_cptpf_type pf_type,
1543 			enum otx_cptvf_type engine_type,
1544 			int num_queues, int num_devices)
1545 {
1546 	int ret = 0;
1547 	int count;
1548 
1549 	mutex_lock(&mutex);
1550 	switch (engine_type) {
1551 	case OTX_CPT_SE_TYPES:
1552 		count = atomic_read(&se_devices.count);
1553 		if (count >= CPT_MAX_VF_NUM) {
1554 			dev_err(&pdev->dev, "No space to add a new device\n");
1555 			ret = -ENOSPC;
1556 			goto err;
1557 		}
1558 		se_devices.desc[count].pf_type = pf_type;
1559 		se_devices.desc[count].num_queues = num_queues;
1560 		se_devices.desc[count++].dev = pdev;
1561 		atomic_inc(&se_devices.count);
1562 
1563 		if (atomic_read(&se_devices.count) == num_devices &&
1564 		    !is_crypto_registered) {
1565 			if (cpt_register_algs()) {
1566 				dev_err(&pdev->dev,
1567 				   "Error in registering crypto algorithms\n");
1568 				ret =  -EINVAL;
1569 				goto err;
1570 			}
1571 			try_module_get(mod);
1572 			is_crypto_registered = true;
1573 		}
1574 		sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
1575 		     compare_func, NULL);
1576 		break;
1577 
1578 	case OTX_CPT_AE_TYPES:
1579 		count = atomic_read(&ae_devices.count);
1580 		if (count >= CPT_MAX_VF_NUM) {
1581 			dev_err(&pdev->dev, "No space to add a new device\n");
1582 			ret = -ENOSPC;
1583 			goto err;
1584 		}
1585 		ae_devices.desc[count].pf_type = pf_type;
1586 		ae_devices.desc[count].num_queues = num_queues;
1587 		ae_devices.desc[count++].dev = pdev;
1588 		atomic_inc(&ae_devices.count);
1589 		sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
1590 		     compare_func, NULL);
1591 		break;
1592 
1593 	default:
1594 		dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
1595 		ret = BAD_OTX_CPTVF_TYPE;
1596 	}
1597 err:
1598 	mutex_unlock(&mutex);
1599 	return ret;
1600 }
1601 
1602 void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
1603 			 enum otx_cptvf_type engine_type)
1604 {
1605 	struct cpt_device_table *dev_tbl;
1606 	bool dev_found = false;
1607 	int i, j, count;
1608 
1609 	mutex_lock(&mutex);
1610 
1611 	dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
1612 	count = atomic_read(&dev_tbl->count);
1613 	for (i = 0; i < count; i++)
1614 		if (pdev == dev_tbl->desc[i].dev) {
1615 			for (j = i; j < count-1; j++)
1616 				dev_tbl->desc[j] = dev_tbl->desc[j+1];
1617 			dev_found = true;
1618 			break;
1619 		}
1620 
1621 	if (!dev_found) {
1622 		dev_err(&pdev->dev, "%s device not found\n", __func__);
1623 		goto exit;
1624 	}
1625 
1626 	if (engine_type != OTX_CPT_AE_TYPES) {
1627 		if (atomic_dec_and_test(&se_devices.count) &&
1628 		    !is_any_alg_used()) {
1629 			cpt_unregister_algs();
1630 			module_put(mod);
1631 			is_crypto_registered = false;
1632 		}
1633 	} else
1634 		atomic_dec(&ae_devices.count);
1635 exit:
1636 	mutex_unlock(&mutex);
1637 }
1638